Loosely based on the code by Justin, here is some other code that probably does what you want:
import wave, audioop
def merge_wav_at_offset(wav_in1, wav_in2, offset, wav_out):
    """Merge two wave files, with the second wave starting at *offset* seconds.

    The two input wave files must have the same frame rate, channel count and
    sample depth.  *offset* should be non-negative and may be floating point
    (negative values are clamped to 0).

    Parameters:
        wav_in1: filename or file-like object of the base wave file.
        wav_in2: filename or file-like object of the wave to mix in.
        offset:  seconds into wav_in1 at which wav_in2 starts.
        wav_out: filename or file-like object for the merged result.

    Raises:
        NotImplementedError: if wav_in2 extends past the end of wav_in1
            (offset + duration(wav_in2) > duration(wav_in1)).
    """
    wf1 = wave.open(wav_in1, 'rb')
    wf2 = wave.open(wav_in2, 'rb')
    wfo = wave.open(wav_out, 'wb')
    try:
        # BUG FIX: original wrote `wfout.setparams(...)` — the variable is `wfo`.
        wfo.setparams(wf1.getparams())
        frame_rate = wf1.getframerate()
        sample_width = wf1.getsampwidth()
        if offset < 0:
            offset = 0
        prologue_frames = int(frame_rate * offset)
        merge_frames = wf2.getnframes()

        # Prologue: copy wav_in1 unchanged up to the offset point,
        # one second (frame_rate frames) at a time to bound memory use.
        frames_to_read = prologue_frames
        while frames_to_read > 0:
            chunk_size = min(frame_rate, frames_to_read)
            wfo.writeframes(wf1.readframes(chunk_size))
            frames_to_read -= chunk_size

        # Merge: mix wav_in2 sample-by-sample into wav_in1.
        frames_to_read = merge_frames
        while frames_to_read > 0:
            chunk_size = min(frame_rate, frames_to_read)
            frames2 = wf2.readframes(chunk_size)
            if not frames2:
                # Early end of wf2 data; improbable but possible.
                break
            frames1 = wf1.readframes(chunk_size)
            if len(frames1) != len(frames2):  # sanity check
                # Obviously you should cater for this case too.
                # (Original used Python 2 `raise E, "msg"` syntax.)
                raise NotImplementedError("offset+duration(wf2) > duration(wf1)")
            merged_frames = audioop.add(frames1, frames2, sample_width)
            wfo.writeframes(merged_frames)
            frames_to_read -= chunk_size

        # Epilogue: copy the remainder of wav_in1 unchanged.
        while True:
            frames = wf1.readframes(frame_rate)
            if not frames:
                break
            wfo.writeframes(frames)
    finally:
        # Close all handles even if merging failed part-way through.
        for wave_file in (wf1, wf2, wfo):
            wave_file.close()
I wrote this code without testing it, so it may contain bugs (even syntax errors); however, in my experience Python code often runs as-is. ;-)
If you need anything more, let me know.