How to cut an audio recording in JavaScript

Say I record audio from my microphone using Recorder.js. How can I then trim/crop/slice/cut the recording? For example, if I record 3.5 seconds, I want to keep only the first 3 seconds. Any ideas?

You can stop the original recording at 3 seconds (a minimal sketch of that option follows the snippet below), or use an <audio> element together with HTMLMediaElement.captureStream() and MediaRecorder to record 3 seconds of playback of the original recording:
const audio = new Audio(/* Blob URL or URL of recording */);
const chunks = [];
audio.oncanplay = e => {
  audio.play();
  const stream = audio.captureStream();
  const recorder = new MediaRecorder(stream);
  recorder.ondataavailable = e => {
    // the first dataavailable event fires after the 3000 ms timeslice
    if (recorder.state === "recording") {
      recorder.stop();
    }
    chunks.push(e.data);
  };
  recorder.onstop = e => {
    console.log(chunks);
  };
  recorder.start(3000); // request data in 3000 ms timeslices
};
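
For the first option, here is a minimal sketch, assuming the usual Recorder.js setup from getUserMedia() (the same record()/stop()/exportWAV() API used in the recorder.js question further below); it simply schedules the stop 3 seconds after recording starts:

// Option 1 sketch: stop the live Recorder.js recording after 3 seconds.
navigator.mediaDevices.getUserMedia({ audio: true }).then(stream => {
  const audioContext = new AudioContext();
  const input = audioContext.createMediaStreamSource(stream);
  const rec = new Recorder(input, { numChannels: 1 });
  rec.record();
  setTimeout(() => {
    rec.stop();                                // nothing past 3 s is recorded
    rec.exportWAV(blob => console.log(blob));  // 3-second wav blob
    stream.getTracks().forEach(t => t.stop()); // release the microphone
  }, 3000);
});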

Related

Procedural Audio using MediaStreamTrack

I want to encode a video (from a canvas) and add procedural audio to it.
The encoding can be accomplished with MediaRecorder that takes a MediaStream.
For the stream, I want to obtain the video part from a canvas, using the canvas.captureStream() call.
I want to add an audio track to the stream. But instead of microphone input, I want to generate the samples on the fly; for simplicity's sake, let's assume it writes out a sine wave.
How can I create a MediaStreamTrack that generates procedural audio?
The Web Audio API's AudioContext has a createMediaStreamDestination() method, which returns a MediaStreamAudioDestinationNode object. You can connect your audio graph to this node, and it gives you access to a MediaStream instance fed by the audio context's output.
document.querySelector("button").onclick = (evt) => {
  const duration = 5;
  evt.target.remove();
  const audioContext = new AudioContext();
  const osc = audioContext.createOscillator();
  const destNode = audioContext.createMediaStreamDestination();
  const { stream } = destNode;
  osc.connect(destNode);
  osc.connect(audioContext.destination);
  osc.start(0);
  osc.frequency.value = 80;
  osc.frequency.exponentialRampToValueAtTime(440, audioContext.currentTime + 10);
  osc.stop(duration);
  // stream.addTrack(canvasStream.getVideoTracks()[0]);
  const recorder = new MediaRecorder(stream);
  const chunks = [];
  recorder.ondataavailable = ({ data }) => chunks.push(data);
  recorder.onstop = (evt) => {
    const el = new Audio();
    const [{ type }] = chunks; // for Safari
    el.src = URL.createObjectURL(new Blob(chunks, { type }));
    el.controls = true;
    document.body.append(el);
  };
  recorder.start();
  setTimeout(() => recorder.stop(), duration * 1000);
  console.log(`Started recording, please wait ${duration}s`);
};

<button>begin</button>
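
To also get the video part from the canvas, the commented-out stream.addTrack(...) line hints at the approach: take the video track from canvas.captureStream() and record a stream carrying both tracks. A minimal sketch, assuming a <canvas id="canvas"> element that something else is already drawing to:

// Sketch: combine a canvas video track with the procedural audio track.
// Assumes a <canvas id="canvas"> that is being drawn to elsewhere.
const canvas = document.getElementById("canvas");
const canvasStream = canvas.captureStream(30); // capture at 30 fps
const audioContext = new AudioContext();
const osc = audioContext.createOscillator();
const destNode = audioContext.createMediaStreamDestination();
osc.connect(destNode);
osc.start();
// one MediaStream carrying both the canvas video and the generated audio
const mixedStream = new MediaStream([
  ...canvasStream.getVideoTracks(),
  ...destNode.stream.getAudioTracks(),
]);
const recorder = new MediaRecorder(mixedStream); // then record as above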

How to detect the start of speech on Google Colab

I am trying to detect the start of speech on Google Colab.
I implemented a detector that obtains a 1-second recorded stream using the JavaScript MediaRecorder. Thresholding of the amplitude works, and when speech is detected I append a 3-second recording.
But there is a gap in the stream between the first 1-second stream and the following 3-second stream.
I think there might be some delay from evaluating the JS.
Could you suggest a better approach for this?
I also tried this purely in JavaScript before, but I couldn't find a way to get the magnitude of the waveform from the Blob event.
from IPython.display import Javascript, display
import IPython.display as ipd
from io import BytesIO
from base64 import b64decode
from pydub import AudioSegment  # AudioSegment used in record() below
from google.colab import output
import array

RECORD = """
const sleep = time => new Promise(resolve => setTimeout(resolve, time))
const b2text = blob => new Promise(resolve => {
  const reader = new FileReader()
  reader.onloadend = e => resolve(e.srcElement.result)
  reader.readAsDataURL(blob)
})
var record = time => new Promise(async resolve => {
  stream = await navigator.mediaDevices.getUserMedia({ audio: true })
  recorder = new MediaRecorder(stream)
  chunks = []
  recorder.ondataavailable = e => {
    chunks.push(e.data)
  }
  recorder.start()
  await sleep(time)
  recorder.onstop = async () => {
    blob = new Blob(chunks)
    text = await b2text(blob)
    resolve(text)
  }
  recorder.stop()
})
"""

def record(sec=3):
    display(Javascript(RECORD))
    s = output.eval_js('record(%d)' % (sec * 1000))
    b = b64decode(s.split(',')[1])
    sound = AudioSegment.from_file(BytesIO(b))
    # ipd.display(ipd.Audio(data=waveform, rate=sample_rate))
    return sound

def AStoWave(sound):
    waveform = sound.get_array_of_samples()
    sample_rate = sound.frame_rate
    return waveform, sample_rate

def detectSpeech(sec=3):
    is_started = False
    SILENCE_THRESHOLD = 80
    while True:
        sound = record(1)
        vol = sound.max / 1e6
        # print(vol)
        if not is_started:
            if vol >= SILENCE_THRESHOLD:
                print('start of speech detected')
                is_started = True
        if is_started:
            return sound + record(sec)

waveform, rate = AStoWave(detectSpeech())
ipd.display(ipd.Audio(data=waveform, rate=rate))

WebRTC transmit high audio stream sample rate

Given a WebRTC PeerConnection between two clients, one client is trying to send an audio MediaStream to another.
If this MediaStream is an oscillator at 440 Hz, everything works fine. The audio is very crisp, and the transmission goes through correctly.
However, if the audio is at 20000 Hz, it is very noisy and crackly; I expect to hear nothing, but instead I hear a lot of noise.
I believe this might be a problem with the sample rate used in the connection; maybe it's not sending the audio at 48,000 samples/second as I expect.
Is there a way for me to increase the sample rate?
Here is a fiddle to reproduce the issue:
https://jsfiddle.net/mb3c5gw1/9/
Minimal reproduction code including a visualizer:
<button id="btn">start</button>
<canvas id="canvas"></canvas>
<script>class OscilloMeter{constructor(a){this.ctx=a.getContext("2d")}listen(a,b){function c(){g.getByteTimeDomainData(j),d.clearRect(0,0,e,f),d.beginPath();let a=0;for(let c=0;c<h;c++){const e=j[c]/128;var b=e*f/2;d.lineTo(a,b),a+=k}d.lineTo(canvas.width,canvas.height/2),d.stroke(),requestAnimationFrame(c)}const d=this.ctx,e=d.canvas.width,f=d.canvas.height,g=b.createAnalyser(),h=g.fftSize=256,j=new Uint8Array(h),k=e/h;d.lineWidth=2,a.connect(g),c()}}</script>
btn.onclick = e => {
  const ctx = new AudioContext();
  const source = ctx.createMediaStreamDestination();
  const oscillator = ctx.createOscillator();
  oscillator.type = 'sine';
  oscillator.frequency.setValueAtTime(20000, ctx.currentTime); // value in hertz
  oscillator.connect(source);
  oscillator.start();
  // a visual cue of AudioNode out (uses an AnalyserNode)
  const meter = new OscilloMeter(canvas);
  const pc1 = new RTCPeerConnection(),
        pc2 = new RTCPeerConnection();
  pc2.ontrack = ({ track }) => {
    const endStream = new MediaStream([track]);
    const src = ctx.createMediaStreamSource(endStream);
    const audio = new Audio();
    audio.srcObject = endStream;
    meter.listen(src, ctx);
    audio.play();
  };
  pc1.onicecandidate = e => pc2.addIceCandidate(e.candidate);
  pc2.onicecandidate = e => pc1.addIceCandidate(e.candidate);
  pc1.oniceconnectionstatechange = e => console.log(pc1.iceConnectionState);
  pc1.onnegotiationneeded = async e => {
    try {
      await pc1.setLocalDescription(await pc1.createOffer());
      await pc2.setRemoteDescription(pc1.localDescription);
      await pc2.setLocalDescription(await pc2.createAnswer());
      await pc1.setRemoteDescription(pc2.localDescription);
    } catch (e) {
      console.error(e);
    }
  };
  const stream = source.stream;
  pc1.addTrack(stream.getAudioTracks()[0], stream);
};
Looking around in the WebRTC demos I found this example, which shows a dropdown where you can choose the audio codec: https://webrtc.github.io/samples/src/content/peerconnection/audio/
I think this is your solution.
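
If you want to select the codec programmatically rather than through that demo's dropdown (the linked sample does it by rewriting the SDP), a newer alternative, where the browser supports it, is RTCRtpTransceiver.setCodecPreferences(). The sketch below reorders the audio codecs so Opus is preferred; it only illustrates the mechanism and is not a verified fix for the 20 kHz noise:

// Sketch: prefer Opus for the audio sender on pc1.
// setCodecPreferences() is not available in every browser, so feature-detect it,
// and call it before the offer is created.
const audioTransceiver = pc1.getTransceivers()
  .find(t => t.sender && t.sender.track && t.sender.track.kind === 'audio');
if (audioTransceiver && 'setCodecPreferences' in audioTransceiver) {
  const { codecs } = RTCRtpSender.getCapabilities('audio');
  const opusFirst = [
    ...codecs.filter(c => c.mimeType.toLowerCase() === 'audio/opus'),
    ...codecs.filter(c => c.mimeType.toLowerCase() !== 'audio/opus'),
  ];
  audioTransceiver.setCodecPreferences(opusFirst);
}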

JavaScript: How to split an audio blob into 1-second chunks and export to wav files using recorder.js?

I want to record voice, split the recorded voice (or the audio blob) automatically into 1-second chunks, export each chunk to a wav file, and send it to the back end. This should happen asynchronously while the user speaks.
I currently use the following recorder.js library to do the above tasks:
https://cdn.rawgit.com/mattdiamond/Recorderjs/08e7abd9/dist/recorder.js
My problem is that, over time, the blob/wav file becomes bigger. I think this is because the data gets accumulated, making each chunk bigger. So over time I am not actually sending sequential 1-second chunks but accumulated chunks.
I can't figure out where in my code this issue is caused. Maybe it happens inside the recorder.js library. If someone has used recorder.js or any other JavaScript method for a similar task, I would appreciate it if you could go through this code and let me know where it breaks.
This is my JS code:
var gumStream;           // Stream from getUserMedia()
var rec;                 // Recorder.js object
var input;               // MediaStreamAudioSourceNode we'll be recording
var recordingNotStopped; // User pressed the record button and keeps talking, stop button not pressed yet

const trackLengthInMS = 1000; // Length of each audio chunk in milliseconds
const maxNumOfSecs = 1000;    // Maximum number of 1-second chunks we support per recording

// Shim for AudioContext when it's not available.
var AudioContext = window.AudioContext || window.webkitAudioContext;
var audioContext; // audio context to help us record

var recordButton = document.getElementById("recordButton");
var stopButton = document.getElementById("stopButton");

// Event handlers for the above 2 buttons
recordButton.addEventListener("click", startRecording);
stopButton.addEventListener("click", stopRecording);

// Asynchronous function to stop the recording each second and export the blob to a wav file
const sleep = time => new Promise(resolve => setTimeout(resolve, time));
const asyncFn = async () => {
  for (let i = 0; i < maxNumOfSecs; i++) {
    if (recordingNotStopped) {
      rec.record();
      await sleep(trackLengthInMS);
      rec.stop();
      // stop microphone access
      gumStream.getAudioTracks()[0].stop();
      // Create the wav blob and pass it on to createWaveBlob
      rec.exportWAV(createWaveBlob);
    }
  }
};

function startRecording() {
  console.log("recordButton clicked");
  recordingNotStopped = true;
  var constraints = {
    audio: true,
    video: false
  };
  recordButton.disabled = true;
  stopButton.disabled = false;

  // Using the standard promise-based getUserMedia()
  navigator.mediaDevices.getUserMedia(constraints).then(function(stream) {
    // Create an audio context after getUserMedia is called
    audioContext = new AudioContext();
    // Assign to gumStream for later use
    gumStream = stream;
    // Use the stream
    input = audioContext.createMediaStreamSource(stream);
    // Create the Recorder object and configure it to record mono sound (1 channel)
    rec = new Recorder(input, {
      numChannels: 1
    });
    // Call the asynchronous function to split and export audio
    asyncFn();
    console.log("Recording started");
  }).catch(function(err) {
    // Enable the record button if getUserMedia() fails
    recordButton.disabled = false;
    stopButton.disabled = true;
  });
}

function stopRecording() {
  console.log("stopButton clicked");
  recordingNotStopped = false;
  // Disable the stop button and enable the record button to allow for new recordings
  stopButton.disabled = true;
  recordButton.disabled = false;
  // Set the recorder to stop the recording
  rec.stop();
  // stop microphone access
  gumStream.getAudioTracks()[0].stop();
}

function createWaveBlob(blob) {
  var url = URL.createObjectURL(blob);
  // Convert the blob to a wav file and call the sendBlob function to send the wav file to the server
  var convertedfile = new File([blob], 'filename.wav');
  sendBlob(convertedfile);
}
Recorder.js keeps a record buffer of the audio that it records. When exportWAV is called, the record buffer is encoded but not cleared. You'd need to call clear on the recorder before calling record again so that the previous chunk of audio is cleared from the record buffer.
This is how it was fixed in the above code.
// Extend the Recorder class with a step() method that clears the record buffer
Recorder.prototype.step = function () {
  this.clear();
};

// After calling exportWAV(), call the step() method
rec.exportWAV(createWaveBlob);
rec.step();
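
Equivalently, assuming the stock Recorder.js API (which already exposes a clear() method), you can skip the wrapper and clear the buffer directly after exporting:

rec.exportWAV(createWaveBlob);
rec.clear(); // drop the already-exported audio from the record buffer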

Obtain MediaStream from input device

Looking for experience working with media devices:
I'm working on recording to a cache and playing back from a microphone source, in Firefox & Chrome using HTML5.
This is what I have so far:
var constraints = {audio: true, video: false};
var promise = navigator.mediaDevices.getUserMedia(constraints);
I've been checking the official MDN documentation on getUserMedia, but found nothing related to storing the audio from the constraints to a cache.
No such question has been asked previously on Stack Overflow; I'm wondering if it's possible.
Thank you.
You can simply use the MediaRecorder API for this task.
In order to record only the audio from your video+audio gUM stream, you will need to create a new MediaStream from the gUM stream's audio track:
// using async for brevity
async function doit() {
  // first request both mic and camera
  const gUMStream = await navigator.mediaDevices.getUserMedia({ video: true, audio: true });
  // create a new MediaStream with only the audioTrack
  const audioStream = new MediaStream(gUMStream.getAudioTracks());
  // to save recorded data
  const chunks = [];
  const recorder = new MediaRecorder(audioStream);
  recorder.ondataavailable = e => chunks.push(e.data);
  recorder.start();
  // when user decides to stop
  stop_btn.onclick = e => {
    recorder.stop();
    // kill all tracks to free the devices
    gUMStream.getTracks().forEach(t => t.stop());
    audioStream.getTracks().forEach(t => t.stop());
  };
  // export all the saved data as one Blob
  recorder.onstop = e => exportMedia(new Blob(chunks));
  // play current gUM stream
  vid.srcObject = gUMStream;
  stop_btn.disabled = false;
}

function exportMedia(blob) {
  // here blob is your recorded audio file, you can do whatever you want with it
  const aud = new Audio(URL.createObjectURL(blob));
  aud.controls = true;
  document.body.appendChild(aud);
  document.body.removeChild(vid);
}

doit()
  .then(e => console.log("recording"))
  .catch(e => {
    console.error(e);
    console.log('you may want to try from jsfiddle: https://jsfiddle.net/5s2zabb2/');
  });
<video id="vid" controls autoplay></video>
<button id="stop_btn" disabled>stop</button>
And as a fiddle since stacksnippets don't work very well with gUM...
