Make WAV file from raw data - JavaScript

I'm using this example to capture data from the device microphone, but I can't figure out how to convert it to a WAV file to send to my server.
<script>
var handleSuccess = function(stream) {
var context = new AudioContext();
var source = context.createMediaStreamSource(stream);
var processor = context.createScriptProcessor(1024, 1, 1);
source.connect(processor);
processor.connect(context.destination);
processor.onaudioprocess = function(e) {
// Convert this to WAV and send to server
console.log(e.inputBuffer);
};
};
navigator.mediaDevices.getUserMedia({ audio: true, video: false })
.then(handleSuccess);
</script>

Disclosure: synth-js is written by me.
The following script will create a valid WAV file as a Blob, containing the first 5 seconds of audio:
<script src="https://unpkg.com/synth-js/dst/synth.min.js"></script>
<script>
var handleSuccess = function(stream) {
var context = new AudioContext();
var source = context.createMediaStreamSource(stream);
var processor = context.createScriptProcessor(1024, 1, 1);
var data = [];
source.connect(processor);
processor.connect(context.destination);
processor.onaudioprocess = function(e) {
data.push.apply(data, e.inputBuffer.getChannelData(0));
// cut off after 5 seconds
if (data.length >= context.sampleRate * 5) {
context.close();
var track = stream.getAudioTracks()[0];
track.stop();
// Convert this to WAV
var wav = new synth.WAV(1, context.sampleRate, 16, true, data);
var blob = wav.toBlob();
// do something with blob
var src = URL.createObjectURL(blob);
var audio = new Audio();
audio.controls = true;
document.body.appendChild(audio);
// play back audio
audio.addEventListener('canplaythrough', function() { audio.play(); });
audio.src = src;
}
};
};
navigator.mediaDevices.getUserMedia({ audio: true, video: false }).then(handleSuccess);
</script>
You can try this on JSFiddle since Stack Snippets do not allow access to the microphone.
The line var wav = new synth.WAV(1, context.sampleRate, 16, true, data); creates a new WAV object with 1 channel, a sample rate that matches the input, 16 bits per sample in the WAV binary, in little endian format (required), with the PCM data collected by the onaudioprocess events.
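If the goal is to upload the result, the Blob can be posted directly, for example with fetch(). A minimal sketch, assuming a hypothetical /upload endpoint and form field name on your server:
// Sketch: upload the WAV Blob. The '/upload' URL and the 'audio' field
// name are assumptions -- adjust them to whatever your server expects.
function uploadWav(blob) {
  var form = new FormData();
  form.append('audio', blob, 'recording.wav');
  return fetch('/upload', { method: 'POST', body: form })
    .then(function(response) {
      if (!response.ok) throw new Error('Upload failed: ' + response.status);
      return response;
    });
}
// e.g. right after creating the blob above: uploadWav(blob);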

Related

js get base64 of partial live recording

I'm trying to get a base64 string of the currently recorded voice to send to the server for further processing.
My approach is to push base64 strings into recordedChunks to be queued and then sent to the server.
const recordedChunks = [];
var context = null;
var blob = null;
const handler = function(stream) {
if (window.URL) {
player.srcObject = stream;
} else {
//player.src = stream;
}
const context = new AudioContext();
const source = context.createMediaStreamSource(stream);
let bufferSize=1024;
const processor = context.createScriptProcessor(bufferSize, 1, 1);
source.connect(processor);
processor.connect(context.destination);
processor.onaudioprocess = function(e) {
//e.inputBuffer
// Do something with the data, e.g. convert it to mp3
// How to get base64 of what has been recorded without stopping the recorder
// recordedChunks.push('somePrerecodedBase64String');
};
};
navigator.mediaDevices.getUserMedia({ audio: true, video: false })
.then(handler);
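One way to fill that handler, as a rough sketch: convert each incoming Float32Array chunk to 16-bit PCM and base64-encode the bytes with btoa() before pushing. Note this yields headerless raw PCM rather than a playable container, and the helper names below (floatTo16BitPCM, chunkToBase64) are illustrative, not part of any library:
// Illustrative helpers, not from a library.
function floatTo16BitPCM(float32) {
  var int16 = new Int16Array(float32.length);
  for (var i = 0; i < float32.length; i++) {
    var s = Math.max(-1, Math.min(1, float32[i])); // clamp to [-1, 1]
    int16[i] = s < 0 ? s * 0x8000 : s * 0x7FFF;    // scale to 16-bit range
  }
  return int16;
}
function chunkToBase64(int16) {
  var bytes = new Uint8Array(int16.buffer);
  var binary = '';
  for (var i = 0; i < bytes.length; i++) {
    binary += String.fromCharCode(bytes[i]);
  }
  return btoa(binary);
}
// Inside onaudioprocess:
// recordedChunks.push(chunkToBase64(floatTo16BitPCM(e.inputBuffer.getChannelData(0))));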

Get 16 bit output audio using AudioContext

Requirement:
Container: WAV
Encoding: PCM
Rate: 16K
Sample Format: 16 bit
Channels: Mono
My output:
Container: WAV
Encoding: PCM
Rate: 16K
Sample Format: 32 bit float
Channels: Mono
I need to get audio output with a sample format of 16-bit PCM; currently the only output I get is 32-bit float.
My code :
URL = window.URL || window.webkitURL;
var gumStream;
//stream from getUserMedia()
var rec;
//Recorder.js object
var input;
//MediaStreamAudioSourceNode we'll be recording
// shim for AudioContext when it's not available
//new audio context to help us record
var recordButton = document.getElementById("recordButton");
var stopButton = document.getElementById("stopButton");
var pauseButton = document.getElementById("pauseButton");
var recordButton_ = document.getElementById("recordButton_");
var stopButton_ = document.getElementById("stopButton_");
var pauseButton_ = document.getElementById("pauseButton_");
//add events to those 3 buttons
recordButton.addEventListener("click", startRecording);
stopButton.addEventListener("click", stopRecording);
pauseButton.addEventListener("click", pauseRecording);
function startRecording() {
var AudioContext = (window.AudioContext) || (window.webkitAudioContext)
var audioContext = new AudioContext({
sampleRate: 16000,
});
console.log("recordButton clicked");
/* Simple constraints object, for more advanced audio features see
https://addpipe.com/blog/audio-constraints-getusermedia/ */
var constraints = {
audio: true,
video: false
}
/* Disable the record button until we get a success or fail from getUserMedia() */
recordButton.disabled = true;
stopButton.disabled = false;
pauseButton.disabled = false
/* We're using the standard promise based getUserMedia()
https://developer.mozilla.org/en-US/docs/Web/API/MediaDevices/getUserMedia */
navigator.mediaDevices.getUserMedia(constraints).then(function(stream) {
console.log("getUserMedia() success, stream created, initializing Recorder.js ...");
/* assign to gumStream for later use */
gumStream = stream;
/* use the stream */
input = audioContext.createMediaStreamSource(stream);
/* Create the Recorder object and configure to record mono sound (1 channel) Recording 2 channels will double the file size */
rec = new Recorder(input, {
numChannels: 1
})
//start the recording process
rec.record()
console.log("Recording started");
}).catch(function(err) {
//enable the record button if getUserMedia() fails
recordButton.disabled = false;
stopButton.disabled = true;
pauseButton.disabled = true
});
}
function pauseRecording() {
console.log("pauseButton clicked rec.recording=", rec.recording);
if (rec.recording) {
//pause
rec.stop();
pauseButton.innerHTML = "Resume";
} else {
//resume
rec.record()
pauseButton.innerHTML = "Pause";
}
}
function stopRecording() {
console.log("stopButton clicked");
//disable the stop button, enable the record button to allow for new recordings
stopButton.disabled = true;
recordButton.disabled = false;
pauseButton.disabled = true;
//reset button just in case the recording is stopped while paused
pauseButton.innerHTML = "Pause";
//tell the recorder to stop the recording
rec.stop(); //stop microphone access
gumStream.getAudioTracks()[0].stop();
//create the wav blob and pass it on to createDownloadLink
rec.exportWAV(createDownloadLink);
}
function createDownloadLink(blob) {
var url = URL.createObjectURL(blob);
var au = document.createElement('audio');
var li = document.createElement('li');
var link = document.createElement('a');
//add controls to the <audio> element
au.controls = true;
au.src = url;
au.sampleRate = 16000
//link the a element to the blob
link.href = url;
// link.download = new Date().toISOString() + '.wav';
link.innerHTML = link.download;
//add the new audio and a elements to the li element
li.appendChild(au);
li.appendChild(link);
//add the li element to the ordered list
recordingsList.appendChild(li);
var p = document.createElement("br");
recordingsList.appendChild(p);
}
There's nothing wrong with your code; by default the output provided by the Web Audio API is 32-bit float. You will need to process it, for example using the BitCrusher node as described in the documentation:
https://webaudio.github.io/web-audio-api/#the-bitcrusher-node
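As an alternative to a custom processing node, the Float32 samples can be down-converted to 16-bit integers in script before the WAV header is written, which is what Recorder.js-style exporters typically do internally. A rough sketch of that conversion (the function name is illustrative):
// Sketch: convert Web Audio Float32 samples in [-1, 1] to 16-bit PCM.
function floatTo16BitPCM(view, offset, samples) {
  for (var i = 0; i < samples.length; i++, offset += 2) {
    var s = Math.max(-1, Math.min(1, samples[i]));                // clamp
    view.setInt16(offset, s < 0 ? s * 0x8000 : s * 0x7FFF, true); // little-endian
  }
}
// Usage: allocate a DataView over an ArrayBuffer of size 44 + samples.length * 2,
// write the 44-byte WAV header first, then call floatTo16BitPCM(view, 44, samples).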
Hope this helps.

Realtime Transmit Recorded Audio using RTP on websocket - Javascript and WEBRTC

I am new to WebRTC. I want to record audio from the browser and then send the raw audio as RTP over a WebSocket to another machine on my local network.
How do I form these RTP packets?
Is it possible to stream these packets to the remote machine/server while recording?
I am using RecordRTC.
Here is my Code :
const player = document.getElementById('player');
const handleSuccess = function(stream) {
const context = new AudioContext();
var channels = 2;
const source = context.createMediaStreamSource(stream);
const processor = context.createScriptProcessor(1024, 2, 1);
var buffer = context.createBuffer(2, 22050, 44100);
var frameCount = context.sampleRate * 2.0;
var myArrayBuffer = context.createBuffer(2, frameCount, context.sampleRate);
console.log("My Array buffer " , myArrayBuffer );
source.connect(processor);
processor.connect(context.destination);
processor.onaudioprocess = function(e) {
// Do something with the data, e.g. convert it to WAV
console.log(e.inputBuffer);
};
if (window.URL) {
player.srcObject = stream;
console.log("Stream is " , stream.toString());
} else {
player.src = stream;
console.log("Stream is " , stream.toString());
}
};
navigator.mediaDevices.getUserMedia({ audio: true, video: false })
.then(handleSuccess);
navigator.mediaDevices.enumerateDevices().then((devices) => {
devices = devices.filter((d) => d.kind === 'audioinput');
console.log("Devices are " , devices);
});
The code allows me to stream audio; I just want to send the recorded stream in raw format using the RTP protocol.
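Browsers do not expose an RTP stack to JavaScript, so to send RTP over a WebSocket you would have to build the 12-byte RTP header yourself and prepend it to each payload. A rough sketch, where the WebSocket URL, payload type and SSRC are placeholders and the payload is assumed to be a raw PCM chunk:
// Sketch only: minimal RTP packetization sent over a WebSocket.
var ws = new WebSocket('ws://example.local:8080'); // placeholder server address
ws.binaryType = 'arraybuffer';
var seq = 0, timestamp = 0, ssrc = 0x12345678;     // placeholder SSRC
function rtpPacket(payload) {
  var packet = new Uint8Array(12 + payload.byteLength);
  var view = new DataView(packet.buffer);
  view.setUint8(0, 0x80);                // version 2, no padding/extension/CSRC
  view.setUint8(1, 96);                  // dynamic payload type (placeholder)
  view.setUint16(2, seq++ & 0xFFFF);     // sequence number
  view.setUint32(4, timestamp);          // timestamp, advance by samples sent
  view.setUint32(8, ssrc);               // synchronization source identifier
  packet.set(new Uint8Array(payload), 12);
  return packet.buffer;
}
// In onaudioprocess: convert e.inputBuffer to your wire format, then
// ws.send(rtpPacket(pcmChunk.buffer)); timestamp += 1024;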

Is it possible to mix multiple audio files on top of each other preferably with javascript

I want to combine audio clips, layered on top of each other so that they play synchronously, and save the result as a new audio file. Any help would be much appreciated. I've done some digging online, but couldn't find a definitive answer as to whether the available JavaScript audio-editing libraries (Mix.js, for example) are capable of this.
Yes, it is possible using OfflineAudioContext() or AudioContext.createChannelMerger() and creating a MediaStream. See Phonegap mixing audio files and Web Audio API.
You can use fetch() or XMLHttpRequest() to retrieve each audio resource as an ArrayBuffer, AudioContext.decodeAudioData() to turn the response into a buffer for an AudioBufferSourceNode, OfflineAudioContext() to render the merged audio, and AudioContext.createBufferSource() with AudioContext.createMediaStreamDestination() and MediaRecorder() to record the resulting stream. Promise.all(), the Promise() constructor and .then() handle the asynchronous fetch() and decodeAudioData() calls, and the mixed-audio Blob becomes available at the stop event of the MediaRecorder.
Connect each AudioBufferSourceNode to OfflineAudioContext.destination and call .start() on each node, then call OfflineAudioContext.startRendering(). When rendering completes, create a buffer source on a regular AudioContext, set its buffer to the renderedBuffer, and connect it to a MediaStreamAudioDestinationNode created with .createMediaStreamDestination(); pass that node's .stream to MediaRecorder(). At the stop event of the MediaRecorder, create a Blob URL of the recorded mix with URL.createObjectURL(), which can be downloaded using an <a> element with a download attribute and href set to the Blob URL.
var sources = ["https://upload.wikimedia.org/wikipedia/commons/b/be/"
+ "Hidden_Tribe_-_Didgeridoo_1_Live.ogg"
, "https://upload.wikimedia.org/wikipedia/commons/6/6e/"
+ "Micronesia_National_Anthem.ogg"];
var description = "HiddenTribeAnthem";
var context;
var recorder;
var div = document.querySelector("div");
var duration = 60000;
var chunks = [];
var audio = new AudioContext();
var mixedAudio = audio.createMediaStreamDestination();
var player = new Audio();
player.controls = "controls";
function get(src) {
return fetch(src)
.then(function(response) {
return response.arrayBuffer()
})
}
function stopMix(duration, ...media) {
setTimeout(function(media) {
media.forEach(function(node) {
node.stop()
})
}, duration, media)
}
Promise.all(sources.map(get)).then(function(data) {
var len = Math.max.apply(Math, data.map(function(buffer) {
return buffer.byteLength
}));
context = new OfflineAudioContext(2, len, 44100);
return Promise.all(data.map(function(buffer) {
return audio.decodeAudioData(buffer)
.then(function(bufferSource) {
var source = context.createBufferSource();
source.buffer = bufferSource;
source.connect(context.destination);
return source.start()
})
}))
.then(function() {
return context.startRendering()
})
.then(function(renderedBuffer) {
return new Promise(function(resolve) {
var mix = audio.createBufferSource();
mix.buffer = renderedBuffer;
mix.connect(audio.destination);
mix.connect(mixedAudio);
recorder = new MediaRecorder(mixedAudio.stream);
recorder.start(0);
mix.start(0);
div.innerHTML = "playing and recording tracks..";
// stop playback and recorder in 60 seconds
stopMix(duration, mix, recorder)
recorder.ondataavailable = function(event) {
chunks.push(event.data);
};
recorder.onstop = function(event) {
var blob = new Blob(chunks, {
"type": "audio/ogg; codecs=opus"
});
console.log("recording complete");
resolve(blob)
};
})
})
.then(function(blob) {
console.log(blob);
div.innerHTML = "mixed audio tracks ready for download..";
var audioDownload = URL.createObjectURL(blob);
var a = document.createElement("a");
a.download = description + "." + blob.type.replace(/.+\/|;.+/g, "");
a.href = audioDownload;
a.innerHTML = a.download;
document.body.appendChild(a);
a.insertAdjacentHTML("afterend", "<br>");
player.src = audioDownload;
document.body.appendChild(player);
})
})
.catch(function(e) {
console.log(e)
});
<!DOCTYPE html>
<html>
<head>
</head>
<body>
<div>loading audio tracks.. please wait</div>
</body>
</html>
You can alternatively utilize AudioContext.createChannelMerger() and AudioContext.createChannelSplitter():
var sources = ["/path/to/audoi1", "/path/to/audio2"];
var description = "mix";
var chunks = [];
var channels = [[0, 1], [1, 0]];
var audio = new AudioContext();
var player = new Audio();
var merger = audio.createChannelMerger(2);
var splitter = audio.createChannelSplitter(2);
var mixedAudio = audio.createMediaStreamDestination();
var duration = 60000;
var context;
var recorder;
var audioDownload;
player.controls = "controls";
function get(src) {
return fetch(src)
.then(function(response) {
return response.arrayBuffer()
})
}
function stopMix(duration, ...media) {
setTimeout(function(media) {
media.forEach(function(node) {
node.stop()
})
}, duration, media)
}
Promise.all(sources.map(get)).then(function(data) {
return Promise.all(data.map(function(buffer, index) {
return audio.decodeAudioData(buffer)
.then(function(bufferSource) {
var channel = channels[index];
var source = audio.createBufferSource();
source.buffer = bufferSource;
source.connect(splitter);
splitter.connect(merger, channel[0], channel[1]);
return source
})
}))
.then(function(audionodes) {
merger.connect(mixedAudio);
merger.connect(audio.destination);
recorder = new MediaRecorder(mixedAudio.stream);
recorder.start(0);
audionodes.forEach(function(node) {
node.start(0)
});
stopMix(duration, ...audionodes, recorder);
recorder.ondataavailable = function(event) {
chunks.push(event.data);
};
recorder.onstop = function(event) {
var blob = new Blob(chunks, {
"type": "audio/ogg; codecs=opus"
});
audioDownload = URL.createObjectURL(blob);
var a = document.createElement("a");
a.download = description + "." + blob.type.replace(/.+\/|;.+/g, "");
a.href = audioDownload;
a.innerHTML = a.download;
player.src = audioDownload;
document.body.appendChild(a);
document.body.appendChild(player);
};
})
})
.catch(function(e) {
console.log(e)
});

Converting a Float32Array of decoded samples to an AudioBuffer

Because one of the browsers I'm trying to support doesn't allow me to decode a specific codec using AudioContext.decodeAudioData(), I'm using Aurora.js to decode the audio files.
How can I change the decoded samples received from Aurora.js into an AudioBuffer I can actually use to playback the audio?
This is my code so far:
var AudioContext = (window.AudioContext || window.webkitAudioContext);
var context = new AudioContext();
var segmentUrls = [
'/segments/00.wav',
'/segments/05.wav',
'/segments/10.wav',
'/segments/15.wav',
'/segments/20.wav',
'/segments/25.wav',
'/segments/30.wav',
'/segments/35.wav',
'/segments/40.wav',
'/segments/45.wav',
'/segments/50.wav',
'/segments/55.wav'
];
Promise.all(segmentUrls.map(loadSound))
.then(function(buffers) {
var startAt = 0;
buffers.forEach(function(buffer) {
playSound(startAt, buffer);
startAt += buffer.duration;
});
})
.catch(function(err) {
console.error(err);
});
function playSound(offset, buffer) {
var source = context.createBufferSource();
source.buffer = buffer;
source.connect(context.destination);
source.start(offset);
}
function loadSound(url) {
return new Promise(function(resolve, reject) {
var request = new XMLHttpRequest();
request.open('GET', url, true);
request.responseType = 'arraybuffer';
request.onload = function onLoad() {
resolve(decodeAudioData(request.response));
};
request.onerror = function onError() {
reject('Could not request file');
};
request.send();
});
}
function decodeAudioData(audioData) {
return new Promise(function(resolve, reject) {
var asset = AV.Asset.fromBuffer(audioData);
asset.decodeToBuffer(function(buffer) {
// Create an AudioBuffer here
});
});
}
You'll have to create an AudioBuffer of the appropriate size and number of channels, and copy the data from one Float32 buffer to another.
Here is the MDN code snippet to put data in an AudioBuffer and then play it:
https://developer.mozilla.org/en-US/docs/Web/API/AudioBuffer
// Stereo
var channels = 2;
// Create an empty two second stereo buffer at the
// sample rate of the AudioContext
var audioCtx = new (window.AudioContext || window.webkitAudioContext)();
var frameCount = audioCtx.sampleRate * 2.0;
var myArrayBuffer = audioCtx.createBuffer(channels, frameCount, audioCtx.sampleRate);
button.onclick = function() {
// Fill the buffer with white noise;
// just random values between -1.0 and 1.0
for (var channel = 0; channel < channels; channel++) {
// This gives us the actual array that contains the data
var nowBuffering = myArrayBuffer.getChannelData(channel);
for (var i = 0; i < frameCount; i++) {
// Math.random() is in [0; 1.0]
// audio needs to be in [-1.0; 1.0]
nowBuffering[i] = Math.random() * 2 - 1;
}
}
// Get an AudioBufferSourceNode.
// This is the AudioNode to use when we want to play an AudioBuffer
var source = audioCtx.createBufferSource();
// set the buffer in the AudioBufferSourceNode
source.buffer = myArrayBuffer;
// connect the AudioBufferSourceNode to the
// destination so we can hear the sound
source.connect(audioCtx.destination);
// start the source playing
source.start();
}
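Applied to the decodeAudioData() stub above, the same idea looks roughly like this. It assumes Aurora.js hands back interleaved Float32 samples and that asset.format exposes channelsPerFrame and sampleRate; verify those names against the Aurora.js version you use:
function decodeAudioData(audioData) {
  return new Promise(function(resolve, reject) {
    var asset = AV.Asset.fromBuffer(audioData);
    asset.on('error', reject);
    asset.decodeToBuffer(function(decoded) {
      // Assumed Aurora.js metadata: channel count and sample rate of the source.
      var channels = asset.format.channelsPerFrame;
      var sampleRate = asset.format.sampleRate;
      var frameCount = decoded.length / channels;
      var audioBuffer = context.createBuffer(channels, frameCount, sampleRate);
      // De-interleave the decoded samples into the AudioBuffer's channels.
      for (var channel = 0; channel < channels; channel++) {
        var channelData = audioBuffer.getChannelData(channel);
        for (var i = 0; i < frameCount; i++) {
          channelData[i] = decoded[i * channels + channel];
        }
      }
      resolve(audioBuffer);
    });
  });
}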
