Recently I've been working on a voice platform that lets users talk to each other under certain conditions. However, it only seems to be smooth when your ping is really low.
Here's the structure currently:
Raw PCM Audio From User -> Web Socket -> Sent to all clients that meet a certain condition
There's nothing particularly special about my WebSocket server. It's written in Java and simply relays the data it receives from one client to the other clients (this is just temporary).
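For context, the main-thread glue between the two worklets and the WebSocket looks roughly like the sketch below (the module paths, processor names and URL are placeholders, not my actual code):
const audioCtx = new AudioContext();
const ws = new WebSocket('wss://example.com/voice'); // placeholder URL
ws.binaryType = 'arraybuffer';

async function setUpVoice() {
  // hypothetical file/processor names for the two worklets shown below
  await audioCtx.audioWorklet.addModule('recorder-processor.js');
  await audioCtx.audioWorklet.addModule('playback-processor.js');
  const recorderNode = new AudioWorkletNode(audioCtx, 'recorder-processor');
  const playbackNode = new AudioWorkletNode(audioCtx, 'playback-processor');

  // microphone -> recorder worklet; the worklet posts combined PCM buffers
  const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
  audioCtx.createMediaStreamSource(stream).connect(recorderNode);
  recorderNode.port.onmessage = (e) => {
    if (ws.readyState === WebSocket.OPEN) ws.send(e.data);
  };

  // incoming frames from the server -> playback worklet -> speakers
  ws.onmessage = (e) => playbackNode.port.postMessage(e.data);
  playbackNode.connect(audioCtx.destination);
}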
Issue:
For users with a decent amount of ping (>100 ms), the audio cuts out (gets choppy) in places and doesn't seem to arrive fast enough, even with the code I have in place. If a video of this would help, let me know!
This is the code I have for recording and playback currently (using AudioWorkletProcessors)
Recording:
buffers = [];
buffersLength = 0;
process(inputs, _, parameters) {
const [input] = inputs;
if (parameters.muted[0] == 1) {
return true;
}
// Create one buffer from this input
const dataLen = input[0].byteLength;
const channels = input.length;
const bufferForSocket = new Uint8Array(dataLen * channels + 9);
bufferForSocket.set(new Uint8Array([channels]), 0);
bufferForSocket.set(numberToArrayBuffer(sampleRate), 1);
bufferForSocket.set(numberToArrayBuffer(input[0].byteLength), 5);
for (let i = 0; i < channels; i++) {
bufferForSocket.set(new Uint8Array(input[i].buffer), 9 + dataLen * i);
}
// Add buffers to a list
this.buffers.push(bufferForSocket);
this.buffersLength += bufferForSocket.byteLength;
// If we have 25 buffers, send them off to websocket
if (this.buffers.length >= 25) {
const combinedBuffer = new Uint8Array(
this.buffersLength + 4 + 4 * this.buffers.length
);
combinedBuffer.set(numberToArrayBuffer(this.buffers.length), 0);
let offset = 4;
for (let i = 0; i < this.buffers.length; i++) {
const buffer = this.buffers[i];
combinedBuffer.set(numberToArrayBuffer(buffer.byteLength), offset);
combinedBuffer.set(buffer, offset + 4);
offset += buffer.byteLength + 4;
}
this.buffers.length = 0;
this.buffersLength = 0;
// This is what sends to WebSocket
this.port.postMessage(combinedBuffer.buffer);
}
return true;
}
Playback:
class extends AudioWorkletProcessor {
buffers = new LinkedList();
timeTillNextBuffer = 0;
process(_, outputs, __) {
const [output] = outputs;
const linkedBuffers = this.buffers.last();
// If we aren't currently playing a buffer and we cannot play a buffer right now, return
if (
linkedBuffers == null ||
(linkedBuffers.buffers.length === 0 && this.timeTillNextBuffer > Date.now())
)
return true;
const buffer = linkedBuffers.buffers.removeLast();
// Current buffer is finished, remove it
if (linkedBuffers.index === linkedBuffers.buffers.length) {
this.buffers.removeLast();
}
if (buffer === null) {
return true;
}
const inputData = buffer.channels;
// put audio to output
for (let channel = 0; channel < outputs.length; channel++) {
const channelData = inputData[channel];
const outputData = output[channel];
for (let i = 0; i < channelData.length; i++) {
outputData[i] = channelData[i];
}
}
return true;
}
static get parameterDescriptors() {
return [];
}
onBuffer(buffer) {
// buffer is ArrayBuffer
const buffersGiven = new DataView(buffer, 0, 4).getUint32(0, true);
let offset = 4;
const buffers = new LinkedList();
const linkedBuffers = { index: 0, buffers };
// Read buffers from WebSocket (created in the snippet above)
for (let i = 0; i < buffersGiven; i++) {
const bufferLength = new DataView(buffer, offset, 4).getUint32(0, true);
const numberOfChannels = new DataView(buffer, offset + 4, 1).getUint8(0, true);
const sampleRate = new DataView(buffer, offset + 5, 4).getUint32(0, true);
const channelLength = new DataView(buffer, offset + 9, 4).getUint32(0, true);
const channels = [];
for (let i = 0; i < numberOfChannels; i++) {
const start = offset + 13 + i * channelLength;
channels[i] = new Float32Array(buffer.slice(start), 0, channelLength / 4);
}
buffers.push({ channelLength, numberOfChannels, sampleRate, channels });
offset += bufferLength + 4;
}
this.buffers.push(linkedBuffers);
// Jitter buffer
this.timeTillNextBuffer = 50 - this.buffers.length * 25 + Date.now();
}
constructor() {
super();
this.port.onmessage = (e) => this.onBuffer(e.data); // Data directly from WebSocket
}
}
I've heard that using Atomics can help, because of how the AudioWorklet outputs silence whenever process() returns without filling the output. Any tips would be greatly appreciated! Also, if anything is unclear, please let me know!
My code has a rudimentary jitter-buffer system, but it doesn't seem to work at all. The audio a user receives from me (low ping) is clear. However, the audio they (high ping) send me is choppy. Furthermore, this choppy audio seems to build up, and it gets more delayed the more packets I receive.
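To be concrete about the Atomics idea, what I had in mind is a shared ring buffer between the main thread and the playback worklet, roughly like this untested sketch (all names and sizes are made up; the two SharedArrayBuffers would be handed to the worklet once via port.postMessage, and the page needs to be cross-origin isolated for SharedArrayBuffer to be available):
const CAPACITY = 48000 * 2; // ~2 seconds of mono audio at 48 kHz
const stateSab = new SharedArrayBuffer(2 * Int32Array.BYTES_PER_ELEMENT);
const audioSab = new SharedArrayBuffer(CAPACITY * Float32Array.BYTES_PER_ELEMENT);
const state = new Int32Array(stateSab); // [0] = write index, [1] = read index
const ring = new Float32Array(audioSab);

// Main thread: push decoded Float32 samples as they arrive from the WebSocket.
function pushSamples(chunk) { // chunk is a Float32Array
  let w = Atomics.load(state, 0);
  for (let i = 0; i < chunk.length; i++) {
    ring[w] = chunk[i];
    w = (w + 1) % CAPACITY;
  }
  Atomics.store(state, 0, w);
}

// Worklet: inside process(), pull exactly 128 samples per render quantum,
// outputting silence instead of stalling when the buffer runs dry.
function pullSamples(outputChannel) { // outputChannel is a Float32Array(128)
  let r = Atomics.load(state, 1);
  const w = Atomics.load(state, 0);
  for (let i = 0; i < outputChannel.length; i++) {
    if (r === w) { outputChannel[i] = 0; continue; } // underrun -> silence
    outputChannel[i] = ring[r];
    r = (r + 1) % CAPACITY;
  }
  Atomics.store(state, 1, r);
}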
Related
I've spent two days of extensive research on the topic, but there is no really well-explained piece that actually works.
So the flow is the following:
load mp3 (store bought) cbr 320 into wavesurfer
apply all the changes you need
download the processed result back to an mp3 file (without the use of a server)
I've seen online apps that can do that; nothing is transmitted to a server, it all happens in the browser.
When we inspect wavesurfer, we have access to internal references such as wavesurfer.backend.buffer.
The goal would be to use the references already available from wavesurfer to produce the mp3 for download.
From my understanding this can be done with MediaRecorder, the WebCodecs API, or a library like lamejs.
I've tried to find a working example of how to do it with the first two methods, but without luck. I also tried to do it with lamejs using the example provided in their repo, but I'm getting errors from the library that are hard to debug, most likely related to providing the wrong input.
So far I've only managed to download a wav file using the following script:
handleCopyRegion = (region, instance) => {
var segmentDuration = region.end - region.start;
var originalBuffer = instance.backend.buffer;
var emptySegment = instance.backend.ac.createBuffer(
originalBuffer.numberOfChannels,
Math.ceil(segmentDuration * originalBuffer.sampleRate),
originalBuffer.sampleRate
);
for (var i = 0; i < originalBuffer.numberOfChannels; i++) {
var chanData = originalBuffer.getChannelData(i);
var emptySegmentData = emptySegment.getChannelData(i);
var mid_data = chanData.subarray(
Math.ceil(region.start * originalBuffer.sampleRate),
Math.ceil(region.end * originalBuffer.sampleRate)
);
emptySegmentData.set(mid_data);
}
return emptySegment;
};
bufferToWave = (abuffer, offset, len) => {
var numOfChan = abuffer.numberOfChannels,
length = len * numOfChan * 2 + 44,
buffer = new ArrayBuffer(length),
view = new DataView(buffer),
channels = [],
i,
sample,
pos = 0;
// write WAVE header
setUint32(0x46464952); // "RIFF"
setUint32(length - 8); // file length - 8
setUint32(0x45564157); // "WAVE"
setUint32(0x20746d66); // "fmt " chunk
setUint32(16); // length = 16
setUint16(1); // PCM (uncompressed)
setUint16(numOfChan);
setUint32(abuffer.sampleRate);
setUint32(abuffer.sampleRate * 2 * numOfChan); // avg. bytes/sec
setUint16(numOfChan * 2); // block-align
setUint16(16); // 16-bit (hardcoded in this demo)
setUint32(0x61746164); // "data" - chunk
setUint32(length - pos - 4); // chunk length
// write interleaved data
for (i = 0; i < abuffer.numberOfChannels; i++)
channels.push(abuffer.getChannelData(i));
while (pos < length) {
for (i = 0; i < numOfChan; i++) {
// interleave channels
sample = Math.max(-1, Math.min(1, channels[i][offset])); // clamp
sample = (0.5 + sample < 0 ? sample * 32768 : sample * 32767) | 0; // scale to 16-bit signed int
view.setInt16(pos, sample, true); // update data chunk
pos += 2;
}
offset++; // next source sample
}
// create Blob
return new Blob([buffer], { type: "audio/wav" });
function setUint16(data) {
view.setUint16(pos, data, true);
pos += 2;
}
function setUint32(data) {
view.setUint32(pos, data, true);
pos += 4;
}
};
const cutSelection = this.handleCopyRegion(
this.wavesurfer.regions.list.cut,
this.wavesurfer
);
const blob = this.bufferToWave(cutSelection, 0, cutSelection.length);
// you can now download wav from the blob
Is there a way to skip making the wav and produce an mp3 for download right away, or if not, to make an mp3 from that wav? If so, how can it be done?
I mainly tried to use wavesurfer.backend.buffer as the input, because this reference is an AudioBuffer and .getChannelData(0|1) gives you the left and right channels. But I didn't accomplish anything; maybe I'm thinking about it wrong.
Alright, here are the steps we need to take:
Get the buffer data from the wavesurfer player
Analyze the buffer to get the number of channels (stereo or mono), the channel data, and the sample rate
Use the lamejs library to convert the buffer to an MP3 Blob
Then we can get a download link from that Blob
Here is a quick DEMO
and also the JS code:
function downloadMp3() {
var MP3Blob = analyzeAudioBuffer(wavesurfer.backend.buffer);
console.log('here is your mp3 url:');
console.log(URL.createObjectURL(MP3Blob));
}
function analyzeAudioBuffer(aBuffer) {
let numOfChan = aBuffer.numberOfChannels,
btwLength = aBuffer.length * numOfChan * 2 + 44,
btwArrBuff = new ArrayBuffer(btwLength),
btwView = new DataView(btwArrBuff),
btwChnls = [],
btwIndex,
btwSample,
btwOffset = 0,
btwPos = 0;
setUint32(0x46464952); // "RIFF"
setUint32(btwLength - 8); // file length - 8
setUint32(0x45564157); // "WAVE"
setUint32(0x20746d66); // "fmt " chunk
setUint32(16); // length = 16
setUint16(1); // PCM (uncompressed)
setUint16(numOfChan);
setUint32(aBuffer.sampleRate);
setUint32(aBuffer.sampleRate * 2 * numOfChan); // avg. bytes/sec
setUint16(numOfChan * 2); // block-align
setUint16(16); // 16-bit
setUint32(0x61746164); // "data" - chunk
setUint32(btwLength - btwPos - 4); // chunk length
for (btwIndex = 0; btwIndex < aBuffer.numberOfChannels; btwIndex++)
btwChnls.push(aBuffer.getChannelData(btwIndex));
while (btwPos < btwLength) {
for (btwIndex = 0; btwIndex < numOfChan; btwIndex++) {
// interleave btwChnls
btwSample = Math.max(-1, Math.min(1, btwChnls[btwIndex][btwOffset])); // clamp
btwSample = (0.5 + btwSample < 0 ? btwSample * 32768 : btwSample * 32767) | 0; // scale to 16-bit signed int
btwView.setInt16(btwPos, btwSample, true); // write 16-bit sample
btwPos += 2;
}
btwOffset++; // next source sample
}
let wavHdr = lamejs.WavHeader.readHeader(new DataView(btwArrBuff));
//Stereo
let data = new Int16Array(btwArrBuff, wavHdr.dataOffset, wavHdr.dataLen / 2);
let leftData = [];
let rightData = [];
for (let i = 0; i < data.length; i += 2) {
leftData.push(data[i]);
rightData.push(data[i + 1]);
}
var left = new Int16Array(leftData);
var right = new Int16Array(rightData);
//STEREO
if (wavHdr.channels===2)
return bufferToMp3(wavHdr.channels, wavHdr.sampleRate, left,right);
//MONO
else if (wavHdr.channels===1)
return bufferToMp3(wavHdr.channels, wavHdr.sampleRate, data);
function setUint16(data) {
btwView.setUint16(btwPos, data, true);
btwPos += 2;
}
function setUint32(data) {
btwView.setUint32(btwPos, data, true);
btwPos += 4;
}
}
function bufferToMp3(channels, sampleRate, left, right = null) {
var buffer = [];
var mp3enc = new lamejs.Mp3Encoder(channels, sampleRate, 128);
var remaining = left.length;
var samplesPerFrame = 1152;
for (var i = 0; remaining >= samplesPerFrame; i += samplesPerFrame) {
if (!right)
{
var mono = left.subarray(i, i + samplesPerFrame);
var mp3buf = mp3enc.encodeBuffer(mono);
}
else {
var leftChunk = left.subarray(i, i + samplesPerFrame);
var rightChunk = right.subarray(i, i + samplesPerFrame);
var mp3buf = mp3enc.encodeBuffer(leftChunk,rightChunk);
}
if (mp3buf.length > 0) {
buffer.push(mp3buf);//new Int8Array(mp3buf));
}
remaining -= samplesPerFrame;
}
var d = mp3enc.flush();
if(d.length > 0){
buffer.push(new Int8Array(d));
}
var mp3Blob = new Blob(buffer, {type: 'audio/mpeg'});
//var bUrl = window.URL.createObjectURL(mp3Blob);
// send the download link to the console
//console.log('mp3 download:', bUrl);
return mp3Blob;
}
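If you also want to trigger the actual download in the browser rather than just logging the blob URL, a small helper along these lines should work (the function and file names are just examples):
function saveMp3(mp3Blob, fileName) {
  var link = document.createElement('a');
  link.href = URL.createObjectURL(mp3Blob);
  link.download = fileName || 'processed.mp3';
  document.body.appendChild(link);
  link.click();
  link.remove();
}
// e.g. saveMp3(analyzeAudioBuffer(wavesurfer.backend.buffer), 'my-cut.mp3');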
Let me know if you have any questions about the code.
I'd like to convert an AudioBuffer to a Blob so that I can create an ObjectURL from it and then download the audio file.
let rec = new Recorder(async(chunks) => {
var blob = new Blob(chunks, {
type: 'audio/mp3'
});
var arrayBuffer = await blob.arrayBuffer();
const audioContext = new AudioContext()
await audioContext.decodeAudioData(arrayBuffer, (audioBuffer) => {
// How do I now convert the AudioBuffer into an ArrayBuffer => Blob?
})
})
An AudioBuffer contains non-interleaved Float32Array PCM samples for each decoded audio channel. For a stereo AudioBuffer, it will contain 2 channels. Those channels need to be interleaved first, and then the interleaved PCM needs a WAV header prepended to it so it can be downloaded and played as a WAV.
// Float32Array samples
const [left, right] = [audioBuffer.getChannelData(0), audioBuffer.getChannelData(1)]
// interleaved
const interleaved = new Float32Array(left.length + right.length)
for (let src=0, dst=0; src < left.length; src++, dst+=2) {
interleaved[dst] = left[src]
interleaved[dst+1] = right[src]
}
// get WAV file bytes and audio params of your audio source
const wavBytes = getWavBytes(interleaved.buffer, {
isFloat: true, // floating point or 16-bit integer
numChannels: 2,
sampleRate: 48000,
})
const wav = new Blob([wavBytes], { type: 'audio/wav' })
// create download link and append to the DOM
const downloadLink = document.createElement('a')
downloadLink.href = URL.createObjectURL(wav)
downloadLink.setAttribute('download', 'my-audio.wav') // name file
document.body.appendChild(downloadLink)
supporting functions below:
// Returns Uint8Array of WAV bytes
function getWavBytes(buffer, options) {
const type = options.isFloat ? Float32Array : Uint16Array
const numFrames = buffer.byteLength / type.BYTES_PER_ELEMENT
const headerBytes = getWavHeader(Object.assign({}, options, { numFrames }))
const wavBytes = new Uint8Array(headerBytes.length + buffer.byteLength);
// prepend header, then add pcmBytes
wavBytes.set(headerBytes, 0)
wavBytes.set(new Uint8Array(buffer), headerBytes.length)
return wavBytes
}
// adapted from https://gist.github.com/also/900023
// returns Uint8Array of WAV header bytes
function getWavHeader(options) {
const numFrames = options.numFrames
const numChannels = options.numChannels || 2
const sampleRate = options.sampleRate || 44100
const bytesPerSample = options.isFloat? 4 : 2
const format = options.isFloat? 3 : 1
const blockAlign = numChannels * bytesPerSample
const byteRate = sampleRate * blockAlign
const dataSize = numFrames * blockAlign
const buffer = new ArrayBuffer(44)
const dv = new DataView(buffer)
let p = 0
function writeString(s) {
for (let i = 0; i < s.length; i++) {
dv.setUint8(p + i, s.charCodeAt(i))
}
p += s.length
}
function writeUint32(d) {
dv.setUint32(p, d, true)
p += 4
}
function writeUint16(d) {
dv.setUint16(p, d, true)
p += 2
}
writeString('RIFF') // ChunkID
writeUint32(dataSize + 36) // ChunkSize
writeString('WAVE') // Format
writeString('fmt ') // Subchunk1ID
writeUint32(16) // Subchunk1Size
writeUint16(format) // AudioFormat https://i.stack.imgur.com/BuSmb.png
writeUint16(numChannels) // NumChannels
writeUint32(sampleRate) // SampleRate
writeUint32(byteRate) // ByteRate
writeUint16(blockAlign) // BlockAlign
writeUint16(bytesPerSample * 8) // BitsPerSample
writeString('data') // Subchunk2ID
writeUint32(dataSize) // Subchunk2Size
return new Uint8Array(buffer)
}
A minor edit to AnthumChris's reply above: the following function handles any number of channels instead of assuming stereo input.
convertAudioBufferToBlob(audioBuffer) {
var channelData = [],
totalLength = 0,
channelLength = 0;
for (var i = 0; i < audioBuffer.numberOfChannels; i++) {
channelData.push(audioBuffer.getChannelData(i));
totalLength += channelData[i].length;
if (i == 0) channelLength = channelData[i].length;
}
// interleaved
const interleaved = new Float32Array(totalLength);
for (
let src = 0, dst = 0;
src < channelLength;
src++, dst += audioBuffer.numberOfChannels
) {
for (var j = 0; j < audioBuffer.numberOfChannels; j++) {
interleaved[dst + j] = channelData[j][src];
}
//interleaved[dst] = left[src];
//interleaved[dst + 1] = right[src];
}
// get WAV file bytes and audio params of your audio source
const wavBytes = this.getWavBytes(interleaved.buffer, {
isFloat: true, // floating point or 16-bit integer
numChannels: audioBuffer.numberOfChannels,
sampleRate: 48000,
});
const wav = new Blob([wavBytes], { type: "audio/wav" });
return wav;
},
I'm trying to convert one of my JavaScript functions into a Swift 5 function, but even after an extensive search I couldn't find anything on it.
function toArrayBuffer(type, ts, x, y, z, sc) {
let userId = userID.getUserId();
//console.log("Found GPS UserID: " + userId);
if (userId == "Unauthor") {
//console.log("GPS did not find the userID")
return null;
}
let buffer = new ArrayBuffer(8 * 4);
// Offset: 0
let float64Bytes = new Float64Array(buffer);
float64Bytes[0] = ts;
// Offset: 8
let bytes = new Float32Array(buffer);
bytes[2] = x;
bytes[3] = y;
bytes[4] = z;
bytes[5] = sc;
// Offset: 24
let uint8Bytes = new Uint8Array(buffer);
uint8Bytes[24] = type;
for (let i = 0; i < 8; i++) {
if (userId.charCodeAt(i)) {
uint8Bytes[i + 25] = userId.charCodeAt(i);
}
}
return buffer;
}
Basically I tried to build the same function with var byteArray = [UInt8](stringVal.utf8), but a UInt8 can only store values up to 255, and I need to store an epoch timestamp, so that doesn't work either. Any help would be appreciated.
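For reference, here is the same byte layout written with an explicit DataView, which makes the offsets easier to mirror in another language (sketch only; userId is taken as a parameter here, the little-endian flag matches what the typed-array views use on typical hardware, and note that the original 32-byte buffer has no room for the 8th userId byte, so only 7 are written):
function toArrayBufferExplicit(type, ts, x, y, z, sc, userId) {
  var buffer = new ArrayBuffer(32);
  var view = new DataView(buffer);
  view.setFloat64(0, ts, true);   // bytes 0-7   : timestamp
  view.setFloat32(8, x, true);    // bytes 8-11  : x
  view.setFloat32(12, y, true);   // bytes 12-15 : y
  view.setFloat32(16, z, true);   // bytes 16-19 : z
  view.setFloat32(20, sc, true);  // bytes 20-23 : sc
  view.setUint8(24, type);        // byte  24    : type
  for (var i = 0; i < 7; i++) {   // bytes 25-31 : first 7 userId characters
    view.setUint8(25 + i, userId.charCodeAt(i) || 0);
  }
  return buffer;
}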
I'm using the Web Audio API to create a simple spectrum analyzer using the computer microphone as the input signal. The basic functionality of my current implementation works fine, using the default sampling rate (usually 48 kHz, but it could be 44.1 kHz depending on the browser).
For some applications, I would like to use a lower sampling rate (~8 kHz) for the FFT.
It looks like the Web Audio API is adding support for customizing the sample rate, currently only available in Firefox (https://developer.mozilla.org/en-US/docs/Web/API/AudioContextOptions/sampleRate).
Adding sample rate to the context constructor:
// create AudioContext object named 'audioCtx'
var audioCtx = new (AudioContext || webkitAudioContext)({sampleRate: 8000,});
console.log(audioCtx.sampleRate)
The console outputs '8000' (in Firefox), so it appears to be working up to this point.
The microphone is turned on by the user using a pull-down menu. This is the function servicing that pull-down:
var microphone;
function getMicInputState()
{
let selectedValue = document.getElementById("micOffOn").value;
if (selectedValue === "on") {
navigator.mediaDevices.getUserMedia({audio: true})
.then(stream => {
microphone = audioCtx.createMediaStreamSource(stream);
microphone.connect(analyserNode);
})
.catch(err => { alert("Microphone is required."); });
} else {
microphone.disconnect();
}
}
In Firefox, using the pulldown to activate the microphone displays a popup requesting access to the microphone (as normally expected). After clicking to allow the microphone, the console displays:
"Connecting AudioNodes from AudioContexts with different sample-rate is currently not supported".
The display of the spectrum analyzer remains blank.
Any ideas on how to overcome this error? If we can get past this, any guidance on how to specify sampleRate when the user's sound card sampling rate is unknown?
One approach to overcoming this is to pass the audio captured from the microphone to the analyser node via a script processor node that re-samples the audio passing through it.
Brief overview of the script processor node:
Every script processor node has an input buffer and an output buffer.
When audio enters the input buffer, the script processor node fires an onaudioprocess event.
Whatever is placed in the output buffer of the script processor node becomes its output.
For detailed specs, refer to: Script processor node
Here is the pseudo-code:
1) Create a live media source, a script processor node, and an analyser node
2) Connect the live media source to the analyser node via the script processor node
3) Whenever an audio packet enters the script processor node, an onaudioprocess event is fired
4) When the onaudioprocess event is fired:
4.1) Extract the audio data from the input buffer
4.2) Re-sample the audio data
4.3) Place the re-sampled data in the output buffer
The following code snippet implements the above pseudocode:
var microphone;
// *** 1) create a script processor node
var scriptProcessorNode = audioCtx.createScriptProcessor(4096, 1, 1);
function getMicInputState()
{
let selectedValue = document.getElementById("micOffOn").value;
if (selectedValue === "on") {
navigator.mediaDevices.getUserMedia({audio: true})
.then(stream => {
microphone = audioCtx.createMediaStreamSource(stream);
// *** 2) connect live media source to analyserNode via script processor node
microphone.connect(scriptProcessorNode);
scriptProcessorNode.connect(analyserNode);
})
.catch(err => { alert("Microphone is required."); });
} else {
microphone.disconnect();
}
}
// *** 3) Whenever an audio packet passes through script processor node, resample it
scriptProcessorNode.onaudioprocess = function(event){
var inputBuffer = event.inputBuffer;
var outputBuffer = event.outputBuffer;
for(var channel = 0; channel < outputBuffer.numberOfChannels; channel++){
var inputData = inputBuffer.getChannelData(channel);
var outputData = outputBuffer.getChannelData(channel);
// *** 3.1) Resample inputData
var fromSampleRate = audioCtx.sampleRate;
var toSampleRate = 8000;
var resampledAudio = downsample(inputData, fromSampleRate, toSampleRate);
// *** 3.2) make output equal to the resampled audio
for (var sample = 0; sample < outputData.length; sample++) {
outputData[sample] = resampledAudio[sample];
}
}
}
function downsample(buffer, fromSampleRate, toSampleRate) {
// buffer is a Float32Array
var sampleRateRatio = Math.round(fromSampleRate / toSampleRate);
var newLength = Math.round(buffer.length / sampleRateRatio);
var result = new Float32Array(newLength);
var offsetResult = 0;
var offsetBuffer = 0;
while (offsetResult < result.length) {
var nextOffsetBuffer = Math.round((offsetResult + 1) * sampleRateRatio);
var accum = 0, count = 0;
for (var i = offsetBuffer; i < nextOffsetBuffer && i < buffer.length; i++) {
accum += buffer[i];
count++;
}
result[offsetResult] = accum / count;
offsetResult++;
offsetBuffer = nextOffsetBuffer;
}
return result;
}
Update - 03 Nov, 2020
ScriptProcessorNode is deprecated and is being replaced by AudioWorklets.
The approach to changing the sample rate remains the same.
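For reference, a minimal AudioWorklet version of the same averaging downsample might look like the untested sketch below; the processor name is arbitrary, and instead of writing into an output buffer it posts the 8 kHz samples back to the main thread (where you could run your own FFT or feed a visualizer):
// downsample-processor.js - registered via audioCtx.audioWorklet.addModule(...)
// and instantiated with new AudioWorkletNode(audioCtx, 'downsample-processor')
class DownsampleProcessor extends AudioWorkletProcessor {
  process(inputs, outputs) {
    const input = inputs[0][0]; // first channel of the first input (128 samples)
    if (input) {
      // same averaging downsample as the ScriptProcessor version above
      const ratio = Math.round(sampleRate / 8000); // sampleRate = context rate
      const result = new Float32Array(Math.round(input.length / ratio));
      let offsetResult = 0, offsetBuffer = 0;
      while (offsetResult < result.length) {
        const next = Math.round((offsetResult + 1) * ratio);
        let accum = 0, count = 0;
        for (let i = offsetBuffer; i < next && i < input.length; i++) {
          accum += input[i];
          count++;
        }
        result[offsetResult] = accum / count;
        offsetResult++;
        offsetBuffer = next;
      }
      this.port.postMessage(result); // hand the resampled chunk to the main thread
    }
    return true;
  }
}
registerProcessor('downsample-processor', DownsampleProcessor);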
Specifying a lower sample rate in the AudioContext constructor and connecting an AnalyserNode is now possible in Chrome and Safari.
So the following code, taken from the corresponding MDN documentation, would work:
const audioContext = new (window.AudioContext || window.webkitAudioContext)({
sampleRate: 8000
});
const mediaStream = await navigator.mediaDevices.getUserMedia({
audio: true,
video: false
});
const mediaStreamSource = audioContext.createMediaStreamSource(mediaStream);
const analyser = audioContext.createAnalyser();
analyser.fftSize = 256;
const bufferLength = analyser.frequencyBinCount;
const dataArray = new Uint8Array(bufferLength);
analyser.getByteFrequencyData(dataArray);
mediaStreamSource.connect(analyser);
const title = document.createElement("div");
title.innerText = `Sampling frequency 8kHz:`;
const wrapper = document.createElement("div");
const canvas = document.createElement("canvas");
wrapper.appendChild(canvas);
document.body.appendChild(title);
document.body.appendChild(wrapper);
const canvasCtx = canvas.getContext("2d");
function draw() {
requestAnimationFrame(draw);
analyser.getByteFrequencyData(dataArray);
canvasCtx.fillStyle = "rgb(0, 0, 0)";
canvasCtx.fillRect(0, 0, canvas.width, canvas.height);
var barWidth = canvas.width / bufferLength;
var barHeight = 0;
var x = 0;
for (var i = 0; i < bufferLength; i++) {
barHeight = dataArray[i] / 2;
canvasCtx.fillStyle = "rgb(" + (2 * barHeight + 100) + ",50,50)";
canvasCtx.fillRect(x, canvas.height - barHeight / 2, barWidth, barHeight);
x += barWidth + 1;
}
}
draw();
See here for a demo where both 48kHz and 8kHz sampled signal frequencies are displayed: https://codesandbox.io/s/vibrant-moser-cfex33
I need to layer looping .wav tracks, and ultimately I will need to be able to turn them on and off while keeping them in sync.
First I load the tracks and stop BufferLoader from turning the loaded ArrayBuffer into an AudioBuffer (hence the false):
function loadTracks(data) {
for (var i = 0; i < data.length; i++) {
trackUrls.push(data[i]['url']);
};
bufferLoader = new BufferLoader(context, trackUrls, finishedLoading);
bufferLoader.load(false);
return loaderDefered.promise;
}
When you click a button on screen it calls startStop().
function startStop(index, name, isPlaying) {
if(!activeBuffer) {
activeBuffer = bufferList[index];
}else{
activeBuffer = appendBuffer(activeBuffer, bufferList[index]);
}
context.decodeAudioData(activeBuffer, function(buffer){
audioBuffer = buffer;
play();
})
function play() {
var scheduledTime = 0.015;
try {
audioSource.stop(scheduledTime);
} catch (e) {}
audioSource = context.createBufferSource();
audioSource.buffer = audioBuffer;
audioSource.loop = true;
audioSource.connect(context.destination);
var currentTime = context.currentTime + 0.010 || 0;
audioSource.start(scheduledTime - 0.005, currentTime, audioBuffer.duration - currentTime);
audioSource.playbackRate.value = 1;
}
Most of the code I found on this guy's GitHub.
In the demo you can hear he is layering AudioBuffers.
I have tried the same on my hosting.
Disregarding the AngularJS stuff, the Web Audio stuff is happening in service.js at:
/js/angular/service.js
If you open the console and click the buttons, you can see that activeBuffer.byteLength (an ArrayBuffer) is growing. However, even after being decoded by the context.decodeAudioData method, it still only plays the first sound you clicked instead of a merged AudioBuffer.
I'm not sure I totally understand your scenario - don't you want these to be playing simultaneously? (i.e. bass gets layered on top of the drums).
Your current code is trying to concatenate an additional audio file whenever you hit the button for that file. You can't just concatenate audio files (in their ENCODED form) and then run it through decode - the decodeAudioData method is decoding the first complete sound in the arraybuffer, then stopping (because it's done decoding the sound).
What you should do is change the logic to concatenate the buffer data from the resulting AudioBuffers (see below). Even this logic isn't QUITE what you should do - this is still caching the encoded audio files, and decoding every time you hit the button. Instead, you should cache the decoded audio buffers and just concatenate those (see the sketch after the code below).
function startStop(index, name, isPlaying) {
// Note we're decoding just the new sound
context.decodeAudioData( bufferList[index], function(buffer){
// We have a decoded buffer - now we need to concatenate it
if(!audioBuffer) {
audioBuffer = buffer;
}else{
audioBuffer = concatenateAudioBuffers(audioBuffer, buffer);
}
play();
})
}
function concatenateAudioBuffers(buffer1, buffer2) {
if (!buffer1 || !buffer2) {
console.log("no buffers!");
return null;
}
if (buffer1.numberOfChannels != buffer2.numberOfChannels) {
console.log("number of channels is not the same!");
return null;
}
if (buffer1.sampleRate != buffer2.sampleRate) {
console.log("sample rates don't match!");
return null;
}
var tmp = context.createBuffer(buffer1.numberOfChannels, buffer1.length + buffer2.length, buffer1.sampleRate);
for (var i=0; i<tmp.numberOfChannels; i++) {
var data = tmp.getChannelData(i);
data.set(buffer1.getChannelData(i));
data.set(buffer2.getChannelData(i),buffer1.length);
}
return tmp;
};
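And here is roughly what caching the decoded buffers would look like (sketch only; it reuses the bufferList, audioBuffer, concatenateAudioBuffers and play names from this thread and assumes bufferList holds the un-decoded ArrayBuffers from the loader):
var decodedBuffers = []; // AudioBuffers, decoded once at load time

function decodeAllTracks(onReady) {
  var remaining = bufferList.length;
  bufferList.forEach(function (arrayBuffer, index) {
    context.decodeAudioData(arrayBuffer, function (decoded) {
      decodedBuffers[index] = decoded;
      if (--remaining === 0) onReady();
    });
  });
}

function startStop(index) {
  // no decoding here - just concatenate already-decoded buffers
  if (!audioBuffer) {
    audioBuffer = decodedBuffers[index];
  } else {
    audioBuffer = concatenateAudioBuffers(audioBuffer, decodedBuffers[index]);
  }
  play();
}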
SOLVED:
Here's how to get multiple loops of the same duration playing at the same time and staying in sync, even when you start and stop them randomly.
First, create all your buffer sources, where bufferList is an array of AudioBuffers and the first sound is the one you are going to read from and overwrite with your other sounds.
function createAllBufferSources() {
for (var i = 0; i < bufferList.length; i++) {
var source = context.createBufferSource();
source.buffer = bufferList[i];
source.loop = true;
bufferSources.push(source);
};
console.log(bufferSources)
}
Then:
function start() {
var rewrite = bufferSources[0];
rewrite.connect(context.destination);
var processNode = context.createScriptProcessor(2048, 2, 2);
rewrite.connect(processNode)
processNode.onaudioprocess = function(e) {
//getting the left and right of the sound we want to overwrite
var left = rewrite.buffer.getChannelData(0);
var right = rewrite.buffer.getChannelData(1);
var overL = [],
overR = [],
i, a, b, l;
l = bufferList.length;
//storing all the loops channel data
for (i = 0; i < l; i++) {
overL[i] = bufferList[i].getChannelData(0);
overR[i] = bufferList[i].getChannelData(1);
}
//looping through the channel data of the sound we are going to overwrite
a = 0, b = overL.length, l = left.length;
for (i = 0; i < l; i++) {
//making sure its a blank before we start to write
left[i] -= left[i];
right[i] -= right[i];
//looping through all the sounds we want to add and assigning the bytes to the old sound, both at the same position
for (a = 0; a < b; a++) {
left[i] += overL[a][i];
right[i] += overR[a][i];
}
left[i] /= b;
right[i] /= b;
}
};
processNode.connect(context.destination);
rewrite.start(0)
}
If you remove an AudioBuffer from bufferList and add it again at any point, it will always stay in sync.
EDIT:
Keep in mind that:
- The script processor node gets garbage collected weirdly (see below for a workaround).
- This is very taxing; you might want to think about using Web Workers somehow.
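For the garbage-collection issue, the usual workaround seems to be simply keeping a long-lived reference to the node, for example:
// keep the node reachable so it isn't collected while audio is playing
window.keepAliveProcessNode = processNode;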