Gradually Change Web Audio API Panner - javascript

I'm trying to use a simple HTML range input to control the panning of my Web Audio API audio, but I can only get 3 "positions" for my audio output:
- Center
- 100% to the left
- 100% to the right
I would like to have something in between those positions, like 20% left and 80% right, and so on...
The code that I'm using is:
//Creating the node
var pannerNode = context.createPanner();
//Getting the value from the HTML input and using it on the position X value
document.getElementById('panInput').addEventListener('change', function () {
pannerNode.setPosition(this.value, 0, 0);
});
And it refers to this input on my HTML file:
<input id="panInput" type="range" min="-1" max="1" step="0.001" value="0"/>
Does anyone know what I'm doing wrong?

You shouldn't need to use two panners - the panner is stereo. This old answer is a great one for this question:
How to create very basic left/right equal power panning with createPanner();
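If you want to stick with your existing PannerNode, the trick from that linked answer amounts to something like this (a minimal sketch, assuming the 'equalpower' panning model and your existing range input):
//Use the equal-power model and derive the z coordinate from the slider value,
//so in-between positions actually produce in-between panning.
var pannerNode = context.createPanner();
pannerNode.panningModel = 'equalpower';
document.getElementById('panInput').addEventListener('change', function () {
    var x = parseFloat(this.value);  // -1 (full left) .. 1 (full right)
    var z = 1 - Math.abs(x);         // keep the source in front of the listener
    pannerNode.setPosition(x, 0, z);
});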

I've actually found simple left/right panning to be kind of difficult with the Web Audio API. It's really set up for surround / spatial stuff, and I honestly don't understand it very well.
The way that I usually do panning is like this:
var panLeft = context.createGain();
var panRight = context.createGain();
var merger = context.createChannelMerger(2);
source.connect(panLeft);
source.connect(panRight);
panLeft.connect(merger, 0, 0);
panRight.connect(merger, 0, 1);
merger.connect(context.destination);
document.getElementById('panInput').addEventListener('change', function () {
var val = this.value;
panLeft.gain.value = ( val * -0.5 ) + 0.5;
panRight.gain.value = ( val * 0.5 ) + 0.5;
});
Basically, you send the signal to two gain nodes that you're going to use as your left and right channel. Then you take the value from your range element and use it to set the gain on each of the nodes.
This is sort of the lazy version though. In serious audio apps, there's usually a bit more math involved with the panning to make sure there aren't changes in overall level -- but hopefully this is enough to get you started.
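If you want to avoid that change in overall level, one common refinement (a sketch, not part of the answer above) is equal-power panning with sine/cosine curves:
document.getElementById('panInput').addEventListener('change', function () {
    // Map -1..1 onto 0..PI/2; cos^2 + sin^2 = 1, so the combined power stays constant.
    var angle = (parseFloat(this.value) + 1) * Math.PI / 4;
    panLeft.gain.value  = Math.cos(angle);
    panRight.gain.value = Math.sin(angle);
});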

I'm quite sure there is a better and easier way to do this but, for now, it definitely works for me.
If anyone else has a better/cleaner way of doing it, please share it here!
Thanks to Kevin Ennis for giving me this hint!
JavaScript File
//Create a splitter to "separate" the stereo audio data into two channels.
var splitter = context.createChannelSplitter(2);
//Connect your source to the splitter (usually you will do this with the last audio node before the context destination)
audioSource.connect(splitter);
//Create two gain nodes (one for each side of the stereo image)
var panLeft = context.createGain();
var panRight = context.createGain();
//Connect the splitter channels to the Gain Nodes that we've just created
splitter.connect(panLeft, 0);
splitter.connect(panRight, 1);
//Get the value from a "range" input in the HTML (the markup for this range input is shown at the end of this code)
var panPosition = document.getElementById("dispPanPositionLiveInput");
document.getElementById('panControl').addEventListener('change', function () {
var val = this.value;
panPosition.value = val;
panLeft.gain.value = ( val * -0.5 ) + 0.5;
panRight.gain.value = ( val * 0.5 ) + 0.5;
});
//Create a merger node, to get both signals back together
var merger = context.createChannelMerger(2);
//Connect both channels to the Merger
panLeft.connect(merger, 0, 0);
panRight.connect(merger, 0, 1);
//Connect the Merger Node to the final audio destination (your speakers)
merger.connect(context.destination);
HTML File
<input id="panControl" type="range" min="-1" max="1" step="0.001" value="0"/>

Related

Can I change the interval of the pitch shift while playing audio using tone.js?

I'll show you my code
audio_music = new Audio();
var track = audioContext.createMediaElementSource(audio_music);
//Import music files from other sources into base64 form.
audio_music.src = "data:audio/ogg;base64,"+ data.music;
var splitter = audioContext.createChannelSplitter(6);
var merger = audioContext.createChannelMerger(2);
track.connect(splitter);
//gain nodes other than 0 and 1 are omitted, since they repeat the same pattern
gainNode0 = audioContext.createGain(); gainNode0.gain.setValueAtTime((musicvolume*0.1), audioContext.currentTime);
gainNode1 = audioContext.createGain(); gainNode1.gain.setValueAtTime((musicvolume*0.1), audioContext.currentTime);
splitter.connect(gainNode0, 0);
splitter.connect(gainNode1, 1);
var pitchshift0 = new Tone.PitchShift(pitch);
var pitchshift1 = new Tone.PitchShift(pitch);
Tone.connect(gainNode0, pitchshift0);
Tone.connect(gainNode1, pitchshift1);
Tone.connect(pitchshift0, merger, 0, 0);
Tone.connect(pitchshift1, merger, 0, 1);
Tone.connect(merger, audioContext.destination);
I am not familiar with the use of audioContext and Tone.js, so I don't know if I understand this correctly, but my intention is to split the input source into six channels and process each one through gain adjustment, then pitch shifting, and then merging.
Everything else works, but I can't change the value of the pitch shift during playback.
I want something that works like the setValueAtTime method used on the GainNode, but for the pitch shift.
What should I do?
You can change the pitch by setting the pitch parameter:
pitchshift0.pitch = -12 // Semitone to shift the pitch to.
If you want to set this at a specific time during playback, you can use the Transport class to schedule this:
Tone.Transport.schedule(() => pitchshift0.pitch = -12, time /* The transport time you want to schedule it at */);
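As a minimal sketch (the two-second offset and the -12 value are just examples), you could schedule the change and then start the transport:
// Drop both channels by 12 semitones two seconds after the transport starts.
Tone.Transport.schedule((time) => {
  pitchshift0.pitch = -12;
  pitchshift1.pitch = -12;
}, 2);
Tone.Transport.start();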

How to convert dual channel audio to mono with ability to control each channel volume?

I'm trying to listen to 2-channel audio in both headphones (the same audio in the left and right headphone).
Current situation:
Slider is centered - works perfectly well (both channels in both headphones).
Slider is on the right - works perfectly well (right channel in both headphones).
Slider is on the left - doesn't work (left channel only in left headphone).
const splitter = wavesurfer.backend.ac.createChannelSplitter(2);
const merger = wavesurfer.backend.ac.createChannelMerger(2);
const leftGain = wavesurfer.backend.ac.createGain();
const rightGain = wavesurfer.backend.ac.createGain();
const panner = wavesurfer.backend.ac.createPanner();
splitter.connect(leftGain, 0);
splitter.connect(rightGain, 1);
leftGain.connect(merger, 0, 0);
rightGain.connect(merger, 0, 1);
merger.connect(panner);
let slider = document.querySelector('#Slider');
$(slider).change(function () {
rightGain.gain.value = Number(slider.value);
leftGain.gain.value = 1- (Number(slider.value));
})
wavesurfer.backend.setFilters([splitter, leftGain, rightGain, merger]);
When slider is on the left I want to hear only left channel in both headphones.
Can someone help me?
Problem
When calling setFilters(), wavesurfer.js will connect all provided nodes in a simple chain. In your case that means it will create additional connections like this:
splitter.connect(leftGain);
leftGain.connect(rightGain);
rightGain.connect(merger);
This is probably not what you want. But it is possible to make use of that behavior. I modified your example a bit.
const input = wavesurfer.backend.ac.createGain();
const splitter = wavesurfer.backend.ac.createChannelSplitter(2);
const merger = wavesurfer.backend.ac.createChannelMerger(2);
const leftGain = wavesurfer.backend.ac.createGain();
const rightGain = wavesurfer.backend.ac.createGain();
// This will make sure that a mono signal gets upmixed to stereo.
// If you always have stereo sound you can remove it.
input.channelCountMode = 'explicit';
// It is only necessary to connect the right channel
// because this is the one which needs optional parameters.
splitter.connect(rightGain, 1);
rightGain.connect(merger);
rightGain.connect(merger, 0, 1);
// Only the one connection which needs an optional parameter
// needs to be done for the left channel
leftGain.connect(merger, 0, 1);
// wavesurfer.js will connect everything else.
wavesurfer.backend.setFilters([ input, splitter, leftGain, merger ]);
I also added another GainNode as the first node to make sure the signal is upmixed to stereo in case it is mono. And I removed the PannerNode as it wasn't used in your example.

How to use web audio api to get raw pcm audio?

How do I use getUserMedia to access the microphone in Chrome and then stream it to get raw audio? I need to get the audio in linear16.
Unfortunately, the MediaRecorder doesn't support raw PCM capture. (A sad oversight, in my opinion.) Therefore, you'll need to get the raw samples and buffer/save them yourself.
You can do this with the ScriptProcessorNode. Normally, this Node is used to modify the audio data programmatically, for custom effects and what not. But, there's no reason you can't just use it as a capture point. Untested, but try something like this code:
const captureNode = audioContext.createScriptProcessor(8192, 1, 1);
captureNode.addEventListener('audioprocess', (e) => {
const rawLeftChannelData = e.inputBuffer.getChannelData(0);
// rawLeftChannelData is now a typed array with floating point samples
});
(You can find a more complete example on MDN.)
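To actually buffer the samples yourself, one approach (a sketch, untested like the above) is to copy each chunk as it arrives, since the buffer behind getChannelData() is reused between events:
const recordedChunks = [];
captureNode.addEventListener('audioprocess', (e) => {
  // Copy the chunk; the underlying buffer is recycled by the browser.
  recordedChunks.push(new Float32Array(e.inputBuffer.getChannelData(0)));
});
// Remember to wire the node up, or audioprocess never fires:
// source.connect(captureNode); captureNode.connect(audioContext.destination);

// Later, flatten the chunks into one Float32Array of raw samples.
function flattenChunks(chunks) {
  const total = chunks.reduce((sum, c) => sum + c.length, 0);
  const result = new Float32Array(total);
  let offset = 0;
  for (const c of chunks) {
    result.set(c, offset);
    offset += c.length;
  }
  return result;
}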
Those floating point samples are centered on zero and will ideally be bound to -1 and 1. When converting to an integer range, you'll want to clamp values to this range, clipping anything beyond it. (The values can sometimes exceed -1 and 1 when loud sounds are mixed together in-browser. In theory, the browser could also record float32 samples from an external sound device which exceed that range, but I don't know of any browser/platform that does this.)
When converting to integer, it matters if the values are signed or unsigned. If signed, for 16-bit, the range is -32768 to 32767. For unsigned, it's 0 to 65535. Figure out what format you want to use and scale the -1 to 1 values up to that range.
One final note on this conversion... endianness can matter. See also: https://stackoverflow.com/a/7870190/362536
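Putting that conversion together, a rough sketch for signed 16-bit little-endian PCM (the usual meaning of linear16) could look like this:
// Convert float samples (-1..1) to signed 16-bit PCM, little-endian.
function floatTo16BitPCM(float32Samples) {
  const buffer = new ArrayBuffer(float32Samples.length * 2);
  const view = new DataView(buffer);
  for (let i = 0; i < float32Samples.length; i++) {
    // Clamp first, since in-browser mixing can push values past -1..1.
    const s = Math.max(-1, Math.min(1, float32Samples[i]));
    view.setInt16(i * 2, s < 0 ? s * 0x8000 : s * 0x7FFF, true); // true = little-endian
  }
  return buffer;
}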
The only two examples I've found that are clear and make sense are the following:
AWS Labs: https://github.com/awslabs/aws-lex-browser-audio-capture/blob/master/lib/worker.js
The AWS resource is very good. It shows you how to export your recorded audio to "WAV format encoded as PCM". Amazon Lex, which is a transcription service offered by AWS, requires the audio to be PCM-encoded and wrapped in a WAV container. You can merely adapt some of the code to make it work for you! AWS has some additional features such as "downsampling", which allows you to change the sample rate without affecting the recording.
RecordRTC: https://github.com/muaz-khan/RecordRTC/blob/master/simple-demos/raw-pcm.html
RecordRTC is a complete library. You can, once again, adapt their code or find the snippet of code that encodes the audio to raw PCM. You could also implement their library and use the code as-is. Using the "desiredSampleRate" option for audio config with this library negatively affects the recording.
They are both excellent resources and you'll definitely be able to solve your question.
You should look into the MediaTrackConstraints.sampleSize property for the MediaDevices.getUserMedia() API. Using the sampleSize constraint, you can set the sample size to 16 bits if your audio hardware permits it.
As far as the implementation goes, well, that's what the links and Google are for...
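As a rough sketch (whether the constraint is honored depends on your hardware and browser):
navigator.mediaDevices.getUserMedia({
  audio: {
    sampleSize: 16,   // request 16-bit samples if the hardware allows it
    channelCount: 1
  }
}).then(function (stream) {
  // Feed the stream into the Web Audio graph as usual.
  var source = audioContext.createMediaStreamSource(stream);
});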
Here is some Web Audio API code that uses the microphone to capture and play back raw audio (turn down your volume before running this page). To see snippets of the raw audio in PCM format, view the browser console. For kicks it also sends this PCM into an FFT call to obtain the frequency domain as well as the time domain of the audio curve.
<html><head><meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1">
<title>capture microphone then show time & frequency domain output</title>
<script type="text/javascript">
var webaudio_tooling_obj = function () {
var audioContext = new AudioContext();
console.log("audio is starting up ...");
var BUFF_SIZE_RENDERER = 16384;
var SIZE_SHOW = 3; // number of array elements to show in console output
var audioInput = null,
microphone_stream = null,
gain_node = null,
script_processor_node = null,
script_processor_analysis_node = null,
analyser_node = null;
if (!navigator.getUserMedia)
navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia ||
navigator.mozGetUserMedia || navigator.msGetUserMedia;
if (navigator.getUserMedia){
navigator.getUserMedia({audio:true},
function(stream) {
start_microphone(stream);
},
function(e) {
alert('Error capturing audio.');
}
);
} else { alert('getUserMedia not supported in this browser.'); }
// ---
function show_some_data(given_typed_array, num_row_to_display, label) {
var size_buffer = given_typed_array.length;
var index = 0;
console.log("__________ " + label);
if (label === "time") {
for (; index < num_row_to_display && index < size_buffer; index += 1) {
var curr_value_time = (given_typed_array[index] / 128) - 1.0;
console.log(curr_value_time);
}
} else if (label === "frequency") {
for (; index < num_row_to_display && index < size_buffer; index += 1) {
console.log(given_typed_array[index]);
}
} else {
throw new Error("ERROR - must pass time or frequency");
}
}
function process_microphone_buffer(event) {
var i, N, inp, microphone_output_buffer;
// not needed for basic feature set
// microphone_output_buffer = event.inputBuffer.getChannelData(0); // just mono - 1 channel for now
}
function start_microphone(stream){
gain_node = audioContext.createGain();
gain_node.connect( audioContext.destination );
microphone_stream = audioContext.createMediaStreamSource(stream);
microphone_stream.connect(gain_node);
script_processor_node = audioContext.createScriptProcessor(BUFF_SIZE_RENDERER, 1, 1);
script_processor_node.onaudioprocess = process_microphone_buffer;
microphone_stream.connect(script_processor_node);
// --- enable volume control for output speakers
document.getElementById('volume').addEventListener('change', function() {
var curr_volume = this.value;
gain_node.gain.value = curr_volume;
console.log("curr_volume ", curr_volume);
});
// --- setup FFT
script_processor_analysis_node = audioContext.createScriptProcessor(2048, 1, 1);
script_processor_analysis_node.connect(gain_node);
analyser_node = audioContext.createAnalyser();
analyser_node.smoothingTimeConstant = 0;
analyser_node.fftSize = 2048;
microphone_stream.connect(analyser_node);
analyser_node.connect(script_processor_analysis_node);
var buffer_length = analyser_node.frequencyBinCount;
var array_freq_domain = new Uint8Array(buffer_length);
var array_time_domain = new Uint8Array(buffer_length);
console.log("buffer_length " + buffer_length);
script_processor_analysis_node.onaudioprocess = function() {
// get the average for the first channel
analyser_node.getByteFrequencyData(array_freq_domain);
analyser_node.getByteTimeDomainData(array_time_domain);
// draw the spectrogram
if (microphone_stream.playbackState == microphone_stream.PLAYING_STATE) {
show_some_data(array_freq_domain, SIZE_SHOW, "frequency");
show_some_data(array_time_domain, SIZE_SHOW, "time"); // store this to record to aggregate buffer/file
}
};
}
}(); // webaudio_tooling_obj = function()
</script>
</head>
<body>
<p>Volume</p>
<input id="volume" type="range" min="0" max="1" step="0.1" value="0.0"/>
<p> </p>
<button onclick="webaudio_tooling_obj()">start audio</button>
</body>
</html>
NOTICE - before running the above in your browser, first turn down your volume, as the code both listens to your microphone and sends real-time output to the speakers, so naturally you will hear feedback (as in Jimi Hendrix feedback).

Controlling mixing with an oscillator or lfo

I have two oscillators with different waveshapes (triangle and square):
var oscTri = audioCtx.createOscillator();
var oscSqu = audioCtx.createOscillator();
oscTri.type = 'triangle';
oscSqu.type = 'square';
var mixTri = audioCtx.createGain();
var mixSqu = audioCtx.createGain();
oscTri.connect(mixTri);
oscSqu.connect(mixSqu);
mixTri.connect(audioCtx.destination);
mixSqu.connect(audioCtx.destination);
I'd like to control the mixing of the two with a third oscillator so the output sound oscillates between the two (when the triangle gain is 1, the square is 0; when the triangle is 0.5, the square is 0.5; when the triangle is 0.75, the square is 0.25; and so on):
var modOsc = audioCtx.createOscillator();
How I can connect this modulator oscillator to have an "oscillation" between the two previous waveforms?
Set mixTri to 1 and mixSqu to -1, then connect modOsc to the gain AudioParam of each gain node; that should do the trick. Personally I would use filters because I like them more. I made an example for you at https://gtube.de (my site): click on PUBLISH / SYNTHY DATABASE to open your example, then press the letter A to hear the effect. You can see the setup on the Synthesizer tab. My site doesn't work with the gain nodes because they are not fixed (it allows more than one key to be pressed), but with only gain nodes it should work as well.
Cheers
Kilian
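If you stay with gain nodes, one way to wire it up (a sketch; the base gain of 0.5, depth of 0.5, and LFO rate are assumptions, not part of the answer above) is to centre both gains at 0.5 and drive them with the LFO in opposite directions:
var modOsc = audioCtx.createOscillator();
modOsc.frequency.value = 0.5; // LFO rate in Hz (assumption)
// Scale the LFO output (-1..1) down to +/-0.5, inverted for one branch.
var depthTri = audioCtx.createGain();
var depthSqu = audioCtx.createGain();
depthTri.gain.value = 0.5;
depthSqu.gain.value = -0.5;
// Centre both mixes at 0.5 so the LFO sweeps them between 0 and 1 in opposite directions.
mixTri.gain.value = 0.5;
mixSqu.gain.value = 0.5;
modOsc.connect(depthTri);
modOsc.connect(depthSqu);
depthTri.connect(mixTri.gain);
depthSqu.connect(mixSqu.gain);
modOsc.start();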

Canvas getImageData() For optimal performance. To pull out all data or one at a time?

I need to scan through every pixel in a canvas image and do some fiddling with the colors etc. For optimal performance, should I grab all the data in one go and work on it through the array? Or should I call each pixel as I work on it.
So basically...
data = context.getImageData(x, y, width, height);
VS
data = context.getImageData(x, y, 1, 1); //in a loop height*width times.
You'll get much higher performance by grabbing the image all at once, since:
a) a (contiguous) access to an array is way faster than a function call,
b) especially when this function is a method of a DOM object with some overhead,
c) and there might be buffer refresh issues that delay the response (if the canvas is in sight... or not, depending on the double buffering implementation).
So go for a one-time grab.
I'd suggest you look into JavaScript Typed Arrays to get the most out of the imageData result.
If I may quote myself, look at how you can handle pixels fast in this old post of mine (look after 2) ):
Nice ellipse on a canvas?
(I quoted the relevant part below:)
You can get a UInt32Array view on your ImageData with :
var myGetImageData = myTempCanvas.getImageData(0,0,sizeX, sizeY);
var sourceBuffer32 = new Uint32Array(myGetImageData.data.buffer);
then sourceBuffer32[i] contains Red, Green, Blue, and transparency packed into one unsigned 32 bit int. Compare it to 0 to know if pixel is non-black ( != (0,0,0,0) )
OR you can be more precise with a Uint8Array view :
var myGetImageData = myTempCanvas.getImageData(0,0,sizeX, sizeY);
var sourceBuffer8 = new Uint8Array(myGetImageData.data.buffer);
If you deal only with shades of grey, then R=G=B, so watch for
sourceBuffer8[4*i]>Threshold
and you can set the i-th pixel to black in one time using the UInt32Array view :
sourceBuffer32[i]=0xff000000;
set to any color/alpha with :
sourceBuffer32[i]= (A<<24) | (B<<16) | (G<<8) | R ;
or just to any color :
sourceBuffer32[i]= 0xff000000 | (B<<16) | (G<<8) | R ;
(be sure R is rounded).
Listening to @Ken's comment: yes, endianness can be an issue when you start fighting with bits 32 at a time.
Most computers use little-endian, so RGBA becomes ABGR when dealing with them 32 bits at once.
Since that is the vast majority of systems, assume this is the case when dealing with 32-bit integers, and you can, for compatibility, reverse your computation before writing the 32-bit results on big-endian systems.
Let me share those two functions :
function isLittleEndian() {
// from TooTallNate / endianness.js. https://gist.github.com/TooTallNate/4750953
var b = new ArrayBuffer(4);
var a = new Uint32Array(b);
var c = new Uint8Array(b);
a[0] = 0xdeadbeef;
if (c[0] == 0xef) { isLittleEndian = function() {return true }; return true; }
if (c[0] == 0xde) { isLittleEndian = function() {return false }; return false; }
throw new Error('unknown endianness');
}
function reverseUint32 (uint32) {
var s32 = new Uint32Array(4);
var s8 = new Uint8Array(s32.buffer);
var t32 = new Uint32Array(4);
var t8 = new Uint8Array(t32.buffer);
reverseUint32 = function (x) {
s32[0] = x;
t8[0] = s8[3];
t8[1] = s8[2];
t8[2] = s8[1];
t8[3] = s8[0];
return t32[0];
}
return reverseUint32(uint32);
};
Additionally to what GameAlchemist said, if you want to get or set all the colors of a pixel simultaneously, but you don't want to check endianness, you can use a DataView:
var data = context.getImageData(0, 0, canvas.width, canvas.height);
var view = new DataView(data.data.buffer);
// Read or set pixel (x,y) as #RRGGBBAA (big endian)
view.getUint32(4 * (x + y*canvas.width));
view.setUint32(4 * (x + y*canvas.width), 0xRRGGBBAA);
// Read or set pixel (x,y) as #AABBGGRR (little endian)
view.getUint32(4 * (x + y*canvas.width), true);
view.setUint32(4 * (x + y*canvas.width), 0xAABBGGRR, true);
// Save changes
context.putImageData(data, 0, 0);
It depends on what exactly you're doing, but I'd suggest grabbing it all at once and then looping through it.
Grabbing it all at once is faster than grabbing it pixel by pixel, since looping through an array is a lot faster than querying the canvas once for each pixel.
If you're really in need of speed, look into web workers. You can set each one to work on a specific section of the canvas, and since they can run simultaneously, they'll make much better use of your CPU.
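As a rough illustration of the grab-once approach (inverting every pixel is just a stand-in for whatever color fiddling you need):
var imageData = context.getImageData(0, 0, canvas.width, canvas.height);
var data = imageData.data; // Uint8ClampedArray laid out as R,G,B,A, R,G,B,A, ...
for (var i = 0; i < data.length; i += 4) {
  data[i]     = 255 - data[i];     // R
  data[i + 1] = 255 - data[i + 1]; // G
  data[i + 2] = 255 - data[i + 2]; // B
  // data[i + 3] is alpha, left untouched
}
context.putImageData(imageData, 0, 0);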
getImageData() isn't really slow enough for you to notice the difference between grabbing it all at once or pixel by pixel, in my experience using the function.
