I am trying to figure out how to read the current value of an AudioParam. When an AudioParam is being modified by an AudioNode through AudioNode.connect(AudioParam), it doesn't seem to affect AudioParam.value.
Here is an example:
I have an oscillator (source) connected to a gainNode (gain). I have another oscillator (mod) routed to a gainNode (modAmp). ModAmp is then connected to gain.gain. I also have a meter for gain.gain, changing the textbox to display gain.gain.value. When we play the oscillator, the gain is audibly moving up and down, but the meter stays constant at the original setting. How can I get a real-time reading of the AudioParam?
http://jsfiddle.net/eliotw/3o0d0ovs/4/
(please note that you have to run the script every time you want to run an oscillator)
// Create the audio context, falling back to the WebKit-prefixed constructor.
window.AudioContext = window.AudioContext || window.webkitAudioContext;
var context = new window.AudioContext();
// Audible chain: oscillator -> gain -> speakers.
var source = context.createOscillator();
var gain = context.createGain();
source.connect(gain);
gain.connect(context.destination);
// Modulation chain: LFO oscillator -> depth gain -> the gain.gain AudioParam.
var mod = context.createOscillator();
var modAmp = context.createGain();
mod.connect(modAmp);
modAmp.connect(gain.gain);
// Audible tone settings: 220 Hz at half volume.
source.frequency.value = 220;
gain.gain.value = 0.5;
// Modulator settings: 6 Hz wobble with 0.5 depth.
mod.frequency.value = 6;
modAmp.gain.value = 0.5;
// Start both oscillators immediately.
source.start(0);
mod.start(0);
// Poll the param every 600 ms. Note that .value only reflects values set
// from script, not audio-rate modulation coming in from connected nodes.
setInterval(function () {
  console.log(gain.gain.value);
}, 600);
You can't, really. The only way is to connect it to a ScriptProcessorNode or an AnalyserNode and look at the output bits. The latter (using getFloatTimeDomainData) is probably how I'd do it.
Related
Here's my basic example, I couldn't put it in a snippet as it generate a security error.
The problem is that the processing rate is a bit too high compared to my needs, which are about 300 to 500 milliseconds between each reading. Is there a way to control it?
And is there a way to pause the processing until the microphone receives an input?
Thank you for your help.
html out that shows the rate:
<input type='text' id='output' >
the script:
// Feature-detect getUserMedia across vendor prefixes.
navigator.getUserMedia = navigator.getUserMedia ||
  navigator.webkitGetUserMedia ||
  navigator.mozGetUserMedia;
if (navigator.getUserMedia) {
  navigator.getUserMedia({
      audio: true
    },
    // Success callback: wire microphone -> analyser -> script processor.
    function (stream) {
      // Declared with `var` so these no longer leak as implicit globals.
      var output = document.getElementById("output");
      var audioContext = new AudioContext();
      var analyser = audioContext.createAnalyser();
      var microphone = audioContext.createMediaStreamSource(stream);
      // A 256-frame buffer means onaudioprocess fires every ~5 ms at 44.1 kHz.
      var javascriptNode = audioContext.createScriptProcessor(256, 1, 1);
      analyser.smoothingTimeConstant = 0; // 0.8;
      analyser.fftSize = 32; // 1024;
      microphone.connect(analyser);
      analyser.connect(javascriptNode);
      javascriptNode.connect(audioContext.destination);
      // FIX: the original grabbed "#canvas" unconditionally; with the HTML
      // shown (only the #output input exists) querySelector returns null and
      // .getContext("2d") throws, killing this entire callback. Guard it.
      var canvas = document.querySelector("#canvas");
      var canvasContext = canvas ? canvas.getContext("2d") : null;
      javascriptNode.onaudioprocess = function () {
        var array = new Uint8Array(analyser.frequencyBinCount);
        analyser.getByteFrequencyData(array);
        var values = 0;
        var length = array.length;
        for (var i = 0; i < length; i++) {
          values += array[i];
        }
        // Average byte magnitude (0-255) across all frequency bins.
        var average = values / length;
        output.value = average;
      }; // end onaudioprocess
    },
    // Error callback.
    function (err) {
      console.log("The following error occurred: " + err.name);
    });
} else {
  console.log("getUserMedia not supported");
}
What I'm trying to do is really simple. All I need is to scroll the page by a bit whenever the audio volume passes a threshold. If you have a simpler alternative, it would be even better — like how to access the volume in a setTimeout callback.
You create your ScriptProcessorNode with a buffer size of 256. That means the onaudioprocess event gets called every 256 frames, which is about every 5 ms (at 44.1 kHz). If you want something on the order of 300 ms, you need about 0.3 × 44100 ≈ 13230 frames; since the buffer size must be a power of two, use 16384.
Note also that you don't need to call the analyser node to get the data. The onaudioprocess function already has the data passed in in the event, which you don't use.
Also, depending on your use case, you could get rid of the script processor altogether and just use the analyser node to get the data you want. But then you'll need a setTimeout or requestAnimationFrame to periodically request data from the analyser node.
I am implementing a synthesizer which uses the nodes of the audio-api to generate sound and my goal is to visualize it using p5.
I currently have a script that analyzes audio with fft and visualizes the frequencies with bars. My audio input at the moment is a locally saved song but I need to change it, so it uses the audiocontext as input.
Currently I can get the audiocontext with p5's own method getAudioContext() but then I have no clue how to set it as input for the visualization.
I know the API has a createBuffer()-Method but I haven't found a way to use it as input for p5.
// Globals shared between the p5 lifecycle callbacks below.
var fft;          // p5.FFT analyser instance, created in setup()
var button;       // play/pause button (p5 DOM element)
var song;         // p5.SoundFile loaded in preload()
var slider;       // volume slider (p5 DOM element), range 0..1
var audiocontext; // AudioContext obtained from p5 via getAudioContext()
var out;          // NOTE(review): unused in the visible code — confirm purpose
var prue;         // NOTE(review): unused in the visible code — confirm purpose
var source;       // raw WebAudio AudioBufferSourceNode created in setup()
// p5 preload: load the song and build the UI controls before setup() runs.
function preload(){
  song = loadSound("src/media/Chopin - Nocturne op.9 No.2.mp3");
  button = createButton("Play");
  button.mousePressed(togglePlaying);
  slider = createSlider(0, 1, 0.5, 0.01);
  // FIX: assign the declared global directly. The original wrote
  // `this.audiocontext`, which only worked because `this` happens to be
  // `window` in a sloppy-mode global p5 sketch (and breaks in strict mode).
  audiocontext = getAudioContext();
}
// p5 setup: create the canvas, the FFT analyser, and a raw buffer source.
function setup() {
  createCanvas(windowWidth, windowHeight);
  fft = new p5.FFT(0.8);
  // FIX: the original referenced an undefined variable `context` here (and
  // again below), throwing a ReferenceError; the sketch's context is the
  // global `audiocontext` set in preload().
  source = audiocontext.createBufferSource();
  widthBand = (width / 128); // implicit global, read by draw()
  source.connect(audiocontext.destination);
}
// p5 draw loop: render the FFT spectrum as one vertical bar per bin.
function draw() {
  background(61);
  var spectrum = fft.analyze();
  noStroke();
  for (var i = 0; i < spectrum.length; i++) {
    var amp = spectrum[i];
    // Map amplitude (0-255) to a bar rising from the bottom of the canvas.
    var y = map(amp, 0, 256, height, 0);
    fill(i, 255, 255);
    rect(i * widthBand, y, widthBand - 2, height - y);
  }
  // Set volume according to the slider.
  // FIX: AudioContext has no setVolume() method, so the original line threw
  // a TypeError on every frame; set the volume on the playing sound instead.
  song.setVolume(slider.value());
}
//Play/Pause Button
// Play/Pause button handler: toggle playback and keep the label in sync.
function togglePlaying(){
  if (song.isPlaying()) {
    song.pause();
    button.html("Play");
  } else {
    song.play();
    button.html("Pause");
  }
}
Any help would be very appreciated!
An AudioContext is not an input itself, but contains one or more input nodes (and outputs and connections and ...). P5 creates its own AudioContext and operates inside of that.
So, option one: build your app using p5 functionality only. It's a powerful library, all the needed tools (e.g. AudioIn(), MonoSynth() etc.) should be available.
Option two: initialize p5 first and then use p5 created audiocontext to add extra nodes, which can later be used by p5.
// Globals shared between p5's setup and our custom WebAudio wiring.
var cnv, fft, audiocontext, osc;

// p5 setup: let p5 build its objects first, then attach raw WebAudio nodes.
function setup() {
  cnv = createCanvas();
  fft = new p5.FFT(0.8);
  // getAudioContext() returns p5's context, creating one if none exists yet.
  // Keeping it in a global lets non-p5 code build nodes on the same graph.
  audiocontext = getAudioContext();
  // With the shared context in hand, create our own input nodes/filters...
  myCustomSetup();
  // ...then point the p5 analyser at one of the nodes we just created.
  fft.setInput(osc);
}

// Builds raw WebAudio nodes on p5's context. Anything created here can be
// connected freely to nodes produced by p5's own sound functions.
function myCustomSetup() {
  osc = audiocontext.createOscillator();
}
How could I convert sample rate of a buffer from 44100 to 48000 Hz in a browser?
I found a library https://github.com/taisel/XAudioJS/blob/master/resampler.js that should allow me to do that, but don't have an idea how to use it.
Use an offline audio context. Something like the following may work:
// Resample a 44.1 kHz buffer to 48 kHz by playing it through an
// OfflineAudioContext that runs at the target rate.
// FIX 1: the output context must hold the resampled length — `len` frames at
// 44.1 kHz become len * 48000 / 44100 frames at 48 kHz; the original reused
// `len` and truncated the tail of the audio.
var outLen = Math.ceil(len * 48000 / 44100);
var c = new OfflineAudioContext(1, outLen, 48000);
var b = c.createBuffer(1, len, 44100);
b.copyToChannel(yourSourceBuffer, 0);
var s = c.createBufferSource();
s.buffer = b;
// FIX 2: the original connected to `context.destination`, an undefined
// variable; the source must feed this offline context's destination.
s.connect(c.destination);
s.start();
c.startRendering().then(function (result) {
  // `result` is an AudioBuffer containing the audio resampled to 48000 Hz.
});
Depending the implementation, the quality of the resampled signal can vary quite a bit.
There seemed to be a bug in mobile safari which didn't decode the loaded audio correctly when the sample rate for the audio context was different than the sample rate for the audio file. Moreover, the sample rate for the audio context changed randomly from 44100 to 48000 usually but not always depending if the website was loading with the iPhone sound switched on or off.
The workaround for this issue is to read the sample rate of the audio context and then to load different audio files for each sample rate, like this:
// Create a context just to read back the hardware sample rate the browser
// actually chose (44100 or 48000, varying unpredictably on iOS).
window.AudioContext = window.AudioContext || window.webkitAudioContext;
var audio_context = new AudioContext();
var actual_sample_rate = audio_context.sampleRate;
// Only 44100 Hz and 48000 Hz asset sets exist; treat any other rate as 44.1 kHz.
if (actual_sample_rate != 48000) {
actual_sample_rate = 44100;
}
// Invoked by BufferLoader once every requested file has been decoded.
// `buffers` is presumably a global cache declared elsewhere — confirm.
function finished_loading_sounds(sounds) {
buffers['piano_do'] = {
buffer: sounds.piano,
rate: 1
};
// ...do something when the sounds are loaded...
}
// Load the asset that was pre-encoded at the context's own sample rate, so
// mobile Safari's decoder never has to resample (which distorted playback).
var buffer_loader = new BufferLoader(
audio_context,
{
piano: "audio/" + actual_sample_rate + "/piano.m4a",
},
finished_loading_sounds
);
buffer_loader.load();
The buffer loader is defined like in this tutorial.
To change the sample rate for the audio file, one can use Audacity.
UPDATE
It seems that even when you try to load a file with the correct sample rate, occasionally the sound still gets distorted on iOS devices.
To fix the issue, I found a hack for your AudioContext:
/**
 * Create an AudioContext, working around an iOS quirk where the context can
 * come up at the wrong hardware sample rate.
 *
 * @param {number} [desiredSampleRate=44100] - target sample rate in Hz
 * @returns {AudioContext} a context whose rate should match the target
 */
function createAudioContext(desiredSampleRate) {
  var AudioCtor = window.AudioContext || window.webkitAudioContext;
  if (typeof desiredSampleRate !== 'number') {
    desiredSampleRate = 44100;
  }
  var context = new AudioCtor();
  // The workaround is only needed on iOS 6+ devices, and only when the device
  // first booted (or last played audio/video) at a different sample rate.
  var needsFix = /(iPhone|iPad)/i.test(navigator.userAgent) &&
    context.sampleRate !== desiredSampleRate;
  if (needsFix) {
    // Play a one-sample dummy buffer at the desired rate, then recreate the
    // context; the replacement comes up at the right rate.
    var buffer = context.createBuffer(1, 1, desiredSampleRate);
    var dummy = context.createBufferSource();
    dummy.buffer = buffer;
    dummy.connect(context.destination);
    dummy.start(0);
    dummy.disconnect();
    context.close(); // dispose old context
    context = new AudioCtor();
  }
  return context;
}
Then to use it, create the audio context as follows:
var audio_context = createAudioContext(44100);
I have the following snippet that creates an oscillator and plays it at a certain volume. I keep the oscillator variable outside of the scope of the function so that I can stop it with other functions if I need to.
var oscillator = null;
var isPlaying = false;
// FIX: reuse one AudioContext — browsers cap the number of live contexts,
// and the original leaked a brand-new context on every call.
var audioCtx = null;

/**
 * Play a sine tone at the given frequency and volume, stopping any tone
 * that is already playing.
 * @param {number} freq - frequency in Hz
 * @param {number} gain - volume, 0 (silent) to 1 (full)
 */
function play(freq, gain) {
  // Stop the oscillator if it's already playing.
  if (isPlaying) {
    // FIX: the original called o.stop() on an undefined variable `o`,
    // throwing a ReferenceError on every repeated call.
    oscillator.stop();
    isPlaying = false;
  }
  if (!audioCtx) {
    audioCtx = new AudioContext();
  }
  // Create the volume node.
  var volume = audioCtx.createGain();
  volume.connect(audioCtx.destination);
  volume.gain.value = gain;
  // Connect the oscillator through the gain node ONLY.
  oscillator = audioCtx.createOscillator();
  oscillator.type = 'sine';
  oscillator.frequency.value = freq;
  oscillator.connect(volume);
  // FIX: the original ALSO connected the oscillator straight to the
  // destination; that second path bypassed the gain node, so gain = 0
  // still produced sound. The direct connection is removed.
  oscillator.start();
  isPlaying = true;
  console.log('Playing at frequency ' + freq + ' with volume ' + gain);
}
Trouble is, the gain node volume seems to not work as you'd expect. From what I understand, a gain of 0 is muted, and a gain of 1 is 100% volume. But, in this case, passing 0 as the gain value only plays the sound muffled, as opposed to muting it completely (I hope I'm explaining that properly).
What am I doing wrong? Can anybody help?
The problem is that the oscillator node is connected to both the gain node and the destination node.
+---------------+
| |
oscillator ----+----> gain ----+---> destination
So even if the gain node is attenuated to 0 there is still another path to the destination. The problem can be fixed by deleting the second oscillator.connect line.
oscillator.connect(volume);
//oscillator.connect(context.destination);
For anyone falling here from google. I do it normally like this:
// I create the class with best available
// FIX: pick the best available constructor. The original list started with
// `window.audioContext` (lowercase — not a real global) and then named
// `window.AudioContext` twice; only the standard and WebKit names are needed.
var ctxClass = window.AudioContext || window.webkitAudioContext;
// Instantiate the class, creating the context.
var ctx = new ctxClass();
// Create the oscillator.
var osc = ctx.createOscillator();
// Define the type of wave.
osc.type = 'sine';
// Create a gain node as an intermediary volume control.
var volume = ctx.createGain();
// Connect the oscillator to the gain knob...
osc.connect(volume);
// ...then connect the volume to the context destination.
volume.connect(ctx.destination);
// Set or modify the gain knob at any time.
volume.gain.value = 0.1;
// Test it with some frequency at the current time.
osc.frequency.setValueAtTime(440.0, ctx.currentTime);
// noteOn/noteOff are the legacy (pre-standard) names for start/stop.
if (osc.noteOn) osc.noteOn(0);
if (osc.start) osc.start();
// We'll have to stop it at some point.
setTimeout(function () {
  if (osc.noteOff) osc.noteOff(0);
  if (osc.stop) osc.stop();
  // We can insert a callback here, let them know you've finished, maybe play the next note?
  //finishedCallback();
}, 5000);
I have been looking at the Web Audio API and am not able to get the audio gain to work. I have a fiddle set up here, so you can understand the application of the function: http://jsfiddle.net/mnu70gy3/
I am hoping to dynamically create a tone on a click event, but am not able to have that tone fade out. Below is the relevant code:
var audioCtx = new AudioContext();
var osc = {};       // all oscillators, keyed by tile id
var gainNodes = {}; // all gain nodes, keyed by tile id
var now;

/**
 * Start a tone for the given tile and fade it to silence over one second.
 * @param {string} id   - key under which this tile's nodes are stored
 * @param {number} freq - oscillator frequency in Hz
 */
function tone(id, freq) {
  // FIX: bracket notation. `osc.id` / `gainNodes.id` stored everything under
  // the literal key "id", so every new tone clobbered the previous one and
  // the `id` parameter was ignored.
  gainNodes[id] = audioCtx.createGain();
  osc[id] = audioCtx.createOscillator();
  // FIX: route oscillator -> gain -> destination. The original connected the
  // oscillator directly to the destination (twice) and never inserted the
  // gain node, so the gain automation had no audible effect.
  osc[id].connect(gainNodes[id]);
  gainNodes[id].connect(audioCtx.destination);
  // Set frequency.
  osc[id].frequency.value = freq;
  // FIX: setValueAtTime alone jumps abruptly to 0 after one second instead of
  // fading; anchor the gain at 1 now and ramp linearly down to 0 over 1 s.
  gainNodes[id].gain.setValueAtTime(1.0, audioCtx.currentTime);
  gainNodes[id].gain.linearRampToValueAtTime(0, audioCtx.currentTime + 1);
  // Start playing.
  osc[id].start(0);
}
Any thoughts on if this can be done?
In your code you connect oscillator to the destination twice.
Instead, connect oscillator -> gain -> destination:
// NOTE(review): `gainNodes.id` / `osc.id` store under the literal key "id",
// so multiple simultaneous tones would overwrite each other; bracket
// notation (gainNodes[id], osc[id]) would keep one node pair per tile —
// confirm against the caller.
gainNodes.id = audioCtx.createGain();
osc.id = audioCtx.createOscillator();
// The key change vs. the question: feed the oscillator into the gain node.
osc.id.connect(gainNodes.id);
// set frequency and gain
osc.id.frequency.value = freq;
gainNodes.id.gain.value = 1.0;
// NOTE(review): setValueAtTime jumps to 0 at t+1 s rather than fading; a
// gradual fade would need linearRampToValueAtTime.
gainNodes.id.gain.setValueAtTime(0, audioCtx.currentTime + 1);
// start and connect
osc.id.start(0);
// Connect the gain node (not the oscillator) to the destination.
gainNodes.id.connect(audioCtx.destination);
You need to disconnect your audioCtx.destination when you click on a tile again.
https://jsfiddle.net/2dyq2ajw/
// Silences a tile's tone and detaches its oscillator from the output.
// NOTE(review): `.id` targets the literal key "id", not this tile's id —
// bracket notation (gainNodes[id], osc[id]) is likely intended, matching
// how tone() stores the nodes; `freq` is unused here. Confirm with caller.
function dismissTone(id,freq) {
gainNodes.id.gain.value = 0;
osc.id.disconnect(audioCtx.destination);
}
// Dispatch per tile: class 'xx' means the tile became active, so start its
// tone; otherwise dismiss it. NOTE(review): presumably runs inside a jQuery
// click handler where `this` is the clicked tile and thisId/thisFreq were
// derived from it — the enclosing handler is not shown here; confirm.
if($(this).hasClass('xx'))
tone(thisId,thisFreq);
else
dismissTone(thisId,thisFreq);