Could someone help me with ring modulation using JavaScript?
Here is my code. I am not sure if I am doing this right. When I press the start button it only plays an oscillator with gain; there is no mix with the audio file.
I tried to do something like this GitHub source.
Thanks
function audioFileLoader(fileDirectory, impulseFileDirectory) {
  var audioContext = new AudioContext();
  var soundObj = [];
  soundObj.fileDirectory = fileDirectory;
  soundObj.impulseFileDirectory = impulseFileDirectory;

  // buffer loader code
  var getSound = new XMLHttpRequest();
  getSound.open("GET", soundObj.fileDirectory, true);
  getSound.responseType = "arraybuffer";
  getSound.onload = function() {
    audioContext.decodeAudioData(getSound.response, function(buffer) {
      soundObj.soundToPlay = buffer;
    });
  };
  getSound.send();

  soundObj.play = function() {
    var source = audioContext.createBufferSource();
    source.buffer = soundObj.soundToPlay;

    var oscillator = audioContext.createOscillator();
    oscillator.type = 'sine';
    oscillator.frequency.value = 500;

    var gainNode = audioContext.createGain();
    gainNode.gain.value = 0.5;

    oscillator.connect(gainNode);
    source.connect(gainNode);
    gainNode.connect(audioContext.destination);

    oscillator.start(audioContext.currentTime);
  };

  return soundObj;
}
var example = audioFileLoader("audio/AcGtr.wav");
document.getElementById('ringmodulation').addEventListener("click", example.play, false);
Your code never starts the buffer source: you call oscillator.start() but not source.start(), so only the oscillator is heard. Also, var soundObj = [] should be var soundObj = {}, since you are using it as a plain object rather than an array.
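Beyond those two fixes, note that connecting both the oscillator and the source into the same gain node only mixes them. Here is a minimal sketch of a corrected play(), assuming classic ring modulation (the buffer signal multiplied by the sine carrier) is the goal: route the oscillator into the gain node's gain AudioParam instead, and start the buffer source.

soundObj.play = function() {
  var source = audioContext.createBufferSource();
  source.buffer = soundObj.soundToPlay;

  var oscillator = audioContext.createOscillator();
  oscillator.type = 'sine';
  oscillator.frequency.value = 500;

  // Ring modulation: the oscillator drives the gain AudioParam, so the
  // buffer signal is multiplied by the sine carrier. The base gain of 0
  // means the effective gain swings with the oscillator's -1..1 output.
  var gainNode = audioContext.createGain();
  gainNode.gain.value = 0;
  oscillator.connect(gainNode.gain);

  source.connect(gainNode);
  gainNode.connect(audioContext.destination);

  oscillator.start(audioContext.currentTime);
  source.start(audioContext.currentTime); // this line was missing
};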
I am making an audio chat website using WebRTC, and I have one problem when receiving remote audio from a peer. This doesn't work (I can't hear any audio):
var audioContext = new AudioContext();
var audioStream = audioContext.createMediaStreamSource(e.stream);
audioStream.connect(audioContext.destination);
While this works
var audio2 = document.querySelector('audio#audio2');
audio2.srcObject = e.stream;
The reason I need to do it this way is that I need to be able to control the audio (effects, volume), and as far as I know, AudioContext provides that. But for some reason, it doesn't work.
Any suggestions?
Thank you!
Use .createMediaStreamSource() with .createGain():
var ctx = new AudioContext();
var source = ctx.createMediaStreamSource(stream);
var gainNode = ctx.createGain();
gainNode.gain.value = 0.5;
source.connect(gainNode);
gainNode.connect(ctx.destination); // route through the gain node, not directly to the destination
jsfiddle https://jsfiddle.net/tkw13bfg/2
Alternatively, create an Audio element, assign the stream to it, and route it through a gain node:
var ctx = new AudioContext();
var audio = new Audio();
audio.srcObject = stream;
var gainNode = ctx.createGain();
gainNode.gain.value = 0.5;
audio.onloadedmetadata = function() {
  var source = ctx.createMediaStreamSource(audio.srcObject);
  audio.play();
  audio.muted = true; // mute the element so the stream is only heard through the graph
  source.connect(gainNode);
  gainNode.connect(ctx.destination);
};
I'm attempting to resample some audio. I have a function that works in Chrome and Firefox, but it crashes in Edge on the statement
audioBuffer.copyToChannel(sampleArray,0,0);
saying that copyToChannel is not defined. That is curious, because the Microsoft documentation specifically defines it: https://dev.windows.com/en-us/microsoft-edge/platform/documentation/apireference/interfaces/audiobuffer/
Anyway, I'm looking for a workaround. Inspecting the audioBuffer object in the developer tools didn't yield any clues for me.
Thanks!
Here is my code:
function reSample(sampleArray, targetSampleRate, onComplete) {
  // sampleArray is a Float32Array
  // targetSampleRate is an int (22050 in this case)
  // onComplete is called with the new buffer when the operation is complete
  var audioCtx = new window.AudioContext();
  var audioBuffer = audioCtx.createBuffer(1, sampleArray.length, audioCtx.sampleRate);
  audioBuffer.copyToChannel(sampleArray, 0, 0); // Not supported by Microsoft Edge 12, evidently.
  var channel = audioBuffer.numberOfChannels;
  var samples = audioBuffer.length * targetSampleRate / audioBuffer.sampleRate;
  var offlineContext = new window.OfflineAudioContext(channel, samples, targetSampleRate);
  var bufferSource = offlineContext.createBufferSource();
  bufferSource.buffer = audioBuffer;
  bufferSource.connect(offlineContext.destination);
  bufferSource.start(0);
  offlineContext.startRendering().then(function(renderedBuffer) {
    onComplete(renderedBuffer);
  });
}
I got this working with Edge:
const offlineCtx = new OfflineAudioContext(sourceBuffer.numberOfChannels, sourceBuffer.duration * this.sampleRate, this.sampleRate);
const cloneBuffer = offlineCtx.createBuffer(sourceBuffer.numberOfChannels, sourceBuffer.length, sourceBuffer.sampleRate);
cloneBuffer.copyToChannel(sourceBuffer.getChannelData(0), 0);
const source = offlineCtx.createBufferSource();
source.buffer = cloneBuffer;
offlineCtx.oncomplete = (e) => {
  const left = e.renderedBuffer.getChannelData(0);
  this.onAudioProcess(this.float32ToInt16(left, left.length), e.renderedBuffer.duration * 1000);
};
source.connect(offlineCtx.destination);
source.start(0);
offlineCtx.startRendering();
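If copyToChannel is unavailable, another common workaround (a sketch, not an Edge-specific API) is to write the samples directly into the channel's underlying Float32Array, which every implementation exposes via getChannelData():

// Feature-detect copyToChannel and fall back to TypedArray.set(),
// which copies sampleArray into the channel data in place
if (audioBuffer.copyToChannel) {
  audioBuffer.copyToChannel(sampleArray, 0, 0);
} else {
  audioBuffer.getChannelData(0).set(sampleArray);
}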
I have created an oscillator (as shown below), following the MDN docs:
// from : https://developer.mozilla.org/en-US/docs/Web/API/Web_Audio_API/Using_Web_Audio_API
var audioCtx = new (window.AudioContext || window.webkitAudioContext)();
var oscillator = audioCtx.createOscillator();
var gainNode = audioCtx.createGain();
oscillator.connect(gainNode);
gainNode.connect(audioCtx.destination);
oscillator.type = 'sine'; // sine wave — other values are 'square', 'sawtooth', 'triangle' and 'custom'
oscillator.frequency.value = 2500; // value in hertz
oscillator.start();
Is there a method to change the volume, like I changed the frequency value?
You can get a variable frequency control with a slider that controls the output frequency:
document.getElementById('slider99').addEventListener('input', slider99change, false);

function slider99change(e) {
  var x = document.getElementById("slider99");
  oz.frequency.value = parseFloat(x.value); // the slider value is a string, so convert it to a number
}
Your oscillator is defined globally elsewhere:
var ozcontext = new (window.AudioContext || window.webkitAudioContext)();
var oz = ozcontext.createOscillator();
var gainNode = ozcontext.createGain();
oz.connect(gainNode);
gainNode.connect(ozcontext.destination);
gainNode.gain.value = 0;
oz.type = 'sine';
oz.frequency.value = 440;
oz.start();
For volume changes you need to modify the gain node's gain value rather than the oscillator. The following link might help:
https://github.com/mdn/voice-change-o-matic/blob/gh-pages/scripts/app.js
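To answer the volume question directly, the same slider pattern works for gain. A minimal sketch, assuming a second slider with the made-up id 'volume99':

document.getElementById('volume99').addEventListener('input', function() {
  // gain is a linear multiplier: 0 is silence, 1 is full volume
  gainNode.gain.value = parseFloat(this.value);
}, false);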
When working with audio elements (<audio>) or contexts (AudioContext), you can check their currentTime property to know exactly the play time of your buffer.
All of this is fine and dandy until I create multiple sources (AudioBufferSourceNodes) in a single AudioContext.
The sources can be played at different times, so I would need to know each one's corresponding currentTime; to illustrate:
Some base code for you to work off:
var buffer1 = [0, 1, 0]; // not real buffers
var buffer2 = [1, 0, 1];

var ctx = new AudioContext();

var source1 = ctx.createBufferSource();
source1.buffer = buffer1;
source1.connect(ctx.destination);
source1.start(0);

var source2 = ctx.createBufferSource();
source2.buffer = buffer2;
source2.connect(ctx.destination);

setTimeout(function() { // some time later
  source2.start(0);
}, 1000);

setTimeout(function() { // some more time later
  getCurrentTime();
}, 1500);

function getCurrentTime() {
  /* magic */
  /* more magic */
  console.log("the sources' currentTime values are obviously 1500 (source1) and 500 (source2).");
}
What I usually do is create a wrapper for the audio source node that keeps track of the playback state. I've tried to minimise the code below to show the basics.
The core idea is to keep track of the time the sound is started and the time the sound is 'paused' and use those values to get the current time and to resume playback from the paused position.
I put a working example on codepen
function createSound(buffer, context) {
  var sourceNode = null,
      startedAt = 0,
      pausedAt = 0,
      playing = false;

  var play = function() {
    var offset = pausedAt;
    sourceNode = context.createBufferSource();
    sourceNode.connect(context.destination);
    sourceNode.buffer = buffer;
    sourceNode.start(0, offset);
    startedAt = context.currentTime - offset;
    pausedAt = 0;
    playing = true;
  };

  var pause = function() {
    var elapsed = context.currentTime - startedAt;
    stop();
    pausedAt = elapsed;
  };

  var stop = function() {
    if (sourceNode) {
      sourceNode.disconnect();
      sourceNode.stop(0);
      sourceNode = null;
    }
    pausedAt = 0;
    startedAt = 0;
    playing = false;
  };

  var getPlaying = function() {
    return playing;
  };

  var getCurrentTime = function() {
    if (pausedAt) {
      return pausedAt;
    }
    if (startedAt) {
      return context.currentTime - startedAt;
    }
    return 0;
  };

  var getDuration = function() {
    return buffer.duration;
  };

  return {
    getCurrentTime: getCurrentTime,
    getDuration: getDuration,
    getPlaying: getPlaying,
    play: play,
    pause: pause,
    stop: stop
  };
}
Old thread (and very useful, thanks!), but maybe worth mentioning that in the example above, instead of
function update() {
  window.requestAnimationFrame(update);
  info.innerHTML = sound.getCurrentTime().toFixed(1) + '/' + sound.getDuration().toFixed(1);
}
update();
it may be more precise and less resource-intensive to use a ScriptProcessorNode (created with createScriptProcessor), as explained in this post:
const audioBuffer = await audioContext.decodeAudioData(response.data);
const chan = audioBuffer.numberOfChannels;
const scriptNode = audioContext.createScriptProcessor(4096, chan, chan);
scriptNode.connect(audioContext.destination);
scriptNode.onaudioprocess = (e) => {
  // ---> audio loop <---- (e.playbackTime is the context time at which
  // the first sample of this block will be played)
};
[Update]
Note: as of the August 29, 2014 Web Audio API spec publication, ScriptProcessorNode has been marked as deprecated and replaced by AudioWorklet (see AudioWorkletNode). https://developers.google.com/web/updates/2017/12/audio-worklet
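For completeness, a minimal sketch of the AudioWorklet replacement, assuming this runs in an async context (like the decodeAudioData snippet above) and using 'time-reporter' as a made-up processor name; the processor module is loaded from an inline blob:

// Processor source: runs on the audio rendering thread. In the
// AudioWorkletGlobalScope, currentTime is a global holding the context time.
const processorSource = `
  class TimeReporter extends AudioWorkletProcessor {
    process(inputs, outputs) {
      this.port.postMessage(currentTime); // one message per 128-frame block
      return true; // keep the processor alive
    }
  }
  registerProcessor('time-reporter', TimeReporter);
`;

const blobUrl = URL.createObjectURL(
  new Blob([processorSource], { type: 'application/javascript' }));
await audioContext.audioWorklet.addModule(blobUrl);

const workletNode = new AudioWorkletNode(audioContext, 'time-reporter');
workletNode.port.onmessage = (e) => {
  // e.data is the current context time in seconds
};
workletNode.connect(audioContext.destination);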
I am experimenting with Web Audio and loading a sound with the following JavaScript code:
function playAudio() {
  var audio = document.getElementById('music');
  var audioContext = new webkitAudioContext();
  var analyser = audioContext.createAnalyser();
  var source = audioContext.createMediaElementSource(audio);
  source.connect(analyser);
  analyser.connect(audioContext.destination);
  audio.play();
}
I also want to analyse the sound and visualise it with canvas, hence the analyser. It works fine the first time, but if I run this function twice I get an error:
> playAudio(); // works fine, i can hear a sound
> playAudio(); // error
InvalidStateError: Failed to execute 'createMediaElementSource' on 'AudioContext': invalid HTMLMediaElement.
What is causing this error? I know that the error is caused by this line of code:
var source = audioContext.createMediaElementSource(audio);
I am creating a new audio context, so I would assume that I can re-use the same audio element in my html.
By creating the audio element dynamically (as shown in this fiddle, http://jsfiddle.net/ikerr/WcXHK/), I was able to play the song repeatedly. The underlying cause is that createMediaElementSource can only capture a given media element once; calling it again on the same element throws, so creating a fresh element sidesteps the restriction.
function createAudioElement(urls) {
  var audioElement = document.createElement("audio");
  audioElement.autoplay = true;
  audioElement.loop = false;
  for (var i = 0; i < urls.length; ++i) {
    var typeStr = "audio/" + urls[i].split(".").pop();
    if (audioElement.canPlayType === undefined ||
        audioElement.canPlayType(typeStr).replace(/no/, "")) {
      var sourceElement = document.createElement("source");
      sourceElement.type = typeStr;
      sourceElement.src = urls[i];
      audioElement.appendChild(sourceElement);
      console.log("Using audio asset: " + urls[i]);
    }
  }
  return audioElement;
}
var audioContext = new webkitAudioContext();

function playAudio() {
  var audio = createAudioElement(['http://www.soundjay.com/button/button-1.mp3']);
  if (audio) {
    var source = audioContext.createMediaElementSource(audio);
    var analyser = audioContext.createAnalyser();
    source.connect(analyser);
    analyser.connect(audioContext.destination);
    audio.play();
  }
}
playAudio(); // works fine, i can hear a sound
playAudio();
//setTimeout(playAudio,2000);
Demo: http://jsfiddle.net/raathigesh/fueg3mk7/10/
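If you want to keep using the same <audio> element from the question, an alternative sketch (assuming the original 'music' element) is to create the media-element source once and reuse it on subsequent calls:

var audioContext = new (window.AudioContext || window.webkitAudioContext)();
var source = null;

function playAudio() {
  var audio = document.getElementById('music');
  // createMediaElementSource may only capture an element once,
  // so build the graph on the first call and reuse it afterwards
  if (!source) {
    source = audioContext.createMediaElementSource(audio);
    var analyser = audioContext.createAnalyser();
    source.connect(analyser);
    analyser.connect(audioContext.destination);
  }
  audio.currentTime = 0; // restart from the beginning
  audio.play();
}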