I have an "asd.wav" sample with total duration 3 secs and play it:
let source = audioCtx.createBufferSource();
source.buffer = buffer; // received buffer of asd.wav
source.connect(audioCtx.destination);
source.start(0);
It plays perfectly from 0.00 to 3.00 seconds, but how can I play this sample only from 1.00 to 2.00 seconds?
This should do the trick. Maybe it can be done in a simpler way, but this is what I could come up with.
var AudioContext = window.AudioContext || window.webkitAudioContext;
var audioCtx = new AudioContext();
var getSound = new XMLHttpRequest();
getSound.open("GET", "./asd.wav", true);
getSound.responseType = "arraybuffer";
getSound.onload = function () {
    audioCtx.decodeAudioData(getSound.response, function (buffer) {
        let start_time = 1, end_time = 2, sample_rate = buffer.sampleRate,
            channel_number = 0; // assuming a mono (one channel) audio
        let source = audioCtx.createBufferSource();
        let data = buffer.getChannelData(channel_number);
        data = data.slice(start_time * sample_rate, end_time * sample_rate);
        let new_buffer = audioCtx.createBuffer(1 /* number of channels = 1 */, data.length, sample_rate);
        new_buffer.copyToChannel(data, 0);
        source.buffer = new_buffer;
        source.connect(audioCtx.destination);
        source.start(0);
    });
};
getSound.send();
In the case of multi-channel audio, you will need to repeat these steps to copy the data for each channel.
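That said, the API has a simpler built-in way: AudioBufferSourceNode.start() accepts optional offset and duration arguments (in seconds), so the excerpt can be played without copying any channel data. A minimal sketch, reusing audioCtx and the decoded buffer from above:
let source = audioCtx.createBufferSource();
source.buffer = buffer;               // the full decoded buffer
source.connect(audioCtx.destination);
source.start(0, 1, 1);                // play immediately, from 1.00 s, for 1.00 s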
In addition to explicitly requesting the resource to be played and slicing out a portion of it, you can also leverage an audio element. Per the docs, by appending #t=[starttime][,endtime] to the URL, you can specify the portion of interest.
From there, it's a trivial matter of creating a source from the media element and playing it, rather than doing it all from scratch.
As with an AJAX request, you're still subject to Cross-Origin restrictions.
Here's an example - just substitute the URL with one on the same domain, one referring to a resource with a CORS header, or one that uses your own server as a proxy to a compatible resource that doesn't come with the CORS header.
As you can see, this method, along with its accompanying HTML, requires far less code than the AJAX approach. You can also create and load audio elements dynamically, as shown in the button click handler.
<!doctype html>
<html>
<head>
<script>
"use strict";
function byId(id){ return document.getElementById(id); }
///////////////////////////////////
window.addEventListener('load', onDocLoaded, false);

function onDocLoaded(evt)
{
    //loadAndPlayPortion("3 seconds.wav", 0.0, 1.0);
    playAudioElement( byId('myAudioElem') );
}

function onBtnClicked(evt)
{
    var audio = document.createElement('audio');
    audio.onloadeddata = function(){ playAudioElement(this); };
    audio.src = '3 seconds.wav#t=2,3'; // could build this URL from user input
}

function playAudioElement(audioElem)
{
    var AudioContext = window.AudioContext || window.webkitAudioContext;
    var audioCtx = new AudioContext();
    var source = audioCtx.createMediaElementSource(audioElem);
    source.connect(audioCtx.destination);
    audioElem.play();
}

function loadAndPlayPortion(soundUrl, startTimeSecs, endTimeSecs)
{
    var AudioContext = window.AudioContext || window.webkitAudioContext;
    var audioCtx = new AudioContext();
    var ajax = new XMLHttpRequest();
    ajax.open("GET", soundUrl, true);
    ajax.responseType = "arraybuffer";
    ajax.onload = onFileLoaded;
    ajax.send();

    function onFileLoaded()
    {
        audioCtx.decodeAudioData(this.response, onDataDecoded);
    }

    function onDataDecoded(sampleBuffer)
    {
        let source = audioCtx.createBufferSource(),
            numChannels = sampleBuffer.numberOfChannels,
            sampleRate = sampleBuffer.sampleRate,
            nRequiredSamples = (endTimeSecs - startTimeSecs) * sampleRate,
            newBuffer = audioCtx.createBuffer(numChannels, nRequiredSamples, sampleRate);

        for (var curChannel = 0; curChannel < numChannels; curChannel++)
        {
            var channelData = sampleBuffer.getChannelData(curChannel);
            channelData = channelData.slice(startTimeSecs * sampleRate, endTimeSecs * sampleRate);
            newBuffer.copyToChannel(channelData, curChannel, 0);
        }
        source.buffer = newBuffer; // chosen portion of the received sound-file buffer
        source.connect(audioCtx.destination);
        source.start(0);
    }
}
</script>
<style>
</style>
</head>
<body>
    <button onclick='onBtnClicked()'>Create Audio element</button>
    <audio id='myAudioElem' src='3 seconds.wav#t=1,2'></audio>
</body>
</html>
Related
How can I get the audio buffer from a video element? I know the approach using BaseAudioContext.decodeAudioData(), but that fetches directly from an audio file via a request; I need to get the buffer from the video directly so I can manipulate it.
const video = document.createElement('video');
video.src = 'https://www.w3schools.com/html/mov_bbb.mp4';
document.body.appendChild(video);

let audioCtx;
let audioSource;

const play = () => {
    audioCtx = new AudioContext();
    audioSource = audioCtx.createMediaElementSource(video);
    audioSource.connect(audioCtx.destination);
    video.play();
};

video.ontimeupdate = () => {
    let buffer = new AudioBufferSourceNode(audioCtx);
    // Manipulate the audio buffer in real time
    // Problem: buffer is null
    buffer.connect(audioSource.context.destination);
    console.log(buffer.buffer);
};
<!DOCTYPE html>
<html>
<body>
<button onclick="play()">Play</button>
</body>
</html>
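For context, an AudioBufferSourceNode created this way has a null buffer; it never fills itself from the media element, which is why the log prints null. One way to read live samples from the video (a minimal sketch, assuming real-time access is the goal and that play() has already run so audioCtx and audioSource exist) is to tap the graph with an AnalyserNode:
// Sketch: route the media source through an AnalyserNode and read the
// current time-domain samples whenever they are needed.
const analyser = audioCtx.createAnalyser();
audioSource.connect(analyser);
analyser.connect(audioCtx.destination);

const samples = new Float32Array(analyser.fftSize);
video.ontimeupdate = () => {
    analyser.getFloatTimeDomainData(samples); // most recent audio samples
    console.log(samples[0]);
};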
I've created a simple music player, which creates an ArrayBuffer for a particular audio URL and plays the music from it.
It works fine in many of my phone's browsers, so I guess there is no cross-origin issue with the audio URL;
however, Chrome is not playing the audio.
I've also created a Uint8Array for plotting the frequency data on a canvas. While many browsers plot the frequency graph successfully, Chrome does not!
Take a look at what I've tried so far:
```
<!DOCTYPE html>
<html>
<head>
    <title>Page Title</title>
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
</head>
<body>
    <center>
        <h1>Music Player</h1>
        <hr>
        <div id="div"></div>
        <canvas></canvas>
        <p>Frequency plot</p>
    </center>
    <script>
        const url = "https://dl.dropbox.com/s/5jyylqps64nyoez/Legends%20never%20die.mp3?dl=0";
        const div = document.querySelector("#div");
        const cvs = document.querySelector("canvas");
        cvs.width = window.innerWidth - 20;
        cvs.height = 200;
        const c = cvs.getContext("2d");

        function loadMusic(url) {
            div.innerHTML = "Loading music, please wait...";
            const context = new AudioContext();
            const source = context.createBufferSource();
            const analyser = context.createAnalyser();
            let request = new XMLHttpRequest();
            request.open("GET", url, true);
            request.responseType = "arraybuffer";
            request.onload = () => {
                div.innerHTML = "Music loaded, please wait, music will be played soon...";
                context.decodeAudioData(request.response, buffer => {
                    source.buffer = buffer;
                    source.connect(context.destination);
                    source.connect(analyser);
                    analyser.connect(context.destination);
                    source.start();
                    div.innerHTML = "Music is playing... Enjoy!";
                    setInterval(() => {
                        c.clearRect(0, 0, cvs.width, cvs.height);
                        let array = new Uint8Array(analyser.frequencyBinCount);
                        analyser.getByteFrequencyData(array);
                        for (let m = 0; m < array.length; m++) {
                            let x = ((window.innerWidth - 20) * m) / array.length;
                            c.beginPath();
                            c.moveTo(x, 150 - (100 * array[m]) / 255);
                            c.lineTo(((window.innerWidth - 20) * (m + 1)) / array.length, 150 - (100 * array[m + 1]) / 255);
                            c.lineWidth = 1;
                            c.strokeStyle = "black";
                            c.stroke();
                        }
                    }, 1);
                });
            };
            request.send();
        }

        loadMusic(url);
    </script>
</body>
</html>
```
These are more a couple of observations than a complete solution.
The code given worked for me on Edge, Chrome and Firefox on Windows 10.
On iOS 14 Safari and iOS 14 Chrome it seemed to stop after putting out the loading message.
This MDN reference uses a 'cross-browser' method to create the AudioContext, so I added this line:
var AudioContext = window.AudioContext || window.webkitAudioContext;
before this line:
const context = new AudioContext();
[edit: I have just confirmed at caniuse that the -webkit prefix is needed by Safari]
That seemed to do the trick, in as much as the rest of the code was executed. However, there was no sound, and the audio did not appear to be playing. The plot also showed just a single horizontal line.
Is this a manifestation of iOS's requirement that there must be some user interaction before audio will actually be played?
I'm pretty sure the audio was loaded, as there was a noticeable pause at that point. I suspect there will have to be a button which, when clicked, actually starts the playback.
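If the autoplay policy is indeed the blocker (an assumption; iOS Safari, and desktop Chrome too, keep an AudioContext suspended unless it is created or resumed during a user gesture), gating playback behind a button should confirm it. A sketch against the question's code, with the unconditional loadMusic(url) call at the bottom of the script removed:
<button id="playBtn">Play</button>
<script>
    // Sketch: invoke loadMusic() (from the question) inside a click handler
    // so the AudioContext is created during a user gesture, satisfying the
    // autoplay policy.
    document.getElementById("playBtn").onclick = () => loadMusic(url);
</script>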
I'm trying to test playing audio using an HTMLAudioElement and an AudioSourceNode. For the later application I need two features:
The pitch must be preserved after the playbackRate is changed.
The volume needs to be changed to a value greater than 1.
Because of feature 2, I added a workaround with the AudioSourceNode and a GainNode.
I need the audio file as an ArrayBuffer in the later app, which is why I added the file-reading part first.
Problem:
The code works fine in Chrome and Opera, but not in Firefox: the playbackRate is set to 2, but the playback rate of the audio signal does not change. Why is that the case, and how can I fix it?
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <title>Timestretching 2</title>
    <script type="text/javascript">
        var audioContext = window.AudioContext   // Default
            || window.webkitAudioContext         // Safari and old versions of Chrome
            || window.mozAudioContext
            || false;

        function play() {
            var fileInput = document.getElementById("file");
            var file = fileInput.files[0];
            var reader = new FileReader();
            reader.onload = function (event) {
                console.log("finished!");
                console.log(event.target.result);
                var audioCtxt = new audioContext();
                var url = URL.createObjectURL(new File([event.target.result], "test.wav"));
                var player = new Audio();
                const source = audioCtxt.createMediaElementSource(player);
                player.src = url;
                console.log("wait to play");
                player.addEventListener("canplay", function () {
                    // create a gain node to set the volume greater than 1
                    const gainNode = audioCtxt.createGain();
                    gainNode.gain.value = 2.0; // double the volume
                    source.connect(gainNode);
                    gainNode.connect(audioCtxt.destination);
                    player.playbackRate = 2.0;
                    player.play();
                    player.playbackRate = 2.0;
                    console.log("playbackRate is " + player.playbackRate);
                });
            };
            reader.onprogress = function (progress) {
                console.log(progress.loaded / progress.total);
            };
            reader.onerror = function (error) {
                console.error(error);
            };
            reader.readAsArrayBuffer(file);
        }
    </script>
</head>
<body>
    <input id="file" type="file" value="Choose audio file"/>
    <button onclick="play()">PLAY</button>
</body>
</html>
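A couple of things may be worth trying (assumptions rather than a confirmed fix): HTMLMediaElement also has a defaultPlaybackRate, which is applied whenever playback (re)starts, and the rate can be re-asserted once the element has actually begun playing. A sketch:
// Sketch of a possible workaround (untested against the Firefox behaviour
// described above): set the default rate before assigning src, then
// re-apply the rate after playback has started.
player.defaultPlaybackRate = 2.0;
player.src = url;
player.addEventListener("playing", function () {
    player.playbackRate = 2.0; // re-assert in case the browser reset it
});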
Whenever I play a sound using code such as
// binaryData = a wave file from a websocket
let ctx = new AudioContext();
ctx.decodeAudioData(binaryData, function (audioData) {
    let source = ctx.createBufferSource();
    source.buffer = audioData;
    source.connect(ctx.destination);
    source.start(0);
});
There is a very audible click or pop between each clip played. Forget the fact that I'm trying to play real-time audio with this system; why is there a glitchy noise at the beginning and end of each sound clip? I don't understand how this is acceptable behaviour in 2017 from an audio-playback device... Is there any way to mitigate or eliminate it?
Answer
Following the answer below, here is a good set of numbers that reduces the clicking to practically nothing. I'm not saying this works great for a tone, but it's flawless for voice.
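// Note (added for clarity): "gain" below is assumed to be gainNode.gain,
// i.e. the AudioParam of a GainNode placed between the source and the destination.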
// start of clip
// clipPlayTime may be 0 or your scheduled play time
gain.setValueAtTime(0.01, clipPlayTime);
gain.exponentialRampToValueAtTime(1, clipPlayTime + 0.001);
// end of clip
gain.setValueAtTime(1, clipPlayTime + clipLength - 0.001);
gain.exponentialRampToValueAtTime(0.01, clipPlayTime + clipLength);
This creates a ramp up and a ramp down.
Use an exponentialRampToValueAtTime() to remove (or at least reduce) the clicking noise. Note that exponential ramps cannot start from or target a value of exactly 0, which is why the snippets ramp between 0.01 (or 0.0001) and 1 rather than 0 and 1.
Here's a great explanation: Web Audio, the ugly click and the human ear
Full Example
Base example taken from: https://developer.mozilla.org/en-US/docs/Web/API/BaseAudioContext/decodeAudioData
<button class="play">Play</button>
<button class="stop">Stop</button>
<script type="text/javascript">
var audioCtx = new(window.AudioContext || window.webkitAudioContext)();
var source;
var play = document.querySelector('.play');
var stop = document.querySelector('.stop');
var gainNode = audioCtx.createGain();
function getData() {
source = audioCtx.createBufferSource();
var request = new XMLHttpRequest();
request.open('GET', './sample.wav', true);
request.responseType = 'arraybuffer';
request.onload = function() {
var audioData = request.response;
audioCtx.decodeAudioData(audioData, function(buffer) {
source.buffer = buffer;
source.connect(gainNode);
gainNode.connect(audioCtx.destination);
gainNode.gain.setValueAtTime(1, audioCtx.currentTime);
},
function(e) {
console.log("Error with decoding audio data" + e.err);
});
}
request.send();
}
play.onclick = function() {
getData();
source.start(0);
play.setAttribute('disabled', 'disabled');
}
stop.onclick = function() {
gainNode.gain.setValueAtTime(gainNode.gain.value, audioCtx.currentTime);
gainNode.gain.exponentialRampToValueAtTime(0.0001, audioCtx.currentTime + 1);
setTimeout(function() {
source.stop();
}, 1000)
play.removeAttribute('disabled');
}
</script>
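For reuse, the ramps from the snippet at the top of this answer can be wrapped in a small helper (a sketch; gainParam is assumed to be a GainNode's gain AudioParam):
// Sketch: apply a short fade-in and fade-out around a scheduled clip to
// suppress clicks; exponential ramps must stay above zero, hence 0.01.
function declick(gainParam, clipPlayTime, clipLength) {
    gainParam.setValueAtTime(0.01, clipPlayTime);
    gainParam.exponentialRampToValueAtTime(1, clipPlayTime + 0.001);
    gainParam.setValueAtTime(1, clipPlayTime + clipLength - 0.001);
    gainParam.exponentialRampToValueAtTime(0.01, clipPlayTime + clipLength);
}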
The code is supposed to stream any URL and provide a visualization of the audio. Unfortunately, the visualizer is not working: the visualization relies on data from the AnalyserNode, which always returns empty data. Why doesn't the AnalyserNode in this code work? The numberOfOutputs on the source node increases after I .connect() them, but the numberOfInputs on the AnalyserNode does not change.
<html>
<head>
<script>
var context;
var source;
var analyser;
var canvas;
var canvasContext;

window.addEventListener('load', init, false);

function init() {
    try {
        // Fix up prefixing
        window.AudioContext = window.AudioContext || window.webkitAudioContext;
        window.requestAnimationFrame = window.requestAnimationFrame || window.mozRequestAnimationFrame ||
            window.webkitRequestAnimationFrame || window.msRequestAnimationFrame;
        context = new AudioContext();
        analyser = context.createAnalyser();
        canvas = document.getElementById("analyser");
        canvasContext = canvas.getContext('2d');
    }
    catch (e) {
        alert(e);
        alert('Web Audio API is not supported in this browser');
    }
}

function streamSound(url) {
    var audio = new Audio();
    audio.src = url;
    audio.controls = true;
    audio.autoplay = true;
    source = context.createMediaElementSource(audio);
    source.connect(analyser);
    analyser.connect(context.destination);
    document.body.appendChild(document.createElement('br'));
    document.body.appendChild(audio);
    render();
}

function render() {
    window.requestAnimationFrame(render);
    // Get the sound data
    var freqByteData = new Uint8Array(analyser.frequencyBinCount);
    analyser.getByteFrequencyData(freqByteData);
    // Clear the canvas
    canvasContext.clearRect(0, 0, canvas.width, canvas.height);
    // Draw the visualization
    for (var i = 0; i < analyser.frequencyBinCount; i++) {
        canvasContext.fillRect(i * 2, canvas.height, 1, -freqByteData[i]);
        // Data seems to always be zero
        if (freqByteData[i] != 0) {
            alert(freqByteData[i]);
        }
    }
}
</script>
</head>
<body>
    <button onclick="streamSound(document.getElementById('url').value)">Stream sound</button>
    <input id="url" size='77' value="Enter a URL to Stream">
    <br />
    <canvas id="analyser" width="600" height="200"></canvas>
</body>
</html>
You should set up an event listener for the audio's canplay event and only create the MediaElementSource and connect it after that event fires.
It also won't work in Safari due to a bug in their implementation. AFAIK Chrome is the only browser that properly supports the Web Audio API.
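A sketch of the ordering the first paragraph describes, adapted from streamSound() in the question (names are taken from the code above):
// Sketch: defer creating and connecting the MediaElementSource until the
// element reports it can play.
function streamSound(url) {
    var audio = new Audio();
    audio.controls = true;
    audio.addEventListener('canplay', function () {
        source = context.createMediaElementSource(audio);
        source.connect(analyser);
        analyser.connect(context.destination);
        audio.play();
        render();
    });
    audio.src = url; // assign src after registering the listener
    document.body.appendChild(audio);
}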