I wrote the following to draw a sound wave from an AudioBuffer, but all I get is a canvas with a straight horizontal line:
const { audioContext, analyser } = this.getAudioContext();
const source = audioContext.createBufferSource();
source.buffer = audioBuffer;
source.connect(audioContext.destination);
const canvas = document.getElementById('canvas');
const canvasCtx = canvas.getContext("2d");
let sinewaveDataArray = new Uint8Array(analyser.fftSize);
const drawSinewave = function() {
  analyser.getByteTimeDomainData(sinewaveDataArray);
  requestAnimationFrame(drawSinewave);

  canvasCtx.fillStyle = 'white';
  canvasCtx.fillRect(0, 0, canvas.width, canvas.height);
  canvasCtx.lineWidth = 2;
  canvasCtx.strokeStyle = "black";
  canvasCtx.beginPath();

  const sliceWidth = canvas.width * 1.0 / analyser.fftSize;
  let x = 0;

  for (let i = 0; i < analyser.fftSize; i++) {
    const v = sinewaveDataArray[i] / 128.0; // normalize the byte value (0-255) around 128
    const y = v * canvas.height / 2;

    if (i === 0) {
      canvasCtx.moveTo(x, y);
    } else {
      canvasCtx.lineTo(x, y);
    }

    x += sliceWidth;
  }

  canvasCtx.lineTo(canvas.width, canvas.height / 2);
  canvasCtx.stroke();
};
source.start();
drawSinewave();
getAudioContext = () => {
  AudioContext = window.AudioContext || window.webkitAudioContext;
  const audioContext = new AudioContext();
  const analyser = audioContext.createAnalyser();
  return { audioContext, analyser };
};
The end result I am looking for is something like the first image clip here: https://meyda.js.org/
Any idea what I am missing?
I think two little changes are necessary to make it work.
source.start() needs to be executed in response to a user event. A simple button with a click handler can be used to achieve that. This is necessary to deal with the autoplay policy which is present in some browsers.
aReferenceToAButton.addEventListener('click', async () => {
  await audioContext.resume();
  source.start();
});
Last but not least you need to pipe the signal through the AnalyserNode.
source
  .connect(analyser)
  .connect(audioContext.destination);
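(As a side note, connect() returns the node you pass to it, so source.connect(analyser) evaluates to the analyser and the second .connect() call chains straight onto it.)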
I hope this helps.
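For completeness, a minimal sketch combining both changes (reusing getAudioContext(), drawSinewave() and audioBuffer from the question, plus a hypothetical aReferenceToAButton element) could look like this:
const { audioContext, analyser } = this.getAudioContext();
const source = audioContext.createBufferSource();
source.buffer = audioBuffer;

// Route the source through the analyser so getByteTimeDomainData() has something to read.
source
  .connect(analyser)
  .connect(audioContext.destination);

// Start playback (and the drawing loop) from a user gesture to satisfy the autoplay policy.
aReferenceToAButton.addEventListener('click', async () => {
  await audioContext.resume();
  source.start();
  drawSinewave();
});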
This is my first post here, and I'm a coding noob. I'm experimenting with JavaScript audio visualizers, using a tutorial from the FrankLaboratory YouTube channel as a starting point.
I made several zones on the page (overlays), each with a click event and a different sound. Everything ran fine, but every zone shared the same visualizer shape (a single drawVisualizer function called for each overlay), which I found annoying.
So I'm trying to modify this to have one sound and one visualizer for each zone.
But it acts strangely: I click one zone and it's fine, I click the second and it's fine too, but if I click the first one again, the sound still plays and the visualizer no longer shows. The same happens with other combinations: as long as I keep replaying the same zone it works, but as soon as I switch, I can't get the visualizer back on the zone I played before.
Here is the code
I've only kept the first two zones for the example, but the goal is to have 16 on the page.
I get the error "Failed to execute 'createMediaElementSource' on 'AudioContext': HTMLMediaElement already connected previously to a different MediaElementSourceNode."
But I actually had the same error in the first version of my script, the one with a single drawVisualizer function and one visualizer for all zones, and everything ran smoothly nonetheless, so...
I'm missing something but I don't know what, so any help would be appreciated.
I put the code here too:
const container = document.getElementById('container');
const canvas = document.getElementById('canvas1');
canvas.width = window.innerWidth;
canvas.height = window.innerHeight;
const ctx = canvas.getContext('2d');
let audioSource;
let analyser;
const overlay = document.getElementsByClassName('overlay')[0];
overlay.addEventListener('click' , function(){
const audio0 = document.getElementById('audio0');
audio0.src = 'sound1.mp3'; // embedded as base64 in the jsfiddle
const audioContext = new AudioContext();
audio0.play();
audioSource = audioContext.createMediaElementSource(audio0);
analyser = audioContext.createAnalyser();
audioSource.connect(analyser);
analyser.connect(audioContext.destination);
analyser.fftSize = 2048;
const bufferLength = analyser.frequencyBinCount;
const dataArray = new Uint8Array(bufferLength);
const barWidth = canvas.width/bufferLength;
let barHeight;
let x;
function animate(){
x = 0;
ctx.clearRect(0, 0, canvas.width, canvas.height);
analyser.getByteFrequencyData(dataArray);
for (let i = 0; i < bufferLength; i++){
barHeight = dataArray[i];
const red = i * 50;
const green = i ;
const blue = i / 2;
ctx.fillStyle = 'rgb('+ red + ',' + green + ',' + blue + ')';
ctx.fillRect(x, canvas.height - barHeight, barWidth * 4, barHeight / 2);
x += barWidth;
}
requestAnimationFrame(animate);
}
animate();
});
const overlay1 = document.getElementsByClassName('overlay')[1];
overlay1.addEventListener('click' , function(){
const audio1 = document.getElementById('audio1');
audio1.src = 'sound2.mp3'; // embedded as base64 in the jsfiddle
const audioContext = new AudioContext();
audio1.play();
audioSource = audioContext.createMediaElementSource(audio1);
analyser = audioContext.createAnalyser();
audioSource.connect(analyser);
analyser.connect(audioContext.destination);
analyser.fftSize = 2048;
const bufferLength = analyser.frequencyBinCount;
const dataArray = new Uint8Array(bufferLength);
const barWidth = canvas.width/bufferLength;
let barHeight;
let x;
function animate(){
x = 0;
ctx.clearRect(0, 0, canvas.width, canvas.height);
analyser.getByteFrequencyData(dataArray);
for (let i = 0; i < bufferLength; i++){
barHeight = dataArray[i]*10;
ctx.save();
ctx.translate(canvas.width, canvas.height);
ctx.rotate(i * Math.PI *24 / bufferLength);
const red = i * 50;
const green = i ;
const blue = i / 2;
ctx.fillStyle = 'rgb('+ red + ',' + green + ',' + blue + ')';
ctx.fillRect(x, canvas.height - barHeight, barWidth * 4, barHeight / 2);
x += barWidth;
ctx.restore();
}
requestAnimationFrame(animate);
}
animate();
});
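For what it's worth, the error quoted above is the browser saying that a given audio element can only ever be wrapped in one MediaElementSourceNode. A possible restructuring (a hypothetical sketch, not from the original fiddle) is to create a single AudioContext and create each element's source node only once, reusing it on later clicks:
// Hypothetical sketch: one shared AudioContext, one cached source node per element.
const sharedContext = new (window.AudioContext || window.webkitAudioContext)();
const sources = new Map(); // audio element -> MediaElementAudioSourceNode

function getSourceFor(audioElement) {
  if (!sources.has(audioElement)) {
    sources.set(audioElement, sharedContext.createMediaElementSource(audioElement));
  }
  return sources.get(audioElement);
}

overlay.addEventListener('click', function() {
  const audio0 = document.getElementById('audio0');
  const analyser = sharedContext.createAnalyser();
  const source = getSourceFor(audio0);
  source.disconnect();              // drop wiring from any previous click
  source.connect(analyser);
  analyser.connect(sharedContext.destination);
  audio0.play();
  // ... set fftSize and run the animate() loop exactly as before ...
});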
OK, so I know how to animate a canvas using an AnalyserNode. I made a demo of my implementation.
This is my demo ->
https://codesandbox.io/s/heuristic-lovelace-bmwxo?file=/src/Visualizer.js
What I am trying to understand is how do I make this look similar to this -> https://s3.us-west-1.amazonaws.com/storycreator.rendered/cka4ubx6d0dgb0114ws1rll7p?t=1590039817915
This audio spectrum was generated in After Effects using the audio spectrum effect.
I am using new Uint8Array(analyser.frequencyBinCount) to read the frequency data from the Web Audio API. What is AE using under the hood to create the spectrum effect, and is there a difference between spectrum and frequency in this context?
Here is the full JavaScript code for the frequency version:
import React, { useEffect, useRef } from "react";
let frequencyArray = [];
let analyser;
const Visualizer = () => {
const canvasRef = useRef(null);
const requestRef = useRef(null);
const handleInit = () => {
initAudio();
requestRef.current = requestAnimationFrame(drawCanvas);
};
const initAudio = () => {
const audio = new Audio();
audio.src =
"https://s3.us-west-2.amazonaws.com/storycreator.uploads/ck9kpb5ss0xf90132mgf8z893?client_id=d8976b195733c213f3ead34a2d95d1c1";
audio.crossOrigin = "anonymous";
audio.load();
const context = new (window.AudioContext || window.webkitAudioContext)();
analyser = context.createAnalyser();
const source = context.createMediaElementSource(audio);
source.connect(analyser);
analyser.connect(context.destination);
frequencyArray = new Uint8Array(analyser.frequencyBinCount);
audio.play();
};
// draw the whole thing
const drawCanvas = () => {
if (canvasRef.current) {
const canvas = canvasRef.current;
const ctx = canvas.getContext("2d");
const radius = 200;
const bars = Math.round(canvas.width);
ctx.clearRect(0, 0, canvas.width, canvas.height);
analyser.getByteFrequencyData(frequencyArray);
for (var i = 0; i < bars; i++) {
const height = frequencyArray[i] * 0.25;
drawLine(
{
i,
bars,
height,
radius
},
canvas,
ctx
);
}
requestRef.current = requestAnimationFrame(drawCanvas);
}
};
// draw lines around the circle
const drawLine = (opts, canvas, ctx) => {
const { i, radius, bars, height } = opts;
const centerX = canvas.width / 2;
const centerY = canvas.height / 2;
const lineWidth = 10;
// draw the bar
ctx.strokeStyle = "#ddd";
ctx.lineWidth = lineWidth;
ctx.lineCap = "round";
ctx.beginPath();
ctx.moveTo(i, centerY);
ctx.lineTo(i, centerY + height);
ctx.stroke();
ctx.beginPath();
ctx.moveTo(i, centerY);
ctx.lineTo(i, centerY - height);
ctx.stroke();
};
return (
<>
<button onClick={handleInit}>Start Visualizer</button>
<canvas
ref={canvasRef}
style={{ background: "#f5f5f5" }}
width={window.innerWidth}
height={window.innerHeight}
/>
</>
);
};
export default Visualizer;
I think analyser.getByteTimeDomainData() would be more appropriate.
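For context on the spectrum-versus-frequency part of the question: getByteFrequencyData() fills the array with the magnitude of each frequency bin (a spectrum snapshot), while getByteTimeDomainData() fills it with the raw waveform samples centred on 128. A quick illustration, assuming an analyser and a Uint8Array named dataArray (illustrative name):
analyser.getByteFrequencyData(dataArray);   // spectrum: one magnitude (0-255) per frequency bin
analyser.getByteTimeDomainData(dataArray);  // waveform: one sample per slot, 128 = silence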
A slightly modified version of your code (so that I could test it offline, and because I am not familiar with React):
let frequencyArray = [];
let analyser;
let request;
var flag=0;
var height=0;
const canvas = document.getElementById("myCanvas");
const ctx = canvas.getContext("2d");
const bars = Math.round(canvas.width);
const lineWidth = 3;
var centerX = canvas.width / 2;
var centerY = canvas.height / 2;
const audio = new Audio();
audio.src =
"https://s3.us-west-2.amazonaws.com/storycreator.uploads/ck9kpb5ss0xf90132mgf8z893?client_id=d8976b195733c213f3ead34a2d95d1c1";
audio.crossOrigin = "anonymous";
audio.load();
const context = new (window.AudioContext || window.webkitAudioContext)();
analyser = context.createAnalyser();
const source = context.createMediaElementSource(audio);
source.connect(analyser);
analyser.connect(context.destination);
frequencyArray = new Uint8Array(analyser.frequencyBinCount);
document.getElementById('button').addEventListener('click', function() {
context.resume().then(() => {
console.log('Playback resumed successfully');
});
});
function begin()
{
audio.play();
requestAnimationFrame(drawCanvas);
};
function end()
{
cancelAnimationFrame(request);
audio.pause();
};
audio.addEventListener("ended", close);
function close()
{
if(flag==0)
{
flag=1;
}
else
{
ctx.clearRect(0, 0, canvas.width, canvas.height);
flag=0;
}
}
const drawCanvas = () => {
ctx.clearRect(0, 0, canvas.width, canvas.height);
analyser.getByteTimeDomainData(frequencyArray);
for (var i = 0; i < bars; i+=3) {
height = frequencyArray[i];
if(height<100)
{
height*=0.05;
}
else
{
if(height<200 && height>100)
{
height=(height-100)+(100*0.05)
}
else
{
height=(height-200)*0.2+(100*1.05);
}
}
drawLine(
{
i,
bars,
height
},
canvas,
ctx
);
}
if(flag==0)
{
request = requestAnimationFrame(drawCanvas);
}
else
{
flag=2;
close();
}
};
const drawLine = (opts, canvas, ctx) => {
const { i, bars, height } = opts;
// draw the bar
ctx.strokeStyle = "#212121";
ctx.lineWidth = lineWidth;
ctx.lineCap = "round";
ctx.beginPath();
ctx.moveTo(i, centerY);
ctx.lineTo(i, centerY + height);
ctx.stroke();
ctx.beginPath();
ctx.moveTo(i, centerY);
ctx.lineTo(i, centerY - height);
ctx.stroke();
};
<!DOCTYPE html>
<html>
<head>
</head>
<body>
<button id="button" onclick="begin()">Start</button>
<button onclick="end()">End</button>
<canvas id="myCanvas" width="500" height="500" style="border:1px solid #d3d3d3;">
Your browser does not support the HTML canvas tag.</canvas>
<script src="wave.js"></script>
</body>
</html>
Intermittently, some sharp spikes emerge, which looks bad. Perhaps someone else can fix that.
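One possible way to tame those spikes (an untested sketch of my own, not part of the code above) is to run a small moving average over the samples before drawing, trading a slightly softer waveform for fewer isolated jumps:
// Untested sketch: average each sample with its neighbours before drawing.
function smooth(data, radius) {
  const out = new Uint8Array(data.length);
  for (let i = 0; i < data.length; i++) {
    let sum = 0, count = 0;
    for (let j = i - radius; j <= i + radius; j++) {
      if (j >= 0 && j < data.length) {
        sum += data[j];
        count++;
      }
    }
    out[i] = sum / count;
  }
  return out;
}
// e.g. drawCanvas() could call smooth(frequencyArray, 2) and draw from the result.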
I draw my webcam stream on a canvas, and I apply my prediction's bounding-box rectangle to this canvas on each frame. Before the predictions start, the canvas stream is perfect, but as soon as predictions start the canvas begins to blink; I mean the whole canvas shows up and disappears, over and over, very fast. Do you have any idea how I could make it right? Thanks a lot.
My prediction function looks like this:
let isPredicting = false;
async function predict() {
ui.isPredicting();
while (isPredicting) {
// Capture the frame from the webcam.
let video = document.getElementById('predwebcam');
let width=224,
height=224,
frameRate=10;
navigator.mediaDevices.getUserMedia({
video: {
width: width,
height:height,
frameRate:frameRate
}
}
).then(function(stream) {
video.srcObject = stream;
video.onloadedmetadata = function(e) {
video.play();
video.addEventListener('play', function() {
}, false);
};
}).catch(function(err) {
console.log('failed to load webcam')
});
var canvas = document.getElementById('predcanvas');
canvas.width = 500;
canvas.height = 500;
var context = canvas.getContext('2d');
/*
context.drawImage(video, 0, 0, canvas.width, canvas.height);
*/
// video 'play' event listener
video.addEventListener('play', function() {
//context.drawImage(this, 0, 0, canvas.width, canvas.height);
//draw(video, canvas, context, frameRate);
}, false);
function draw(video, canvas, context, frameRate) {
context.drawImage(video, 0, 0, canvas.width, canvas.height);
setTimeout(draw, 1/frameRate, video, canvas, context, frameRate);
}
//draw(video, canvas, context, frameRate);
const img = await getImage();
// Make a prediction through mobilenet, getting the internal activation of
// the mobilenet model, i.e., "embeddings" of the input images.
//const embeddings = truncatedMobileNet.predict(img);
console.log('starting to predict')
// Make a prediction through our newly-trained model using the embeddings
// from mobilenet as input.
const predictions = model.predict(img);
const values = predictions.dataSync();
const arr = Array.from(values);
console.log(arr)
let u = document.getElementById("1label").value;
dict.add(classindex1, u);
if (arr[0]>= 0){
arr[0]=1;
}
var Id = arr[0];
let ClassID = dict.findAt(Id);
//console.log(ClassID)
let startX = arr[1];
let startY = arr[2];
let w = arr[3];
let h = arr[4];
// Draw the bounding box.
context.strokeStyle = 'red';
//context.shadowColor = '#d53';
//context.shadowBlur = 20;
context.lineJoin = 'bevel';
context.lineWidth = 10;
context.strokeStyle = '#38f';
context.strokeRect(startX, startY, w, h);
// Draw the label background.
const font = "24px helvetica";
context.font = font;
context.textBaseline = "top";
context.fillStyle = "#2fff00";
const textWidth = context.measureText(ClassID).width;
const textHeight = parseInt(font, 10);
// draw top left rectangle
context.fillRect(startX, startY, textWidth + 10, textHeight + 10);
// draw bottom left rectangle
//context.fillRect(startX, startY + h - textHeight, textWidth + 15, textHeight + 10);
// Draw the text last to ensure it's on top.
context.fillStyle = "#000000";
context.fillText(ClassID, startX, startY);
//context.fillText(prediction.score.toFixed(2), startX, startY + h - textHeight);
// Returns the index with the maximum probability. This number corresponds
// to the class the model thinks is the most probable given the input.
//const predictedClass = predictions.as1D().argMax();
//const classId = (await predictedClass.data())[0];
img.dispose();
//ui.predictClass(metaClasses[classId]);
await tf.nextFrame();
}
ui.donePredicting();
}
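One observation about the loop above: every pass through while (isPredicting) calls getUserMedia again and re-assigns canvas.width and canvas.height, and assigning a canvas's width or height clears its bitmap, so each iteration wipes whatever was just drawn. A hypothetical restructuring (a sketch only, inside the same async predict() function) does the webcam and canvas setup once and keeps only the drawing and prediction in the loop:
// Sketch: set up the webcam and canvas once, before the prediction loop.
const video = document.getElementById('predwebcam');
const canvas = document.getElementById('predcanvas');
canvas.width = 500;
canvas.height = 500;
const context = canvas.getContext('2d');

const stream = await navigator.mediaDevices.getUserMedia({
  video: { width: 224, height: 224, frameRate: 10 }
});
video.srcObject = stream;
await video.play();

// ...then only draw and predict inside the loop.
while (isPredicting) {
  context.drawImage(video, 0, 0, canvas.width, canvas.height);
  const img = await getImage();
  const predictions = model.predict(img);
  // draw the bounding box and label from 'predictions' as in the original code
  img.dispose();
  await tf.nextFrame();
}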
I am using a face tracking library (tracking.js) to capture a video stream and place an image on top of the face.
The image is drawn on a canvas that has the same width and height as the video, hence the overlay.
I am trying to take a picture and a video of the stream plus the canvas image; however, I can only get a crude, distorted capture of the stream and image.
Here is a CodePen
const canvas = document.getElementById('canvas');
const context = canvas.getContext('2d');
const tracker = new tracking.ObjectTracker('face');
const flowerCrownButton = document.getElementById('flower-crown');
tracker.setInitialScale(1);
tracker.setStepSize(2.7);
tracker.setEdgesDensity(.2);
const img = document.createElement("img");
img.setAttribute("id", "pic");
img.src = canvas.toDataURL();
let filterX = 0;
let filterY = 0;
let filterWidth = 0;
let filterHeight = 0;
function changePic(x, y, width, height, src) {
img.src = src;
filterX = x;
filterY = y;
filterWidth = width;
filterHeight = height;
}
function flowerCrown() {
changePic(0, -0.5, 1, 1, 'https://s3-us-west-2.amazonaws.com/s.cdpn.io/450347/flower-crown.png')
}
flowerCrownButton.addEventListener('click', flowerCrown);
//listen for track events
tracker.on('track', function(event) {
//if (event.data.length === 0) {
//alert("No objects were detected in this frame.");
//} else {
context.clearRect(0, 0, canvas.width, canvas.height)
event.data.forEach(rect => {
context.drawImage(img, rect.x + (filterX * rect.width),
rect.y + (filterY * rect.height),
rect.width * filterWidth,
rect.height * filterHeight
)
})
//}// end of else
});
//start tracking
tracking.track('#video', tracker, {
camera: true
})
const canvas2 = document.getElementById('canvas2');
const context2 = canvas2.getContext('2d');
const video = document.getElementById("video");
video.addEventListener("loadedmetadata", function() {
ratio = video.videoWidth / video.videoHeight;
w = video.videoWidth - 100;
h = parseInt(w / ratio, 10);
canvas2.width = w;
canvas2.height = h;
}, false);
function snap() {
context2.drawImage(video, 10, 5);
context2.drawImage(img, 10, 10)
}
Any ideas? I'd prefer to use the MediaRecorder API and have tried it, but again could not get a stream or picture with the image filter overlay.
Thanks and please don't be snarky :)
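In case it helps, here is a minimal sketch (my own guess at the intent, not taken from the CodePen) that composites the live video and the tracking overlay into the second canvas before exporting: draw the video frame first, then the overlay canvas scaled to the same size, then read the result out. The same compositing, repeated every animation frame, can be recorded by feeding canvas2.captureStream() to a MediaRecorder.
// Sketch: composite the camera frame and the overlay canvas, then export a still.
function snapComposite() {
  context2.drawImage(video, 0, 0, canvas2.width, canvas2.height);   // camera frame
  context2.drawImage(canvas, 0, 0, canvas2.width, canvas2.height);  // face-tracking overlay
  return canvas2.toDataURL('image/png');
}

// Rough recording sketch: keep compositing each frame and record the canvas stream.
const recorder = new MediaRecorder(canvas2.captureStream(30));
const chunks = [];
recorder.ondataavailable = (e) => chunks.push(e.data);
recorder.start();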
I've been searching around looking for some solutions but haven't really found much. I'd like something really simple along the lines of the image below. Has anyone ever used one in a project? Any advice or any APIs I could use? Thanks.
Here is the base:
You need a canvas
You need a canvas context
You need an audio context
var canvas = document.createElement("canvas");
canvas.width = 500;
canvas.height = 180;
var ctx = canvas.getContext("2d");
ctx.fillStyle = "black";
ctx.strokeStyle = "white";
ctx.lineCap = "round";
var auctx;
window.onload = () => {
document.body.appendChild(canvas);
auctx = new(window.AudioContext || window.webkitAudioContext)();
startAudio();
}
var buffer, src, analyser, buffLen;
var barWidth, dataArray;
function startAudio() {
var url = "https://cf-media.sndcdn.com/cTGZiRbnSouE.128.mp3?Policy=eyJTdGF0ZW1lbnQiOlt7IlJlc291cmNlIjoiKjovL2NmLW1lZGlhLnNuZGNkbi5jb20vY1RHWmlSYm5Tb3VFLjEyOC5tcDMiLCJDb25kaXRpb24iOnsiRGF0ZUxlc3NUaGFuIjp7IkFXUzpFcG9jaFRpbWUiOjE1MTk5NTQ5NjB9fX1dfQ__&Signature=JmNkAHzih0~f3lQVwvPXFeTIUVuMXbwlbqizsXbCtc6lFIxjRlqa3wUGp5-xAkt7AUlhiYxu~Wscc6MfQTTc527DHJURMpdqvdXv61ll-WJqoV1V-tpWSa~qR-NEAWGCGBvrge0BkRRAsOHFljeLNCvO3DjzH7lSTPMlV-MtbFV2k-PiY0vrY1LuicAOcfEtXYTiMBkg-rhzkeHFcNHYt2Nb2hmIvmWFI1cFG74FBIXTnVPAg2Yo0r-LeiirWvSgewkIu~zPzaVYjnPaN1y-ZGnPBFiBSC1mpVhtB5wkhTXF5LFthkGUHnUK2ybESr-1uOH9GLye-7dxdIXx~A1LDA__&Key-Pair-Id=APKAJAGZ7VMH2PFPW6UQ"; // nice url
var request = new XMLHttpRequest();
request.open('GET', url, true);
request.responseType = 'arraybuffer';
request.onload = function() {
auctx.decodeAudioData(request.response, function(decoded) {
buffer = decoded; // keep the decoded AudioBuffer (the callback parameter used to shadow the outer 'buffer')
src = auctx.createBufferSource();
src.buffer = buffer;
src.loop = false;
analyser = auctx.createAnalyser();
src.connect(analyser); // route the source through the analyser...
analyser.connect(auctx.destination); // ...and on to the speakers (connecting src directly to the destination as well would double the output)
src.start(0);
analyser.fftSize = 256;
buffLen = analyser.frequencyBinCount;
dataArray = new Uint8Array(buffLen);
barWidth = (500 - 2 * buffLen - 4) / buffLen * 2.5;
ctx.lineWidth = barWidth;
draw();
});
}
request.send();
}
function draw() {
ctx.fillRect(0, 0, 500, 180);
analyser.getByteFrequencyData(dataArray);
for (var i = 0; i < buffLen; i++) {
ctx.beginPath();
ctx.moveTo(4 + 2 * i * barWidth + barWidth / 2, 178 - barWidth / 2);
ctx.lineTo(4 + 2 * i * barWidth + barWidth / 2, 178 - dataArray[i] * 0.65 - barWidth / 2);
ctx.stroke();
}
requestAnimationFrame(draw);
}
canvas {
background: black;
}
This code should work. You can add some images and tweak settings.
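One caveat worth adding: in current browsers the AudioContext may start in a suspended state until there is a user gesture, so if nothing plays, resuming it from a click is usually enough. A one-line sketch:
// If the context starts suspended (autoplay policy), resume it on the first click.
document.body.addEventListener('click', () => auctx.resume(), { once: true });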
I think this might be what you're looking for. Sorry I'm a bit late.
// AUDIO CONTEXT
window.AudioContext = (window.AudioContext ||
window.webkitAudioContext ||
window.mozAudioContext ||
window.oAudioContext ||
window.msAudioContext);
if (!AudioContext) alert('This site cannot be run in your Browser. Try a recent Chrome or Firefox. ');
var audioContext = new AudioContext();
var currentBuffer = null;
// CANVAS
var canvasWidth = window.innerWidth, canvasHeight = 120 ;
var newCanvas = createCanvas (canvasWidth, canvasHeight);
var context = null;
window.onload = appendCanvas;
function appendCanvas() { document.body.appendChild(newCanvas);
context = newCanvas.getContext('2d'); }
// the function that loads the sound file
//NOTE this program for some reason won’t load sound files from like a weebly website so you’ll have to add the files to your github or whatever and use that raw audio file
function loadMusic(url) {
var req = new XMLHttpRequest();
req.open( "GET", url, true );
req.responseType = "arraybuffer";
req.onreadystatechange = function (e) {
if (req.readyState == 4) {
if(req.status == 200)
audioContext.decodeAudioData(req.response,
function(buffer) {
currentBuffer = buffer;
displayBuffer(buffer);
}, onDecodeError);
else
alert('error during the load.Wrong url or cross origin issue');
}
} ;
req.send();
}
function onDecodeError() { alert('error while decoding your file.'); }
// MUSIC DISPLAY
function displayBuffer(buff /* is an AudioBuffer */) {
var drawLines = 500;
var leftChannel = buff.getChannelData(0); // Float32Array describing left channel
var lineOpacity = canvasWidth / leftChannel.length ;
context.save();
context.fillStyle = '#080808' ;
context.fillRect(0,0,canvasWidth,canvasHeight );
context.strokeStyle = '#46a0ba';
context.globalCompositeOperation = 'lighter';
context.translate(0,canvasHeight / 2);
//context.globalAlpha = 0.6 ; // lineOpacity ;
context.lineWidth=1;
var totallength = leftChannel.length;
var eachBlock = Math.floor(totallength / drawLines);
var lineGap = (canvasWidth/drawLines);
context.beginPath();
for(var i=0;i<=drawLines;i++){
var audioBuffKey = Math.floor(eachBlock * i);
var x = i*lineGap;
var y = leftChannel[audioBuffKey] * canvasHeight / 2;
context.moveTo( x, y );
context.lineTo( x, (y*-1) );
}
context.stroke();
context.restore();
}
// Creates the Canvas
function createCanvas ( w, h ) {
var newCanvas = document.createElement('canvas');
newCanvas.width = w; newCanvas.height = h;
return newCanvas;
};
// The program runs the url you put into the line below
loadMusic('||YOUR LINK||');
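As a side note, decodeAudioData() also has a promise-based form in current browsers, so an equivalent loader using fetch (an alternative sketch with a hypothetical loadMusicViaFetch name, not what the code above uses) could be:
// Hypothetical alternative to loadMusic() using fetch and the promise form of decodeAudioData.
function loadMusicViaFetch(url) {
  fetch(url)
    .then((response) => response.arrayBuffer())
    .then((data) => audioContext.decodeAudioData(data))
    .then((buffer) => {
      currentBuffer = buffer;
      displayBuffer(buffer);
    })
    .catch(onDecodeError);
}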
Happy Coding!!
If you happen to have a better solution for this, could you send me the code? I'm also having a bit of trouble with it; I'm trying to create one like SoundCloud's without using external libraries.
EDIT 1
So I thought it would be nice to give an example of what it would look like in action, so here —>
// AUDIO CONTEXT
window.AudioContext = (window.AudioContext ||
window.webkitAudioContext ||
window.mozAudioContext ||
window.oAudioContext ||
window.msAudioContext);
if (!AudioContext) alert('This site cannot be run in your Browser. Try a recent Chrome or Firefox. ');
var audioContext = new AudioContext();
var currentBuffer = null;
// CANVAS
var canvasWidth = window.innerWidth, canvasHeight = 120 ;
var newCanvas = createCanvas (canvasWidth, canvasHeight);
var context = null;
window.onload = appendCanvas;
function appendCanvas() { document.body.appendChild(newCanvas);
context = newCanvas.getContext('2d'); }
// the function that loads the sound file
//NOTE this program for some reason won’t load sound files from like a weebly website so you’ll have to add the files to your github or whatever and use that raw audio file
function loadMusic(url) {
var req = new XMLHttpRequest();
req.open( "GET", url, true );
req.responseType = "arraybuffer";
req.onreadystatechange = function (e) {
if (req.readyState == 4) {
if(req.status == 200)
audioContext.decodeAudioData(req.response,
function(buffer) {
currentBuffer = buffer;
displayBuffer(buffer);
}, onDecodeError);
else
alert('error during the load.Wrong url or cross origin issue');
}
} ;
req.send();
}
function onDecodeError() { alert('error while decoding your file.'); }
// MUSIC DISPLAY
function displayBuffer(buff /* is an AudioBuffer */) {
var drawLines = 500;
var leftChannel = buff.getChannelData(0); // Float32Array describing left channel
var lineOpacity = canvasWidth / leftChannel.length ;
context.save();
context.fillStyle = '#080808' ;
context.fillRect(0,0,canvasWidth,canvasHeight );
context.strokeStyle = '#46a0ba';
context.globalCompositeOperation = 'lighter';
context.translate(0,canvasHeight / 2);
//context.globalAlpha = 0.6 ; // lineOpacity ;
context.lineWidth=1;
var totallength = leftChannel.length;
var eachBlock = Math.floor(totallength / drawLines);
var lineGap = (canvasWidth/drawLines);
context.beginPath();
for(var i=0;i<=drawLines;i++){
var audioBuffKey = Math.floor(eachBlock * i);
var x = i*lineGap;
var y = leftChannel[audioBuffKey] * canvasHeight / 2;
context.moveTo( x, y );
context.lineTo( x, (y*-1) );
}
context.stroke();
context.restore();
}
// Creates the Canvas
function createCanvas ( w, h ) {
var newCanvas = document.createElement('canvas');
newCanvas.width = w; newCanvas.height = h;
return newCanvas;
};
// The program runs the url you put into the line below
loadMusic('https://raw.githubusercontent.com/lightning417techa/Music/master/images/lil%20dicky%20-%20freaky%20friday%20(lyrics)%20ft.%20chris%20brown.mp3');
Note: that's the kind of file this thing needs; it's annoying at times.