How can you detect the pitch of a live audio input in the browser?
The code below will get you 1,024 frequency values. However, I don't know how to go from these to actual pitches (e.g. A#).
const audioContext = new window.AudioContext();
const analyser = audioContext.createAnalyser();
navigator.getUserMedia(
  { audio: true },
  stream => {
    audioContext.createMediaStreamSource(stream).connect(analyser);
    const dataArray = new Uint8Array(analyser.frequencyBinCount);
    analyser.getByteTimeDomainData(dataArray);
    // Log the contents of the analyser every 500ms.
    setInterval(() => {
      console.log(dataArray.length);
    }, 500);
  },
  err => console.log(err)
);
What you are currently accessing is the time-domain data, which cannot be used to retrieve a note (which seems to be what you want).
What you want is the frequency domain, via one of the AnalyserNode.get[XXX]FrequencyData methods, from which you can see which frequencies are louder or quieter.
However, since most sounds are made of harmonics, you can't reliably recover which note was played from a microphone. Add to this that we only have access to limited resolution, and you won't be able to retrieve an exact note even from a virtual oscillator, let alone from a microphone.
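To make the resolution point concrete: each bin of the frequency data spans sampleRate / fftSize Hz, so at 44.1 kHz the default fftSize of 2048 gives bins about 21.5 Hz wide, already wider than the gap between adjacent low notes. A quick sketch (the 44.1 kHz figure is an assumption; the actual rate comes from the context):
const audioCtx = new (window.AudioContext || window.webkitAudioContext)();
const analyser = audioCtx.createAnalyser();
// Width of one FFT bin in Hz: sampleRate / fftSize.
// At 44100 Hz with the default fftSize of 2048 this is ~21.5 Hz;
// with fftSize = 32768 it shrinks to ~1.35 Hz.
const binWidth = audioCtx.sampleRate / analyser.fftSize;
console.log(`bin width: ${binWidth.toFixed(2)} Hz`);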
The example below was built from this Q/A and the examples on MDN:
const canvasCtx = canvas.getContext('2d');
const WIDTH = canvas.width = 500;
const HEIGHT = canvas.height = 150;
const audioCtx = new (window.AudioContext || window.webkitAudioContext);
const analyser = audioCtx.createAnalyser();
const nyquist = audioCtx.sampleRate / 2;
// highest precision
analyser.fftSize = 32768;
const bufferLength = analyser.frequencyBinCount;
const dataArray = new Uint8Array(bufferLength);
const osc = audioCtx.createOscillator();
osc.frequency.value = 400;
osc.connect(analyser);
osc.connect(audioCtx.destination);
range.oninput = e => {
  osc.frequency.value = range.value;
};
if (!audioCtx.state || audioCtx.state === 'running') {
  begin();
}
else {
  log.textContent = 'click anywhere to begin';
  onclick = e => {
    onclick = null;
    begin();
  };
}
function begin() {
  osc.start(0);
  draw();
}
function draw() {
  requestAnimationFrame(draw);
  // get the Frequency Domain
  analyser.getByteFrequencyData(dataArray);
  canvasCtx.fillStyle = 'rgb(0, 0, 0)';
  canvasCtx.fillRect(0, 0, WIDTH, HEIGHT);
  const barWidth = (WIDTH / bufferLength) * 2.5;
  let max_val = -Infinity;
  let max_index = -1;
  let x = 0;
  for (let i = 0; i < bufferLength; i++) {
    let barHeight = dataArray[i];
    if (barHeight > max_val) {
      max_val = barHeight;
      max_index = i;
    }
    canvasCtx.fillStyle = 'rgb(' + (barHeight + 100) + ',50,50)';
    canvasCtx.fillRect(x, HEIGHT - barHeight / 2, barWidth, barHeight / 2);
    x += barWidth;
  }
  log.textContent = `loudest freq: ${max_index * (nyquist / bufferLength)}
real value: ${range.value}`;
}
#log{display: inline-block; margin:0 12px}
#canvas{display: block;}
<input id="range" type="range" min="0" max="1000" value="400"><pre id="log"></pre>
<canvas id="canvas"></canvas>
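That said, if you do want to map the loudest detected frequency to the nearest note name, the standard 12-tone equal-temperament formula (A4 = 440 Hz) does the conversion; a minimal sketch, independent of the snippet above:
const NOTE_NAMES = ['C', 'C#', 'D', 'D#', 'E', 'F', 'F#', 'G', 'G#', 'A', 'A#', 'B'];

// Convert a frequency in Hz to the nearest note name + octave,
// using the MIDI mapping: midi = 69 + 12 * log2(f / 440).
function frequencyToNote(freq) {
  const midi = Math.round(69 + 12 * Math.log2(freq / 440));
  const name = NOTE_NAMES[((midi % 12) + 12) % 12];
  const octave = Math.floor(midi / 12) - 1;
  return `${name}${octave}`;
}

console.log(frequencyToNote(440));    // "A4"
console.log(frequencyToNote(466.16)); // "A#4"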
Related
I'm making a small recording feature using the user's/browser's microphone. When the microphone picks up sound, an audio visualization is shown (like an equalizer). So far so good.
But I really want to change the way the visualization looks to something like the image below. I have never worked in this area before and don't know how to go about it.
I imagine something like this:
https://images.app.goo.gl/pfKgnGnQz3MJVkbW6
I have two questions:
Is it possible to get a result like the attached?
How do you get started on something like that? (or has anyone done something like this that can share examples?)
My current code for the equalizer visualization:
audioContext = new AudioContext();
gumStream = stream;
input = audioContext.createMediaStreamSource(stream);
rec = new Recorder(input,{numChannels:1})
rec.record()
inputPoint = audioContext.createGain();
audioInput = input;
audioInput.connect(inputPoint);
analyserNode = audioContext.createAnalyser();
analyserNode.fftSize = 1024;
inputPoint.connect( analyserNode );
updateAnalysers();
function updateAnalysers(time) {
if (!analyserContext) {
var canvas = document.getElementById("analyser");
canvasWidth = canvas.width;
canvasHeight = canvas.height;
analyserContext = canvas.getContext('2d');
}
{
var SPACING = 5;
var BAR_WIDTH = 5;
var numBars = Math.round(canvasWidth / SPACING);
var freqByteData = new Uint8Array(analyserNode.frequencyBinCount);
analyserNode.getByteFrequencyData(freqByteData);
analyserContext.clearRect(0, 0, canvasWidth, canvasHeight);
analyserContext.fillStyle = '#D5E9EB';
analyserContext.lineCap = 'round';
var multiplier = analyserNode.frequencyBinCount / numBars;
// Draw rectangle for each frequency bin.
for (var i = 0; i < numBars; ++i) {
var magnitude = 0;
var offset = Math.floor( i * multiplier );
for (var j = 0; j< multiplier; j++)
magnitude += freqByteData[offset + j];
magnitude = magnitude / multiplier;
var magnitude2 = freqByteData[i * multiplier];
analyserContext.fillRect(i * SPACING, canvasHeight, BAR_WIDTH, -magnitude);
}
}
rafID = window.requestAnimationFrame( updateAnalysers );
}
Ans 1:
Your image is broken so I can't answer precisely, but as far as I know you can visualize any waveform from the audio data.
How do you get started on something like that? (or has anyone done something like this that can share examples?)
Ans 2:
So I did use a customized waveform. I am sharing my code:
import React, { Component } from "react";
import AudioVisualiser from "./AudioVisualiser";
class AudioAnalyser extends Component {
constructor(props) {
super(props);
this.state = { audioData: new Uint8Array(0) };
this.tick = this.tick.bind(this);
}
componentDidMount() {
this.audioContext = new (window.AudioContext ||
window.webkitAudioContext)();
this.analyser = this.audioContext.createAnalyser();
this.dataArray = new Uint8Array(this.analyser.frequencyBinCount);
this.source = this.audioContext.createMediaStreamSource(this.props.audio);
this.source.connect(this.analyser);
this.rafId = requestAnimationFrame(this.tick);
}
tick() {
this.analyser.getByteTimeDomainData(this.dataArray);
this.setState({ audioData: this.dataArray });
this.rafId = requestAnimationFrame(this.tick);
}
componentWillUnmount() {
cancelAnimationFrame(this.rafId);
// this.analyser.disconnect();
// this.source.disconnect();
}
render() {
return <AudioVisualiser audioData={this.state.audioData} />;
}
}
export default AudioAnalyser;
import React, { Component } from 'react';
class AudioVisualiser extends Component {
constructor(props) {
super(props);
this.canvas = React.createRef();
}
componentDidUpdate() {
this.draw();
}
draw() {
const { audioData } = this.props;
const canvas = this.canvas.current;
const height = canvas.height;
const width = canvas.width;
const context = canvas.getContext('2d');
let x = 0;
const sliceWidth = (width * 1.0) / audioData.length;
context.lineWidth = 2;
context.strokeStyle = '#000000';
context.clearRect(0, 0, width, height);
context.beginPath();
context.moveTo(0, height / 2);
for (const item of audioData) {
const y = (item / 255.0) * height;
context.lineTo(x, y);
x += sliceWidth;
}
context.lineTo(x, height / 2);
context.stroke();
}
render() {
return <canvas width="300" height="300" ref={this.canvas} />;
}
}
export default AudioVisualiser;
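For context, a minimal sketch of how these two components might be wired up, assuming the microphone stream is obtained with getUserMedia and passed in as the `audio` prop (the wrapper component here is hypothetical; only AudioAnalyser and AudioVisualiser come from the code above):
import React, { useEffect, useState } from "react";
import AudioAnalyser from "./AudioAnalyser";

// Hypothetical wrapper: requests the microphone and passes the
// resulting MediaStream to AudioAnalyser via the `audio` prop.
function MicVisualiser() {
  const [stream, setStream] = useState(null);

  useEffect(() => {
    navigator.mediaDevices
      .getUserMedia({ audio: true })
      .then(setStream)
      .catch((err) => console.error(err));
  }, []);

  return stream ? <AudioAnalyser audio={stream} /> : <p>Waiting for mic...</p>;
}

export default MicVisualiser;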
I am trying to change the color of an SVG image using audio from the microphone.
I have the logic written and the fill value is updated on the SVG element, but nothing changes visually.
window.onload = function () {
if (navigator.getUserMedia) {
navigator.getUserMedia(
{ audio: true },
(stream) => {
var context = new AudioContext();
var src = context.createMediaStreamSource(stream);
var analyser = context.createAnalyser();
var svg = document.querySelector("#rangoli");
src.connect(analyser);
analyser.connect(context.destination);
analyser.fftSize = 256;
var bufferLength = analyser.frequencyBinCount;
var dataArray = new Uint8Array(bufferLength);
function renderFrame() {
requestAnimationFrame(renderFrame);
analyser.getByteFrequencyData(dataArray);
for (var i = 0; i < bufferLength; i++) {
barHeight = dataArray[i] / 2;
var r = barHeight + 25 * (i / bufferLength);
var g = 250 * (i / bufferLength);
var b = 50;
svg.setAttribute("fill", `rgb(${r}, ${g}, ${b})`);
}
}
renderFrame();
},
() => console.log("Error")
);
}
};
Here is the SVG updating the fill value:
Here is a CodePen demo:
https://codepen.io/GR8z/pen/qBjopZG
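A likely cause, assuming the shapes inside #rangoli carry their own fill attributes: a fill set on the root <svg> is only inherited by children that don't specify a fill themselves, and the loop above also overwrites the color once per bin, so only the last bin matters. A sketch of the kind of change that usually helps (the element ids come from the question; the path selector is an assumption):
function renderFrame() {
  requestAnimationFrame(renderFrame);
  analyser.getByteFrequencyData(dataArray);

  // Use one value per frame (here the average amplitude) instead of
  // overwriting the fill once per frequency bin.
  const avg = dataArray.reduce((a, b) => a + b, 0) / bufferLength;
  const color = `rgb(${avg + 25}, ${250 * (avg / 255)}, 50)`;

  // Set the fill on the shapes themselves; child elements with their own
  // fill attribute ignore the fill set on the root <svg>.
  document.querySelectorAll("#rangoli path").forEach((p) =>
    p.setAttribute("fill", color)
  );
}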
I am working on an audio visualizer for the web that also lets the user "tune" the raw audio signal visualizer to a frequency. This is a feature of many hardware oscilloscopes. Basically, when a user centers on 440 Hz and I have a 440 Hz sine wave, the wave should stay still on the canvas and not move left or right. My plan was to move the graph to the left according to the frequency (440 Hz = 1/440 s to the left per second, because the wave should repeat every 1/440 s), but this does not seem to work.
I could not find the units used by the AnalyserNode's time-domain data. I guess that it's in milliseconds, but I am not certain.
"use strict";
// Oscillator instead of mic for debugging
const USE_OSCILLATOR = true;
// Compatibility
if (!window.AudioContext)
window.AudioContext = window.webkitAudioContext;
if (!navigator.getUserMedia)
navigator.getUserMedia =
navigator.mozGetUserMedia ||
navigator.webkitGetUserMedia ||
navigator.msGetUserMedia;
// Main
class App {
constructor(visualizerElement, optionsElement) {
this.visualizerElement = visualizerElement;
this.optionsElement = optionsElement;
// HTML elements
this.canvas = document.createElement("canvas");
// Context
this.context = new AudioContext({
// Low latency
latencyHint: "interactive",
});
this.canvasCtx = this.canvas.getContext("2d", {
// Low latency
desynchronized: true,
alpha: false,
});
// Audio nodes
this.audioAnalyser = this.context.createAnalyser();
this.audioBuffer = new Uint8Array(this.audioAnalyser.frequencyBinCount);
this.audioInputStream = null;
this.audioInputNode = null;
if (this.canvasCtx === null)
throw new Error("2D rendering Context not supported by browser.");
this.updateCanvasSize();
window.addEventListener("resize", () => this.updateCanvasSize());
this.drawVisualizer();
this.visualizerElement.appendChild(this.canvas);
if (USE_OSCILLATOR) {
let oscillator = this.context.createOscillator();
oscillator.type = "sine";
oscillator.frequency.setValueAtTime(440, this.context.currentTime);
oscillator.connect(this.audioAnalyser);
oscillator.start();
}
else {
navigator.getUserMedia({ audio: true }, (stream) => {
this.audioInputStream = stream;
this.audioInputNode = this.context.createMediaStreamSource(stream);
this.audioInputNode.channelCountMode = "explicit";
this.audioInputNode.channelCount = 1;
this.audioBuffer = new Uint8Array(this.audioAnalyser.frequencyBinCount);
this.audioInputNode.connect(this.audioAnalyser);
}, (err) => console.error(err));
}
}
updateCanvasSize() {
var _a;
this.canvas.width = window.innerWidth;
this.canvas.height = window.innerHeight;
(_a = this.canvasCtx) === null || _a === void 0 ? void 0 : _a.setTransform(1, 0, 0, -1, 0, this.canvas.height * 0.5);
}
drawVisualizer() {
if (this.canvasCtx === null)
return;
const ctx = this.canvasCtx;
ctx.fillStyle = "black";
ctx.fillRect(0, -0.5 * this.canvas.height, this.canvas.width, this.canvas.height);
// Draw FFT
this.audioAnalyser.getByteFrequencyData(this.audioBuffer);
const step = this.canvas.width / this.audioBuffer.length;
const scale = this.canvas.height / (2 * 255);
ctx.beginPath();
ctx.moveTo(-step, this.audioBuffer[0] * scale);
this.audioBuffer.forEach((sample, index) => {
ctx.lineTo(index * step, scale * sample);
});
ctx.strokeStyle = "white";
ctx.stroke();
// Get the highest dominant frequency
let highestFreqHalfHz = 0;
{
/**
* Highest frequency in 0.5Hz
*/
let highestFreq = NaN;
let highestFreqAmp = NaN;
let remSteps = NaN;
for (let i = this.audioBuffer.length - 1; i >= 0; i--) {
const sample = this.audioBuffer[i];
if (sample > 20 && (isNaN(highestFreqAmp) || sample > highestFreqAmp)) {
highestFreq = i;
highestFreqAmp = sample;
if (isNaN(remSteps))
remSteps = 500;
}
if (!isNaN(remSteps)) {
if (remSteps-- < 0)
break;
}
}
if (!isNaN(highestFreq)) {
ctx.beginPath();
ctx.moveTo(highestFreq * step, 0);
ctx.lineTo(highestFreq * step, scale * 255);
ctx.strokeStyle = "green";
ctx.stroke();
highestFreqHalfHz = highestFreq;
}
}
// Draw Audio
this.audioAnalyser.getByteTimeDomainData(this.audioBuffer);
{
const bufferSize = this.audioBuffer.length;
const offsetY = -this.canvas.height * 0.5;
// I don't know what I am doing here:
const offsetX = highestFreqHalfHz == 0
? 0
: bufferSize -
Math.round(((this.context.currentTime * 1000) % (1 / 440)) % bufferSize);
// Draw the audio graph with the given offset
ctx.beginPath();
ctx.moveTo(-step, this.audioBuffer[0] * scale + offsetY);
for (let i = 0; i < bufferSize; i++) {
const index = (offsetX + i) % bufferSize;
const sample = this.audioBuffer[index];
ctx.lineTo(i * step, scale * sample + offsetY);
}
ctx.strokeStyle = "white";
ctx.stroke();
}
}
}
window.addEventListener("load", () => {
const app = new App(document.getElementById("visualizer"), document.getElementById("options"));
requestAnimationFrame(draw);
function draw() {
requestAnimationFrame(draw);
app.drawVisualizer();
}
});
html {
background: black;
}
body {
width: 100vw;
height: 100vh;
margin: 0;
overflow: hidden;
}
#visualizer {
position: fixed;
inset: 0;
}
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Equalizer</title>
</head>
<body>
<div id="visualizer"></div>
<div id="options"></div>
</body>
</html>
The above snippet was generated from TypeScript. You can find the source here. If it worked as intended, the oscillating graph (bottom) would not be moving.
I was able to solve this problem thanks to Raymond Toy's comment and my maths teacher (thank you Mr. Klein). The solution was Math.round((this.context.currentTime % iv) * sampleRate), where iv is the period of the frequency (1/f seconds). The wave is not perfectly centered, and the FFT approximation is not very accurate. In the following example I forced the detected frequency to be the specified one.
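For reference, the time-domain array holds raw samples at the AudioContext's sample rate (one array element per sample, not milliseconds), so the period of the detected frequency in samples is sampleRate / f. A quick check of the offset formula, assuming a 48 kHz context and a 440 Hz tone (both numbers are just example values):
const sampleRate = 48000;    // assumed AudioContext sample rate
const freq = 440;            // detected/target frequency in Hz
const iv = 1 / freq;         // period in seconds (~2.27 ms)

// One element of getByteTimeDomainData() is one sample, so the period
// expressed in samples is sampleRate * iv (~109 samples here).
const periodInSamples = sampleRate * iv;

// The offset from the answer: the phase of currentTime within one period,
// converted to samples. It is always smaller than periodInSamples.
const currentTime = 1.2345;  // example value of AudioContext.currentTime
const offsetX = Math.round((currentTime % iv) * sampleRate);
console.log(periodInSamples, offsetX);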
"use strict";
// Oscillator instead of mic for debugging
const USE_OSCILLATOR = true;
const OSCILLATOR_HZ = 1000;
// Compatibility
if (!window.AudioContext)
window.AudioContext = window.webkitAudioContext;
if (!navigator.getUserMedia)
navigator.getUserMedia =
navigator.mozGetUserMedia ||
navigator.webkitGetUserMedia ||
navigator.msGetUserMedia;
// Main
class App {
constructor(visualizerElement, optionsElement) {
this.visualizerElement = visualizerElement;
this.optionsElement = optionsElement;
// HTML elements
this.canvas = document.createElement("canvas");
// Context
this.context = new AudioContext({
// Low latency
latencyHint: "interactive",
});
this.canvasCtx = this.canvas.getContext("2d", {
// Low latency
desynchronized: true,
alpha: false,
});
// Audio nodes
this.audioAnalyser = this.context.createAnalyser();
this.audioBuffer = new Uint8Array(0);
this.audioInputStream = null;
this.audioInputNode = null;
if (this.canvasCtx === null)
throw new Error("2D rendering Context not supported by browser.");
this.updateCanvasSize();
window.addEventListener("resize", () => this.updateCanvasSize());
this.drawVisualizer();
this.visualizerElement.appendChild(this.canvas);
this.audioAnalyser.fftSize = 2048;
this.audioAnalyser.maxDecibels = -10;
this.audioBuffer = new Uint8Array(this.audioAnalyser.frequencyBinCount * 2);
this.audioFilter = this.context.createBiquadFilter();
this.audioFilter.type = "bandpass";
this.audioFilter.frequency.value = 900;
this.audioFilter.Q.value = 20;
this.audioAmplifier = this.context.createGain();
this.audioAmplifier.gain.value = 5;
this.audioFilter.connect(this.audioAmplifier);
this.audioAmplifier.connect(this.audioAnalyser);
if (USE_OSCILLATOR) {
let oscillator = this.context.createOscillator();
oscillator.type = "sine";
oscillator.frequency.setValueAtTime(OSCILLATOR_HZ, this.context.currentTime);
oscillator.connect(this.audioFilter);
oscillator.start();
}
else {
navigator.getUserMedia({ audio: true }, (stream) => {
this.audioInputStream = stream;
this.audioInputNode = this.context.createMediaStreamSource(stream);
this.audioInputNode.channelCountMode = "explicit";
this.audioInputNode.channelCount = 1;
this.audioBuffer = new Uint8Array(this.audioAnalyser.frequencyBinCount);
this.audioInputNode.connect(this.audioFilter);
}, (err) => console.error(err));
}
}
updateCanvasSize() {
var _a;
this.canvas.width = window.innerWidth;
this.canvas.height = window.innerHeight;
(_a = this.canvasCtx) === null || _a === void 0 ? void 0 : _a.setTransform(1, 0, 0, -1, 0, this.canvas.height * 0.5);
}
drawVisualizer() {
if (this.canvasCtx === null)
return;
const ctx = this.canvasCtx;
ctx.globalAlpha = 0.5;
ctx.fillStyle = "black";
ctx.fillRect(0, -0.5 * this.canvas.height, this.canvas.width, this.canvas.height);
ctx.globalAlpha = 1;
// Draw FFT
this.audioAnalyser.getByteFrequencyData(this.audioBuffer);
const scale = this.canvas.height / (2 * 255);
const { frequencyBinCount } = this.audioAnalyser;
const { sampleRate } = this.context;
{
const step = this.canvas.width / frequencyBinCount;
ctx.beginPath();
ctx.moveTo(-step, this.audioBuffer[0] * scale);
for (let index = 0; index < frequencyBinCount; index++) {
ctx.lineTo(index * step, scale * this.audioBuffer[index]);
}
ctx.strokeStyle = "white";
ctx.stroke();
}
// Get the highest dominant frequency
const step = this.canvas.width / frequencyBinCount;
let highestFreqHz = 0;
{
/**
* Highest frequency index in the buffer
*/
let highestFreqIndex = NaN;
let highestFreqAmp = NaN;
let remSteps = NaN;
for (let i = frequencyBinCount - 1; i >= 0; i--) {
const sample = this.audioBuffer[i];
if (sample > 30) {
if (isNaN(highestFreqAmp)) {
highestFreqIndex = i;
highestFreqAmp = sample;
}
else {
if (sample > highestFreqAmp) {
highestFreqIndex = i;
highestFreqAmp = sample;
}
}
//if (isNaN(remSteps)) remSteps = 100;
}
if (!isNaN(remSteps)) {
if (remSteps-- < 0)
break;
}
}
if (!isNaN(highestFreqIndex)) {
// Force exact value: (not necessary)
highestFreqIndex =
(OSCILLATOR_HZ * (2 * frequencyBinCount)) / sampleRate;
ctx.beginPath();
ctx.moveTo(highestFreqIndex * step, 0);
ctx.lineTo(highestFreqIndex * step, scale * 255);
ctx.strokeStyle = "green";
ctx.stroke();
highestFreqHz =
(highestFreqIndex * sampleRate) / (2 * frequencyBinCount);
window.HZ = highestFreqHz;
}
}
// Draw Audio
this.audioAnalyser.getByteTimeDomainData(this.audioBuffer);
{
const iv = highestFreqHz == 0 ? 0 : 1 / highestFreqHz;
const bufferSize = this.audioBuffer.length;
const offsetY = -this.canvas.height / 2.4;
const startIndex = Math.round(iv * sampleRate);
const step = this.canvas.width / (this.audioBuffer.length - startIndex);
const scale = this.canvas.height / (3 * 255);
const offsetX = highestFreqHz == 0
? 0
: Math.round((this.context.currentTime % iv) * sampleRate) %
bufferSize;
// Draw the audio graph with the given offset
ctx.beginPath();
ctx.moveTo(-step, this.audioBuffer[startIndex - offsetX] * scale + offsetY);
for (let i = startIndex; i < bufferSize; i += 4) {
const index = (i - offsetX) % bufferSize;
const sample = this.audioBuffer[index];
ctx.lineTo((i - startIndex) * step, scale * sample + offsetY);
}
ctx.strokeStyle = "white";
ctx.stroke();
}
}
}
window.addEventListener("load", () => {
const app = new App(document.getElementById("visualizer"), document.getElementById("options"));
requestAnimationFrame(draw);
function draw() {
requestAnimationFrame(draw);
app.drawVisualizer();
}
});
html {
background: black;
}
body {
width: 100vw;
height: 100vh;
margin: 0;
overflow: hidden;
}
#visualizer {
position: fixed;
inset: 0;
}
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Equalizer</title>
</head>
<body>
<div id="visualizer"></div>
<div id="options"></div>
</body>
</html>
I am trying to make a React app for recording voice samples and saving them as WAV files. I used recorder-js to make the recording, but I am still struggling with the part that saves it as a WAV file, and I get exportWAV is not a function every time.
If you've run into this, please help.
Sample code is here
//create
var record_recorder;
record_recorder = new RecorderV2(input,{sampleRate:44100, numChannels:2});
//start
record_recorder.record();
//stop
record_recorder.stop();
//export
record_recorder.exportWAV(function(blob) {
var url = URL.createObjectURL(blob);
var li = document.createElement('div');
var au = document.createElement('audio');
var hf = document.createElement('a');
var br = document.createElement('br');
record_result_blob = blob;
au.controls = true;
au.src = url;
hf.href = url;
hf.download = 'record.wav';
hf.innerHTML = hf.download;
li.appendChild(au);
li.appendChild(br);
li.appendChild(hf);
if (result)
result(li);
});
See the example website:
https://voice-recorder-online.com
code:
https://voice-recorder-online.com/js/index_editor.js
Here's how you can draw waves that react to your voice, and record and save the result as a WAV file.
(async() => {
let leftchannel = [];
let rightchannel = [];
let recorder = null;
let recording = false;
let recordingLength = 0;
let volume = null;
let audioInput = null;
let sampleRate = null;
let AudioContext = window.AudioContext || window.webkitAudioContext;
let context = null;
let analyser = null;
let canvas = document.querySelector("canvas");
let canvasCtx = canvas.getContext("2d");
let visualSelect = document.querySelector("#visSelect");
let micSelect = document.querySelector("#micSelect");
let stream = null;
let tested = false;
try {
window.stream = stream = await getStream();
console.log("Got stream");
} catch (err) {
alert("Issue getting mic", err);
}
const deviceInfos = await navigator.mediaDevices.enumerateDevices();
var mics = [];
for (let i = 0; i !== deviceInfos.length; ++i) {
let deviceInfo = deviceInfos[i];
if (deviceInfo.kind === "audioinput") {
mics.push(deviceInfo);
let label = deviceInfo.label || "Microphone " + mics.length;
console.log("Mic ", label + " " + deviceInfo.deviceId);
const option = document.createElement("option");
option.value = deviceInfo.deviceId;
option.text = label;
micSelect.appendChild(option);
}
}
function getStream(constraints) {
if (!constraints) {
constraints = {
audio: true,
video: false
};
}
return navigator.mediaDevices.getUserMedia(constraints);
}
setUpRecording();
function setUpRecording() {
context = new AudioContext();
sampleRate = context.sampleRate;
// creates a gain node
volume = context.createGain();
// creates an audio node from teh microphone incoming stream
audioInput = context.createMediaStreamSource(stream);
// Create analyser
analyser = context.createAnalyser();
// connect audio input to the analyser
audioInput.connect(analyser);
// connect analyser to the volume control
// analyser.connect(volume);
let bufferSize = 2048;
let recorder = context.createScriptProcessor(bufferSize, 2, 2);
// we connect the volume control to the processor
// volume.connect(recorder);
analyser.connect(recorder);
// finally connect the processor to the output
recorder.connect(context.destination);
recorder.onaudioprocess = function(e) {
// Check
if (!recording) return;
// Do something with the data, i.e Convert this to WAV
console.log("recording");
let left = e.inputBuffer.getChannelData(0);
let right = e.inputBuffer.getChannelData(1);
if (!tested) {
tested = true;
// if this reduces to 0 we are not getting any sound
if (!left.reduce((a, b) => a + b)) {
alert("There seems to be an issue with your Mic");
// clean up;
stop();
stream.getTracks().forEach(function(track) {
track.stop();
});
context.close();
}
}
// we clone the samples
leftchannel.push(new Float32Array(left));
rightchannel.push(new Float32Array(right));
recordingLength += bufferSize;
};
visualize();
}
function mergeBuffers(channelBuffer, recordingLength) {
let result = new Float32Array(recordingLength);
let offset = 0;
let lng = channelBuffer.length;
for (let i = 0; i < lng; i++) {
let buffer = channelBuffer[i];
result.set(buffer, offset);
offset += buffer.length;
}
return result;
}
function interleave(leftChannel, rightChannel) {
let length = leftChannel.length + rightChannel.length;
let result = new Float32Array(length);
let inputIndex = 0;
for (let index = 0; index < length;) {
result[index++] = leftChannel[inputIndex];
result[index++] = rightChannel[inputIndex];
inputIndex++;
}
return result;
}
function writeUTFBytes(view, offset, string) {
let lng = string.length;
for (let i = 0; i < lng; i++) {
view.setUint8(offset + i, string.charCodeAt(i));
}
}
function start() {
recording = true;
document.querySelector("#msg").style.visibility = "visible";
// reset the buffers for the new recording
leftchannel.length = rightchannel.length = 0;
recordingLength = 0;
console.log("context: ", !!context);
if (!context) setUpRecording();
}
function stop() {
console.log("Stop");
recording = false;
document.querySelector("#msg").style.visibility = "hidden";
// we flat the left and right channels down
let leftBuffer = mergeBuffers(leftchannel, recordingLength);
let rightBuffer = mergeBuffers(rightchannel, recordingLength);
// we interleave both channels together
let interleaved = interleave(leftBuffer, rightBuffer);
///////////// WAV Encode /////////////////
// from http://typedarray.org/from-microphone-to-wav-with-getusermedia-and-web-audio/
//
// we create our wav file
let buffer = new ArrayBuffer(44 + interleaved.length * 2);
let view = new DataView(buffer);
// RIFF chunk descriptor
writeUTFBytes(view, 0, "RIFF");
view.setUint32(4, 44 + interleaved.length * 2, true);
writeUTFBytes(view, 8, "WAVE");
// FMT sub-chunk
writeUTFBytes(view, 12, "fmt ");
// fmt chunk size (16 for PCM) and audio format (1 = linear PCM)
view.setUint32(16, 16, true);
view.setUint16(20, 1, true);
// stereo (2 channels)
view.setUint16(22, 2, true);
// sample rate and byte rate (sampleRate * channels * 2 bytes per sample)
view.setUint32(24, sampleRate, true);
view.setUint32(28, sampleRate * 4, true);
// block align (channels * 2 bytes) and bits per sample (16)
view.setUint16(32, 4, true);
view.setUint16(34, 16, true);
// data sub-chunk
writeUTFBytes(view, 36, "data");
view.setUint32(40, interleaved.length * 2, true);
// write the PCM samples
let lng = interleaved.length;
let index = 44;
let volume = 1;
for (let i = 0; i < lng; i++) {
view.setInt16(index, interleaved[i] * (0x7fff * volume), true);
index += 2;
}
// our final binary blob
const blob = new Blob([view], {
type: "audio/wav"
});
const audioUrl = URL.createObjectURL(blob);
console.log("BLOB ", blob);
console.log("URL ", audioUrl);
document.querySelector("#audio").setAttribute("src", audioUrl);
const link = document.querySelector("#download");
link.setAttribute("href", audioUrl);
link.download = "output.wav";
}
// Visualizer function from
// https://webaudiodemos.appspot.com/AudioRecorder/index.html
//
function visualize() {
WIDTH = canvas.width;
HEIGHT = canvas.height;
CENTERX = canvas.width / 2;
CENTERY = canvas.height / 2;
let visualSetting = visualSelect.value;
console.log(visualSetting);
if (!analyser) return;
if (visualSetting === "sinewave") {
analyser.fftSize = 2048;
var bufferLength = analyser.fftSize;
console.log(bufferLength);
var dataArray = new Uint8Array(bufferLength);
canvasCtx.clearRect(0, 0, WIDTH, HEIGHT);
var draw = function() {
drawVisual = requestAnimationFrame(draw);
analyser.getByteTimeDomainData(dataArray);
canvasCtx.fillStyle = "rgb(200, 200, 200)";
canvasCtx.fillRect(0, 0, WIDTH, HEIGHT);
canvasCtx.lineWidth = 2;
canvasCtx.strokeStyle = "rgb(0, 0, 0)";
canvasCtx.beginPath();
var sliceWidth = (WIDTH * 1.0) / bufferLength;
var x = 0;
for (var i = 0; i < bufferLength; i++) {
var v = dataArray[i] / 128.0;
var y = (v * HEIGHT) / 2;
if (i === 0) {
canvasCtx.moveTo(x, y);
} else {
canvasCtx.lineTo(x, y);
}
x += sliceWidth;
}
canvasCtx.lineTo(canvas.width, canvas.height / 2);
canvasCtx.stroke();
};
draw();
} else if (visualSetting == "frequencybars") {
analyser.fftSize = 64;
var bufferLengthAlt = analyser.frequencyBinCount;
console.log(bufferLengthAlt);
var dataArrayAlt = new Uint8Array(bufferLengthAlt);
canvasCtx.clearRect(0, 0, WIDTH, HEIGHT);
var drawAlt = function() {
drawVisual = requestAnimationFrame(drawAlt);
analyser.getByteFrequencyData(dataArrayAlt);
canvasCtx.fillStyle = "rgb(0, 0, 0)";
canvasCtx.fillRect(0, 0, WIDTH, HEIGHT);
var barWidth = WIDTH / bufferLengthAlt;
var barHeight;
var x = 0;
for (var i = 0; i < bufferLengthAlt; i++) {
barHeight = dataArrayAlt[i];
canvasCtx.fillStyle = "rgb(" + (barHeight + 100) + ",50,50)";
canvasCtx.fillRect(
x,
HEIGHT - barHeight / 2,
barWidth,
barHeight / 2
);
x += barWidth + 1;
}
};
drawAlt();
} else if (visualSetting == "circle") {
analyser.fftSize = 32;
let bufferLength = analyser.frequencyBinCount;
console.log(bufferLength);
let dataArray = new Uint8Array(bufferLength);
canvasCtx.clearRect(0, 0, WIDTH, HEIGHT);
let draw = () => {
drawVisual = requestAnimationFrame(draw);
analyser.getByteFrequencyData(dataArray);
canvasCtx.fillStyle = "rgb(0, 0, 0)";
canvasCtx.fillRect(0, 0, WIDTH, HEIGHT);
// let radius = dataArray.reduce((a,b) => a + b) / bufferLength;
let radius = dataArray[2] / 2;
if (radius < 20) radius = 20;
if (radius > 100) radius = 100;
// console.log('Radius ', radius)
canvasCtx.beginPath();
canvasCtx.arc(CENTERX, CENTERY, radius, 0, 2 * Math.PI, false);
// canvasCtx.fillStyle = 'rgb(50,50,' + (radius+100) +')';
// canvasCtx.fill();
canvasCtx.lineWidth = 6;
canvasCtx.strokeStyle = "rgb(50,50," + (radius + 100) + ")";
canvasCtx.stroke();
};
draw();
}
}

// Hook up the Record/Stop buttons (wiring assumed; the snippet appears
// truncated at this point) and close the async IIFE so the code runs.
document.querySelector("#record").addEventListener("click", start);
document.querySelector("#stop").addEventListener("click", stop);
})();
#msg {
visibility: hidden;
color: red;
font-weight: bold;
font-size: 22px;
font-family: Verdana;
}
button {
padding: 5px 10px;
border: 1px solid grey;
font-size: 18px;
background: white;
}
.audio-controls {
display: flex;
align-items: center;
padding-top: 20px;
justify-content: center;
}
.audio-controls button {
margin: 0px 5px;
}
canvas {
margin-top: 10px;
background-color: black;
}
select {
height: 25px;
margin: 0px 5px;
}
a {
margin-left: 20px;
}
.app {
text-align: center;
padding-top: 20px;
}
<div class="app">
<select name="" id="micSelect"></select>
<select id="visSelect">
<option value="frequencybars">Bar</option>
<option value="sinewave">Wave</option>
<option value="circle">Circle</option>
</select>
<a id="download">Download</a>
<div class="audio-controls">
<button id="record">Record</button>
<button id="stop">Stop</button>
<audio id="audio" controls></audio>
</div>
<div id="msg">Recording...</div>
<canvas width="500" height="300"></canvas>
<div>
Live Demo: https://codepen.io/furki911/pen/jOYpvMx
I'm trying to do some audio visualisation on the Chromecast receiver using Web Audio API.
Unfortunately, the following code, that works well on Chrome, always returns an array of zeros for getByteFrequencyData on the Chromecast.
$(function () {
var context = new webkitAudioContext();
var analyser = context.createAnalyser();
analyser.fftSize = 64;
analyser.minDecibels = -100;
analyser.maxDecibels = -30;
analyser.smoothingTimeConstant = 0.9;
var frequencyData = new Uint8Array(analyser.frequencyBinCount);
var visualisation = $("#visualisation");
var barSpacingPercent = 100 / analyser.frequencyBinCount;
for (var i = 0; i < analyser.frequencyBinCount; i++) {
$("<div/>").css("left", i * barSpacingPercent + "%")
.appendTo(visualisation);
}
var bars = $("#visualisation > div");
function update() {
requestAnimationFrame(update);
analyser.getByteFrequencyData(frequencyData);
bars.each(function (index, bar) {
bar.style.height = frequencyData[index] + 'px';
});
};
$('audio').bind('canplay', function() {
var source = context.createMediaElementSource(this);
source.connect(analyser);
analyser.connect(context.destination);
update();
});
});
Am I missing something or is this particular feature of Web Audio not supported on Chromecast?
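One way to narrow it down (a diagnostic sketch only, reusing the variables from the snippet above; not a statement about what the Chromecast receiver supports): feed the same analyser from an OscillatorNode. If frequencyData is non-zero then, the AnalyserNode itself works on the receiver and the problem is specific to createMediaElementSource.
// Diagnostic: drive the analyser from an oscillator instead of the <audio>
// element. Non-zero frequencyData here means the analyser works and the
// issue is specific to createMediaElementSource on the receiver.
var osc = context.createOscillator();
osc.frequency.value = 440;
osc.connect(analyser);
analyser.connect(context.destination);
osc.start(0);
update();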