I want a tone from an oscillator to play under an audio clip of a song. How do I make the tone's loudness match the loudness of the sample? I want the tone to be quieter when the song is quieter and louder at the louder parts. I have used createAnalyser to detect the volume of the sample, but the code does not work.
const audioContext = new AudioContext();
const audiodiv = document.querySelector('.audiodiv');

const buffer = audioContext.createBuffer(
  1,
  audioContext.sampleRate * 1,
  audioContext.sampleRate
);
const channelData = buffer.getChannelData(0);
for (let i = 0; i < buffer.length; i++) {
  channelData[i] = Math.random() * 2 - 1;
}

const primaryGainControl = audioContext.createGain();
primaryGainControl.gain.setValueAtTime(0.005, 0);
primaryGainControl.connect(audioContext.destination);

const samplebutton = document.createElement('button');
samplebutton.innerText = 'sample';
samplebutton.addEventListener('click', async () => {
  let volume = 0;
  const response = await fetch('testsong.wav');
  const soundBuffer = await response.arrayBuffer();
  const sampleBuffer = await audioContext.decodeAudioData(soundBuffer);

  const analyzer = audioContext.createAnalyser();
  volume = analyzer.frequencyBinCount;

  const sampleSource = audioContext.createBufferSource();
  sampleSource.buffer = sampleBuffer;
  sampleSource.playbackRate.setValueAtTime(1, 0);
  sampleSource.connect(primaryGainControl);
  sampleSource.start();
});
audiodiv.appendChild(samplebutton);
const toneButton = document.createElement('button');
toneButton.innerText = 'tone';
toneButton.addEventListener('click', () => {
  const toneOscillator = audioContext.createOscillator();
  toneOscillator.frequency.setValueAtTime(150, 0);
  toneOscillator.type = 'sine';
  toneOscillator.connect(oscGain);
  toneOscillator.start();
  toneOscillator.stop(audioContext.currentTime + 5);

  const oscGain = audioContext.createGain();
  oscGain.gain.value = volume;
  oscGain.connect(primaryGainControl);
});
audiodiv.appendChild(toneButton);
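Several things break this: the analyser is never connected to anything, frequencyBinCount is half the FFT size rather than a volume, volume is scoped to the sample button's handler, and oscGain is referenced before its const declaration, which throws a ReferenceError. A minimal sketch of one way to drive the oscillator's gain from the sample's level, reusing the node names above; the RMS polling loop is my own addition, not part of the original code:

const analyser = audioContext.createAnalyser();
analyser.fftSize = 2048;
sampleSource.connect(analyser);       // tap the sample's signal
analyser.connect(primaryGainControl); // and still play it

const oscGain = audioContext.createGain();
oscGain.gain.value = 0;
oscGain.connect(primaryGainControl);

// Poll the time-domain samples and follow their RMS level.
const data = new Float32Array(analyser.fftSize);
function follow() {
  analyser.getFloatTimeDomainData(data);
  let sum = 0;
  for (const v of data) sum += v * v;
  const rms = Math.sqrt(sum / data.length);
  oscGain.gain.setTargetAtTime(rms, audioContext.currentTime, 0.05);
  requestAnimationFrame(follow);
}
follow();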
I'm using mediasoup and WebRTC to create media streams.
async consume(transport) {
  const { rtpCapabilities } = this.device;
  const data = await this.socket.request('consume', { rtpCapabilities });
  const {
    producerId,
    id,
    kind,
    rtpParameters,
  } = data;

  // Get playerID
  let playerID = await this.socket.request('getPlayerID', id);
  playerID = parseInt(playerID, 10);
  let producerIDS = await this.socket.request('getProducerIDS', id);
  let consumerIDS = await this.socket.request('getConsumerIDS', id);
  console.log("producerIDS", producerIDS);
  console.log("consumerIDS", consumerIDS);

  let codecOptions = {};
  const consumer = await transport.consume({
    id: id,
    producerId: producerId,
    kind: kind,
    rtpParameters: rtpParameters,
    codecOptions: codecOptions,
  });
  console.log("First producer ", producerIDS[0]);
  console.log("Current producer ", producerId);
  console.log("Consumer ID", id);
  console.log("Player ID:", playerID);
  console.log("Consumer1", consumer, consumer.track);

  //stream.addTrack(consumer.track);
  let audioTracks = [];
  audioTracks.push(consumer.track);

  if (playerID === 2 && producerIDS.length === 2) {
    const data1 = await this.socket.request('consume2', { rtpCapabilities });
    console.log("Data: ", data1);
    console.log("producerId: ", data1.producerId);
    const consumer2 = await transport.consume({
      id: data1.id,
      producerId: data1.producerId,
      kind: data1.kind,
      rtpParameters: data1.rtpParameters,
      codecOptions: codecOptions,
    });
    console.log("Second producer", data1.producerId);
    console.log("Consumer2", consumer2, consumer2.track);
    audioTracks.push(consumer2.track);
    //stream.addTrack(consumer2.track);
  }
  console.log("Audio Tracks: ", audioTracks);

  //const sources = audioTracks.map(t => ac.createMediaStreamSource(new MediaStream([t])));
  //var dest = ac.createMediaStreamDestination();
  //var aud1 = ac.createMediaStreamSource(s1);
  //aud1.connect(dest);
  //sources.forEach(s => s.connect(dest));
  //console.log(dest.stream.getAudioTracks());
  //stream.addTrack(dest.stream.getAudioTracks()[0]);
  //stream = dest.stream;
  /*
  let stream = new MediaStream();
  if (audioTracks.length <= 1) {
    stream.addTrack(audioTracks[0]);
  }
  else {
    stream.addTrack(audioTracks[1]);
  }
  */

  let stream = new MediaStream();
  if (audioTracks.length <= 1) {
    stream.addTrack(audioTracks[0]);
  } else {
    const ac = new AudioContext();
    const dest = ac.createMediaStreamDestination();
    let aud1stream = new MediaStream();
    aud1stream.addTrack(audioTracks[0]);
    let aud2stream = new MediaStream();
    aud2stream.addTrack(audioTracks[1]);
    const aud1 = ac.createMediaStreamSource(aud1stream);
    const aud2 = ac.createMediaStreamSource(aud2stream);
    var gain = ac.createGain();
    gain.gain.value = 10;
    gain.connect(dest);
    aud1.connect(dest);
    aud2.connect(dest);
    stream = dest.stream;
  }
  console.log("Stream tracks: ", stream.getAudioTracks());
  return stream;
}
I'm attempting to mix the two streams together in the else branch shown above.
However, the stream indicates that it is streaming, but no audio can be heard. I'd appreciate any help; all I want is to mix the two streams. If AudioContext doesn't work, does anyone have advice on using GStreamer or something else to mix the audio? Thanks.
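Two things commonly cause exactly this kind of silence: an AudioContext created outside a user gesture starts in the suspended state, and in Chrome a remote WebRTC track often produces no data through createMediaStreamSource unless it is also attached to a media element. Note also that nothing is ever connected into the gain node in the else branch, so it currently has no effect. A small sketch of both workarounds, reusing ac and audioTracks from the code above (the muted-element trick is a known Chrome workaround, not something from the original code):

if (ac.state === 'suspended') {
  ac.resume(); // usually has to happen inside a user gesture
}
// Chrome workaround: attach each remote track to a muted media element
// so the track actually starts flowing into the Web Audio graph.
for (const track of audioTracks) {
  const el = new Audio();
  el.srcObject = new MediaStream([track]);
  el.muted = true;
  el.play().catch(() => {});
}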
I have an input in my Vue component where I upload an audio file via #change; next I need to cut it. For example, I have 30 seconds of audio and I need to receive the audio from second 5 to second 15.
I have installed the lamejs package to do that.
But after all my operations with the audio I receive the cut audio, but without any sound, and I don't know what the reason is. Need help!
The method that uploads the file:
async onUploadFile(event) {
  const fileData = event.target.files[0];
  this.file = fileData;
  await this.decodeFile(fileData);
},
The method that decodes the file to an AudioBuffer:
async onUploadFile(event) {
  const fileData = event.target.files[0];
  this.file = fileData;
  await this.decodeFile(fileData);
},
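The snippet above repeats onUploadFile, and decodeFile itself isn't shown. Purely as an assumption about what it does, a typical implementation would look like this (this.audioBuffer is a hypothetical field, not the asker's actual code):

async decodeFile(fileData) {
  const audioContext = new AudioContext();
  const arrayBuffer = await fileData.arrayBuffer();
  // Hypothetical: keep the decoded AudioBuffer for audioBufferSlice below.
  this.audioBuffer = await audioContext.decodeAudioData(arrayBuffer);
},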
The method that cuts the audio, encodes it to MP3, and then produces a blob URL:
async audioBufferSlice(buffer, begin, end) {
  const audioContext = new AudioContext();
  const channels = buffer.numberOfChannels;
  const rate = buffer.sampleRate;
  const duration = buffer.duration;
  const startOffset = rate * begin;
  const endOffset = rate * end;
  const frameCount = endOffset - startOffset;
  const audioLength = endOffset - startOffset;

  let trimmedAudio = audioContext.createBuffer(
    buffer.numberOfChannels,
    audioLength,
    rate
  );

  for (var i = 0; i < buffer.numberOfChannels; i++) {
    trimmedAudio.copyToChannel(buffer.getChannelData(i).slice(begin, end), i);
  }

  var audioData = this.serializeAudioBuffer(trimmedAudio);

  let mp3Data = [];
  const sampleBlockSize = 1152;
  let mp3encoder = new lamejs.Mp3Encoder(2, audioData.sampleRate, 128);

  var left = new Int8Array(audioData.channels[0].length);
  var right = new Int8Array(audioData.channels[1].length);

  for (var i = 0; i < audioData.channels[0].length; i += sampleBlockSize) {
    var leftChunk = left.subarray(i, i + sampleBlockSize);
    var rightChunk = right.subarray(i, i + sampleBlockSize);
    var mp3buf = await mp3encoder.encodeBuffer(leftChunk, rightChunk);
    if (mp3buf.length > 0) {
      mp3Data.push(mp3buf);
    }
  }

  let buf = await mp3encoder.flush();
  if (buf.length > 0) {
    mp3Data.push(buf);
  }

  var blob = new Blob(mp3Data, {type: 'audio/mp3'});
  var url = window.URL.createObjectURL(blob);
  console.log('MP3 URl: ', url);
},
What did I do wrong that I receive the cut audio but without any sound?
I looked at this repository as an example: https://github.com/Vinit-Dantkale/AudioFy
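The silence most likely has two causes: slice(begin, end) cuts the channel data by seconds instead of frame offsets, and left/right are freshly allocated (all-zero) Int8Arrays that never receive the samples, so lamejs encodes silence; lamejs also expects 16-bit PCM. A sketch of the corrected steps inside audioBufferSlice, assuming serializeAudioBuffer exposes the Float32 channel data:

// Slice by frame offsets, not seconds.
for (let i = 0; i < channels; i++) {
  trimmedAudio.copyToChannel(buffer.getChannelData(i).slice(startOffset, endOffset), i);
}

// lamejs expects Int16 PCM, so convert the Float32 samples
// instead of encoding empty Int8Arrays.
function floatTo16(float32) {
  const out = new Int16Array(float32.length);
  for (let i = 0; i < float32.length; i++) {
    const s = Math.max(-1, Math.min(1, float32[i]));
    out[i] = s < 0 ? s * 0x8000 : s * 0x7fff;
  }
  return out;
}
const left = floatTo16(audioData.channels[0]);
const right = floatTo16(audioData.channels[1]);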
I want to find the biggest change rate in the prices in my coins data. For example, if SDT's old price is 1.00 and the new price is 1.02, that is a 2% change, and if it is the biggest change rate among the coins my script should print it. But the script doesn't work; it only keeps giving me the same coin.
const math = require('mathjs');
const fetch = require('node-fetch');

get();

async function get() {
  const response = await fetch("https://trade.kucoin.com/_api/currency/prices?base=USD&targets=&lang=en_US");
  const coin1 = await response.json();
  const olddata = coin1.data;
  const tokens = Object.keys(olddata);
  const oldprice = Object.values(olddata);

  get1();

  async function get1() {
    const response = await fetch("https://trade.kucoin.com/_api/currency/prices?base=USD&targets=&lang=en_US");
    const coin2 = await response.json();
    const newdata = coin2.data;
    const tokens = Object.keys(newdata);
    const newprice = Object.values(newdata);

    function findLargestDifference() {
      var large = null;
      var index = 0;
      for (var i = 0; i < oldprice.length; i++) {
        var change = tokens[i].newprice / oldprice[i].oldprice;
        if (change > large) {
          large = change;
          index = i;
        }
      }
      console.log(tokens[index]);
      return tokens[index];
    }
    findLargestDifference();
  }
}
Here is how the data looks: https://prnt.sc/19syjjg
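The values in that data are plain numbers, so tokens[i].newprice and oldprice[i].oldprice are both undefined, the division yields NaN, NaN > large is always false, and index stays 0, which is why the same coin is printed every time. The two fetches also run back to back, so the prices rarely differ. A sketch of the fixed comparison (the delay between fetches is my own assumption, not part of the original script):

function findLargestDifference(tokens, oldprice, newprice) {
  let largest = -Infinity;
  let index = 0;
  for (let i = 0; i < oldprice.length; i++) {
    // The prices are plain numbers, so index the arrays directly.
    const change = Number(newprice[i]) / Number(oldprice[i]);
    if (change > largest) {
      largest = change;
      index = i;
    }
  }
  return tokens[index];
}

// Wait a while before the second fetch so the prices can actually change.
setTimeout(get1, 60 * 1000);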
I have a web application of my own, based on the peerjs library (it is a video conference).
I'm trying to make a recording with MediaRecorder, but I'm facing a very unpleasant case.
The code for capturing my desktop stream is the following:
let chooseScreen = document.querySelector('.chooseScreenBtn');
chooseScreen.onclick = async () => {
  let desktopStream = await navigator.mediaDevices.getDisplayMedia({ video: true, audio: true });
}
I then successfully apply the received desktopStream to a video element in the DOM:

const videoElement = document.querySelector('.videoElement');
videoElement.srcObject = desktopStream;
videoElement.muted = false;
videoElement.onloadedmetadata = () => { videoElement.play(); };
For example, I get desktopStream on a page with an active conference where everyone hears and sees each other.
To check the video and audio in desktopStream, I play a video in a player on the desktop.
I can hear the audio from my desktop, but the audio from the participants cannot be heard.
Of course, when I put the desktopStream into MediaRecorder I get a video file with no sound from anyone except my desktop. Any ideas on how to solve this?
Chrome's MediaRecorder API can only output one track.
createMediaStreamSource can take the desktop and microphone streams; by connecting both to a single node created with createMediaStreamDestination, you get one merged stream that can be piped into the MediaRecorder API.
const mergeAudioStreams = (desktopStream, voiceStream) => {
  const context = new AudioContext();

  // Create a couple of sources
  const source1 = context.createMediaStreamSource(desktopStream);
  const source2 = context.createMediaStreamSource(voiceStream);
  const destination = context.createMediaStreamDestination();

  const desktopGain = context.createGain();
  const voiceGain = context.createGain();
  desktopGain.gain.value = 0.7;
  voiceGain.gain.value = 0.7;

  source1.connect(desktopGain).connect(destination);
  // Connect source2
  source2.connect(voiceGain).connect(destination);

  return destination.stream.getAudioTracks();
};
It is also possible to use two or more audio inputs plus a video input:
window.onload = () => {
  const warningEl = document.getElementById('warning');
  const videoElement = document.getElementById('videoElement');
  const captureBtn = document.getElementById('captureBtn');
  const startBtn = document.getElementById('startBtn');
  const stopBtn = document.getElementById('stopBtn');
  const download = document.getElementById('download');
  const audioToggle = document.getElementById('audioToggle');
  const micAudioToggle = document.getElementById('micAudioToggle');

  if ('getDisplayMedia' in navigator.mediaDevices) warningEl.style.display = 'none';

  let blobs;
  let blob;
  let rec;
  let stream;
  let voiceStream;
  let desktopStream;

  const mergeAudioStreams = (desktopStream, voiceStream) => {
    const context = new AudioContext();
    const destination = context.createMediaStreamDestination();
    let hasDesktop = false;
    let hasVoice = false;

    if (desktopStream && desktopStream.getAudioTracks().length > 0) {
      // If you don't want to share audio from the desktop it should still work with just the voice.
      const source1 = context.createMediaStreamSource(desktopStream);
      const desktopGain = context.createGain();
      desktopGain.gain.value = 0.7;
      source1.connect(desktopGain).connect(destination);
      hasDesktop = true;
    }

    if (voiceStream && voiceStream.getAudioTracks().length > 0) {
      const source2 = context.createMediaStreamSource(voiceStream);
      const voiceGain = context.createGain();
      voiceGain.gain.value = 0.7;
      source2.connect(voiceGain).connect(destination);
      hasVoice = true;
    }

    return (hasDesktop || hasVoice) ? destination.stream.getAudioTracks() : [];
  };

  captureBtn.onclick = async () => {
    download.style.display = 'none';
    const audio = audioToggle.checked || false;
    const mic = micAudioToggle.checked || false;

    desktopStream = await navigator.mediaDevices.getDisplayMedia({ video: true, audio: audio });
    if (mic === true) {
      voiceStream = await navigator.mediaDevices.getUserMedia({ video: false, audio: mic });
    }

    const tracks = [
      ...desktopStream.getVideoTracks(),
      ...mergeAudioStreams(desktopStream, voiceStream)
    ];
    console.log('Tracks to add to stream', tracks);
    stream = new MediaStream(tracks);
    console.log('Stream', stream);
    videoElement.srcObject = stream;
    videoElement.muted = true;

    blobs = [];
    rec = new MediaRecorder(stream, { mimeType: 'video/webm; codecs=vp8,opus' });
    rec.ondataavailable = (e) => blobs.push(e.data);
    rec.onstop = async () => {
      blob = new Blob(blobs, { type: 'video/webm' });
      let url = window.URL.createObjectURL(blob);
      download.href = url;
      download.download = 'test.webm';
      download.style.display = 'block';
    };
    startBtn.disabled = false;
    captureBtn.disabled = true;
    audioToggle.disabled = true;
    micAudioToggle.disabled = true;
  };

  startBtn.onclick = () => {
    startBtn.disabled = true;
    stopBtn.disabled = false;
    rec.start();
  };

  stopBtn.onclick = () => {
    captureBtn.disabled = false;
    audioToggle.disabled = false;
    micAudioToggle.disabled = false;
    startBtn.disabled = true;
    stopBtn.disabled = true;
    rec.stop();
    stream.getTracks().forEach(s => s.stop());
    videoElement.srcObject = null;
    stream = null;
  };
};
Audio capture with getDisplayMedia is only fully supported with Chrome for Windows. Other platforms have a number of limitations:
there is no support for audio capture at all under Firefox or Safari;
on Chrome/Chromium for Linux and Mac OS, only the audio of a Chrome/Chromium tab can be captured, not the audio of a non-browser application window.
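Given those differences, it is worth checking at runtime whether the captured stream actually carries an audio track before recording; a small sketch, reusing the desktopStream from the captureBtn handler above:

if (desktopStream.getAudioTracks().length === 0) {
  // No desktop audio on this platform (or the user declined to share it);
  // the recording will contain video (and microphone audio) only.
  console.warn('No desktop audio track captured.');
}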
I am trying to apply a greyscale effect to pictures, but when I run the command it shows me the error above.
Any fix?
The code:
const jimp = require('jimp');
const { MessageAttachment } = require('discord.js');

module.exports = {
  name: 'grey',
  run: async (client, message, args) => {
    if (message.attachments.size > 0) {
      let image = message.attachments.first().url;
      if (!image) return;
      let readedImage = await jimp.read(image);
      let sendedImage = await readedImage.greyscale();
      let attch = new MessageAttachment(sendedImage, 'Skyy.png');
      message.channel.send(attch);
    }
  }
}
You must convert the jimp image to a buffer before sending it; getBufferAsync takes the MIME type to encode to:
const jimp = require('jimp');
const { MessageAttachment } = require('discord.js');

module.exports = {
  name: 'grey',
  run: async (client, message, args) => {
    if (message.attachments.size > 0) {
      let image = message.attachments.first().url;
      if (!image) return;
      let readedImage = await jimp.read(image);
      // changed line below: encode the greyscaled image to a PNG buffer
      let sendedImage = await readedImage.greyscale().getBufferAsync(jimp.MIME_PNG);
      let attch = new MessageAttachment(sendedImage, 'Skyy.png');
      message.channel.send(attch);
    }
  }
}