I have to send an audio stream from the microphone over a WebSocket in WAV format.
While converting the audio buffer array to WAV I get this error:
DOMException: Failed to execute 'decodeAudioData' on 'BaseAudioContext': Unable to decode audio data
I tried a ScriptProcessorNode for this, but it is deprecated. An AudioWorklet feels like overkill just for sending a simply converted format over a WebSocket stream!
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8" />
<meta http-equiv="X-UA-Compatible" content="IE=edge" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>Document</title>
</head>
<body id="body">
<button id="button">test</button>
<script defer>
// On click: capture the microphone, record it, and try to decode each
// recorded chunk with the Web Audio API.
const btn = document.getElementById("button");
btn.addEventListener("click", () => {
  navigator.mediaDevices
    .getUserMedia({ audio: true })
    .then((stream) => {
      const recorder = new MediaRecorder(stream);
      // NOTE(review): a 10 ms timeslice emits fragments of one continuous
      // WebM/Opus stream — only the first chunk carries the container
      // header, so decodeAudioData() on later chunks fails with "Unable to
      // decode audio data". Decoding is only reliable on a Blob built from
      // ALL chunks (or use a raw-PCM capture path instead).
      recorder.start(10);
      recorder.addEventListener("dataavailable", (event) => {
        const streamBlob = new Blob([event.data]);
        streamBlob
          .arrayBuffer()
          .then((arrayBuffer) => {
            const audioCtx = new AudioContext({ sampleRate: 16000 });
            // FIX: the `?.` guards were pointless on a freshly constructed
            // AudioContext; plain calls keep the promise chain intact.
            audioCtx
              .decodeAudioData(arrayBuffer)
              .then((buffer) => {
                console.log("🚀 ?.then wavBlob", buffer);
              })
              .catch((error) => {
                console.log("🚀 . error1", error);
              })
              .finally(() => {});
          })
          .catch((error) => {
            console.log("🚀 . error2", error);
          });
      });
    })
    // FIX: the original had an orphaned `}), function (e) {...};` after the
    // promise chain — a syntax error. The error callback is reattached as a
    // proper .catch on getUserMedia.
    .catch(function (e) {
      alert("Error capturing audio.");
    });
});
</script>
</body>
</html>
Related
navigator.mediaDevices.getDisplayMedia({ audio: true, video: true }).then(stream => {});
navigator.mediaDevices.getUserMedia({ audio: true, video: true }).then(stream => {});
How to put these two streams into one?
Merging capture stream with webcam stream example using the video-stream-merger.js lib
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8" />
<meta http-equiv="X-UA-Compatible" content="IE=edge" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<!-- FIX: the jsDelivr version separator is "@", not "#". With
     "...merger#4.0.1" the fragment is ignored and an unpinned (latest)
     build is loaded instead of 4.0.1. -->
<script src="https://cdn.jsdelivr.net/npm/video-stream-merger@4.0.1/dist/video-stream-merger.js"></script>
<title>Merge screen with webcam</title>
</head>
<body>
<video id="video"></video>
</body>
<script>
// Capture webcam + screen, composite them with video-stream-merger, and
// play the merged stream in the page's <video> element.
async function startCapture() {
  let webcamStream = null;
  const constraints = { audio: true, video: { width: 720, height: 480 } };
  try {
    webcamStream = await navigator.mediaDevices.getUserMedia(constraints);
  } catch (err) {
    /* handle the error */
    console.error("Error: " + err);
  }
  let captureStream = null;
  try {
    const displayMediaOptions = null; //set it if you need
    captureStream = await navigator.mediaDevices.getDisplayMedia(
      displayMediaOptions
    );
  } catch (err) {
    /* handle the error */
    console.error("Error: " + err);
  }
  // FIX: the original fell through after a caught error and called
  // merger.addStream(null), which crashes inside the merger. Bail out when
  // either capture failed.
  if (!webcamStream || !captureStream) {
    return;
  }
  const merger = new VideoStreamMerger();
  // Add the screen capture. Position it to fill the whole stream (the default)
  merger.addStream(captureStream, {
    x: 0, // position of the topleft corner
    y: 0,
    width: merger.width,
    height: merger.height,
    mute: true, // we don't want sound from the screen (if there is any)
  });
  // Add the webcam stream. Position it on the bottom left and resize it to 100x100.
  merger.addStream(webcamStream, {
    x: 0,
    y: merger.height - 100,
    width: 100,
    height: 100,
    mute: false,
  });
  // Start the merging. Calling this makes the result available to us
  merger.start();
  // We now have a merged MediaStream!
  const video = document.querySelector("video");
  video.srcObject = merger.result;
  video.onloadedmetadata = function (e) {
    video.play();
  };
}
startCapture();
</script>
</html>
Can you help me create a simple PWA for my site? Currently the installation banner appears and it works.
I want a banner to be displayed in the application when I update my site, with the text "An update is available, click on this banner to update".
I also want a banner to appear when the internet connection is unavailable, with the message "You are offline, the content of this page may be obsolete".
How can I do this? Thank you.
Here are the files I created :
index.html
<!doctype html>
<!-- PWA shell page: app.js (loaded below) registers the service worker. -->
<html lang="fr" class="h-100">
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
<!-- NOTE(review): no <title> and no <link rel="manifest"> appear here, yet
     the install banner reportedly works — presumably the manifest is added
     elsewhere; confirm before relying on this snippet alone. -->
<link href="/bootstrap.min.css" rel="stylesheet">
<link href="/example/style.css" rel="stylesheet">
</head>
<body class="d-flex flex-column bg-dark text-white text-center">
<div>MY PAGE</div>
<script src="bootstrap.bundle.min.js"></script>
<script src="app.js"></script>
</body>
</html>
app.js
// Register the service worker once the page has finished loading, so the
// registration never competes with first-paint resources.
if ('serviceWorker' in navigator) {
  window.addEventListener('load', async () => {
    try {
      const reg = await navigator.serviceWorker.register('sw.js');
      console.log('Service worker registered! 😎', reg);
    } catch (err) {
      console.log('😥 Service worker registration failed: ', err);
    }
  });
}
sw.js
// Current cache version; bump this string to invalidate everything below.
const staticCacheName = 'v10';
// Application shell assets pre-cached at install time.
const filesToCache = [
'/',
'/index.html',
'/style.css',
'/app.js',
'/bootstrap.min.css',
'/bootstrap.bundle.min.js',
'/favicon.ico',
'/logo.png',
'/icon-144.png',
'/icon-192.png',
'/icon-512.png',
'/ipadpro1_splash.png',
'/ipadpro2_splash.png',
'/ipadpro3_splash.png',
'/ipad_splash.png',
'/iphone5_splash.png',
'/iphone6_splash.png',
'/iphoneplus_splash.png',
'/iphonexr_splash.png',
'/iphonexsmax_splash.png',
'/iphonex_splash.png'
];
// Pre-cache the shell; installation fails if any asset is unreachable.
self.addEventListener('install', event => {
event.waitUntil(
caches.open(staticCacheName).then(cache => {
return cache.addAll(filesToCache);
})
);
});
// FIX: the original filter callback had an empty body (returned undefined),
// so it always produced an empty list and STALE CACHES WERE NEVER DELETED —
// it also shadowed `staticCacheName` with its parameter. Keep only caches
// whose name differs from the current version, and delete those.
self.addEventListener('activate', event => {
event.waitUntil(caches.keys().then(function(cacheNames) {
return Promise.all(
cacheNames.filter(function(cacheName) {
return cacheName !== staticCacheName;
}).map(function(cacheName) {
return caches.delete(cacheName);
})
);
}));
});
// Cache-first strategy: serve from cache when possible, else hit the network.
self.addEventListener('fetch', event => {
event.respondWith(caches.match(event.request).then(cachedResponse => {
if (cachedResponse) {
return cachedResponse;
}
return fetch(event.request);
}));
});
// Allows the page to tell a waiting worker to activate immediately
// (used by "update available" banners).
self.addEventListener('message', event => {
if (event.data.action === 'skipWaiting') {
self.skipWaiting();
}
});
The flow I'm working on requires sending a POST request from the client to the server; the server generates a PDF file and returns it to be downloaded. The server generates the PDF correctly, but when I receive it on the client side it's corrupted (blank white pages).
How can I receive it correctly? Or is the problem on the server side?
server
async createDraft(req, res) {
try {
Log.debug("Server.createDraft", "Trying to create draft file");
const authorized = await this.isAuthorized(req);
if(!authorized)
return res.status(301).redirect(this.cognito.getFederatedLoginURL());
const questionnaire = await this.db.getQuestionnaire(req.params.id);
const fileName = getDraftFileName(questionnaire);
await create(req.body, fileName, questionnaire.id);
Log.debug("Server.createDraft", "Successfully created draft");
const file = path.join(__dirname, `../static/${fileName}`);
if(fs.existsSync(file))
return res.status(200).sendFile(file);
res.status(500);
console.log("File not found");
return res.send("File not found");
} catch(e) {
Log.info("Server.createDraft", `Error while answering POST /draft - ${e.message}...`);
return badRequest(res, e.message);
}
}
// Build the final draft PDF for `fileName`:
//  1. write the client-supplied cover/content HTML to tmp files,
//  2. render each with wkhtmltopdf (toc + headers/footers for the content),
//  3. stitch cover + content together with pdftk into `${output}${fileName}`,
//  4. delete all intermediate files.
// NOTE(review): `data.content` / `data.cover` arrive from the client request
// body and `fileName` is interpolated into shell commands below — if either
// can contain spaces or shell metacharacters this is a command-injection
// risk; confirm both are sanitized upstream.
const create = async (data, fileName, id) => {
const output = path.join(__dirname, "../../static/");
const filePath = path.join(__dirname, "../draft/");
const tmp = path.join(__dirname, "../../tmp/");
// Temp HTML inputs for wkhtmltopdf, keyed by questionnaire id.
const content = `${tmp}draft-${id}.html`;
const cover = `${tmp}cover-${id}.html`;
// Write both HTML files in parallel (Q.nfcall adapts node callbacks).
await Q.all([
Q.nfcall(fs.writeFile, content, data.content),
Q.nfcall(fs.writeFile, cover, data.cover)
]);
// Render both PDFs in parallel. In the first command the header/footer
// options appear twice on purpose: wkhtmltopdf applies options before an
// input to that input, so the first set styles the `toc` pages and the
// second set styles the `${content}` pages.
await Q.all([
Q.nfcall(exec, `wkhtmltopdf --enable-local-file-access --encoding 'UTF-8' toc --xsl-style-sheet ${filePath}toc.xsl --header-html ${filePath}header.html --header-spacing 10 --footer-html ${filePath}footer.html --footer-spacing 10 ${content} --header-html ${filePath}header.html --header-spacing 10 --footer-html ${filePath}footer.html --footer-spacing 10 ${output}content-${fileName}`),
Q.nfcall(exec, `wkhtmltopdf --enable-local-file-access --encoding 'UTF-8' -T 0 -B 0 cover ${cover} ${output}cover-${fileName}`)
]);
// Concatenate cover then content into the final PDF.
await Q.nfcall(exec, `pdftk ${output}cover-${fileName} ${output}content-${fileName} cat output ${output}${fileName}`);
// Clean up the tmp HTML inputs and the intermediate PDFs; only the final
// `${output}${fileName}` survives.
await Q.all([
Q.nfcall(fs.unlink, `${tmp}draft-${id}.html`),
Q.nfcall(fs.unlink, `${tmp}cover-${id}.html`),
Q.nfcall(fs.unlink, `${output}content-${fileName}`),
Q.nfcall(fs.unlink, `${output}cover-${fileName}`)
]);
};
client
// eslint-disable-next-line max-statements
/**
 * Request the server-generated draft PDF for the id found in the current URL
 * and trigger a client-side download via saveAs(). Toggles the tree's
 * "loading" flag around the request and records errors on failure.
 */
export const exportDraft = async (tree, data) => {
  try {
    tree.set("loading", true);
    tree.commit();
    document.getElementById("cover-title").innerHTML = data.draft.secoDraft;
    // The questionnaire id is the last path segment of the current URL.
    const path = window.location.pathname.split("/");
    const id = parseInt(path[ path.length - 1 ], 10);
    // responseType "blob" makes axios deliver the PDF as binary data.
    const res = await axios({
      url: `${window.location.origin}/draft/${id}`,
      method: "POST",
      responseType: "blob",
      data: {
        cover: `<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<link rel="stylesheet" type="text/css" href="../src/draft/wysiwyg.css" />
<link rel="stylesheet" type="text/css" href="../src/draft/test.css" />
</head>
<body>
${document.getElementById("cover").innerHTML}
</body>
</html>`,
        content: `<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<link rel="stylesheet" type="text/css" href="../src/draft/wysiwyg.css" />
<link rel="stylesheet" type="text/css" href="../src/draft/test.css" />
</head>
<body>
${document.getElementById("draft").innerHTML}
</body>
</html>
`
      },
      json: true // NOTE(review): not a recognized axios option; harmless but removable.
    });
    console.log(res);
    tree.set("loading", false);
    // FIX: `res` is the axios response envelope, not the payload — wrapping
    // it in a Blob produced a corrupted (blank) PDF. The PDF bytes live in
    // `res.data`.
    const file = new Blob([res.data], { type: "application/pdf" });
    return saveAs(file, "PDF-Draft.pdf");
  } catch(e) {
    tree.set("loading", false);
    return add(tree, "error", e.message);
  }
};
Using `res.data` to extract the PDF data solved the problem:
const file = new Blob([res.data], { type: "application/pdf" });
I'm trying to create a webm video file from blobs generated by MediaRecorderAPI in a NodeJS server using FFMPEG. I'm able to create the .webm file but it's not playable, I ran this command $ ffmpeg.exe -v error -i lel.webm -f null - >error.log 2>&1 to generate an error log, the error log file contains this:
[null # 000002ce7501de40] Application provided invalid, non monotonically increasing dts to muxer in stream 0: 1 >= 1
[h264 # 000002ce74a727c0] Invalid NAL unit size (804 > 74).
[h264 # 000002ce74a727c0] Error splitting the input into NAL units.
Error while decoding stream #0:0: Invalid data found when processing input
This is my web server code
const app = require("express")();
const http = require("http").createServer(app);
const io = require("socket.io")(http);
const fs = require("fs");
const child_process = require("child_process");

// Serve the recording page.
app.get("/", (req, res) => {
  res.sendFile(__dirname + "/index.html");
});

io.on("connection", (socket) => {
  console.log("a user connected");
  // One ffmpeg process per connection; browser-recorded blobs are piped
  // into its stdin.
  // FIX: the original muxed with "-f flv" into a file named "rtmpUrl.webm" —
  // FLV bytes under a .webm name are unplayable (exactly the errors in the
  // question's log). The incoming stream is H.264 (see the client's
  // mimeType), which the WebM container does not allow, so remux with
  // stream copy into Matroska, which supports H.264.
  const ffmpeg = child_process.spawn("ffmpeg", [
    "-i",
    "-",
    "-vcodec",
    "copy",
    "-f",
    "matroska",
    "recording.mkv",
  ]);
  ffmpeg.on("close", (code, signal) => {
    console.log(
      "FFmpeg child process closed, code " + code + ", signal " + signal
    );
  });
  ffmpeg.stdin.on("error", (e) => {
    console.log("FFmpeg STDIN Error", e);
  });
  ffmpeg.stderr.on("data", (data) => {
    console.log("FFmpeg STDERR:", data.toString());
  });
  socket.on("message", (msg) => {
    console.log("Writing blob! ");
    ffmpeg.stdin.write(msg);
  });
  socket.on("stop", () => {
    console.log("Stop recording..");
    // FIX: end stdin so ffmpeg flushes and finalizes the container cleanly;
    // SIGINT could kill it mid-write and truncate the output file.
    ffmpeg.stdin.end();
  });
});

http.listen(3000, () => {
  console.log("listening on *:3000");
});
And this is my client code, using HTML, JS:
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>Document</title>
</head>
<script src="/socket.io/socket.io.js"></script>
<script>
const socket = io();
let mediaRecorder = null;
// Build a fresh MediaStream from the first video and audio tracks, then
// stream 1-second recorded chunks to the server over socket.io.
const startRecording = (someStream) => {
  const mediaStream = new MediaStream();
  const videoTrack = someStream.getVideoTracks()[0];
  const audioTrack = someStream.getAudioTracks()[0];
  console.log("Video trac ", videoTrack);
  console.log("audio trac ", audioTrack);
  mediaStream.addTrack(videoTrack);
  mediaStream.addTrack(audioTrack);
  // NOTE(review): H.264 inside "video/webm" is non-standard (WebM expects
  // VP8/VP9); whatever remuxes these chunks server-side must account for
  // the H.264 payload. Confirm the server's output container matches.
  const recorderOptions = {
    mimeType: "video/webm;codecs=h264",
    videoBitsPerSecond: 3 * 1024 * 1024,
  };
  mediaRecorder = new MediaRecorder(mediaStream, recorderOptions);
  mediaRecorder.start(1000); // 1000 - the number of milliseconds to record into each Blob
  mediaRecorder.ondataavailable = (event) => {
    console.debug("Got blob data:", event.data);
    if (event.data && event.data.size > 0) {
      socket.emit("message", event.data);
    }
  };
};
// Ask for camera + microphone, start recording, and preview locally.
const getVideoStream = async () => {
  try {
    const stream = await navigator.mediaDevices.getUserMedia({
      video: true,
      audio: true,
    });
    startRecording(stream);
    myVideo.srcObject = stream;
  } catch (e) {
    console.error("navigator.getUserMedia error:", e);
  }
};
// Stop the recorder and tell the server to finalize the file.
const stopRecording = () => {
  mediaRecorder.stop();
  socket.emit("stop");
};
</script>
<body>
<p>hello world</p>
<button onclick="getVideoStream()">start rec</button>
<button onclick="stopRecording()">stop rec</button>
<!-- FIX: <video> is not a void element, so "<video ... />" did not close it
     and the following <script> was parsed as a child of the still-open
     <video>. Use an explicit end tag. -->
<video width="300" height="300" autoplay id="myvideo"></video>
<script>
const myVideo = document.getElementById("myvideo");
myVideo.muted = true;
</script>
</body>
</html>
Any help is appreciated!
It looks like you are encoding into flv, not webm. So you have flv encoded video inside the webm container - which the playback device just has no idea what to do with :)
Try the command above on just an mp4 on your computer - I bet it will not work. To encode into webm - check out these ffmpeg commands
I've been working on this for some days with no luck. I'm just trying to get the basic audio-only example working, but there is no audio on index1.html. Everything else works — no errors, connection established, offer/answer exchanged, ICE candidates exchanged — and it fails on both Chrome and Firefox.
I don't know what's wrong. I'm copying https://webrtc.github.io/samples/src/content/peerconnection/audio/ exactly, just splitting it into two files. I also see many people saying such issues are caused by STUN/ICE servers, but the official examples work without them, so I assume I don't need them here.
The setup:
index.html, the one who will initiate the call and capture its audio.
index1.html, will receive the call and play the audio.
PD: the code isn't clean or anything, just testing stuff.
index.html
<!DOCTYPE html>
<!-- Caller page: captures microphone audio and initiates the WebRTC offer.
     All signaling is relayed through the socket.io server on :3000. -->
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Hello World! Site Title</title>
</head>
<body>
<h1>Hello World!</h1>
<button id="call">CALL</button>
</body>
<script src="http://localhost:3000/socket.io/socket.io.js"></script>
<script src="https://webrtc.github.io/adapter/adapter-latest.js"></script>
<script>
var socket = io("http://localhost:3000");
//const { RTCPeerConnection, RTCSessionDescription } = window;
let callButton = document.getElementById("call");
let peerConnection;
// ICE candidates relayed back from the callee (index1.html).
socket.on('sendICe_to_0', data => {
console.log("received ice candidate", data);
// NOTE(review): `data` is a whole RTCPeerConnectionIceEvent the peer
// emitted. `candidate` is a prototype getter and is unlikely to survive
// socket.io's JSON serialization, so `data.candidate` is probably
// undefined here — likely a root cause of the failure. The peers should
// exchange `e.candidate` (null-checked) instead; verify. (Changing it
// requires updating BOTH pages in lockstep.)
peerConnection.addIceCandidate(data.candidate);
});
callButton.onclick = e => {
// No ICE servers configured — fine on localhost, required across NATs.
peerConnection = new RTCPeerConnection(null);
peerConnection.onicecandidate = e => {
console.log("send ice to 1", e);
// NOTE(review): emits the full event object (see note above); the final
// gathering event has e.candidate === null and is forwarded too.
socket.emit('sendICe_to_1', e);
}
navigator.mediaDevices
.getUserMedia({
audio: true,
video: false
})
.then(gotStream)
.catch(e => {
alert(`getUserMedia() error: ${e.name}`);
});
};
// Attach the captured tracks, create the offer, and send it to the callee.
async function gotStream(stream) {
console.log("capturing audio");
stream.getTracks().forEach(track => peerConnection.addTrack(track, stream));
let offer = await peerConnection.createOffer();
await peerConnection.setLocalDescription(offer);
console.log("sending offer", offer);
socket.emit('offer_to_1', offer);
/*peerConnection.createOffer()
.then(data => {
console.log("creating offer", data);
peerConnection.setLocalDescription(data)
.then(data => {
console.log("sending offer", data);
socket.emit('offer_to_1', data);
});
});*/
}
// Apply the callee's answer to complete the offer/answer exchange.
socket.on('send_asnwer_0', data => {
console.log("received anser", data);
peerConnection.setRemoteDescription(data);
});
</script>
</html>
index1.html
<!DOCTYPE html>
<!-- Callee page: receives the WebRTC offer and plays the caller's audio. -->
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Hello World! Site Title</title>
</head>
<body>
<h1>Hello World!</h1>
<audio id="audio" autoplay controls></audio>
</body>
<script src="https://webrtc.github.io/adapter/adapter-latest.js"></script>
<script src="http://localhost:3000/socket.io/socket.io.js"></script>
<script>
// https://github.com/peers/peerjs/issues/470
var socket = io("http://localhost:3000");
let peerConnection;
socket.on('sendICe_to_1', data => {
console.log("Received ice candidate", data);
// NOTE(review): likely root cause of the silence — `onicecandidate` and
// `ontrack` are only installed HERE, inside the first ICE message handler.
// The `track` event fires while processing the remote offer in offerTo1(),
// which can happen before any ICE message arrives, so the audio element
// may never be wired up. These handlers belong in offerTo1(), immediately
// after `new RTCPeerConnection(...)`; verify against the official sample.
peerConnection.onicecandidate = e => {
console.log("Sending ice candidate", e)
// NOTE(review): emits the full RTCPeerConnectionIceEvent; only
// `e.candidate` is needed and the event likely serializes poorly over
// socket.io (see the matching note in index.html).
socket.emit('sendICe_to_0', e);
};
peerConnection.addIceCandidate(data.candidate);
peerConnection.ontrack = e => {
console.log("Adding remote stream");
let audio = document.querySelector('audio#audio');
if (audio.srcObject !== e.streams[0]) {
audio.srcObject = e.streams[0];
console.log('Received/Configured remote stream');
}
};
});
// Handle the caller's offer: set it as the remote description, then create
// and send back an answer.
async function offerTo1(data) {
peerConnection = new RTCPeerConnection(null);
console.log("Received offer from 0", data)
await peerConnection.setRemoteDescription(data)
let answer = await peerConnection.createAnswer();
await peerConnection.setLocalDescription(answer);
console.log("sending answer", answer);
socket.emit('send_asnwer_0', answer);
}
socket.on('offer_to_1', data => {
offerTo1(data);
});
</script>
</html>
NodeJs server
const io = require('socket.io')();
io.origins('*:*');
io.listen(3000);

// Sockets in connection order: [0] = caller (index.html),
// [1] = callee (index1.html). Sockets are never removed on disconnect.
let connectedSockets = [];

// Pure relay: each signaling event is forwarded, unchanged, to the peer at
// a fixed index.
const relayRoutes = [
  ['offer_to_1', 1],
  ['send_asnwer_0', 0],
  ['sendICe_to_1', 1],
  ['sendICe_to_0', 0],
];

io.on('connection', (socket) => {
  connectedSockets.push(socket);
  console.log("Client connected!");
  relayRoutes.forEach(([eventName, targetIndex]) => {
    socket.on(eventName, (payload) => {
      console.log(eventName);
      connectedSockets[targetIndex].emit(eventName, payload);
    });
  });
});