I'm trying to capture webcam video on the client side and send the frames to the server to process them. I'm a newbie in JS and I'm having some problems.
I tried to use OpenCV.js to get the data, but I didn't understand how to get it. In Python we can do
cap = cv2.VideoCapture(0)
ret, frame = cap.read()
and frame is a 2D array with the image, but how can I get each frame (as a 2D array) to send it using OpenCV.js?
I have this code on the client side:
<script type="text/javascript">
    function onOpenCvReady() {
        cv['onRuntimeInitialized'] = () => {
            var socket = io('http://localhost:5000');
            socket.on('connect', function () {
                console.log("Connected...!", socket.connected);
            });

            const video = document.querySelector("#videoElement");
            video.width = 500;
            video.height = 400;

            if (navigator.mediaDevices.getUserMedia) {
                navigator.mediaDevices.getUserMedia({ video: true })
                    .then(function (stream) {
                        video.srcObject = stream;
                        video.play();
                    })
                    .catch(function (err) {
                        console.log(err);
                        console.log("Something went wrong!");
                    });
            }

            let src = new cv.Mat(video.height, video.width, cv.CV_8UC4);
            let cap = new cv.VideoCapture(video);
            const FPS = 15;

            function processVideo() {
                let begin = Date.now();
                cap.read(src);
                handle_socket(src['data']);
                // schedule the next frame
                let delay = 1000 / FPS - (Date.now() - begin);
                setTimeout(processVideo, delay);
            }

            // schedule the first frame
            setTimeout(processVideo, 0);

            function handle_socket(src) {
                socket.emit('event', { info: 'I\'m connected!', data: src });
            }
        };
    }
</script>
My solution was:
// Select the HTML video element where the webcam data is
const video = document.querySelector("#videoElement");

// Returns a frame encoded in base64
const getFrame = () => {
    const canvas = document.createElement('canvas');
    canvas.width = video.videoWidth;
    canvas.height = video.videoHeight;
    canvas.getContext('2d').drawImage(video, 0, 0);
    return canvas.toDataURL('image/jpeg');
}

// Send the data over the socket; the server's acknowledgement callback
// delivers the processing result
let results;
function handle_socket(data) {
    socket.emit('event', data, function (res) {
        if (res !== 0) {
            results = res;
        }
    });
}

const frame64 = getFrame();
handle_socket(frame64);
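Since the original question asks for the frame as a 2D array: on the client side, the closest equivalent is the canvas's ImageData. A minimal sketch, reusing the same video element (the getPixels name is just for the example):

// Read raw pixels from a canvas instead of encoding a JPEG data URL.
// getImageData returns a flat Uint8ClampedArray in RGBA order (4 bytes per
// pixel), so pixel (row, col) starts at index (row * width + col) * 4.
const getPixels = () => {
    const canvas = document.createElement('canvas');
    canvas.width = video.videoWidth;
    canvas.height = video.videoHeight;
    const ctx = canvas.getContext('2d');
    ctx.drawImage(video, 0, 0);
    return ctx.getImageData(0, 0, canvas.width, canvas.height); // { width, height, data }
};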
I'm recording some audio in the browser and then want to loop it seamlessly, avoiding clicks etc. when it starts. This means fading it in and out.
I can ramp the volume up and down once, but I can't find any way to trigger Web Audio's 'ramp to value at time' every time the loop starts again.
Is there an easy way to do this? I've got 10 of these buffers looping, so I'd like to avoid lots of costly setInterval checks if possible...
let source = audioContext.createBufferSource();
let gain = audioContext.createGain();
gain.gain.value = 0.01;
source.buffer = decodedData;
let songLength = decodedData.duration;
source.loop = true;
source.connect(gain);
gain.connect(audioContext.destination);
source.start(0);

// fade in and out (this only runs once, not on every loop iteration)
gain.gain.exponentialRampToValueAtTime(0.2, audioContext.currentTime + 1);
gain.gain.exponentialRampToValueAtTime(0.01, audioContext.currentTime + songLength);
Consider listening to the ended event and re-triggering the playback. Note that the source deliberately does not set loop = true: a looping AudioBufferSourceNode never fires ended, so each iteration is started manually, which is what lets the fade-in ramp run every time:
class FadeInLoop {
    ctx
    audioBuffer
    gainNode
    source
    isPlaying = true

    constructor(ctx, url) {
        this.ctx = ctx
        this.audioBuffer = fetch(url)
            .then(response => response.arrayBuffer())
            .then(arrayBuffer => ctx.decodeAudioData(arrayBuffer))
        this.gainNode = ctx.createGain()
        this.gainNode.connect(ctx.destination)
    }

    async start() {
        this.isPlaying = true
        const source = this.ctx.createBufferSource()
        this.source = source
        source.addEventListener('ended', e => {
            if (this.isPlaying) { // repeat unless stop() was called
                this.start()
            }
        })
        source.connect(this.gainNode)
        source.buffer = await this.audioBuffer
        const now = this.ctx.currentTime
        // exponential ramps cannot start from 0, so start from Number.EPSILON
        this.gainNode.gain.setValueAtTime(Number.EPSILON, now)
        this.gainNode.gain.exponentialRampToValueAtTime(1, now + 0.055)
        source.start(0)
    }

    stop() {
        this.isPlaying = false
        this.source?.stop()
    }
}

const ctx = new AudioContext({ latencyHint: 'interactive' })
const loop = new FadeInLoop(ctx, 'https://batman.dev/static/71474264/loop.mp3')
<button onclick="loop.start()">Start</button>
<button onclick="loop.stop()">Stop</button>
What I am trying to achieve is to have a "stream" of blobs from my socket and append them to a video.
Currently, I have something like
const socket = io();
const videoGrid = document.getElementById("video-grid");
const video = document.createElement("video");
video.muted = true;
videoGrid.append(video);

let blobArray = [];
socket.on("view-stream-10", (data) => {
    blobArray.push(
        new Blob([new Uint8Array(data)], {
            type: "video/x-matroska;codecs=avc1,opus",
        })
    );
    // rebuild the whole blob and swap the video source, keeping the playhead
    let currentTime = video.currentTime;
    let blob = new Blob(blobArray, { type: "video/x-matroska;codecs=avc1,opus" });
    video.src = window.URL.createObjectURL(blob);
    video.currentTime = currentTime;
    video.play();
});
It works, but the video stops for a few milliseconds every time a new blob is created and the URL is swapped, and the pause is very noticeable.
Is there any better way?
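One common alternative is the Media Source Extensions API, which appends each chunk to a single SourceBuffer instead of rebuilding the blob URL per packet, so playback is never restarted. A minimal sketch under two assumptions: the chunks arrive as a fragmented stream produced by MediaRecorder, and the MIME type passes MediaSource.isTypeSupported (the x-matroska type above typically does not, so the recorder may need to emit WebM):

const mediaSource = new MediaSource();
video.src = URL.createObjectURL(mediaSource);

mediaSource.addEventListener("sourceopen", () => {
    // Assumed MIME type; check support for whatever MediaRecorder actually produces.
    const mime = 'video/webm;codecs="vp8,opus"';
    if (!MediaSource.isTypeSupported(mime)) return;
    const sourceBuffer = mediaSource.addSourceBuffer(mime);
    const queue = [];

    // appendBuffer is asynchronous and only one append may run at a time,
    // so chunks that arrive while the buffer is updating are queued.
    sourceBuffer.addEventListener("updateend", () => {
        if (queue.length > 0) sourceBuffer.appendBuffer(queue.shift());
    });

    socket.on("view-stream-10", (data) => {
        const chunk = new Uint8Array(data);
        if (sourceBuffer.updating || queue.length > 0) queue.push(chunk);
        else sourceBuffer.appendBuffer(chunk);
    });

    video.play();
});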
I have a webpage where the user will drop a video file; the page will upload the video and generate a thumbnail based on a timestamp that the user provides.
For the moment, I am just trying to generate the thumbnail based on the FIRST frame of the video.
Here is a quick example of my current progress:
(please use Chrome, as Firefox will complain about the https link; also, sorry if it auto-downloads an image)
https://stackblitz.com/edit/rxjs-qc8iag
import { Observable, throwError } from 'rxjs'

const VIDEO = {
    imageFromFrame(videoSrc: any): Observable<any> {
        return new Observable<any>((obs) => {
            const canvas = document.createElement('canvas')
            const video = document.createElement('video')
            const context = canvas.getContext('2d')
            const source = document.createElement('source')
            source.setAttribute('src', videoSrc)
            video.appendChild(source)
            document.body.appendChild(canvas)
            document.body.appendChild(video)
            if (!context) {
                throwError(`Couldn't retrieve context 2d`)
                obs.complete()
                return
            }
            video.load()
            video.addEventListener('loadedmetadata', function () {
                console.log('loadedmetadata')
                // Set canvas dimensions same as video dimensions
                canvas.width = video.videoWidth
                canvas.height = video.videoHeight
            })
            video.addEventListener('canplay', function () {
                console.log('canplay')
                canvas.style.display = 'inline'
                context.drawImage(video, 0, 0, video.videoWidth, video.videoHeight)
                // Convert canvas image to Base64
                const img = canvas.toDataURL("image/png")
                // Convert Base64 image to binary
                obs.next(VIDEO.dataURItoBlob(img))
                obs.complete()
            })
        })
    },
    dataURItoBlob(dataURI: string): Blob {
        // convert base64/URLEncoded data component to raw binary data held in a string
        var byteString
        if (dataURI.split(',')[0].indexOf('base64') >= 0) byteString = atob(dataURI.split(',')[1])
        else byteString = unescape(dataURI.split(',')[1])
        // separate out the mime component
        var mimeString = dataURI.split(',')[0].split(':')[1].split(';')[0]
        // write the bytes of the string to a typed array
        var ia = new Uint8Array(byteString.length)
        for (var i = 0; i < byteString.length; i++) {
            ia[i] = byteString.charCodeAt(i)
        }
        return new Blob([ia], { type: mimeString })
    },
}

VIDEO.imageFromFrame('https://www.learningcontainer.com/wp-content/uploads/2020/05/sample-mp4-file.mp4?_=1').subscribe((r) => {
    var a = document.createElement('a')
    document.body.appendChild(a)
    const url = window.URL.createObjectURL(r)
    a.href = url
    a.download = 'sdf'
    a.click()
    window.URL.revokeObjectURL(url)
})
The problem is that the downloaded image is empty and does not represent the first frame of the video, even though the video should have been loaded and drawn into the canvas.
I am trying to solve it, but if someone could help me find the issue, thanks.
For anyone looking, I made this, which works correctly (it needs improvement, but the idea is there). I use an Observable because the flow of my app uses Observables, but you can change it to a Promise or whatever:
imageFromFrame(
    videoFile: File,
    options: { frameTimeInSeconds: number; filename?: string; fileType?: string } = {
        frameTimeInSeconds: 0.1,
    }
): Observable<File> {
    return new Observable<any>((obs) => {
        const canvas = document.createElement('canvas')
        const video = document.createElement('video')
        const source = document.createElement('source')
        const context = canvas.getContext('2d')
        const urlRef = URL.createObjectURL(videoFile)

        video.style.display = 'none'
        canvas.style.display = 'none'
        source.setAttribute('src', urlRef)
        video.setAttribute('crossorigin', 'anonymous')
        video.appendChild(source)
        document.body.appendChild(canvas)
        document.body.appendChild(video)

        if (!context) {
            // rxjs's throwError only creates an observable, so signal the error directly
            obs.error(`Couldn't retrieve context 2d`)
            return
        }

        video.currentTime = options.frameTimeInSeconds
        video.load()

        video.addEventListener('loadedmetadata', function () {
            canvas.width = video.videoWidth
            canvas.height = video.videoHeight
        })

        // 'loadeddata' fires once the frame at currentTime is available to draw
        video.addEventListener('loadeddata', function () {
            context.drawImage(video, 0, 0, video.videoWidth, video.videoHeight)
            canvas.toBlob((blob) => {
                if (!blob) {
                    return
                }
                obs.next(
                    new File([blob], options.filename || FILES.getName(videoFile.name), {
                        type: options.fileType || 'image/png',
                    })
                )
                obs.complete()
                URL.revokeObjectURL(urlRef)
                video.remove()
                canvas.remove()
            }, options.fileType || 'image/png')
        })
    })
},
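A hypothetical usage sketch, assuming the method lives on the same VIDEO helper object as in the question; the #fileInput id is made up for the example:

// Grab the frame at 1.5s from a dropped/selected file and show it as a preview.
document.querySelector('#fileInput').addEventListener('change', (event) => {
    const file = event.target.files[0];
    VIDEO.imageFromFrame(file, { frameTimeInSeconds: 1.5, filename: 'thumb.png' })
        .subscribe((thumbnail) => {
            const img = document.createElement('img');
            img.src = URL.createObjectURL(thumbnail);
            document.body.appendChild(img);
        });
});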
So I'm doing my final project: I want to get a head pose estimation from the client webcam. I successfully stream the webcam to the server with a websocket, but when I try to put my head pose function inside the socket route, the terminal frequently shows the error ValueError: too many packets in payload. Does anyone know how to prevent this error? Any answer would be appreciated. Thank you!
Here's my code for more information.
JavaScript
var constraints = { audio: false, video: { width: 500, height: 500 } };
var video = document.querySelector('video');
var canvas = document.querySelector('canvas');
var socket = io('https://0.0.0.0:8000');

navigator.mediaDevices.getUserMedia(constraints)
    .then(function (mediaStream) {
        video.srcObject = mediaStream;
        video.onloadedmetadata = function (e) {
            video.play();
        };
    })
    .catch(function (err) { console.log(err.name + ": " + err.message); });

socket.on('connect', function () {
    console.log('connected !', socket.connected);

    // draw the current video frame into a canvas and return it as a JPEG data URL
    function capture() {
        canvas.width = 200;
        canvas.height = 200;
        canvas.getContext('2d').drawImage(video, 0, 0, 200, 200);
        return canvas.toDataURL('image/jpeg');
    };

    var FPS = 50;
    setInterval(() => {
        var imgData = capture();
        socket.emit('image', imgData);
    }, 1000 / FPS);
});
Flask app
import base64
import time

import cv2
import numpy as np

# app, socketio, and FacePosition are defined elsewhere in the project

@socketio.on('image')
def image(data_image):
    time.sleep(1)
    # strip the "data:image/jpeg;base64," prefix and decode the JPEG bytes
    encoded_image = data_image.split(",")[1]
    decoded = base64.b64decode(encoded_image)
    frame = cv2.imdecode(np.frombuffer(decoded, np.uint8), -1)
    # this is my head pose module
    pose = FacePosition(frame)
    head_pose = pose.run()
    print(head_pose)

if __name__ == '__main__':
    socketio.run(app, threaded=True)
You may want to take a look at this GitHub issue. Raising engineio's max_decode_packets did solve that problem for me.
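A sketch of that workaround, assuming python-engineio exposes Payload.max_decode_packets as in the versions discussed in the issue (its default is low, and bursts of 'image' packets overflow it); set it before creating the SocketIO server:

from engineio.payload import Payload

# raise the per-payload packet limit before the server is constructed
Payload.max_decode_packets = 500
socketio = SocketIO(app)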
I have written this function to capture each frame for the GIF, but the output is very laggy and it crashes when the data increases. Any suggestions?
Code :
function createGifFromPng(list, framerate, fileName, gifScale) {
    gifshot.createGIF({
        'images': list,
        'gifWidth': wWidth * gifScale,
        'gifHeight': wHeight * gifScale,
        'interval': 1 / framerate,
    }, function (obj) {
        if (!obj.error) {
            var image = obj.image;
            var a = document.createElement('a');
            document.body.append(a);
            a.download = fileName;
            a.href = image;
            a.click();
            a.remove();
        }
    });
}
/////////////////////////////////////////////////////////////////////////
function getGifFromCanvas(renderer, sprite, fileName, gifScale, framesCount, framerate) {
    var listImgs = [];
    var saving = false;
    var interval = setInterval(function () {
        renderer.extract.canvas(sprite).toBlob(function (b) {
            if (listImgs.length >= framesCount) {
                clearInterval(interval);
                if (!saving) {
                    createGifFromPng(listImgs, framerate, fileName, gifScale);
                    saving = true;
                }
            }
            else {
                listImgs.push(URL.createObjectURL(b));
            }
        }, 'image/gif');
    }, 1000 / framerate);
}
In modern browsers you can use the MediaRecorder API together with the HTMLCanvasElement.captureStream method.
The MediaRecorder API can encode a MediaStream into a video or audio media file on the fly, which needs far less memory than grabbing still images.
const ctx = canvas.getContext('2d');
var x = 0;
anim();
startRecording();

function startRecording() {
    const chunks = []; // here we will store our recorded media chunks (Blobs)
    const stream = canvas.captureStream(); // grab our canvas MediaStream
    const rec = new MediaRecorder(stream); // init the recorder
    // every time the recorder has new data, we will store it in our array
    rec.ondataavailable = e => chunks.push(e.data);
    // only when the recorder stops, we construct a complete Blob from all the chunks
    rec.onstop = e => exportVid(new Blob(chunks, { type: 'video/webm' }));
    rec.start();
    setTimeout(() => rec.stop(), 3000); // stop recording in 3s
}

function exportVid(blob) {
    const vid = document.createElement('video');
    vid.src = URL.createObjectURL(blob);
    vid.controls = true;
    document.body.appendChild(vid);
    const a = document.createElement('a');
    a.download = 'myvid.webm';
    a.href = vid.src;
    a.textContent = 'download the video';
    document.body.appendChild(a);
}

function anim() {
    x = (x + 1) % canvas.width;
    ctx.fillStyle = 'white';
    ctx.fillRect(0, 0, canvas.width, canvas.height);
    ctx.fillStyle = 'black';
    ctx.fillRect(x - 20, 0, 40, 40);
    requestAnimationFrame(anim);
}
<canvas id="canvas"></canvas>
You can also use https://github.com/spite/ccapture.js/ to capture to a GIF or video.
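A rough sketch of how that library is typically wired up, based on its README; the workersPath, framerate, and the 90-frame cutoff are assumptions for the example:

// Capture a canvas animation to a GIF with ccapture.js.
const capturer = new CCapture({ format: 'gif', workersPath: 'js/', framerate: 30 });
let frames = 0;

capturer.start();
function loop() {
    // ... draw to the canvas here ...
    capturer.capture(canvas); // grab the current canvas contents as one frame
    if (++frames < 90) {
        requestAnimationFrame(loop);
    } else {
        capturer.stop();
        capturer.save(); // prompts a download of the assembled GIF
    }
}
requestAnimationFrame(loop);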