How do I detect a disconnect from an MJPEG stream in JavaScript

I am showing an MJPEG stream from an IP camera on a web page. The stream is shown using an image element, which is set by jQuery:
var view = $('<img>');
// The .load()/.error() shortcut methods were removed in jQuery 3; .on() works everywhere.
view.on('load', function() {
  console.log('loaded');
});
view.on('error', function() {
  console.log('error');
});
view.attr('src', 'http://camera_ip/videostream.mjpeg');
Both events fire neatly when their respective situations occur - until I disconnect the camera. The image freezes (of course). I want to detect this disconnection, to show the user an error message. I thought up a solution: copying frames several seconds apart from the image to a canvas and comparing the contents (a sketch of this follows below).
Is there an easier option?
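For reference, a minimal sketch of that frame-comparison idea (this assumes the camera sends CORS headers; without them, toDataURL throws a security error on the tainted canvas):
var canvas = document.createElement('canvas');
var ctx = canvas.getContext('2d');
var lastFrame = null;

setInterval(function () {
  var img = view[0]; // the underlying <img> element
  canvas.width = img.naturalWidth;
  canvas.height = img.naturalHeight;
  ctx.drawImage(img, 0, 0);
  var frame = canvas.toDataURL();
  if (frame === lastFrame) {
    console.log('stream appears frozen - camera disconnected?');
  }
  lastFrame = frame;
}, 5000);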

The only way I can think of doing this on the front end is creating an AJAX request exactly when you set the src attribute of the image. The AJAX request should call the "complete" callback when the mjpeg stream ends.
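A minimal sketch of that idea (assuming the camera accepts a second connection to the stream and sends CORS headers):
var probe = new XMLHttpRequest();
probe.open('GET', 'http://camera_ip/videostream.mjpeg');
probe.onreadystatechange = function () {
  // readyState 4 means the connection closed; for an endless MJPEG
  // stream, that only happens when the camera disconnects.
  if (probe.readyState === 4) {
    console.log('stream ended - camera disconnected?');
  }
};
probe.send();
Note that responseText grows without bound on an endless stream, so a real implementation should periodically abort and reopen the probe.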
If you are comfortable with node.js and/or websockets, you could alternatively set up an mjpeg proxy back end that serves up the mjpeg stream and emits a 'close' event to that client over a websocket when the stream ends. So it would look something like this (keep in mind, I still haven't figured out exactly how bufferToJPEG would parse out the single jpeg frame from the stream):
var http = require('http');

http.get('http://camera_ip/videostream.mjpeg', function(response) {
  var chunks = [];
  response.on('data', function(chunk) {
    // Binary chunks must be kept as Buffers; concatenating them onto a
    // string would corrupt the JPEG data.
    chunks.push(chunk);
    clientSocket.emit('imageFrame', bufferToJPEG(Buffer.concat(chunks)));
  });
  response.on('end', function() {
    clientSocket.emit('imageEnd');
  });
});
The problem with this (which I am trying to deal with in my own project right now) is that you then have to either associate a websocket with each image request, or emit the raw jpegs from the mjpeg stream as they come in over websockets (you can render those images with data uris on the front end).
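For the front end, a minimal sketch of rendering those frames (assuming socket.io delivers the raw JPEG as an ArrayBuffer and the page has an <img> with id "view"; an object URL is used here instead of a data URI to skip the base64 step):
var view = document.getElementById('view'); // the <img> element
var lastUrl = null;

socket.on('imageFrame', function (jpeg) {
  var url = URL.createObjectURL(new Blob([jpeg], { type: 'image/jpeg' }));
  view.src = url;
  if (lastUrl) URL.revokeObjectURL(lastUrl); // free the previous frame
  lastUrl = url;
});
socket.on('imageEnd', function () {
  console.log('camera stream ended');
});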
Hope that helped a little -- sorry you had to wait so long for a response.
edit: https://github.com/wilhelmbot/Paparazzo.js looks like a good way of proxying that image in the way that I described above.

Here is what I came up with after reading zigzackattack's answer. I use the "datauri" package for simplicity, but for more fine-grained control over the final image I also successfully tested the "node-canvas" package instead.
var http = require('http')
var mjpeg2jpegs = require('mjpeg2jpegs')
const Datauri = require('datauri')

var camURL = '/videostream.cgi?user=admin&pwd=password'
var camPort = 81
var camTimeout = 10000
var FPS_DIVIDER = 1

var options = {
  hostname: '192.168.1.241',
  port: camPort,
  path: camURL,
  timeout: camTimeout
}

function startCamStream (camName, options) {
  http.request(options, mjpeg2jpegs(function (res) {
    var data
    var pos = 0
    var count = 0
    res.on('imageHeader', function (header) {
      // Allocate a buffer for one full frame. Buffer.alloc replaces the
      // deprecated new Buffer(size) constructor.
      data = Buffer.alloc(parseInt(header['content-length'], 10))
      pos = 0
    })
    res.on('imageData', function (chunk) {
      chunk.copy(data, pos)
      pos += chunk.length
    })
    res.on('imageEnd', function () {
      if (count++ % FPS_DIVIDER === 0) {
        const datauri = new Datauri()
        datauri.format('.jpeg', data)
        // 'socket' is the app's websocket connection (e.g. socket.io), set up elsewhere.
        socket.emit(camName, datauri.content) // Send the image URI via websockets.
      }
    })
  })).on('timeout', function () {
    console.log('timeout')
    startCamStream(camName, options)
  }).end()
}

startCamStream('ipcam1', options)
Using Vue.js (optional), I simply bind the image URI with an img tag.
<img :src="ipcam1" alt="ipcam1" />
Increasing the FPS_DIVIDER variable reduces the FPS output. If you want to change the image when there is a timeout, you can send an "offline" image when it reaches the "timeout" callback.
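On the receiving side, a minimal sketch (assuming socket.io on the client and a Vue instance whose data has an ipcam1 property):
socket.on('ipcam1', function (uri) {
  app.ipcam1 = uri // the data URI string produced by datauri.format()
})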

Related

How do I access my local webcam while my python script is running on a server? (Face-Recognition in real time)

I have a webpage with multiple users, and I want each user to be able to log in using face recognition.
For this, I have two ways to access the webcam on local systems: the first is using JavaScript, and the second is using OpenCV in Python.
My problem is running face recognition in real time. The face-recognition script is hosted on a server. If I use JavaScript to access my webcam, how do I send each frame to the Python script for it to recognize and display the output? And if I use OpenCV in my Python script to access my webcam, it fails with an error because the script is running on a server,
which completely makes sense, because the server has no camera device. But what can be a workaround for this? How do I run face recognition in real time?
This is how I'm using the webcam using imutils:
vs = VideoStream(-1).start()
You can do this by taking images from the video stream in JavaScript. There is a detailed article on how to do that here on MDN.
Here's how to do that on a high-level, including sending the captured images over the network:
// Send the image data to the server
async function sendImage(image) {
  const data = new FormData();
  data.append("image", image);
  return fetch("/some-endpoint", {
    method: "POST",
    body: data,
  });
}

// Create a base64 image from the stream
function createImage(stream, { width = 640, height = 480 } = {}) {
  // Size the canvas to the capture dimensions (its default is 300x150)
  stream.canvas.width = width;
  stream.canvas.height = height;
  // Draw the current video frame to the canvas
  stream.ctx.clearRect(0, 0, width, height);
  stream.ctx.drawImage(stream.video, 0, 0, width, height);
  // Get a base64 image string from the canvas
  return stream.canvas.toDataURL("image/png");
}

// Get the video stream and attach it to an in-memory video element
function createVideoStream() {
  return new Promise(async (resolve, reject) => {
    try {
      const stream = await navigator.mediaDevices.getUserMedia({ video: true });
      const canvas = document.createElement("canvas");
      const video = document.createElement("video");
      const ctx = canvas.getContext("2d");
      video.onloadedmetadata = async () => {
        await video.play();
        resolve({ video, canvas, ctx });
      };
      video.srcObject = stream;
    } catch (err) {
      // Handle errors
      reject(err);
    }
  });
}

async function process(stream) {
  const image = createImage(stream);
  await sendImage(image);
  setTimeout(() => process(stream), 500);
}

createVideoStream().then(process);
Note that for increased performance, you could send these images through WebSockets instead of HTTP.
Here's a working demo of that code in action (without the server sending part).
The easiest and fastest way I would recommend is to implement a Python websocket on your server that receives each frame and processes it in real time.
Here is a sample using FastAPI WebSockets
import json

from fastapi import WebSocket, status

# router, intel, decode_to_numpy and encode_to_json are assumed to be
# defined elsewhere in the application.

@router.websocket("/")
async def websocket_endpoint(websocket: WebSocket):
    await websocket.accept()
    try:
        while True:
            dataTxt = await websocket.receive_text()
            data = json.loads(dataTxt)
            cmd = data['command']
            print("Received", cmd)
            frame = decode_to_numpy(data['frame'])
            processed_frame = None
            if cmd == "Monitoring":
                processed_frame = intel.detect(frame=frame, d_face=False, d_holistic=False, d_keyboard=False, d_mouse=False, d_present=False, d_object=False, d_volume=False)
            elif cmd == "Face Recognition":
                processed_frame = intel.detect(frame=frame, d_face=True, d_holistic=False, d_keyboard=False, d_mouse=False, d_present=False, d_object=False, d_volume=False)
            elif cmd == "Object Detection":
                processed_frame = intel.detect(frame=frame, d_face=False, d_holistic=False, d_keyboard=False, d_mouse=False, d_present=False, d_object=True, d_volume=False)
            else:
                processed_frame = intel.detect(frame=frame, d_face=False, d_holistic=False, d_keyboard=False, d_mouse=False, d_present=False, d_object=False, d_volume=False)
            encoded_frame = encode_to_json(processed_frame)
            res = {"message": "Frame Processed Successfully", "frame": encoded_frame}
            await websocket.send_text(json.dumps(res))
            print("Sent Response")
    except Exception as e:
        await websocket.close(code=status.WS_1011_INTERNAL_ERROR, reason=f"{e}")
The sample above sends and receives data as JSON strings: each incoming message is decoded to a Python object for processing, and the response is sent back the same way.
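A matching browser-side sketch (the endpoint URL is hypothetical; this reuses createVideoStream and createImage from the earlier answer and sends the JSON shape this endpoint expects):
const socket = new WebSocket("ws://your-server/ws"); // hypothetical URL

socket.onmessage = (event) => {
  const res = JSON.parse(event.data);
  // res.frame holds the processed frame produced by encode_to_json
  console.log(res.message);
};

createVideoStream().then(function send(stream) {
  if (socket.readyState === WebSocket.OPEN) {
    socket.send(JSON.stringify({
      command: "Face Recognition",
      frame: createImage(stream),
    }));
  }
  setTimeout(() => send(stream), 500);
});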

Using MP4box.js and onSegment callback is not called

Base problem: display an H264 live stream in a browser.
Solution: just convert it to fragmented MP4 and load it chunk-by-chunk via websocket (or XHR) into MSE.
Sounds easy enough. But I want to do the fragmentation on the client side with pure JS.
So I'm trying to use MP4Box.js. Its readme page states that it has a demo: "A player that performs on-the-fly fragmentation".
That's the thing I need!
However, the onSegment callbacks which should feed MSE are not called at all:
var ws;     // websocket
var mp4box; // fragmenter

function startVideo() {
  mp4box = MP4Box.createFile();
  mp4box.onError = function(e) {
    console.log("mp4box failed to parse data.");
  };
  mp4box.onMoovStart = function () {
    console.log("Starting to receive File Information");
  };
  mp4box.onReady = function(info) {
    console.log(info.mime);
    mp4box.onSegment = function (id, user, buffer, sampleNum) {
      console.log("Received segment on track " + id + " for object " + user + " with a length of " + buffer.byteLength + ", sampleNum=" + sampleNum);
    };
    var options = { nbSamples: 1000 };
    mp4box.setSegmentOptions(info.tracks[0].id, null, options); // I don't need the user object this time
    var initSegs = mp4box.initializeSegmentation();
    mp4box.start();
  };
  ws = new WebSocket("ws://a_websocket_server_which_serves_h264_file");
  ws.binaryType = "arraybuffer";
  ws.onmessage = function (event) {
    event.data.fileStart = 0; // tried also with event.data.byteOffset, but that resulted in an error.
    var nextBufferStart = mp4box.appendBuffer(event.data);
    mp4box.flush(); // tried commenting out - unclear documentation!
  };
}

window.onload = function() {
  startVideo();
};
Now putting this into an HTML file results in this output in the JavaScript console:
Starting to receive File Information
video/mp4; codecs="avc1.4d4028"; profiles="isom,iso2,avc1,iso6,mp41"
But nothing happens afterwards. Why is onSegment not called here? (The H264 file that the websocket server serves is playable in VLC; however, it is not fragmented.)
The problem was using nextBufferStart the wrong way. This is the correct usage:
var nextBufferStart = 0;
...
ws.onmessage = function (event) {
  // Tell MP4Box where this chunk starts in the overall file, using the
  // offset returned by the previous appendBuffer call.
  event.data.fileStart = nextBufferStart;
  nextBufferStart = mp4box.appendBuffer(event.data);
  mp4box.flush();
};

Fetch vs Request

I'm consuming a JSON stream and am trying to use fetch to do it. The stream emits some data every few seconds. Using fetch to consume the stream gives me access to the data only when the stream closes server side. For example:
var target; // the url.
var options = {
  method: "POST",
  body: bodyString,
};

var drain = function(response) {
  // hit only when the stream is killed server side.
  // response.body is always undefined. Can't use the reader it provides.
  return response.text(); // or response.json();
};

var listenStream = fetch(target, options).then(drain).then(console.log).catch(console.log);

/*
  returns data to the console log with a 200 code only when the server
  stream has been killed.
*/
However, there have been several chunks of data already sent to the client.
Using a node inspired method in the browser like this works every single time an event is sent:
var request = require('request');
var JSONStream = require('JSONStream');
var es = require('event-stream');
request(options)
.pipe(JSONStream.parse('*'))
.pipe(es.map(function(message) { // Pipe catches each fully formed message.
console.log(message)
}));
What am I missing? My instinct tells me that fetch should be able to mimic the pipe or stream functionality.
response.body gives you access to the response as a stream. To read a stream:
fetch(url).then(response => {
  const reader = response.body.getReader();
  reader.read().then(function process(result) {
    if (result.done) return;
    console.log(`Received a ${result.value.length} byte chunk of data`);
    return reader.read().then(process);
  }).then(() => {
    console.log('All done!');
  });
});
Here's a working example of the above.
Fetch streams are more memory-efficient than XHR, as the full response doesn't buffer in memory, and result.value is a Uint8Array making it way more useful for binary data. If you want text, you can use TextDecoder:
fetch(url).then(response => {
  const reader = response.body.getReader();
  const decoder = new TextDecoder();
  reader.read().then(function process(result) {
    if (result.done) return;
    const text = decoder.decode(result.value, {stream: true});
    console.log(text);
    return reader.read().then(process);
  }).then(() => {
    console.log('All done!');
  });
});
Here's a working example of the above.
TextDecoder is also available as a transform stream, TextDecoderStream, allowing you to do response.body.pipeThrough(new TextDecoderStream()), which is much simpler and allows the browser to optimise.
As for your JSON case, streaming JSON parsers can be a little big and complicated. If you're in control of the data source, consider a format that's chunks of JSON separated by newlines. This is really easy to parse, and leans on the browser's JSON parser for most of the work. Here's a working demo, the benefits can be seen at slower connection speeds.
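A minimal sketch of consuming that newline-delimited format with the reader (assuming every line the server sends is one complete JSON object):
fetch(url).then(response => {
  const reader = response.body.getReader();
  const decoder = new TextDecoder();
  let buffered = '';
  return reader.read().then(function process(result) {
    if (result.done) return;
    buffered += decoder.decode(result.value, {stream: true});
    const lines = buffered.split('\n');
    buffered = lines.pop(); // keep any trailing partial line for the next chunk
    lines.filter(line => line.trim()).forEach(line => console.log(JSON.parse(line)));
    return reader.read().then(process);
  });
});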
I've also written an intro to web streams, which includes their use from within a service worker. You may also be interested in a fun hack that uses JavaScript template literals to create streaming templates.
Turns out I could get XHR to work - which doesn't really answer the request vs. fetch question. It took a few tries and the right ordering of operations to get it right. Here's the abstracted code. @jaromanda was right.
var _tryXhr = function(target, data) {
  console.log(target, data);
  var xhr = new XMLHttpRequest();
  xhr.onreadystatechange = function () {
    console.log("state change.. state: " + this.readyState);
    console.log(this.responseText);
    if (this.readyState === 4) {
      // gets hit on completion.
    }
    if (this.readyState === 3) {
      // gets hit on new event
    }
  };
  xhr.open("POST", target);
  xhr.setRequestHeader("cache-control", "no-cache");
  xhr.setRequestHeader("Content-Type", "application/json");
  xhr.send(data);
};
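One caveat with readyState 3: responseText is cumulative, so each event sees everything received so far. A minimal sketch of extracting only the new data:
var seen = 0;
xhr.onreadystatechange = function () {
  if (this.readyState === 3 || this.readyState === 4) {
    var fresh = this.responseText.substring(seen); // only the bytes since last time
    seen = this.responseText.length;
    if (fresh) console.log("new chunk:", fresh);
  }
};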

How to send an image to a server with HTTP POST in JavaScript and store base64 in MongoDB

I'm having trouble getting into HTTP requests on the client side and storing images on the server side using MongoDB, so I'd appreciate help a lot. I need an easy example of how to add an image file as data to an HTTP POST request such as XMLHttpRequest. Let's say I know the URL of the server method. The source of the image is defined in imgsrc, and the name of the file is stored in name.
I have this at the moment:
var httpPost = new XMLHttpRequest();
httpPost.onreadystatechange = function(err) {
  if (httpPost.readyState == 4 && httpPost.status == 200) {
    console.log(httpPost.responseText);
  } else {
    console.log(err);
  }
};
var path = "http://127.0.0.1:8000/uploadImage/" + name;
httpPost.open("POST", path, true);
// I guess I have to add the image data into the httpPost here, but I don't know how
httpPost.send(null);
Then on the server side at that path, the following method will be called, and I want to store the URL of the base64-encoded image in MongoDB. How do I access the image from the httpPost?
function postNewImageType(req, res, next) {
  var newImageTypeData = {
    name: req.params.name,
    image: "placeholder.png"
  };
  var data = // how to access the image?
  var imageBuffer = decodeBase64Image(data);
  fs.writeFile(cfg.imageFolger + newImageTypeData._id + '.jpeg', imageBuffer.data, function(err) {
    if (err) return new Error(err);
    newImageTypeData.set({image: newImageTypeData._id + '.jpeg'});
    var image = new ImageType(newImageData);
  });
  imagetype.save(function (err) {
    if (error) { return next(new restify.InvalidArgumentError(JSON.stringify(error.errors))); }
    else { res.send(201, imagetype); }
  });
}
There are a number of ways that you can send your image data in the request to the server, but all of them will involve calling the send method of your XMLHttpRequest object with the data you wish to send as its argument.
The send method both dispatches the request to the remote server, and sets its argument as the body of that request. Since you're expecting Base64 encoded image data on your server, you'll first need to convert your image file to Base64 data on the client.
The simplest way to convert an image to Base64 on the client is by loading the image as an image element, drawing it to a canvas element, and then getting the Base64 representation of the canvas's image data.
That might look something like the following (given that the URL for the original image is stored in a variable named imgsrc, and the desired name is stored in name as stated):
// This function accepts three arguments: the URL of the image to be
// converted, the mime type of the Base64 image to be output, and a
// callback function that will be called with the data URL as its argument
// once processing is complete
var convertToBase64 = function(url, imagetype, callback) {
  var img = document.createElement('IMG'),
      canvas = document.createElement('CANVAS'),
      ctx = canvas.getContext('2d'),
      data = '';
  // Set the crossOrigin property of the image element to 'Anonymous',
  // allowing us to load images from other domains so long as that domain
  // has cross-origin headers properly set
  img.crossOrigin = 'Anonymous';
  // Because image loading is asynchronous, we define an event listener that
  // will be called when the image has been loaded (note: the property is
  // the all-lowercase 'onload'; 'onLoad' would never fire)
  img.onload = function() {
    // When the image is loaded, this function is called with the image
    // object as its context or 'this' value
    canvas.height = this.height;
    canvas.width = this.width;
    ctx.drawImage(this, 0, 0);
    data = canvas.toDataURL(imagetype);
    callback(data);
  };
  // We set the source of the image tag to start loading its data. We define
  // the event listener first, so that if the image has already been loaded
  // on the page or is cached the event listener will still fire
  img.src = url;
};

// Here we define the function that will send the request to the server.
// It accepts the image name and the base64 data as arguments
var sendBase64ToServer = function(name, base64) {
  var httpPost = new XMLHttpRequest(),
      path = "http://127.0.0.1:8000/uploadImage/" + name,
      data = JSON.stringify({image: base64});
  httpPost.onreadystatechange = function(err) {
    if (httpPost.readyState == 4 && httpPost.status == 200) {
      console.log(httpPost.responseText);
    } else {
      console.log(err);
    }
  };
  httpPost.open("POST", path, true);
  // Set the content type of the request to json since that's what's being
  // sent; note that setRequestHeader must be called after open()
  httpPost.setRequestHeader('Content-Type', 'application/json');
  httpPost.send(data);
};

// This wrapper function accepts the image url, name, and mime type and
// performs the request
var uploadImage = function(src, name, type) {
  convertToBase64(src, type, function(data) {
    sendBase64ToServer(name, data);
  });
};

// Call the function with the provided values. The mime type could also be
// png or webp
uploadImage(imgsrc, name, 'image/jpeg');
When the request is received by your server, the request body will contain the JSON string with your Base64 image within it. Since you haven't provided the server framework or database driver you're using for Mongo, I've adapted your code assuming that you're using Express and Mongoose with an ImageType model already defined in your application.
Since you can always construct the file name of the image record from its _id property and your image folder path, it doesn't necessarily make sense to save that as a property on the record, but I've preserved that functionality here, which will require you to save your record twice in one request cycle.
I've also changed the way any errors from the filesystem call are handled. The 'err' you get back from a filesystem error is already an Error object, and will need to be handled by your server in some way.
function postNewImageType(req, res, next) {
  var json = JSON.parse(req.body),
      newImageTypeData = {
        name: json.name,
        image: "placeholder.png"
      },
      imageBuffer = decodeBase64Image(json.image),
      newImageType = new ImageType(newImageTypeData);
  // First we save the image to Mongo to get an id
  newImageType.save(function(err) {
    if (err) return next(new restify.InvalidArgumentError(JSON.stringify(err.errors)));
    var fileName = cfg.imageFolder + newImageType._id + '.jpeg';
    fs.writeFile(fileName, imageBuffer.data, function(err) {
      // Handle the error in the next middleware function somehow
      if (err) return next(err);
      newImageType.set({image: newImageType._id + '.jpeg'});
      newImageType.save(function(err) {
        if (err) return next(new restify.InvalidArgumentError(JSON.stringify(err.errors)));
        res.send(201, newImageType);
      });
    });
  });
}
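decodeBase64Image isn't defined in the question; a minimal sketch of one (assuming the client sends a standard data URL, as sendBase64ToServer above does):
function decodeBase64Image(dataString) {
  // Split a 'data:image/jpeg;base64,...' string into its type and payload
  var matches = dataString.match(/^data:([A-Za-z-+\/]+);base64,(.+)$/);
  if (!matches || matches.length !== 3) {
    return new Error('Invalid input string');
  }
  return {
    type: matches[1],
    data: Buffer.from(matches[2], 'base64')
  };
}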

Accessing JSON data when PhantomJS onResourceReceived is triggered (ESPN fantasy football draft app)

So I'm trying to write a hook into ESPN fantasy football's HTML lite draft page to cross-reference player ranking lists (from a CSV file) to eliminate already-drafted players from the available pool. I've done this by hand in the past: but with a 16-team draft by the late rounds, it's nearly impossible to keep up since by then no one really knows who the players are.
I'm very much a Javascript and PhantomJS newbie, so please don't laugh.
At this point, I can see the page.onResourceReceived metadata in my console as the page's AJAX polling runs inside the PhantomJS instance. But I can't figure out how to access the data actually being received by the "browser". According to the "Preview" tab under Chrome's Network inspector, either a time-sync signal or the data of the actual player who was drafted is being sent to the browser in JSON format.
Long story short, how do I get the actual JSON data when I receive the page.onResourceReceived metadata?
(P.S. I know I commented out phantom.exit(); that's to keep the script from terminating after the redirect and onLoad is complete--I need to keep it running to listen for the draft updates)
var draft = 'http://games.espn.go.com/ffl/htmldraft?leagueId=1246633&teamId=8&fromTeamId=8';
var draftURL = encodeURIComponent(draft);
var page = require('webpage').create(),
    server = 'https://r.espn.go.com/espn/memberservices/pc/login',
    data = 'SUBMIT=1&failedLocation=&aff_code=espn_fantgames&appRedirect=' + draftURL + '&cookieDomain=.go.com&multipleDomains=true&username=[redacted]&password=[redacted]&submit=Sign+In';

page.onResourceReceived = function (response) {
  console.log('Response (#' + response.id + ', stage "' + response.stage + '"): ' + JSON.stringify(response));
};

page.open(server, 'post', data, function (status) {
  if (status !== 'success') {
    console.log('Unable to post!');
  } else {
    page.render('example.png');
    //console.log(page.content)
  }
  //phantom.exit();
});
The following version of your script will just grab and return the entire contents of the URL you are accessing. You are not really going to get useful json data, I don't think, just an html page, unless I'm missing something. In my tests, all I get is html:
var draft = 'http://games.espn.go.com/ffl/htmldraft?leagueId=1246633&teamId=8&fromTeamId=8';
var draftURL = encodeURIComponent(draft);
var page = require('webpage').create(),
    server = 'https://r.espn.go.com/espn/memberservices/pc/login',
    data = 'SUBMIT=1&failedLocation=&aff_code=espn_fantgames&appRedirect=' + draftURL + '&cookieDomain=.go.com&multipleDomains=true&username=[redacted]&password=[redacted]&submit=Sign+In';

page.open(server, 'post', data, function (status) {
  if (status == 'success') {
    var delay, checker = (function() {
      var html = page.evaluate(function () {
        return document.getElementsByTagName('html')[0].outerHTML;
      });
      if (html) {
        // setInterval handles are cleared with clearInterval, not clearTimeout
        clearInterval(delay);
        console.log(html);
        phantom.exit();
      }
    });
    delay = setInterval(checker, 100);
  } else {
    phantom.exit();
  }
});
Currently, PhantomJS doesn't include the response body in onResourceReceived events.
You could instead use SlimerJS, which mirrors the PhantomJS API but does allow you to access response.body (which should have the JSON data). Example here:
http://darrendev.blogspot.jp/2013/11/saving-downloaded-files-in-slimerjs-and.html
Alternatively, you could write a chrome extension and create a content script that grabs the data.
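A minimal content-script sketch of that last approach (assuming the draft page polls via XMLHttpRequest, and that the script is injected into the page context so it sees the page's own requests; the URL filter is hypothetical):
var origOpen = XMLHttpRequest.prototype.open;
XMLHttpRequest.prototype.open = function (method, url) {
  this.addEventListener('load', function () {
    if (url.indexOf('draft') !== -1) { // hypothetical filter for draft updates
      console.log('draft update:', this.responseText);
    }
  });
  return origOpen.apply(this, arguments);
};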
