NodeJS Streaming for Audio/Video with Client Side Control - javascript

I am building a simple streaming server to stream audio/video. The code below gets the job done, but when I click on the video's timeline nothing happens, because I cannot fetch from that point. I also need to send an event about the click to the Node.js server so it can stream from where I clicked.
This is what I have come up with so far. I send the file using the URL parameter sendFile="some file" and the Node server checks for that file. If it is found, I start getting the stream on the client side and put it directly into the video src. But I need more control over this, such as starting the stream from the point where the user clicks on the timeline.
app.get is used to read the location of the folder where I store the files.
I am using just Express and Node.js.
function FolderReaderMerger(path, pathToMerge, cb)
{
    log("Reading File/Folder To Combine : ");
    fs.readdir(path, function(err, audio)
    {
        fs.readdir(pathToMerge, function(err, audioText)
        {
            var obj = {};
            global.list.audio = audio;
            global.list.audioText = audioText;
            obj.audio = audio;
            obj.audioText = audioText;
            cb(obj);
        });
    });
}
app.get("/audio",function(req,resp)
{
log("App Audio File Serv : ");
var playFile = req.param("playFile");
var filePath = {};
filePath.status =false;
if(playFile!=undefined)
{
log("Params File : "+playFile);
/* log("Requested URL : "+req.url);
log("Total Audio Files : "+global.list.audio.length);*/
var i =0;
while(i!=global.list.audio.length)
{
if(playFile==global.list.audio[i])
{
log("File Found : "+playFile);
//get files location
log(app.get("audioPath")+playFile);
filePath.status = true;
var filePath = app.get("audioPath")+playFile;
log("FILE PATH : "+filePath);
var stat = fs.statSync(filePath);
log(stat);
log(stat.size);
resp.writeHead(200, {
'Content-Type': 'audio/mpeg',
'Content-Length': stat.size
});
var readStream = fs.createReadStream(filePath);
// We replaced all the event handlers with a simple call to readStream.pipe()
log("Streaming.......");
readStream.pipe(resp);
}
else
{
log("Requested Resource Not Found");
}
i+=1;
}
//readStream = fs.createReadStream(app.get("audioPath"));
}
else
{
log("Requested Params Not Found");
resp.send(filePath);
}
});
http.listen(app.get("PORT"), function()
{
    console.log("--------------------------------------------------------");
    console.log("Server Started On PORT: " + app.get("PORT"));
    console.log("All NPM Initialized");
    console.log("Please Wait Checking Connection Status...");
    console.log("--------------------------------------------------------");
});
<video src="http://localhost:3000/audio?playFile=some media file"></video>
Any help would be appreciated. This works on the client side, but I also need more control over clicks and events.
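As for seeking: when the user clicks the timeline, the browser's video/audio element re-requests the media on its own with an HTTP Range header, so no separate click event has to be sent to Node. The server only needs to honour that header and reply with 206 Partial Content. A minimal, untested sketch of what the writeHead/pipe part of the route above could look like (filePath and stat as already computed there):
// Hedged sketch: serve the byte range the browser asks for when the user seeks.
var range = req.headers.range; // e.g. "bytes=1048576-"
if (range) {
    var parts = range.replace(/bytes=/, "").split("-");
    var start = parseInt(parts[0], 10);
    var end = parts[1] ? parseInt(parts[1], 10) : stat.size - 1;
    resp.writeHead(206, {
        'Content-Type': 'audio/mpeg',
        'Content-Range': 'bytes ' + start + '-' + end + '/' + stat.size,
        'Accept-Ranges': 'bytes',
        'Content-Length': (end - start) + 1
    });
    // createReadStream can start reading at an arbitrary byte offset
    fs.createReadStream(filePath, { start: start, end: end }).pipe(resp);
} else {
    // no Range header: fall back to sending the whole file as before
    resp.writeHead(200, {
        'Content-Type': 'audio/mpeg',
        'Content-Length': stat.size
    });
    fs.createReadStream(filePath).pipe(resp);
}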

Related

WebRTC issue with File transfer in production application

Hello, I am developing a WebRTC-based file transfer application. I have deployed the application online using Heroku, but there seems to be a problem with file transfer, specifically at the receiving end, which I have been unable to figure out. The file is transferred and received successfully when done on localhost, but there is a problem with file reception in production. The browser used is Google Chrome, if that's any help.
Here is the File reception code:
function dataChannelStateChanged() {
    if (dataChannel.readyState === 'open') {
        console.log("Data Channel open");
        dataChannel.onmessage = receiveDataChannelMessage;
    }
}

function receiveDataChannel(event) {
    console.log("Receiving a data channel");
    dataChannel = event.channel;
    dataChannel.onmessage = receiveDataChannelMessage;
}

function receiveDataChannelMessage(event) {
    console.log("From DataChannel: " + event.data);
    if (fileTransferring) {
        // Now here is the file handling code:
        fileBuffer.push(event.data);
        fileSize += event.data.byteLength;
        fileProgress.value = fileSize;
        // Provide link to downloadable file when complete
        if (fileSize === receivedFileSize) {
            var received = new window.Blob(fileBuffer);
            fileBuffer = [];
            downloadLink.href = URL.createObjectURL(received);
            downloadLink.download = receivedFileName;
            downloadLink.appendChild(document.createTextNode(receivedFileName + "(" + fileSize + ") bytes"));
            fileTransferring = false;
            // Also put the file in the text chat area
            var linkTag = document.createElement('a');
            linkTag.href = URL.createObjectURL(received);
            linkTag.download = receivedFileName;
            linkTag.appendChild(document.createTextNode(receivedFileName));
            var div = document.createElement('div');
            div.className = 'message-out';
            div.appendChild(linkTag);
            messageHolder.appendChild(div);
        }
    }
    else {
        appendChatMessage(event.data, 'message-out');
    }
}
The following image shows the problem I'm encountering when receiving the file:
Any guidance would be much appreciated.
WebRTC is based on UDP, which means there is no guarantee that your sequence of data transfers in order or successfully.
Use a WebSocket or a simple HTTP request instead.
=============
UPDATE: see the first comment
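For reference, a minimal, untested sketch of the WebSocket alternative suggested above, sending the file in fixed-size chunks from the browser (the endpoint URL and the final "done" message format are assumptions, not part of the original code):
// Hedged sketch: send a File in chunks over a WebSocket instead of a data channel.
var CHUNK_SIZE = 16 * 1024;                              // 16 KB per message
var socket = new WebSocket("wss://example.com/upload");  // hypothetical endpoint
socket.binaryType = "arraybuffer";

function sendFileOverWebSocket(file) {
    var offset = 0;
    var reader = new FileReader();

    reader.onload = function () {
        socket.send(reader.result);                      // one chunk
        offset += CHUNK_SIZE;
        if (offset < file.size) {
            readNextChunk();                             // keep going until the file is done
        } else {
            socket.send(JSON.stringify({ done: true, name: file.name, size: file.size }));
        }
    };

    function readNextChunk() {
        reader.readAsArrayBuffer(file.slice(offset, offset + CHUNK_SIZE));
    }

    if (socket.readyState === WebSocket.OPEN) {
        readNextChunk();
    } else {
        socket.onopen = readNextChunk;
    }
}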

Display a message upon the beginning and completion of a function in OSC API

The idea is to allow me to press a button on the HTML page to execute a command that copies and then deletes all photos on the cameras, with feedback shown at the beginning and end of the execution.
At the moment, after clicking "Get Images From Camera", the textarea shows this text:
Executed command: /copyImages
Result is as below: Copying images from both cameras...\n
And it goes on to copy and delete all images like I want. But at the end of this process, nothing is returned to the screen, so the user has no idea what happened. The nature of callbacks in Node.js makes it too confusing for me to figure out how to do this.
P.S. I've tried everything I know before coming here for your help, so any suggestions are very much appreciated!
So, my question is: how do I change the code below so that I can display a message showing the user that the copying has completed successfully, like:
Please wait for the copying to complete...
Completed!
Below is the HTML markup:
<button id="copyImages" type="button" class="button">Get Images From Camera</button>
<textarea id="output" readonly></textarea>
Here is the JavaScript event handling:
copyImages.onclick = function() {
    dest = '/copyImages';
    writeToOutput(dest);
}

function writeToOutput(dest) {
    $.get(dest, null, function(data) {
        resultText += "Executed command: " + dest + "\n"
            + "Result is as below: \n" + data;
        $("#output").val(resultText);
    }, "text");
    return true;
}
The functions below set up a Node app server using the Express module to listen for anything the HTML page passes to it. They run on a different device.
expressServer.listen(expressPort, function() {
    console.log('expressServer listening at *:%d', expressPort);
});

// allow CORS on the express server
expressServer.use(function(req, res, next) {
    // enable cross origin resource sharing to allow the html page to access commands
    res.header("Access-Control-Allow-Origin", "*");
    // log the URL that is being accessed, left in for clarity
    console.log("\n" + req.url);
    next();
});

expressServer.get('/copyImages', function (req, res) {
    // user accesses /copyImages and the copyImages function is called
    copyImages(function(result) {
        res.end(result + "\n");
    });
});
Copy images from the Theta S cameras to the Raspberry Pi and delete them from the cameras:
var resultCopyImages = "";
copyImages = function (callback) {
    resultCopyImages = "Copying images from both cameras...\n";
    for (var i = 0; i < camArray.length; i++) {
        copyOneCamImages(i, callback);
    }
    return (callback(resultCopyImages));
    // how to return multiple messages?
}
copyOneCamImages = function (camID, callback) {
    d.on('error', function (err) {
        console.log('There was an error copying the images');
        return (callback('There was an error running a function, please make sure all cameras are connected and restart the server'));
    })
    d.run(function () {
        var imageFolder = baseImageFolder + camID;
        // if the directory does not exist, make it
        if (!fs.existsSync(imageFolder)) {
            fs.mkdirSync(imageFolder);
            console.log("no 'images' folder found, so a new one has been created!");
        }
        // initialise total images, approximate time
        var totalImages = 0;
        var approxTime = 0;
        // get the first image and do not include the thumbnail
        var entryCount = 1;
        var includeThumb = false;
        var filename;
        var fileuri;
        // get the total number of images
        camArray[camID].oscClient.listImages(entryCount, includeThumb)
            .then(function (res) {
                totalImages = res.results.totalEntries;
                approxTime = totalImages * 5;
                resultCopyImages = 'Camera ' + (camID + 1) + ': Copying a total of: ' + totalImages + ' images'
                    + '\nTo folder: ' + imageFolder
                    + '\nThis process will take approximately: ' + approxTime + ' seconds \n';
                console.log(resultCopyImages);
                callback(resultCopyImages);
            });
        // copy a single image, with the same name, and put it in the images folder
        camArray[camID].oscClient.listImages(entryCount, includeThumb)
            .then(function (res) {
                filename = imageFolder + '/' + res.results.entries[0].name;
                fileuri = res.results.entries[0].uri;
                imagesLeft = res.results.totalEntries;
                // get the image data
                camArray[camID].oscClient.getImage(res.results.entries[0].uri)
                    .then(function (res) {
                        var imgData = res;
                        fs.writeFile(filename, imgData);
                        camArray[camID].oscClient.delete(fileuri).then(function () {
                            if (imagesLeft != 0) {
                                // call itself to continue copying if images are left
                                callback(copyOneCamImages(camID, callback));
                                //????????????????????????????????????????????????????????????????????????????
                                // if (imagesLeft == 1) return (callback("Finished copying"));
                            }
                            /* else {
                                resultCopyImages = "Finished copying image.\n";
                                console.log(resultCopyImages);
                                return (callback(resultCopyImages));
                            } */
                        });
                    });
            });
    })
}
So far there is no real answer to the question I asked, so we have concluded the project and skipped the feature. However, it's just a matter of mastering the REST API and asynchronous functions in Node.js. The project is expected to continue with a new version sometime next year.
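For what it's worth, a minimal, untested sketch of one way this could be done: show the "please wait" text on the client as soon as the button is clicked, and have the server reply only once every camera has finished, by promisifying the per-camera copy (copyOneCamImagesAsync below is a hypothetical Promise-returning version of copyOneCamImages, not part of the original code):
// Client side: show feedback immediately, then wait for the single completion response.
copyImages.onclick = function () {
    $("#output").val("Please wait for the copying to complete...\n");
    $.get('/copyImages', null, function (data) {
        $("#output").val($("#output").val() + data);   // "Completed!" arrives here
    }, "text");
};

// Server side: respond only after every camera has finished.
expressServer.get('/copyImages', function (req, res) {
    var jobs = camArray.map(function (cam, camID) {
        return copyOneCamImagesAsync(camID);           // hypothetical Promise-returning copy
    });
    Promise.all(jobs)
        .then(function () { res.end("Completed!\n"); })
        .catch(function (err) { res.end("Copying failed: " + err + "\n"); });
});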

How to post/upload recorded video as a blob in MVC using HTML5/JavaScript on the client side and C# code in the controller?

I am building a video surveillance application where the requirement is to save recorded video feeds to the server so that they can be served up to the viewer later. I am using the MediaRecorder API to do so, and since the continuous stream would end up as a very large file that is difficult to post all at once, I plan to chop the stream blob into multiple chunks and keep posting them periodically. Recording is toggled with a switch on the HTML page, and then JavaScript takes over.
Here is the code that I have so far:
HTML:
some code...
<div class="onoffswitch">
    <input type="checkbox" name="onoffswitch" class="onoffswitch-checkbox" id="switch1">
    <label class="onoffswitch-label" for="switch1" onclick="toggleVideoFeed();">
        <span class="onoffswitch-inner"></span>
        <span class="onoffswitch-switch"></span>
    </label>
</div>
some code...
JavaScript:
var mediaRecorder;
var pushInterval = 6000;
var id, counter = 0;
// var formData;

function toggleVideoFeed() {
    var element = document.getElementById("switch1");
    element.onchange = (function (onchange) {
        return function (evt) {
            // reference to event to pass argument properly
            evt = evt || event;
            // if an existing event handler already existed then execute it.
            if (onchange) {
                onchange(evt);
            }
            if (evt.target.checked) {
                startRecord();
            } else {
                stopRecord();
            };
        }
    })(element.onchange);
}

var dataAvailable = function (e) {
    var formData = new FormData();
    var fileName = "blob" + counter + ".mp4";
    console.log("data size: ", e.data.size);
    var encodeData = new Blob([e.data], { type: 'multipart/form-data' });
    formData.append("blob", encodeData, fileName);
    var request = new XMLHttpRequest();
    request.open("POST", "/Device/Upload", false);
    request.send(formData);
    counter++;
}

function startRecord() {
    navigator.getUserMedia = (navigator.getUserMedia ||
        navigator.webkitGetUserMedia ||
        navigator.mozGetUserMedia ||
        navigator.msGetUserMedia);
    if (navigator.getUserMedia) {
        navigator.getUserMedia(
            // constraints
            {
                video: true,
                audio: false
            },
            // successCallback
            function (stream) {
                mediaRecorder = new MediaRecorder(stream);
                mediaRecorder.start();
                mediaRecorder.ondataavailable = dataAvailable;
                // setInterval(function () { mediaRecorder.requestData() }, 10000);
            },
            // errorCallback
            function (err) {
                console.log("The following error occured: " + err);
            }
        );
    } else {
        console.log("getUserMedia not supported");
    }
}

function stopRecord() {
    mediaRecorder.stop();
}
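As a side note, instead of the commented-out setInterval/requestData call, MediaRecorder.start() accepts a timeslice in milliseconds, so ondataavailable fires periodically on its own. A small sketch using the pushInterval variable declared above (the chunks after the first will still not carry their own container header, as the answer below explains):
// Hedged sketch: let MediaRecorder emit a chunk every pushInterval milliseconds.
mediaRecorder = new MediaRecorder(stream);
mediaRecorder.ondataavailable = dataAvailable;   // register the handler before starting
mediaRecorder.start(pushInterval);               // timeslice in ms: fires ondataavailable periodically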
Controller-C#
[HttpPost]
public JsonResult Upload(HttpPostedFileBase blob)
{
    string fileName = blob.FileName;
    int i = Request.Files.Count;
    blob.SaveAs(@"C:\Users\priya_000\Desktop\Videos\" + fileName);
    return Json("success: " + i + fileName);
}
THE PROBLEM:
When I try playing the received .mp4 files as I get them on the server end, I can play just the first file, blob0; the rest, although they show sizes similar to the first file (4 MB each), do not contain any video data. Is it possible that the data I receive on the other end is corrupt/garbled? Or is there something wrong with my code? Please help, guys. I have been trying to solve this problem for the last 10 days with no clue how to figure it out.
MP4 files have a header at the beginning of the file. The header contains information used to initialize the decoder, and without it the file cannot be played. Try concatenating the files (the first one and the second one) and see if the whole file plays. If yes, then the problem is a missing header. If not, please send me the two files so I can inspect them and see what exactly is stored in them.
There is no way, AFAIK, to instruct MediaRecorder to append the header to each blob of video. You have to do it manually.
PS. Sorry for the late response.
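One quick, untested way to run that concatenation check with Node on the receiving machine, assuming the first two chunks were saved as blob0.mp4 and blob1.mp4:
// Hedged sketch: join the first two received chunks and see whether the result plays.
var fs = require('fs');

var first = fs.readFileSync('blob0.mp4');   // contains the container header
var second = fs.readFileSync('blob1.mp4');  // header-less continuation

fs.writeFileSync('joined.mp4', Buffer.concat([first, second]));
// If joined.mp4 plays past the duration of blob0.mp4, the later chunks are fine
// and the only problem is the missing header, as described above.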
You can use RecordRTC by Muaz Khan and use the MVC Section to record and save video to the server.

How to Save As using Node-Webkit

Using Node-Webkit, the following page,
https://github.com/rogerwang/node-webkit/wiki/File-dialogs
describes that you can use
<input type="file" nwsaveas="filename.txt" />
to open a File Save dialog.
However, it does not explain how you would write the data to the filesystem.
I expected/imagined something simple like:
var directory = FileOpen();
fs.writeFile(directory + "myfile.png", buffer);
Is there any explanation for this?
You are right: after you trigger the Save As dialog, you are prompted to specify a name, and you can then read the chosen file path like this.
Sample Code (using jQuery):
$("#save").trigger("click");
$("#save").on("change", function () {
var filePath = $(this).val();
if (filePath !== "") {
var fs = require("fs");
fs.writeFile(filePath, "Hello World", function (err) {
if (err)
alert("Unable to save file");
else
console.log("saved. ");
});
}
else {
// User cancelled
}
});
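Since the question mentions writing a PNG buffer rather than a string, a hedged variant of the same handler, assuming buffer already holds the binary image data (for example a Node Buffer):
// Hedged sketch: write binary data instead of a string once a path has been chosen.
$("#save").on("change", function () {
    var filePath = $(this).val();
    if (filePath !== "") {
        var fs = require("fs");
        fs.writeFile(filePath, buffer, function (err) {   // buffer: your PNG data as a Node Buffer
            if (err)
                alert("Unable to save file");
            else
                console.log("saved " + filePath);
        });
    }
});
$("#save").trigger("click");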

How to upload base64 image resource with dropzone?

I'm trying to upload client-side generated documents (images for the moment) with Dropzone.js.
// .../init.js
var myDropzone = new Dropzone("form.dropzone", {
    autoProcessQueue: true
});
Once the client has finished his job, he just has to click a save button which calls the save function:
// .../save.js
function save(myDocument) {
    var file = {
        name: 'Test',
        src: myDocument,
    };
    console.log(myDocument);
    myDropzone.addFile(file);
}
The console.log() correctly returns the content of my document:
data:image/png;base64,iVBORw0KGgoAAAANS...
At this point, we can see the progress bar uploading the document in the drop zone, but the upload fails.
Here is my (standard Dropzone) HTML form:
<form action="/upload" enctype="multipart/form-data" method="post" class="dropzone">
    <div class="dz-default dz-message"><span>Drop files here to upload</span></div>
    <div class="fallback">
        <input name="file" type="file" />
    </div>
</form>
I have a Symfony2 controller which receives the POST request.
// Get request
$request = $this->get('request');
// Get files
$files = $request->files;
// Upload
$do = $service->upload($files);
Uploading from the dropzone (by drag and drop or click) works and the uploads are successful, but using the myDropzone.addFile() function returns an empty object in my controller:
var_dump($files);
return
object(Symfony\Component\HttpFoundation\FileBag)#11 (1) {
["parameters":protected]=>
array(0) {
}
}
I think I am not setting up my file variable correctly in the save function.
I tried to create a JS image (var img = new Image() ...) but without any success.
Thanks for your help!
Finally I found a working solution without creating a canvas:
function dataURItoBlob(dataURI) {
    'use strict'
    var byteString,
        mimestring

    if (dataURI.split(',')[0].indexOf('base64') !== -1) {
        byteString = atob(dataURI.split(',')[1])
    } else {
        byteString = decodeURI(dataURI.split(',')[1])
    }

    mimestring = dataURI.split(',')[0].split(':')[1].split(';')[0]

    var content = new Array();
    for (var i = 0; i < byteString.length; i++) {
        content[i] = byteString.charCodeAt(i)
    }

    return new Blob([new Uint8Array(content)], { type: mimestring });
}
And the save function:
function save(dataURI) {
    var blob = dataURItoBlob(dataURI);
    myDropzone.addFile(blob);
}
The file appears correctly in dropzone and is successfully uploaded.
I still have to work on the filename (my document is named "blob").
The dataURItoBlob function was found here: Convert Data URI to File then append to FormData
[EDIT]: I finally wrote the function in dropzone to do this job. You can check it here: https://github.com/CasperArGh/dropzone
And you can use it like this:
var dataURI = 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAmAAAAKwCAYAAA...';
myDropzone.addBlob(dataURI, 'test.png');
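If you would rather not patch Dropzone itself, another hedged option for the filename issue mentioned above is to wrap the blob in a named File before adding it (the File constructor is not available in very old browsers):
// Hedged sketch: give the blob a real filename without modifying dropzone.js.
function saveNamed(dataURI, fileName) {
    var blob = dataURItoBlob(dataURI);
    var namedFile = new File([blob], fileName, { type: blob.type });
    myDropzone.addFile(namedFile);
}

saveNamed('data:image/png;base64,iVBORw0KGgoAAAANS...', 'test.png');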
I can't comment currently and wanted to send this to you.
I know you found your answer, but I had some trouble using your Git code and reshaped it a little for my needs; I am about 100% positive this will work for every possible need to add a file or a blob or anything and be able to apply a name to it.
Dropzone.prototype.addFileName = function (file, name) {
    file.name = name;
    file.upload = {
        progress: 0,
        total: file.size,
        bytesSent: 0
    };
    this.files.push(file);
    file.status = Dropzone.ADDED;
    this.emit("addedfile", file);
    this._enqueueThumbnail(file);
    return this.accept(file, (function (_this) {
        return function (error) {
            if (error) {
                file.accepted = false;
                _this._errorProcessing([file], error);
            } else {
                file.accepted = true;
                if (_this.options.autoQueue) {
                    _this.enqueueFile(file);
                }
            }
            return _this._updateMaxFilesReachedClass();
        };
    })(this));
};
Add this to dropzone.js (I did it just below the line with Dropzone.prototype.addFile = function(file) {, potentially around line 1110).
It works like a charm and is used just the same as any other method: myDropzone.addFileName(file, name)!
Hopefully someone finds this useful and doesn't need to recreate it!
1) You say that: "Once the client has finished his job, he just has to click a save button which calls the save function:"
This implies that you set autoProcessQueue: false and intercept the button click to execute the save() function.
$("#submitButton").click(function(e) {
// let the event not bubble up
e.preventDefault();
e.stopPropagation();
// process the uploads
myDropzone.processQueue();
});
2) check form action
Check that your form action="/upload" is routed correctly to your SF controller & action.
3) Example Code
You may find a full example over at the official Wiki
4) OK, thanks to your comments, I understand the question better:
"How can I save my base64 image resource with Dropzone?"
You need to embed the image content as a value:
// base64 data
var dataURL = canvas.toDataURL();
// insert the data into the form
document.getElementById('image').value = canvas.toDataURL('image/png');
//or jQ: $('#img').val(canvas.toDataURL("image/png"));
// trigger submit of the form
document.forms["form1"].submit();
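The snippet above assumes the form already contains a field to hold the data URL. A hedged sketch of building that field on the fly in JavaScript (the id 'image' and form name 'form1' match the snippet above but are otherwise arbitrary):
// Hedged sketch: add a hidden field for the canvas data and submit the form.
var form = document.forms["form1"];                   // form name assumed from the snippet above
var hidden = document.createElement('input');
hidden.type = 'hidden';
hidden.id = 'image';
hidden.name = 'image';                                // the server reads the data URL from this field
hidden.value = canvas.toDataURL('image/png');
form.appendChild(hidden);
form.submit();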
You might run into trouble doing this and might need to set the "origin-clean" flag to "true"; see http://www.whatwg.org/specs/web-apps/current-work/multipage/the-canvas-element.html#security-with-canvas-elements
how to save html5 canvas to server
