Change video resolution with JS

I'm developing a video storage service for users, and I need large videos (e.g. 4K) to be compressed to 1080p before they are saved. Is there a JS library (browser or Node) that helps with this task? Maybe a web service?
I'm also open to suggestions for other languages.

When it comes to downscaling video, the most accessible option is ffmpeg.
There is a package that makes using ffmpeg in node.js easier: https://www.npmjs.com/package/fluent-ffmpeg
For example, downscaling a video to 1080p and 720p:
var ffmpeg = require('fluent-ffmpeg');

// Strip the directory and extension from a path, e.g. '/videos/clip.mp4' -> 'clip'
function baseName(str) {
  var base = String(str).substring(str.lastIndexOf('/') + 1);
  if (base.lastIndexOf('.') !== -1) {
    base = base.substring(0, base.lastIndexOf('.'));
  }
  return base;
}

var args = process.argv.slice(2);

args.forEach(function (val, index, array) {
  var filename = val;
  var basename = baseName(filename);
  console.log(index + ': Input File ... ' + filename);
  ffmpeg(filename)
    .output(basename + '-1280x720.mp4')
    .videoCodec('libx264')
    .size('1280x720')
    .output(basename + '-1920x1080.mp4')
    .videoCodec('libx264')
    .size('1920x1080')
    .on('error', function (err) {
      console.log('An error occurred: ' + err.message);
    })
    .on('progress', function (progress) {
      console.log('... frames: ' + progress.frames);
    })
    .on('end', function () {
      console.log('Finished processing');
    })
    .run();
});
(source: https://gist.github.com/dkarchmer/635496ff9280011b3eef)
You don't need any Node packages to run ffmpeg; you could also invoke it directly through the child_process API in Node.js.
Either way, the ffmpeg binary has to be installed on the server that will be running your application.
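For example, a minimal sketch using child_process directly (assuming the ffmpeg binary is on the PATH; the file names are placeholders):
const { spawn } = require('child_process');

// Sketch: downscale a video to 1080p by spawning ffmpeg directly, no npm packages needed.
const ffmpeg = spawn('ffmpeg', [
  '-i', 'input-4k.mp4',     // input file (placeholder)
  '-vf', 'scale=1920:1080', // downscale filter
  '-c:v', 'libx264',        // H.264 video codec
  'output-1080p.mp4'        // output file (placeholder)
]);

ffmpeg.stderr.on('data', (data) => {
  process.stderr.write(data); // ffmpeg reports progress on stderr
});

ffmpeg.on('close', (code) => {
  console.log('ffmpeg exited with code ' + code);
});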

.format("h264") works, but .format("mp4") on its own does not when the output is piped to a stream rather than written to a file, because the MP4 muxer needs a seekable output. Alternatively, keep .format("mp4") and add .outputOptions(['-movflags isml+frag_keyframe']) so ffmpeg produces a fragmented MP4 that can be written to a non-seekable stream.
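For illustration, a minimal sketch of the second approach, piping a downscaled, fragmented MP4 to a writable stream such as an HTTP response (the input path and the stream res are placeholders):
var ffmpeg = require('fluent-ffmpeg');

// Sketch: downscale to 1080p and pipe a fragmented MP4 to a writable stream.
ffmpeg('input-4k.mp4')
  .videoCodec('libx264')
  .size('1920x1080')
  .outputOptions(['-movflags isml+frag_keyframe']) // fragmented MP4, no seeking required
  .format('mp4')
  .on('error', function (err) {
    console.log('An error occurred: ' + err.message);
  })
  .pipe(res, { end: true });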

Related

Adding local file to zip

I'm trying to add a local file to the zip so that when the user downloads and unzips it, they get a folder containing a .dll and a config.json file:
var zip = new JSZip();
options.forEach(option => {
  zip.folder("REST." + option + ".Connector")
    .file("config.json", "//config for " + option)
    // I want this file to come from a local directory within my project,
    // e.g. {dir}\custom_rest_connector_repository\src\dlls\Connectors.RestConnector.dll
    .file('../dlls/Connectors.RestConnector.dll', null);
});
zip.generateAsync({ type: "blob" }).then(function (blob) {
  FileSaver.saveAs(blob, "REST_Connectors_" + dateStr + ".zip");
});
I read through the JSZip documentation but couldn't find an example or any information on whether this can actually be done.
If it can't, is there another, more robust library that does support this operation?
I found the answer to my own question using jszip-utils:
JSZipUtils.getBinaryContent("../dlls/Connectors.RestConnector.dll", function (err, data) {
  if (err) {
    throw err; // or handle the error
  }
  zip.file("../dlls/Connectors.RestConnector.dll", data, { binary: true });
});
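For completeness, a rough sketch of how that could slot into the loop from the question, waiting for every binary to load before generating the zip (the folder naming and DLL path are taken from the question; wrapping getBinaryContent in a Promise and the clean in-zip file name are my own additions):
var zip = new JSZip();

var loads = options.map(function (option) {
  return new Promise(function (resolve, reject) {
    JSZipUtils.getBinaryContent('../dlls/Connectors.RestConnector.dll', function (err, data) {
      if (err) {
        return reject(err);
      }
      zip.folder('REST.' + option + '.Connector')
        .file('config.json', '//config for ' + option)
        .file('Connectors.RestConnector.dll', data, { binary: true }); // store under a clean name inside the folder
      resolve();
    });
  });
});

Promise.all(loads).then(function () {
  return zip.generateAsync({ type: 'blob' });
}).then(function (blob) {
  FileSaver.saveAs(blob, 'REST_Connectors_' + dateStr + '.zip');
});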

Writing an image to file, received over an HTTP request in Node

I'm certain I'm missing something obvious, but the gist of the problem is that I'm receiving a PNG from a Mapbox call with the intent of writing it to the file system and serving it to the client. I've successfully relayed the call, received a response of raw data, and written a file. The problem is that my file ends up truncated no matter what path I take, and I've exhausted the answers I've found skirting the subject. I've dumped the raw response to the log and it looks complete, but any file I make tends to contain about one chunk's worth of unreadable data.
Here's the code I've got at present for writing the file. I tried this buffer approach as a last resort after several failed and comparably fruitless iterations. Any help would be greatly appreciated.
module.exports = function (req, res, cb) {
  var cartography = function () {
    return https.get({
      hostname: 'api.mapbox.com',
      path: '/v4/mapbox.wheatpaste/' + req.body[0] + ',' + req.body[1] + ',6/750x350.png?access_token=' + process.env.MAPBOX_API
    }, function (res) {
      var body = '';
      res.on('data', function (chunk) {
        body += chunk;
      });
      res.on('end', function () {
        var mapPath = 'map' + req.body[0] + req.body[1] + '.png';
        var map = new Buffer(body, 'base64');
        fs.writeFile(__dirname + '/client/images/maps/' + mapPath, map, 'base64', function (err) {
          if (err) throw err;
          cb(mapPath);
        });
      });
    });
  };
  cartography();
};
Your code can be rewritten as a more compact routine that pipes the response straight to a write stream:
const fs = require('fs');
const https = require('https');

https.get(url, (response) => { // the request itself
  if (response) {
    let imageName = 'image.png'; // for this purpose I usually use crypto
    response.pipe( // pipe the response into a write stream (file)
      fs.createWriteStream( // create the write stream
        './public/' + imageName // create a file named image.png
      )
    );
    return imageName; // if the public folder is set as the default in app.js
  } else {
    return false;
  }
});
You could derive the original name and extension from the URL, but it is safer to generate a new name with crypto and, as I said, get the file extension from the URL or with the read-chunk and file-type modules.
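For example, a small sketch of that naming approach (the url variable is assumed from the snippet above):
const crypto = require('crypto');
const path = require('path');

// Random, collision-resistant file name; extension taken from the request URL.
const extension = path.extname(url.split('?')[0]) || '.png';
const imageName = crypto.randomBytes(16).toString('hex') + extension;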

Possible to open and write to local files using javascript?

At work I have to repeat this same process multiple times:
Open a certain Dreamweaver file.
Look for all <p> tags and replace them with <h1> tags.
Look for all </p> tags and replace them with </h1>.
Look for the string 'Welcome' and replace with 'goodbye'.
Look for '0:01:00' and replace with '01:00'.
Copy everything in that file.
Create a new Dreamweaver file and paste everything in the new file.
Save the new file in a given directory and call it a certain name, which can be provided as a variable.
I don't need to run the JavaScript from a browser. It can be a JavaScript file which I just double click on the desktop.
Is it possible for me to do this with JavaScript / jQuery?
There are many other programming languages you could accomplish this task with, but if you really want to use JavaScript, you could do the following with Node.js:
var fs = require('fs');

if (process.argv.length < 4) {
  console.log('Usage: node replace.js fromFilePath toFilePath');
  return;
}

var from = process.argv[2];
var to = process.argv[3];

fs.readFile(from, { encoding: 'utf-8' }, function (err, data) {
  if (err) throw err;
  console.log('successfully opened file ' + from);

  var rules = {
    '<p>': '<h1>',
    '</p>': '</h1>',
    'Welcome': 'goodbye',
    '0:01:00': '01:00'
  };

  for (var index in rules) {
    console.log('Replacing ' + index + ' with ' + rules[index] + '...');
    data = data.replace(new RegExp(index, 'gi'), rules[index]);
    console.log('Done');
  }

  console.log('Result');
  console.log(data);

  console.log('Writing data to ' + to);
  fs.writeFile(to, data, function (err) {
    if (err) throw err;
    console.log('It\'s saved!');
  });
});
INSTRUCTIONS
Download node.js from here
Install it
Create a file in C:\replace.js (Win) or ~/replace.js (Mac OS)
Put the code from above in replace.js
Open cmd (press Win+R and type cmd on Win) or Terminal (on Mac OS)
Type node C:\replace.js <fileToReadFrom> <fileToSaveTo> on Win or node ~/replace.js <fileToReadFrom> <fileToSaveTo> on Mac OS
Done

node js azure SDK getBlobToStream uses lots of memory

I am writing a backup script that simply downloads all the blobs in all the blob containers of a specific Azure account.
The script uses async.js to make sure only so many tasks run at the same time, so it doesn't overload the server. When I run this script it works fine, but when it hits large files it runs out of memory. I'm guessing the download runs faster than the disk can write, and it eventually fills the in-memory buffer so badly that I run out of memory entirely, but debugging the exact cause has been impossible so far.
The specific function which appears to use a lot of memory is called as follows:
blobService.getBlobToStream(
  containerName,
  blob.name,
  fs.createWriteStream(fullPath),
  function (error) {
    if (error) { // Something went wrong: write it to the console, but finish the queue item and continue.
      console.log("Failed writing " + blob.name + " (" + error + ")");
      callback();
    } else { // Write the last modified date and finish the queue item silently.
      fs.writeFile(fullPath + ".date", blobLastModified, function (err) {
        if (err) console.log("Couldn't write .date file: " + err);
      });
      callback();
    }
  });
Even a single 700MB download will easily fill up 1GB of memory on my side.
Is there any way around this? Am I missing a parameter which magically prevents the Azure SDK from buffering everything and the kitchen sink?
Full code:
#!/usr/bin/env node

// Requires
var azure = require('azure');
var fs = require('fs');
var mkdirp = require('mkdirp');
var path = require('path');
var async = require('async');

var maxconcurrency = 1; // Max number of simultaneous getBlobsAndSaveThem() tasks running through async.js.

var blobService = azure.createBlobService();

backupPrefix = '/backups/azurebackup/'; // Always end with a '/'!!

// Main flow of the script is near the bottom of the file.
var containerProcessingQueue = async.queue(
  function getBlobsAndSaveThem(containerName) {
    console.log(containerName); // DEBUG
    blobService.listBlobs(containerName, function (error, blobs) {
      if (!error) {
        var blobProcessingQueue = async.queue(function (index, callback) {
          var blob = blobs[index];
          console.log(blob); // DEBUG
          var fullPath = backupPrefix + containerName + '/' + blob.name;
          var blobLastModified = new Date(blob.properties['last-modified']);

          // Only create the directory if it doesn't exist, since mkdirp fails if it does.
          // Do it synchronously, because otherwise it checks 99999 times whether the directory
          // exists simultaneously, doesn't find it, then fails to create it 99998 times.
          if (!fs.existsSync(path.dirname(fullPath))) {
            mkdirp.sync(path.dirname(fullPath), function (err) {
              console.log('Failed to create directory ' + path.dirname(fullPath) + " (" + err + ")");
            });
          }

          if (fs.existsSync(fullPath + ".date")) {
            if (blobLastModified == fs.readFileSync(fullPath + ".date").toString()) {
              callback();
              return; // If the file is unmodified, return. This won't exit the program, because it's called within a function definition (async.queue(function ...)).
            }
          }

          blobService.getBlobToStream(
            containerName,
            blob.name,
            fs.createWriteStream(fullPath),
            function (error) {
              if (error) { // Something went wrong: write it to the console, but finish the queue item and continue.
                console.log("Failed writing " + blob.name + " (" + error + ")");
                callback();
              } else { // Write the last modified date and finish the queue item silently.
                fs.writeFile(fullPath + ".date", blobLastModified, function (err) {
                  if (err) console.log("Couldn't write .date file: " + err);
                });
                callback();
              }
            });
        }, maxconcurrency);

        // Push new items to the queue for processing.
        for (var blobindex in blobs) {
          blobProcessingQueue.push(blobindex);
        }
      } else {
        console.log("An error occurred listing the blobs: " + error);
      }
    });
  }, 1);

blobService.listContainers(function (err, result) {
  for (var i = 0; i < result.length; i++) {
    containerProcessingQueue.push(result[i].name);
  }
});
For all those now curious: the option names for the range start and end have since changed; they are now just rangeStart and rangeEnd.
Here is the azure node documentation for more help
http://dl.windowsazure.com/nodestoragedocs/BlobService.html
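For illustration, with the newer option names the range-restricted call would look roughly like this (a sketch only, reusing the variable names from the chunked-download example below):
blobService.getBlobToStream(containerName, blobName, stream,
  { rangeStart: startPos, rangeEnd: endPos - 1 },
  function (error) {
    if (error) { throw error; }
    // then advance startPos and request the next chunk, as below
  });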
One thing you could do is read only a chunk of data into the stream instead of the whole blob, append that to the file, and then read the next chunk; the Blob Storage service supports that. If you look at the source code for getBlobToStream (https://github.com/WindowsAzure/azure-sdk-for-node/blob/master/lib/services/blob/blobservice.js), you can specify from/to bytes in the options: rangeStartHeader and rangeEndHeader. See if that helps.
I have hacked together some code which does just that (as you can see from it, my knowledge of Node.js is quite primitive :)). Please use this code just to get an idea of how to do a chunked download, as I think it still has some glitches.
var azure = require('azure');
var fs = require('fs');

var blobService = azure.createBlobService("account", "accountkey");
var containerName = "container name";
var blobName = "blob name";
var blobSize;
var chunkSize = 1024 * 512; // chunk size -- we'll read 512 KB at a time.
var startPos = 0;
var fullPath = "D:\\node\\";

var blobProperties = blobService.getBlobProperties(containerName, blobName, null, function (error, blob) {
  if (error) {
    throw error;
  } else {
    blobSize = blob.contentLength;
    fullPath = fullPath + blobName;
    console.log(fullPath);
    doDownload();
  }
});

function doDownload() {
  var stream = fs.createWriteStream(fullPath, { flags: 'a' });
  var endPos = startPos + chunkSize;
  if (endPos > blobSize) {
    endPos = blobSize;
  }
  console.log("Downloading " + (endPos - startPos) + " bytes starting from " + startPos + " marker.");
  blobService.getBlobToStream("test", blobName, stream,
    { "rangeStartHeader": startPos, "rangeEndHeader": endPos - 1 },
    function (error) {
      if (error) {
        throw error;
      } else {
        startPos = endPos;
        if (startPos <= blobSize - 1) {
          doDownload();
        }
      }
    });
}

Node.js copy remote file to server

Right now I'm using this script in PHP. I pass it the image and size (large/medium/small); if the image is already on my server it returns the link, otherwise it copies it from a remote server and then returns the local link.
function getImage($img, $size) {
  if (@filesize("./images/" . $size . "/" . $img . ".jpg")) {
    return './images/' . $size . '/' . $img . '.jpg';
  } else {
    copy('http://www.othersite.com/images/' . $size . '/' . $img . '.jpg', './images/' . $size . '/' . $img . '.jpg');
    return './images/' . $size . '/' . $img . '.jpg';
  }
}
It works fine, but I'm trying to do the same thing in Node.js and I can't seem to figure it out. The fs module doesn't appear to interact with remote servers, so I'm wondering if I'm just messing something up, or if it can't be done natively and a module is required.
Does anyone know of a way to do this in Node.js?
You should check out http.Client and http.ClientResponse. Using those you can make a request to the remote server and write out the response to a local file using fs.WriteStream.
Something like this:
var http = require('http');
var fs = require('fs');

var google = http.createClient(80, 'www.google.com');
var request = google.request('GET', '/', { 'host': 'www.google.com' });
request.end();

var out = fs.createWriteStream('out');

request.on('response', function (response) {
  response.setEncoding('utf8');
  response.on('data', function (chunk) {
    out.write(chunk);
  });
});
I haven't tested that, and I'm not sure it'll work out of the box. But I hope it'll guide you to what you need.
To give a more updated version (as the most recent answer is 4 years old, and http.createClient is now deprecated), here is a solution using the request module:
var fs = require('fs');
var request = require('request');

function getImage(img, size, filesize) {
  var imgPath = size + '/' + img + '.jpg';
  if (filesize) {
    return './images/' + imgPath;
  } else {
    request('http://www.othersite.com/images/' + imgPath).pipe(fs.createWriteStream('./images/' + imgPath));
    return './images/' + imgPath;
  }
}
If you can't use the remote user's password for some reason and need to authenticate with an identity key (RSA) instead, then programmatically running scp with child_process works well:
const { exec } = require('child_process');

exec(`scp -i /path/to/key username@example.com:/remote/path/to/file /local/path`,
  (error, stdout, stderr) => {
    if (error) {
      console.log(`There was an error ${error}`);
    }
    console.log(`The stdout is ${stdout}`);
    console.log(`The stderr is ${stderr}`);
  });
