Node.js - How to convert chunks of data to a new Buffer? - javascript

In Node.js, I have chunks of data from a file upload that saved the file in parts. I'd like to convert these chunks with new Buffer() and then upload the result to Amazon S3.
This works if there is only one chunk, but when there are multiple, I cannot figure out how to build the Buffer.
Currently my solution is to write the chunks of data into a real file on my own server, then send the path of that file to Amazon S3.
How can I skip the file-creation step and send the buffer to Amazon S3 directly?
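For the Buffer step itself, Buffer.concat can join multiple chunks into a single Buffer. A minimal sketch, assuming the saved parts are already available as an array of Buffer objects (the name chunks is illustrative):
// chunks: array of Buffer objects, one per saved part (illustrative name)
var fileBuffer = Buffer.concat(chunks);
console.log('combined size:', fileBuffer.length); // one Buffer holding the whole file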

I guess you need to use the streaming-s3 module:
var streamingS3 = require('streaming-s3');

var uploadFile = function (fileReadStream, awsHeader, cb) {
  // set options for the streaming module
  var options = {
    concurrentParts: 2,
    waitTime: 20000,
    retries: 2,
    maxPartSize: 10 * 1024 * 1024
  };

  // call stream function to upload the file to S3
  // (`aws` is assumed to be a config object holding your accessKey/secretKey)
  var uploader = new streamingS3(fileReadStream, aws.accessKey, aws.secretKey, awsHeader, options);

  // start uploading
  uploader.begin(); // important if callback not provided

  // handle these events
  uploader.on('data', function (bytesRead) {
    console.log(bytesRead, ' bytes read.');
  });

  uploader.on('part', function (number) {
    console.log('Part ', number, ' uploaded.');
  });

  // All parts uploaded, but upload not yet acknowledged.
  uploader.on('uploaded', function (stats) {
    console.log('Upload stats: ', stats);
  });

  uploader.on('finished', function (response, stats) {
    console.log(response);
    cb(null, response);
  });

  uploader.on('error', function (err) {
    console.log('Upload error: ', err);
    cb(err);
  });
};
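A usage sketch for the function above (not part of the original answer): assuming the chunks have been combined into a single Buffer as shown earlier, and that the awsHeader argument carries the S3 params (Bucket, Key, ContentType) the way streaming-s3 expects, the call might look like this:

var stream = require('stream');

// wrap the combined Buffer in a readable stream for streaming-s3
var bufferStream = new stream.PassThrough();
bufferStream.end(fileBuffer); // fileBuffer = Buffer.concat(chunks) from earlier

// illustrative S3 params
var awsHeader = {
  Bucket: 'my-bucket',
  Key: 'uploads/myfile.jpg',
  ContentType: 'image/jpeg'
};

uploadFile(bufferStream, awsHeader, function (err, response) {
  if (err) return console.error('Upload failed:', err);
  console.log('Upload finished:', response);
});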

Related

Upload file to Google Drive after HTTP GET request

I have two functions in separate files to split up the workflow.
const download = function(url) {
  const file = fs.createWriteStream("./test.png");
  const request = https.get(url, function(response) {
    response.pipe(file);
  });
}
This function in my fileHelper.js is supposed to take a URL pointing to an image and save it locally as test.png.
function uploadFile(filePath) {
  fs.readFile('credentials.json', (err, content) => {
    if (err) return console.log('Error loading client secret file:', err);
    // Authorize a client with credentials, then call the Google Drive API.
    authorize(JSON.parse(content), function (auth) {
      const drive = google.drive({version: 'v3', auth});
      const fileMetadata = {
        'name': 'testphoto.png'
      };
      const media = {
        mimeType: 'image/png',
        body: fs.createReadStream(filePath)
      };
      drive.files.create({
        resource: fileMetadata,
        media: media,
        fields: 'id'
      }, (err, file) => {
        if (err) {
          // Handle error
          console.error(err);
        } else {
          console.log('File Id: ', file.id);
        }
      });
    });
  });
}
This function in my googleDriveHelper.js is supposed to take the file path it is called with and upload that stream to my Google Drive. The two functions work on their own, but https.get runs asynchronously, so if I call googleDriveHelper.uploadFile(filePath) right after the download it doesn't have the full file yet, and a blank file is uploaded to my Drive instead.
I want to find a way so that when fileHelper.download(url) is called, the file is automatically uploaded to my Drive.
I also don't know whether there is a way to create a read stream that goes directly from the download function to the upload function, so I can avoid having to save the file locally before uploading it.
I believe your goal is as follows.
You want to upload a file retrieved from a URL to Google Drive.
When you download the file from the URL, you want to upload it to Google Drive without creating a local file.
You want to achieve this using googleapis with Node.js.
You have already been able to upload a file using the Drive API.
For this, how about this answer?
Modification points:
In the download function, the retrieved buffer is converted to a stream, and that stream is returned.
In the uploadFile function, the retrieved stream is used for uploading.
When the file ID is retrieved from the Drive API response, please use file.data.id instead of file.id.
With the above modifications, the file downloaded from the URL can be uploaded to Google Drive without creating a local file.
Modified script:
Please modify your script as follows.
download()
// assumes the "request" package: const request = require("request");
const download = function (url) {
  return new Promise(function (resolve, reject) {
    request(
      {
        method: "GET",
        url: url,
        encoding: null, // return the body as a Buffer
      },
      (err, res, body) => {
        if (err || res.statusCode != 200) {
          reject(err || new Error("Request failed with status " + res.statusCode));
          return;
        }
        // Wrap the downloaded buffer in a PassThrough stream so it can be piped.
        const stream = require("stream");
        const bs = new stream.PassThrough();
        bs.end(body);
        resolve(bs);
      }
    );
  });
};
uploadFile()
function uploadFile(data) { // <--- Modified
  fs.readFile("drive_credentials.json", (err, content) => {
    if (err) return console.log("Error loading client secret file:", err);
    authorize(JSON.parse(content), function (auth) {
      const drive = google.drive({ version: "v3", auth });
      const fileMetadata = {
        name: "testphoto.png",
      };
      const media = {
        mimeType: "image/png",
        body: data, // <--- Modified
      };
      drive.files.create(
        {
          resource: fileMetadata,
          media: media,
          fields: "id",
        },
        (err, file) => {
          if (err) {
            console.error(err);
          } else {
            console.log("File Id: ", file.data.id); // <--- Modified
          }
        }
      );
    });
  });
}
For testing
For example, when the above scripts are tested, how about the following script?
async function run() {
  const url = "###";
  const data = await fileHelper.download(url);
  googleDriveHelper.uploadFile(data);
}
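The original does not show how run() is invoked; calling it with basic error handling might look like this:

run().catch((err) => console.error('Download or upload failed:', err));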
References:
Class: stream.PassThrough
google-api-nodejs-client

Read stream not firing / catching errors

I am trying to create a read stream to use Cloudinary's upload stream function, and I am also using resumable.js to chunk the initial file. The stitching itself works perfectly fine (the whole file gets written correctly), but the read stream / Cloudinary upload function does not even seem to fire and fails silently.
router.post("/upload", (req, res, next) => {
console.log("the params are.. ", req.body);
resumable.post(req, function(
status,
filename,
original_filename,
identifier
) {
if (status === "done") {
let timestamp = new Date().getTime().toString();
//stich the chunks
var s = fs.createWriteStream(timestamp + filename);
resumable.write(identifier, s);
var upload_stream = cloudinary.uploader.upload_stream(
{ tags: "basic_sample" },
function(err, image) {
console.log();
console.log("** Stream Upload");
if (err) {
console.warn(err);
}
console.log("* Same image, uploaded via stream");
console.log("* " + image.public_id);
console.log("* " + image.url);
waitForAllUploads(timestamp + filename, err, image);
}
);
fs.createReadStream(timestamp + filename)
.pipe(upload_stream)
.on("error", err => {
console.log(err);
});
s.on("finish", function() {
// Stream upload
console.log("ive finished...");
// delete chunks
setTimeout(() => {
resumable.clean(identifier);
}, 1000);
});
}
res.send(status);
});
});
Here are the resources for what I am using:
https://github.com/cloudinary/cloudinary_npm/blob/master/samples/basic/basic.js
https://github.com/mrawdon/resumable-node
fs.createReadStream(timestamp + filename) accepts the file path, but it looks like you are passing the timestamp as well. Also, waitForAllUploads is not defined. You can try the following code, using just Node and Cloudinary, to test it out.
var upload_stream = cloudinary.uploader.upload_stream({ tags: 'basic_sample' }, function(err, image) {
  console.log("** Stream Upload");
  if (err) { console.warn(err); }
  console.log("* " + image.url);
});

var file_reader = fs.createReadStream('<file path>').pipe(upload_stream);
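If the silent failure comes from the read stream opening before resumable.write has finished stitching the file, one possible adjustment (a sketch, not from the original answer, reusing the question's variable names) is to start the read/upload inside the write stream's finish handler:

// inside the `status === "done"` branch of the route
var s = fs.createWriteStream(timestamp + filename);
s.on("finish", function () {
  // the stitched file is complete here, so it is safe to read it back
  var upload_stream = cloudinary.uploader.upload_stream(
    { tags: "basic_sample" },
    function (err, image) {
      if (err) return console.warn("Upload error:", err);
      console.log("Uploaded:", image.url);
    }
  );
  fs.createReadStream(timestamp + filename)
    .pipe(upload_stream)
    .on("error", function (err) {
      console.log(err);
    });
  // clean up the chunks once the upload has been started
  setTimeout(function () {
    resumable.clean(identifier);
  }, 1000);
});
resumable.write(identifier, s);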

Save images on Google Storage in JavaScript

My goal is to store an image on S3 or Google Storage and save the link to the database. How can I do it? Is there some free solution?
Can someone link me a code sample for doing that?
I have never used Google Storage or S3 before.
I pick an image like that:
handleImage = e => {
  e.preventDefault();
  let reader = new FileReader();
  let file = e.target.files[0];
  reader.onloadend = () => {
    this.setState({
      file: file,
      image: reader.result
    });
  }
  reader.readAsDataURL(file);
  this.setState({ imgchange: true })
}
And then I send it to the server:
this.props.editUser({
  img: this.state.image,
})
My server is written in Node.js.
I've done this exact thing before, though I'm not claiming I had the most straightforward way.
1) I sent the image from the client side to the server as a base64 image, 2) created a buffer from it, and 3) used ImageMagick to stream it to my Google Cloud bucket. Lastly, 4) I stored the link to the Google Cloud bucket on the relevant object in the database as imgLink (or what have you) so it can show in the front-end application.
Some important things to require at the top:
var gm = require('gm').subClass({ imageMagick: true });
var gcs = require('@google-cloud/storage')({
  keyFilename: sails.config.gcloud.keyFileName,
  projectId: sails.config.gcloud.projectId
});
var bucket = gcs.bucket(sails.config.gcloud.bucketname);
Step 1 - Sending base64 image to backend service and decoding it
imageControllerFirstHitAtEndpoint: function (req, res) {
  PictureService.uploadPictureCDNReturnLink(req.body.picture, function (err, imageLink) {
    if (err) {
      // Handle error...
    }
    // Success, save that imageLink to whatever db object you want
  });
}
Step 2 and 3 - Create buffer with base64 data and Stream it to Google Cloud Bucket
// Step 2: create a buffer from the base64 data
uploadPictureCDNReturnLink: function(picDataInBase64, cb) {
  var imageBuffer = PictureService.bufferFromBase64(picDataInBase64);
  var file = bucket.file('cool-file-name');

  // Step 3: stream it to where it needs to go
  gm(imageBuffer)
    .stream()
    .pipe(file.createWriteStream())
    .on('finish', function() {
      file.getMetadata(function(err, metadata) {
        if (err) {
          console.log("Error getting metadata from google cloud", err);
          return cb(err);
        }
        cb(null, metadata.mediaLink);
      });
    })
    .on('error', function(err) {
      console.log("Got an error uploading to Cloud Storage:", err);
      cb(err);
    });
}
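The PictureService.bufferFromBase64 helper is referenced above but not shown; a minimal sketch of what it might look like (the data-URI prefix handling is an assumption):

// hypothetical helper: turn a base64 data URI (or raw base64 string) into a Buffer
bufferFromBase64: function (base64String) {
  // strip a "data:image/png;base64," style prefix if present
  var raw = base64String.replace(/^data:image\/\w+;base64,/, '');
  return Buffer.from(raw, 'base64');
}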
Step 4 - Save that imageLink to wherever you want
Won't spell this out totally for you, not that hard. Something kinda like:
Organization.findOne(req.body.orgID).exec(function (err, organization) {
  if (!organization) {
    return res.json(400, { error: "No organization with id: " + req.param('id') });
  }
  if (err) {
    return res.json(400, err);
  }
  organization.pictureLink = imageLink;
  organization.save(function (err) {
    return res.json(organization);
  });
});
Hope that helps! Should give you an idea of one way to do it.
P.S. A lot of that stuff might be using Sails-like Node.js conventions; Sails is my backend framework of choice.

Ionic Upload Video to S3

I'm attempting to use this to upload videos
http://market.ionic.io/plugins/image-upload
I know the file paths of the videos (they're on cameras I can access via HTTP), and I'm trying to work out how to use something like this to get these videos onto S3.
I get that I can't pass the file path to this function, and that it needs to be an actual file as per the directive.
How do I copy a large video file into a JavaScript variable to upload as a file?
var imageUploader = new ImageUploader();

scene.videoFiles.forEach(function(videoFile) {
  imageUploader.push(videoFile, function (data) {
    console.log('File uploaded Successfully', videoFile, data);
    $scope.uploadUri = data.url;
    $scope.$digest();
  });
});
You can use $cordovaFile and the AWS JavaScript SDK to upload a file (video) to an AWS S3 bucket. This is sample code for uploading a video to an S3 bucket.
var uploadFile = function (file_name, file_path) {
  var deferred = $q.defer();
  $cordovaFile.readAsArrayBuffer(file_name, file_path)
    .then(function (success) {
      AWS.config.region = 'eu-west-1';
      AWS.config.update({
        accessKeyId: 'ACCESS-KEY',
        secretAccessKey: 'SECRET-KEY'
      });
      var bucket = new AWS.S3({
        params: {
          Bucket: 'Bucket-NAME'
        }
      });
      var params = {
        Key: "uploads/" + file_name,
        Body: success
      };
      bucket.upload(params).on('httpUploadProgress', function(evt) {
        $scope.uploading = true;
        $scope.progress = parseInt((evt.loaded * 100) / evt.total);
        console.log("Uploaded :: " + $scope.progress);
        $scope.$apply();
      }).send(function (err, data) {
        $scope.uploading = false;
        $scope.$apply();
        deferred.resolve(data);
      });
      $scope.i++;
    }, function (error) {
      deferred.reject(error);
    });
  return deferred.promise;
};
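A usage sketch (the variable names and surrounding controller are assumptions, not part of the original answer):

// names follow the answer's uploadFile(file_name, file_path) signature; values are illustrative
uploadFile(videoName, videoPath)
  .then(function (data) {
    console.log('Uploaded to S3:', data && data.Location);
  })
  .catch(function (err) {
    console.error('Upload failed:', err);
  });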

Issues with streams and S3 upload

I am having issues with a zero-byte stream. I am resizing an image and uploading it as a stream to S3. If I pipe the output to the response, it displays correctly.
// Fetch remote file
var request = http.get('mybigfile.jpg', function(response) {
  // Setup IM convert
  var convert = spawn('convert', ['-', '-strip', '-thumbnail', 600, '-']);
  // Pipe file stream to it
  response.pipe(convert.stdin);
  // Pipe result to browser - works fine
  //convert.stdout.pipe(res);
  // S3 requires headers
  var headers = {
    'content-type': response.headers['content-type'],
    'x-amz-acl': 'public-read'
  };
  // Upload to S3
  var aws = aws2js.load('s3', aws.key, aws.secret);
  aws.setBucket(aws.bucket);
  aws.putStream('thumb.jpg', convert.stdout, false, headers, function(err) {
    if (err) {
      return console.error('Error storing:', err.toString());
    } else {
      // No errors - this shows - but file is 0kb
      console.log(path + ' uploaded to S3');
    }
  });
});
I have seen notes about streams not working with S3 due to the content-length requirement. I am trying buffers, but no success with that so far.
Well no go on streams - I guess I can use pause-stream or multipart to technically achieve this but otherwise I don't think it's possible. I ended up using a buffer.
...
// Pipe file stream to it
response.pipe(convert.stdin);

// Save to buffer
var bufs = [];
convert.stdout.on('data', function(chunk) {
  bufs.push(chunk);
});
convert.stdout.on('end', function() {
  var buffer = Buffer.concat(bufs);
  // S3 requires headers
  ...
  aws.putBuffer(path, buffer, false, headers, function(err) {
  ...
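As a side note, the official aws-sdk package's managed upload (s3.upload) accepts a stream Body without a pre-computed content length, so the same pipeline could stay fully streaming. A sketch under that assumption (bucket and key names are illustrative):

var AWS = require('aws-sdk');
var s3 = new AWS.S3();

// convert.stdout is the ImageMagick output stream from the snippet above
s3.upload({
  Bucket: 'my-bucket',   // illustrative
  Key: 'thumb.jpg',
  Body: convert.stdout,  // a readable stream; the SDK handles multipart internally
  ContentType: response.headers['content-type'],
  ACL: 'public-read'
}, function (err, data) {
  if (err) return console.error('Error storing:', err);
  console.log('Uploaded to', data.Location);
});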
