Read props of a Promise of a stream in Node.js

Basically, what I want to achieve is to check in a middleware whether an uploaded file has the correct image type (PNG, for example). This is what I have come up with so far:
export const fileCheckMiddleware = (req, res, next) => {
  const acceptedImageTypes = ["image/gif", "image/jpeg", "image/png"];
  const oldWrite = res.write;
  const oldEnd = res.end;
  const chunks = [];
  res.write = (...restArgs) => {
    chunks.push(Buffer.from(restArgs[0]));
    oldWrite.apply(res, restArgs);
  };
  res.end = async (...restArgs) => {
    if (restArgs[0]) {
      chunks.push(Buffer.from(restArgs[0]));
    }
    const body = Buffer.concat(chunks).toString("utf8");
    try {
      let parsedBody = {};
      try {
        parsedBody = JSON.parse(body);
      } catch (err) {
        parsedBody = { data: { unparsedBody: body } };
      }
      const { variables } = req.body;
      console.log("\x1b[1m%s\x1b[0m", "LOG variables", variables.file);
      if (variables.file) {
        console.log("\x1b[1m%s\x1b[0m", "LOG type", typeof variables.file);
      }
    } catch (err) {}
    oldEnd.apply(res, restArgs);
  };
  next();
};
The logged type of variables.file is object, and the result of the console.log is this:
LOG variables Promise {
  { filename: 'trump.jpeg',
    mimetype: 'image/jpeg',
    encoding: '7bit',
    createReadStream: [Function: createReadStream] } }
So how can I access the mimetype here? I tried to map over the keys, variables.file["Promise"],...

Promise is not a key of variables.file; it's the type of variables.file. Your middleware code starts executing as soon as the HTTP request starts, but the file is received asynchronously, so you have to do something like:
variables.file.then(file => {
  // Do whatever you want with the file
  next();
});
Or declare the surrounding function as async and do this:
const file = await variables.file;
// Do whatever you want with the file
next();
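For example, a minimal version of the whole middleware using async/await might look like this (a sketch: it assumes the resolved value has the { filename, mimetype, encoding, createReadStream } shape you logged, and that req.body.variables is already populated when your middleware runs):

export const fileCheckMiddleware = async (req, res, next) => {
  const acceptedImageTypes = ["image/gif", "image/jpeg", "image/png"];
  try {
    // variables.file is a Promise; await it to get the upload descriptor.
    const { variables } = req.body;
    const file = await variables.file;
    if (!acceptedImageTypes.includes(file.mimetype)) {
      return res.status(400).json({ error: `Unsupported file type: ${file.mimetype}` });
    }
    return next();
  } catch (err) {
    return next(err);
  }
};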

Upload byte array from axios to Node server

Background
The JavaScript library for Microsoft Office add-ins allows you to get the raw content of the DOCX file through the getFileAsync() API, which returns a slice of up to 4MB in one go. You keep calling the function using a sliding-window approach till you have read the entire content. I need to upload these slices to the server and then join them back to recreate the original DOCX file.
My attempt
I'm using axios on the client side and the busboy-based express-chunked-file-upload middleware on my Node server. As I call getFileAsync recursively, I get a raw array of bytes that I then convert to a Blob and append to FormData before posting it to the Node server. The whole thing works and I get the slice on the server. However, the chunk that gets written to disk on the server is much larger than the blob I uploaded, typically about three times as large, so it is obviously not getting what I sent.
My suspicion is that this may have to do with stream encoding, but the node middleware does not expose any options to set encoding.
Here is the current state of code:
Client-side
public sendActiveDocument(uploadAs: string, sliceSize: number): Promise<boolean> {
  return new Promise<boolean>((resolve) => {
    Office.context.document.getFileAsync(Office.FileType.Compressed,
      { sliceSize: sliceSize },
      async (result) => {
        if (result.status == Office.AsyncResultStatus.Succeeded) {
          // Get the File object from the result.
          const myFile = result.value;
          const state = {
            file: myFile,
            filename: uploadAs,
            counter: 0,
            sliceCount: myFile.sliceCount,
            chunkSize: sliceSize
          } as getFileState;
          console.log("Getting file of " + myFile.size + " bytes");
          const hash = makeId(12)
          this.getSlice(state, hash).then(() => resolve(true))
        } else {
          resolve(false)
        }
      })
  })
}

private async getSlice(state: getFileState, fileHash: string): Promise<boolean> {
  const result = await this.getSliceAsyncPromise(state.file, state.counter)
  if (result.status == Office.AsyncResultStatus.Succeeded) {
    const data = result.value.data;
    if (data) {
      const formData = new FormData();
      formData.append("file", new Blob([data]), state.filename);
      const boundary = makeId(12);
      const start = state.counter * state.chunkSize
      const end = (state.counter + 1) * state.chunkSize
      const total = state.file.size
      return await Axios.post('/upload', formData, {
        headers: {
          "Content-Type": `multipart/form-data; boundary=${boundary}`,
          "file-chunk-id": fileHash,
          "file-chunk-size": state.chunkSize,
          "Content-Range": 'bytes ' + start + '-' + end + '/' + total,
        },
      }).then(async res => {
        if (res.status === 200) {
          state.counter++;
          if (state.counter < state.sliceCount) {
            return await this.getSlice(state, fileHash);
          }
          else {
            this.closeFile(state);
            return true
          }
        }
        else {
          return false
        }
      }).catch(err => {
        console.log(err)
        this.closeFile(state)
        return false
      })
    } else {
      return false
    }
  }
  else {
    console.log(result.status);
    return false
  }
}

private getSliceAsyncPromise(file: Office.File, sliceNumber: number): Promise<Office.AsyncResult<Office.Slice>> {
  return new Promise(function (resolve) {
    file.getSliceAsync(sliceNumber, result => resolve(result))
  })
}
Server-side
This code comes entirely from the npm package (link above), so I'm not supposed to change anything in it, but it's included here for reference:
makeMiddleware = () => {
  return (req, res, next) => {
    const busboy = new Busboy({ headers: req.headers });
    busboy.on('file', (fieldName, file, filename, _0, _1) => {
      if (this.fileField !== fieldName) { // Current field is not handled.
        return next();
      }
      const chunkSize = req.headers[this.chunkSizeHeader] || 500000; // Default: 500Kb.
      const chunkId = req.headers[this.chunkIdHeader] || 'unique-file-id'; // If not specified, will reuse same chunk id.
      // NOTE: Using the same chunk id for multiple file uploads in parallel will corrupt the result.
      const contentRangeHeader = req.headers['content-range'];
      let contentRange;
      const errorMessage = util.format(
        'Invalid Content-Range header: %s', contentRangeHeader
      );
      try {
        contentRange = parse(contentRangeHeader);
      } catch (err) {
        return next(new Error(errorMessage));
      }
      if (!contentRange) {
        return next(new Error(errorMessage));
      }
      const part = contentRange.start / chunkSize;
      const partFilename = util.format('%i.part', part);
      const tmpDir = util.format('/tmp/%s', chunkId);
      this._makeSureDirExists(tmpDir);
      const partPath = path.join(tmpDir, partFilename);
      const writableStream = fs.createWriteStream(partPath);
      file.pipe(writableStream);
      file.on('end', () => {
        req.filePart = part;
        if (this._isLastPart(contentRange)) {
          req.isLastPart = true;
          this._buildOriginalFile(chunkId, chunkSize, contentRange, filename).then(() => {
            next();
          }).catch(_ => {
            const errorMessage = 'Failed merging parts.';
            next(new Error(errorMessage));
          });
        } else {
          req.isLastPart = false;
          next();
        }
      });
    });
    req.pipe(busboy);
  };
}
Update
So it looks like I have found the problem, at least. busboy appears to be writing my array of bytes as text in the output file: I get 80,75,3,4,20,0,6,0,8,0,0,0,33,0,44,25 (as text) when I upload the array of bytes [80,75,3,4,20,0,6,0,8,0,0,0,33,0,44,25]. Now I need to figure out how to force it to write it as a binary stream.
Figured it out. Just in case it helps anyone: there was no problem with busboy or office.js or axios. I just had to convert the incoming chunk of data to a Uint8Array before creating a blob from it. So instead of:
formData.append("file", new Blob([data]), state.filename);
I now create the blob like this:
const blob = new Blob([ new Uint8Array(data) ])
formData.append("file", blob, state.filename);
And it worked like a charm.
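As far as I can tell, the reason is that a Blob part that isn't an ArrayBuffer, a typed array, another Blob, or a string gets stringified, so a plain array of numbers turns into comma-separated text. A quick illustration:

const data = [80, 75, 3, 4];

const asText = new Blob([data]);                   // the array is stringified to "80,75,3,4"
const asBinary = new Blob([new Uint8Array(data)]); // raw bytes

console.log(asText.size);   // 9 (nine characters of text)
console.log(asBinary.size); // 4 (four bytes)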

Forward multipart/form-data request from one GraphQL API to another GraphQL API

I'm uploading files from the browser via a multipart request to a GraphQL API, which is built on graphql-yoga, which in turn is built on express.
Now I want to forward this exact same request body to another GraphQL-API.
const fetch = require('node-fetch');
async passThrough(args, opts) {
  const { body, getRawBody, headers, method } = opts.request;
  var rawBody;
  if (body.files && body.files.length) {
    rawBody = await getRawBody;
  } else {
    rawBody = typeof body == 'string' ? body : JSON.stringify(body)
  }
  let options = {
    body: rawBody,
    method, headers
  };
  var res = await fetch(otherApiUrl, options).then((res) => {
    return res.json();
  });
  return res;
}
In this function I get the body as an object, but it includes "files" as promises which I can't simply forward (I couldn't find anything to do that with). So I tried to get the raw body through an express middleware and access it like above with await getRawBody.
function getRawBody(req, res, next) {
  req.getRawBody = new Promise(resolve => {
    var buf = '';
    req.on('data', x => buf += x);
    req.on('end', () => {
      resolve(buf);
    });
  });
  next();
}
server.express.use(getRawBody);
It passes the request to the other API, but the files are no longer valid JPEGs. I found out that the uploaded file is shifted some bytes from the original file. What might I be doing wrong?
I found a solution here and adapted the function to get the raw body; now the file contents are no longer shifted on the target host. The problem with the first version was that concatenating the chunks into a string decodes the binary data as UTF-8 and re-encodes it, which corrupts byte sequences that are not valid UTF-8; keeping the chunks as Buffers avoids this.
const concatStream = require('concat-stream');

function getRawBody(req, res, next) {
  req.getRawBody = new Promise(resolve => {
    req.pipe(concatStream(function (data) {
      resolve(data);
    }));
  });
  next();
}

How to read/write to a JSON file in node.js

I am fairly new to node.js and I am wondering how (or even if) I can read and write to a JSON file. I am trying to create an accessible punishment history.
Ideally I would want to be able to create something along the lines of this:
{
  "punishments": {
    "users": {
      "<example user who has a punishment history>": {
        "punishment-1567346": {
          "punishment-id": "1567346",
          "punishment-type": "mute",
          "punishment-reason": "<reason>"
        },
        "punishment-1567347": {
          "punishment-id": "1567347",
          "punishment-type": "ban",
          "punishment-reason": "<reason>"
        }
      }
    }
  }
}
Then I would have a way to access the formatted punishment history. I genuinely have no clue where to start.
You can use the Node.js built-in module fs to do read/write operations.
Step #1 - Import fs
const fs = require('fs');
Step #2 - Read the file
let rawdata = fs.readFileSync('punishmenthistory.json');
let punishments = JSON.parse(rawdata);
console.log(punishments);
Now you can use the punishments variable to check the data inside the JSON file. You can also change the data, but for now it only resides inside the variable.
Step #3 - Write to the File
let data = JSON.stringify(punishments);
fs.writeFileSync('punishmenthistory.json', data);
Full code:
const fs = require('fs');
let rawdata = fs.readFileSync('punishmenthistory.json');
let punishments = JSON.parse(rawdata);
console.log(punishments);
let data = JSON.stringify(punishments);
fs.writeFileSync('punishmenthistory.json', data);
References:
https://stackabuse.com/reading-and-writing-json-files-with-node-js/
Use the Node.js File System module: https://nodejs.org/dist/latest-v14.x/docs/api/fs.html.
Here I have used the writeFileSync API to write to the file and readFileSync to read from it. Also, when writing, don't forget to JSON.stringify(data), because you are writing the data to a JSON file.
const fs = require("fs");
const path = require("path");
// Write Data
const data = {
"punishments": {
"users": {
"<example user who has a punishment history>": {
"punishment-1567346": {
"punishment-id": "1567346",
"punishment-type": "mute",
"punishment-reason": "<reason>"
},
"punishment-1567347": {
"punishment-id": "1567347",
"punishment-type": "ban",
"punishment-reason": "<reason>"
}
}
}
}
};
fs.writeFileSync(path.join(__dirname, "outputfilepath", "outputfile.json"), JSON.stringify(data), "utf8");
// Read data
const rData = fs.readFileSync(path.join(__dirname, "outputfilepath", "outputfile.json"), "utf8");
const jsonData = JSON.parse(rData);
Here is the working example,
https://repl.it/repls/OutrageousInbornBruteforceprogramming#index.js
You can do something like this for reading:
const fs = require('fs')

function jsonReader(filePath, cb) {
  fs.readFile(filePath, (err, fileData) => {
    if (err) {
      return cb && cb(err)
    }
    try {
      const object = JSON.parse(fileData)
      return cb && cb(null, object)
    } catch (err) {
      return cb && cb(err)
    }
  })
}

jsonReader('./customer.json', (err, customer) => {
  if (err) {
    console.log(err)
    return
  }
  console.log(customer.address) // => "Infinity Loop Drive"
})
And like this for writing:
const fs = require('fs')

const customer = {
  name: "Newbie Co.",
  order_count: 0,
  address: "Po Box City",
}

const jsonString = JSON.stringify(customer)
fs.writeFile('./newCustomer.json', jsonString, err => {
  if (err) {
    console.log('Error writing file', err)
  } else {
    console.log('Successfully wrote file')
  }
})
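If your Node.js version ships fs/promises (v14+), the same read-modify-write cycle also works with async/await. A sketch for the punishment history above; the file name and the addPunishment helper are just illustrative:

const fs = require('fs/promises');

async function addPunishment(userId, punishment) {
  // Read and parse the existing history.
  const raw = await fs.readFile('./punishmenthistory.json', 'utf8');
  const data = JSON.parse(raw);

  // Add the new punishment under the user's entry.
  const users = data.punishments.users;
  users[userId] = users[userId] || {};
  users[userId][`punishment-${punishment["punishment-id"]}`] = punishment;

  // Write the updated history back, pretty-printed.
  await fs.writeFile('./punishmenthistory.json', JSON.stringify(data, null, 2));
}

addPunishment('<example user>', {
  "punishment-id": "1567348",
  "punishment-type": "kick",
  "punishment-reason": "<reason>"
}).catch(console.error);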

Multi-file upload with skipper-better-s3 and Sails.js returns the same key

As the title says, I am currently using Sails.js + skipper-better-s3 for S3 uploads. I started with uploading one file, which worked great. Then a change request required multi-file upload at once, so I added a for loop, but with this all keys end up the same, and only one file is uploaded: the last uploaded file, but with the first upload's filename.
I read some articles where people say the problem is that the for loop is synchronous while the file upload is asynchronous, and that the solution is recursion, which I tried too, but no luck; the same thing happens.
My recursive code is below:
s3_upload_multi: async (req, res) => {
  const generatePath = (rootPath, fieldName) => {
    let path;
    // This is just a switch statement checking which fieldName is provided;
    // the value of path depends on it. The other two variables just check
    // whether the upload content type is correct.
    return { path };
  };

  const processUpload = async ({
    fieldName,
    awsOp,
    fileExtension,
    rootPath,
    fileName,
  }) => {
    return new Promise(function (resolve, reject) {
      req.file(fieldName).upload(awsOp, async (err, filesUploaded) => {
        if (err) reject(err);
        const filesUploadedF = filesUploaded[0]; // F = first file
        const response = {
          status: true,
          errCode: 200,
          msg: 'OK',
          response: {
            url: filesUploadedF.extra.Location,
            size: filesUploadedF.size,
            type: fileExtension,
            filename: filesUploadedF.filename,
            key: filesUploadedF.extra.Key,
            field: fieldName,
          }
        };
        resolve(response);
      });
    });
  }

  const process_recur = async (files, fieldName) => {
    if (files.length <= 0) return;
    const fileUpload = files[0].stream;
    const rootPath = `${sails.config.aws.upload.path.root}`;
    const fileCType = fileUpload.headers['content-type'];
    // console.log(fileCType, 'fileCType');
    const { path } = generatePath(rootPath, fieldName);
    const fileName = fileUpload.filename;
    const fileExtension = fileUpload.filename.split('.').pop();
    const genRan = await UtilsService.genRan(8);
    const fullPath = `${path}${genRan}-${fileName}`;
    const awsOp = {
      adapter: require('skipper-better-s3'),
      key: sails.config.aws.access_key,
      secret: sails.config.aws.secret_key,
      saveAs: fullPath,
      bucket: sails.config.aws.bucket,
      s3params: {
        ACL: 'public-read'
      },
    };
    const config = {
      fieldName,
      awsOp,
      fileExtension,
      rootPath,
      fileName,
    }
    const proceed = await processUpload(config);
    files.shift();
    await process_recur(files, fieldName);
  };

  try {
    const fieldName = req._fileparser.upstreams[0].fieldName;
    const files = req.file(fieldName)._files;
    await process_recur(files, fieldName);
  } catch (e) {
    console.log(e, 'inside UploadService');
    return false;
  }
}
Below is my code using a for loop, which is quite similar to the above:
s3_upload_multi: async (req, res) => {
  const generatePath = (rootPath, fieldName) => {
    let path;
    // This is just a switch statement checking which fieldName is provided;
    // the value of path depends on it. The other two variables just check
    // whether the upload content type is correct.
    return { path };
  };

  const processUpload = async ({
    fieldName,
    awsOp,
    fileExtension,
    rootPath,
    fileName,
  }) => {
    return new Promise(function (resolve, reject) {
      req.file(fieldName).upload(awsOp, async (err, filesUploaded) => {
        if (err) reject(err);
        const filesUploadedF = filesUploaded[0]; // F = first file
        const response = {
          status: true,
          errCode: 200,
          msg: 'OK',
          response: {
            url: filesUploadedF.extra.Location,
            size: filesUploadedF.size,
            type: fileExtension,
            filename: filesUploadedF.filename,
            key: filesUploadedF.extra.Key,
            field: fieldName,
          }
        };
        resolve(response);
      });
    });
  }

  try {
    const fieldName = req._fileparser.upstreams[0].fieldName;
    const files = req.file(fieldName)._files;
    for (const file of files) {
      const fileUpload = file.stream;
      const rootPath = `${sails.config.aws.upload.path.root}`;
      const fileCType = fileUpload.headers['content-type'];
      // console.log(fileCType, 'fileCType');
      const fileName = fileUpload.filename;
      const { path } = generatePath(rootPath, fieldName);
      const fileExtension = fileUpload.filename.split('.').pop();
      // Using a variable here because if this is an image, a thumbnail will be
      // created with the same name as the original one.
      const genRan = await UtilsService.genRan(8);
      const fullPath = `${path}${genRan}-${fileName}`;
      const awsOp = {
        adapter: require('skipper-better-s3'),
        key: sails.config.aws.access_key,
        secret: sails.config.aws.secret_key,
        saveAs: fullPath,
        bucket: sails.config.aws.bucket,
        s3params: {
          ACL: 'public-read'
        },
      };
      const config = {
        fieldName,
        awsOp,
        fileExtension,
        rootPath,
        fileName,
      }
      const proceed = await processUpload(config);
      console.log(proceed, 'proceed');
    }
  } catch (e) {
    console.log(e, 'inside UploadService');
    return false;
  }
}
Where am I making a mistake that causes this behavior? I checked my path; it's correct, with the correct filename too, when I console.log it.
Thanks in advance for any suggestions and help.
Took me quite a lot of time to figure this out ages ago.
This is especially tricky because you are using skipper-better-s3, whose documentation is not as detailed as skipper's. Going back to the skipper documentation: the saveAs option doesn't only take a string but also a function, which you can use to get each file's filename and return the key as needed. So you don't actually need recursion or a for loop at all.
For example, using some of your code:
const awsOp = {
  adapter: require('skipper-better-s3'),
  key: sails.config.aws.access_key,
  secret: sails.config.aws.secret_key,
  saveAs: (__newFileStream, next) => {
    // generatePath is what you wrote.
    // __newFileStream.filename is the filename of each file before uploading.
    // The path is pretty much the S3 key, which includes your filename too.
    const { path } = generatePath(rootPath, __newFileStream.filename, fieldName);
    return next(undefined, path);
  },
  bucket: sails.config.aws.bucket,
  s3params: {
    ACL: 'public-read'
  },
};
Skipper documentation: https://www.npmjs.com/package/skipper#customizing-at-rest-filenames-for-uploads
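With saveAs as a function, a single upload call then handles every file in the field, each getting its own key, roughly like this (a sketch based on the code above):

req.file(fieldName).upload(awsOp, (err, filesUploaded) => {
  if (err) return res.serverError(err);
  // filesUploaded contains one entry per file, each with its own S3 key.
  return res.json(filesUploaded.map(f => f.extra.Key));
});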

How can I generate a `V1Job` object for the Kubernetes nodejs API client from a yaml file?

I've done this previously in Python using:
with open(path.join(path.dirname(__file__), "job.yaml")) as f:
    body = yaml.safe_load(f)
try:
    api_response = api_instance.create_namespaced_job(namespace, body)
Looking at the source of the Node.js API client:
public createNamespacedJob (namespace: string, body: V1Job, includeUninitialized?: boolean, pretty?: string, dryRun?: string, options: any = {}) : Promise<{ response: http.IncomingMessage; body: V1Job; }> {
How can I generate the V1Job?
I've tried the below, but get back a very verbose error message/response:
const k8s = require('@kubernetes/client-node');
const yaml = require('js-yaml');
const fs = require('fs');

const kc = new k8s.KubeConfig();
kc.loadFromDefault();

const k8sApi = kc.makeApiClient(k8s.BatchV1Api);

var namespace = {
  metadata: {
    name: 'test123',
  },
};

try {
  var job = yaml.safeLoad(fs.readFileSync('job.yaml', 'utf8'));
  k8sApi.createNamespacedJob(namespace, job).then(
    (response) => {
      console.log('Created namespace');
      console.log("Success!")
    },
    (err) => {
      console.log(err);
      console.log(job);
      console.log("Err")
    },
  );
} catch (e) {
  console.log(e);
}
V1Job seems to be an ordinary object, so the below worked.
The namespace had to be a string rather than an object:
const k8s = require('@kubernetes/client-node');
const yaml = require('js-yaml');
const fs = require('fs');

const kc = new k8s.KubeConfig();
kc.loadFromDefault();

const k8sApi = kc.makeApiClient(k8s.BatchV1Api);

try {
  var job = yaml.safeLoad(fs.readFileSync('job.yaml', 'utf8'));
  k8sApi.createNamespacedJob("default", job).then(
    (response) => {
      console.log("Success")
    },
    (err) => {
      console.log(e);
      process.exit(1);
    },
  );
} catch (e) {
  console.log(e);
  process.exit(1);
}
This is the same as chris-stryczynski's example with two slight modifications. Also, please note that chris-stryczynski's example with Node.js 8 results in (at least on my side):
(upon execution of k8sApi.createNamespacedJob)
TypeError [ERR_INVALID_ARG_TYPE]: The "original" argument must be of type function at promisify
This error does not occur with Node.js 12.
Here is the modified version:
const k8s = require('@kubernetes/client-node');
const yaml = require('js-yaml');
const fs = require('fs');

const kc = new k8s.KubeConfig();
kc.loadFromDefault(); // You might consider using kc.loadFromFile(...) here

const k8sApi = kc.makeApiClient(k8s.BatchV1Api);

try {
  var job = yaml.load(fs.readFileSync('job.yaml', 'utf8')); // Change #1: safeLoad -> load
  k8sApi.createNamespacedJob("default", job).then(
    (response) => {
      console.log("Success")
    },
    (err) => {
      console.log(err); // Change #2: e -> err
      process.exit(1);
    },
  );
} catch (e) {
  console.log(e);
  process.exit(1);
}
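For reference, a minimal job.yaml that code like this could load might look like the following (an illustrative sketch, not taken from the question):

apiVersion: batch/v1
kind: Job
metadata:
  name: test123
spec:
  template:
    spec:
      containers:
        - name: test
          image: busybox
          command: ["echo", "hello"]
      restartPolicy: Never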
