How to save Int16Array buffer to wav file node js - javascript

I am sending an Int16Array buffer to the server while processing audio:
var handleSuccess = function (stream) {
    globalStream = stream;
    input = context.createMediaStreamSource(stream);
    input.connect(processor);

    processor.onaudioprocess = function (e) {
        var left = e.inputBuffer.getChannelData(0);
        var left16 = convertFloat32ToInt16(left);
        socket.emit('binaryData', left16);
    };
};

navigator.mediaDevices.getUserMedia(constraints)
    .then(handleSuccess);
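The convertFloat32ToInt16 helper is not shown above; a minimal sketch of such a helper (an assumption, not part of the original question: it clamps each Float32 sample to [-1, 1] and scales it to the signed 16-bit range) could look like this:
// Hypothetical helper: clamp Float32 samples and scale them to Int16
function convertFloat32ToInt16(buffer) {
    var result = new Int16Array(buffer.length);
    for (var i = 0; i < buffer.length; i++) {
        var s = Math.max(-1, Math.min(1, buffer[i])); // clamp to [-1, 1]
        result[i] = s < 0 ? s * 0x8000 : s * 0x7FFF;  // scale to 16-bit range
    }
    return result;
}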
and on the server I am trying to save the file as follows:
client.on('start-audio', function (data) {
    stream = fs.createWriteStream('tesfile.wav');
});

client.on('end-audio', function (data) {
    if (stream) {
        stream.end();
    }
    stream = null;
});

client.on('binaryData', function (data) {
    if (stream !== null) {
        stream.write(data);
    }
});
But this is not working, so how do I save this array buffer as a WAV file?

The question's code tries to write the raw sample data directly to a file, which can't work because WAVE files need a header, and no header is written by simply creating a write stream with createWriteStream. You can check the header format here: "WAVE PCM soundfile format".
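For illustration only (not part of the original answer), this is roughly what the 44-byte PCM header contains; note that it includes the total data length, which is only known once recording has finished, so it can't simply be written up front by a plain write stream:
// Sketch of a hypothetical helper that builds a 44-byte PCM WAVE header.
// dataLength is the total number of sample bytes in the file.
function buildWavHeader(dataLength, sampleRate, channels, bitDepth) {
    const blockAlign = channels * bitDepth / 8;
    const header = Buffer.alloc(44);
    header.write('RIFF', 0);                           // ChunkID
    header.writeUInt32LE(36 + dataLength, 4);          // ChunkSize
    header.write('WAVE', 8);                           // Format
    header.write('fmt ', 12);                          // Subchunk1ID
    header.writeUInt32LE(16, 16);                      // Subchunk1Size (PCM)
    header.writeUInt16LE(1, 20);                       // AudioFormat: 1 = PCM
    header.writeUInt16LE(channels, 22);                // NumChannels
    header.writeUInt32LE(sampleRate, 24);              // SampleRate
    header.writeUInt32LE(sampleRate * blockAlign, 28); // ByteRate
    header.writeUInt16LE(blockAlign, 32);              // BlockAlign
    header.writeUInt16LE(bitDepth, 34);                // BitsPerSample
    header.write('data', 36);                          // Subchunk2ID
    header.writeUInt32LE(dataLength, 40);              // Subchunk2Size
    return header;
}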
The wav NPM package can handle the whole process of writing the data on the server. It has a FileWriter class that creates a stream which properly handles WAVE audio data and writes the header when the stream ends.
1. Create a WAVE FileWriter stream on start-audio event:
// Import the wav package FileWriter
const WavFileWriter = require('wav').FileWriter;
// uuidv4 is assumed to come from the uuid package
const { v4: uuidv4 } = require('uuid');
...
// Global FileWriter stream.
// It won't handle multiple client connections; it's just for demonstration
let outputFileStream;
// The UUID used for the filename that's being recorded
let id;

client.on('start-audio', function() {
    id = uuidv4();
    console.log(`start-audio:${id}`);

    // Create a FileWriter stream using the generated UUID as the filename
    outputFileStream = new WavFileWriter(`./audio/recs/${id}.wav`, {
        sampleRate: 16000,
        bitDepth: 16,
        channels: 1
    });
});
2. Use the stream we created to write audio data on binaryData event:
client.on('binaryData', function(data) {
    console.log(`binaryData:${id}, got ${data ? data.length : 0} bytes`);

    // Write the data directly to the WAVE file stream
    if (outputFileStream) {
        outputFileStream.write(data);
    }
});
3. End the stream when we receive end-audio event:
client.on('end-audio', function() {
    console.log(`end-audio:${id}`);

    // If there is a stream, end it.
    // This will properly handle writing the WAVE file header
    if (outputFileStream) {
        outputFileStream.end();
    }
    outputFileStream = null;
});
I have created a Github repository with this example which you can find here: https://github.com/clytras/node-wav-stream.
Keep in mind that handling data like this will run into problems, because in this code there is only one FileWriter stream variable shared by every client that connects. You should keep one stream per client, keyed by the client's session ID, so you can identify and end the stream that belongs to each connected client, as in the sketch below.
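A rough sketch of that idea (hypothetical, assuming a socket.io server where client.id identifies each connection):
// Sketch: one WAVE FileWriter per connected client, keyed by client.id
const WavFileWriter = require('wav').FileWriter;
const { v4: uuidv4 } = require('uuid');

const streams = new Map(); // client.id -> FileWriter

io.on('connection', function(client) {
    client.on('start-audio', function() {
        streams.set(client.id, new WavFileWriter(`./audio/recs/${uuidv4()}.wav`, {
            sampleRate: 16000,
            bitDepth: 16,
            channels: 1
        }));
    });

    client.on('binaryData', function(data) {
        const stream = streams.get(client.id);
        if (stream) stream.write(data);
    });

    client.on('end-audio', function() {
        const stream = streams.get(client.id);
        if (stream) stream.end();
        streams.delete(client.id);
    });
});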

Related

Not able to create seekable video blobs from mediarecorder using EBML.js - MediaRecorder API - Chrome

Using MediaRecorder, I am able to upload and append the video blobs on Azure, but the combined video is not seekable on download with the following code:
var chunks = [];
var mediaRecorder = new MediaRecorder(stream, 'video/x-matroska;codecs=vp8,opus');

mediaRecorder.ondataavailable = function(event) {
    if (event.data && event.data.size > 0) {
        chunks.push(event.data);
        appendBlockToAzure(chunks);
    }
};

mediaRecorder.start(10000);
I tried using EBML.js; with the following code I do get a seekable video file. However, this approach needs the whole file to be processed at the end, so the final file could be 1 GB in size, which would take a very long time to upload.
var chunks = [];
var mediaRecorder = new MediaRecorder(stream, 'video/x-matroska;codecs=vp8,opus');

mediaRecorder.ondataavailable = function(event) {
    if (event.data && event.data.size > 0) {
        chunks.push(event.data);

        if (mediaRecorder.state == "inactive") { // if the media recorder is stopped
            var combined = new Blob(chunks, { type: event.data.type });
            getSeekableBlob(combined, function (seekableBlob) {
                saveCombinedVideoToAzure(seekableBlob);
            });
        }
    }
};

mediaRecorder.start(10000);
That's the reason I want to upload simultaneously to Azure. If I use the following code, it logs unknown-tag warnings and then a length error, and the video file is not playable.
var seekablechunks = [];
var mediaRecorder = new MediaRecorder(stream, 'video/x-matroska;codecs=vp8,opus');

mediaRecorder.ondataavailable = function(event) {
    if (event.data && event.data.size > 0) {
        getSeekableBlob(event.data, function (seekableBlob) {
            seekablechunks.push(seekableBlob);
            saveCombinedVideoToAzure(seekablechunks);
        });
    }
};

mediaRecorder.start(10000);
Function 'getSeekableBlob':
function getSeekableBlob(inputBlob, callback) {
    // EBML.js copyrights go to: https://github.com/legokichi/ts-ebml
    if (typeof EBML === 'undefined') {
        throw new Error('Please link: https://www.webrtc-experiment.com/EBML.js');
    }

    var reader = new EBML.Reader();
    var decoder = new EBML.Decoder();
    var tools = EBML.tools;

    var fileReader = new FileReader();
    fileReader.onload = function (e) {
        var ebmlElms = decoder.decode(this.result);
        ebmlElms.forEach(function (element) {
            reader.read(element);
        });
        reader.stop();

        var refinedMetadataBuf = tools.makeMetadataSeekable(reader.metadatas, reader.duration, reader.cues);
        var body = this.result.slice(reader.metadataSize);

        var newBlob = new Blob([refinedMetadataBuf, body], {
            type: 'video/webm'
        });
        callback(newBlob);
    };
    fileReader.readAsArrayBuffer(inputBlob);
}
Is there a way to get seekable blobs and upload them to azure?
It's a challenge for an open-ended streaming source for media (for example MediaRecorder) to create a file with SeekHead elements in it. The Seek elements in a SeekHead element contain byte offsets to elements in the file.
MediaRecorder doesn't create segments or SeekHead elements as you have discovered. To do so it would need to be able to see the future to know how big future compressed video and audio elements will be in the file.
A good way for you to handle this problem might be to post-process your uploaded files on a server. You can use ts-ebml to do this in a streaming fashion on a server when a file is completely uploaded.
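A rough server-side sketch of that post-processing step, assuming the ts-ebml package is installed and exposes the same Decoder/Reader/tools API that the browser code above uses:
// Hypothetical post-processing step, run on the server once the upload is complete
const fs = require('fs');
const { Decoder, Reader, tools } = require('ts-ebml');

function makeWebmSeekable(inputPath, outputPath) {
    const webm = fs.readFileSync(inputPath);
    const decoder = new Decoder();
    const reader = new Reader();

    // Feed every decoded EBML element to the reader so it collects duration and cues
    decoder.decode(webm).forEach(function (element) { reader.read(element); });
    reader.stop();

    // Rebuild the metadata with SeekHead/Cues and prepend it to the original clusters
    const seekableMetadata = tools.makeMetadataSeekable(reader.metadatas, reader.duration, reader.cues);
    const body = webm.slice(reader.metadataSize);
    fs.writeFileSync(outputPath, Buffer.concat([Buffer.from(seekableMetadata), body]));
}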
It's possible, I suppose, to create JavaScript software in your browser that can transform the stream of data emitted by MediaRecorder so it's seekable, on the fly. To make your stream seekable you'd need to insert SeekHead elements every so often. You'd buffer up multiple seconds of the stream, then locate the Cluster elements in each buffer, then write a SeekHead element pointing to some of them. (Chrome's MediaRecorder outputs Clusters beginning with video key frames.) If you succeed in doing this you'll know a lot about Matroska / webm.
Suddenly, our face-on-camera webcam recorder component stopped saving the webm blob.
In the console there were warnings about {EBML_ID: "55b0", type: "unknown", ...} during the reader.read(element) calls, and then "Uncaught (in promise) Error: No schema entry found for unknown" in EBMLEncoder.js at the tools.makeMetadataSeekable(...) call.
Ignoring unknown elements from the decoder worked around the issue:
...
var ebmlElms = decoder.decode(this.result);
ebmlElms.forEach(function (element) {
    if (element.type !== 'unknown') {
        reader.read(element);
    }
});
reader.stop();
...
There is a related issue on the ts-ebml npm package, https://github.com/legokichi/ts-ebml/issues/33, with a similar workaround.

Why is nodeJs not reading entire binary file from disk?

I have a PDF file which I want to read into memory using NodeJS. Ideally I'd like to encode it using base64 for transferring it. But somehow the read function does not seem to read the full PDF file, which makes no sense to me. The original PDF was generated using pdfKit, and is ok and viewable using a PDF reader program.
The original file test.pdf is 90 kB on disk, but if I read it and write it back to disk, only 82 kB end up in the new file, and the resulting test-out.pdf is broken. The PDF viewer says:
Unable to open document. The pdf document is damaged.
The base64 encoding therefore does not work correctly either. I tested it using this web service. Does anyone know why this is happening, and how to resolve it?
I found this post already.
const fs = require('fs');

let buf = fs.readFileSync('test.pdf'); // returns the raw binary data as a Buffer
// buf = fs.readFileSync('test.pdf', {encoding: 'base64'}); // for the base64 encoded data
// ...transfer the base64 data...
fs.writeFileSync('test-out.pdf', buf); // should be a PDF again
EDIT MCVE:
const fs = require('fs');
const PDFDocument = require('pdfkit');

let filepath = 'output.pdf';

class PDF {
    constructor() {
        this.doc = new PDFDocument();
        this.setupdocument();
        this.doc.pipe(fs.createWriteStream(filepath));
    }

    setupdocument() {
        var pageNumber = 1;
        this.doc.on('pageAdded', () => {
            this.doc.text(++pageNumber, 0.5 * (this.doc.page.width - 100), 40, {width: 100, align: 'center'});
        });

        this.doc.moveDown();
        // draw some headline text
        this.doc.fontSize(25).text('Some Headline');
        this.doc.fontSize(15).text('Generated: ' + new Date().toUTCString());
        this.doc.moveDown();
        this.doc.font('Times-Roman', 11);
    }

    report(object) {
        this.doc.moveDown();
        this.doc
            .text(object.location + ' ' + object.table + ' ' + Date.now())
            .font('Times-Roman', 11)
            .moveDown()
            .text(object.name)
            .font('Times-Roman', 11);
        this.doc.end();

        let report = fs.readFileSync(filepath);
        return report;
    }
}

let pdf = new PDF();
let buf = pdf.report({location: 'athome', table: 'wood', name: 'Bob'});
fs.writeFileSync('outfile1.pdf', buf);
The encoding option for fs.readFileSync() is there for you to tell the function what encoding the file already uses, so the code reading the file knows how to interpret the data. It does not convert the data into that encoding.
In this case, your PDF is binary data, not base64, so telling it to convert from base64 into binary messes up the data.
You should not pass the encoding option at all; you will then get the raw binary buffer (which is what a PDF file is: raw binary). If you then want to convert that to base64 for some reason, you can do buf.toString('base64') on it. But that is not the file's native format, and if you write that converted data back out to disk, it won't be a valid PDF file.
To just read and write the same file out to a different filename, leave off the encoding option entirely:
const fs = require('fs');
let buf = fs.readFileSync('test.pdf'); // get raw buffer binary data
fs.writeFileSync('test-out.pdf', buf); // write out raw buffer binary data
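If base64 is needed for the transfer, a small sketch of the explicit round trip (encoding after the read and decoding before the write) would be:
const fs = require('fs');

const buf = fs.readFileSync('test.pdf');       // raw binary Buffer
const b64 = buf.toString('base64');            // encode for transfer
// ...transfer the base64 string...
const restored = Buffer.from(b64, 'base64');   // decode back to binary
fs.writeFileSync('test-out.pdf', restored);    // byte-for-byte copy of the original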
After a lot of searching I found this GitHub issue. The problem in my question seems to be the call to doc.end(), which does not wait for the stream to finish (the write stream's finish event). Therefore, as suggested in the GitHub issue, the following approaches work:
callback based:
const doc = new PDFDocument();
const writeStream = fs.createWriteStream('filename.pdf');

doc.pipe(writeStream);
doc.end();

writeStream.on('finish', function () {
    // do stuff with the PDF file
});
or promise based:
const stream = fs.createWriteStream(localFilePath);
doc.pipe(stream);
// .....
doc.end();

await new Promise<void>(resolve => {
    stream.on("finish", function() {
        resolve();
    });
});
Or, even nicer, instead of calling doc.end() directly, call the savePdfToFile function below:
function savePdfToFile(pdf: PDFKit.PDFDocument, fileName: string): Promise<void> {
    return new Promise<void>((resolve, reject) => {
        // To determine when the PDF has finished being written successfully
        // we need to confirm the following 2 conditions:
        //
        //   1. The write stream has been closed
        //   2. PDFDocument.end() was called synchronously without an error being thrown
        let pendingStepCount = 2;

        const stepFinished = () => {
            if (--pendingStepCount == 0) {
                resolve();
            }
        };

        const writeStream = fs.createWriteStream(fileName);
        writeStream.on('close', stepFinished);

        pdf.pipe(writeStream);
        pdf.end();

        stepFinished();
    });
}
This function should correctly handle the following situations:
PDF generated successfully
Error is thrown inside pdf.end() before write stream is closed
Error is thrown inside pdf.end() after write stream has been closed
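For example, the report flow from the question could then wait for the file before reading it back (a sketch, reusing filepath, fs and PDFDocument from the MCVE above; do not pipe the document elsewhere, since savePdfToFile creates its own write stream):
// Sketch: wait for savePdfToFile to resolve before reading the finished PDF
const doc = new PDFDocument();
doc.text('Some content');

savePdfToFile(doc, filepath).then(() => {
    const buf = fs.readFileSync(filepath); // safe now: the write stream has closed
    fs.writeFileSync('outfile1.pdf', buf);
});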

Upload a file stream to S3 without a file and from memory

I'm trying to create a csv from a string and upload it to my S3 bucket. I don't want to write a file. I want it all to be in memory.
I don't want to read from a file to get my stream. I would like to make a stream without a file. I would like something like the createReadStream method, but instead of a file, I would like to pass a string with my stream's contents.
var AWS = require('aws-sdk'),
    zlib = require('zlib'),
    fs = require('fs'),
    s3Stream = require('s3-upload-stream')(new AWS.S3());

// Set the client to be used for the upload.
AWS.config.loadFromPath('./config.json');

// Create the streams
var read = fs.createReadStream('/path/to/a/file');
var upload = s3Stream.upload({
    "Bucket": "bucket-name",
    "Key": "key-name"
});

// Handle errors.
upload.on('error', function (error) {
    console.log(error);
});

upload.on('part', function (details) {
    console.log(details);
});

upload.on('uploaded', function (details) {
    console.log(details);
});

read.pipe(upload);
You can create a Readable stream and push your string directly to it; that stream can then be consumed by your s3Stream upload instance.
const Readable = require('stream').Readable;

let data = 'this is your data';

let read = new Readable();
read.push(data); // Push your data string
read.push(null); // Signal that you're done writing

// Create the s3Stream upload instance and attach its listeners here
read.pipe(upload);
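On Node 12 and later, stream.Readable.from() offers a slightly more direct way to build the same stream (a sketch, assuming the same upload instance as in the question):
const { Readable } = require('stream');

const csv = 'id,name\n1,Alice\n2,Bob\n';   // the in-memory CSV contents
const read = Readable.from([csv]);         // readable stream over the string
read.pipe(upload);                         // 'upload' is the s3Stream.upload(...) instance from above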

How to create and upload a video to parse server?

I am able to upload an image file to S3 using Parse Server (by creating a Parse file from base64 image data and calling save() on the Parse file).
How can I do the same thing for a video file? I am doing this with the parse-server JS library in an Ionic 2 app with TypeScript. The code below worked for images.
let file = new Parse.File("thumbnail", { base64: imageData });
file.save().then(() => {
    // The file has been saved to Parse.
    console.log("File uploaded....");
}, (error) => {
    // The file either could not be read, or could not be saved to Parse.
    console.log("File upload failed.");
});
In the case of a video file, I have the file location received from the Cordova media capture callback. Please help me upload the video file.
Thank you
Here is my solution after days of research. It works for iPhone.
The important statement is this:
data = data.replace("quicktime", "mov");
var options = { limit: 1, duration: 30 };

navigator.device.capture.captureVideo(function(files) {
    // Success! The video data is here
    console.log("video file ready");

    var vFile = files[0];
    console.log(vFile.fullPath);
    // /private/var/mobile/Containers/Data/Application/7A0069EB-F864-438F-A685-A0DAE97F8B2D/tmp/capture-T0x144510b50.tmp.GfXOow/capturedvideo.MOV

    self.auctionvideo = vFile.fullPath; // localURL;
    console.log(self.auctionvideo);

    var fileReader = new FileReader();
    var file;

    fileReader.onload = function (readerEvt) {
        var data = fileReader.result;
        data = data.replace("quicktime", "mov");
        console.log(data);
        // data:video/quicktime;base64,AAAAFGZ0
        console.log(data.length);

        self.auctionvideo = data;
        self.videofile = {base64: data};
    };

    // fileReader.readAsDataURL(audioFile); // This will result in your problem.
    file = new window.File(vFile.name, vFile.localURL,
        vFile.type, vFile.lastModifiedDate, vFile.size);
    fileReader.readAsDataURL(file); // This will result in the solution.
    // fileReader.readAsBinaryString(file); // This will result in the solution.
},
function(error) {
},
options);
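With that base64 data URL stored in self.videofile, the video can then be saved the same way as the image in the question (a sketch, not part of the original answer; the file name is illustrative):
// Sketch: create and save the Parse file for the captured video
let videoFile = new Parse.File("video.mov", self.videofile); // self.videofile is { base64: data }
videoFile.save().then(() => {
    console.log("Video uploaded....");
}, (error) => {
    console.log("Video upload failed.");
});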

Sending audio data (base64) using $resource

I'm working on an app where I need to record audio using a microphone and send it to a backend app (tomcat server).
It seems that sending streams that are too big drives Angular crazy and freezes my browser.
To record my audio file, I use the native function RecorderWorkerFactory.getUserMedia(), which allows me to get a RecordBlob object.
After that, still in Angular, I extract the audio content as a base64-encoded string and send it to the backend app using $resource.
The backend app correctly receives and processes the data, but the callback of this call is never executed, because Firefox detects an infinite loop and freezes.
However, if I let it keep running, the page eventually refreshes after a very long time.
This is the code where I extract the audio content as a base64 string before sending it:
var blob = $scope.audio.recordBlob;
if (blob) {
    var reader = new FileReader();
    reader.readAsDataURL(blob);
    reader.onloadend = function() {
        $scope.audioContent = reader.result;
        $scope.sendMessage();
    };
}

$scope.sendMessage = function() {
    var outputStream = {
        "audio": $scope.audioContent
    };
    $scope.sendIM(outputStream);
};
Here I send outputStream via POST to the backend, and in the callback I call the loadData() function that reloads my view:
services.FileCreation = $resource(URI_SERVICE_CREATION, {}, {
    'post': urlEncodedFormPost
});

$scope.sendIM = function(outputStream) {
    services.FileCreation.post(angular.toJson(outputStream)).$promise.then(function(result) {
        $scope.loadData();
    });
};
And this is the Java code for the creation of the audio file:
private void createAudioFile(File file, byte[] content) throws IOException {
    FileOutputStream stream = null;
    try {
        stream = new FileOutputStream(file.getPath());
        IOUtils.write(content, stream);
    } catch (IOException e) {
        LOGGER.debug("creation failed");
    } finally {
        if (stream != null) {
            stream.flush();
            stream.close();
        }
    }
}
where content is the byte array decoded from the base64 string that was sent.
After some research I found that the infinite loop is in a native Angular function named shallowClearAndCopy(), which runs after the Java code has executed but just before the callback. In this function, the code apparently treats each character of the (base64-encoded) audio string as an object property and loops over them to delete them. This leads to a very long computation that Firefox considers an infinite loop.
function shallowClearAndCopy(src, dst) {
    dst = dst || {};

    angular.forEach(dst, function(value, key) {
        // This is where it freezes: dst contains all my base64 encoded data
        // and it iterates over each character of it (which is veeeeeery long!)
        delete dst[key];
    });

    for (var key in src) {
        if (src.hasOwnProperty(key) && !(key.charAt(0) === '$' && key.charAt(1) === '$')) {
            dst[key] = src[key];
        }
    }

    return dst;
}
Is it because of angularjs performance (and there is nothing else to be done) ?
Or am I missing something that creates an infinite loop ? Or is something wrong in my callback definition ?
Cheers!
I found the problem!
It was the angular.toJson(outputStream) call that transformed the object when there was no need to: $resource serializes the request body itself, so the pre-serialized JSON string ended up being copied character by character, which appears to be what shallowClearAndCopy was iterating over.
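So the fix was simply to pass the object itself and let $resource handle the serialization (a sketch based on the question's code):
$scope.sendIM = function(outputStream) {
    // Pass the plain object; $resource serializes the request body itself
    services.FileCreation.post(outputStream).$promise.then(function(result) {
        $scope.loadData();
    });
};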
