Scenario, as a user scrubs an audio file I want to display a new image in the background of audio player div. Audio files have different images, some have 8 images some have 42 and so on. Audio files have different lengths.
we have our length: 30000 ms.
we have our image count for audio file: 13.
we have our url: http://www.foostorage.com/api/image_1.jpg
const duration = 30000
const imageCount = 13
const url = 'http://www.foostorage.com/api/image_1.png'
function renderImage(percentage) {
const ab = Math.round((percentage / imageCount) * 100);
const fooImage = url.replace(/poster.*.png/, `poster_${ab}.jpg`);
return fooImage;
}
The percentage is coming from the scrub position, ex: 0.232 would be 23% out of 100.
Hopefully i explained this ok, feel free to let me know if I haven't
Try this..
const duration = 30000
const imageCount = 13
const url = 'http://www.foostorage.com/api/image_1.png'
function renderImage(percentage) {
const ab = Math.round((percentage * imageCount));
const fooImage = url.replace(/image.*.png/, `poster_${ab}.jpg`);
return fooImage;
}
// Assuming scrub scale 0-1
console.log(renderImage(0.1325));
console.log(renderImage(0.20));
console.log(renderImage(0.336));
console.log(renderImage(0.85));
console.log(renderImage(1));
Related
In the code below I want to:
save the first 5 seconds of the camera capture as frames
then each time round the draw() loop I want to draw the 1st frame on the canvas, then the 2nd one on the next draw loop, etc. until I reach the end and start back from the top.
However, this is not working and I believe it's because the imageData of each frame is an data:image/octet-stream;base64,.... String which I'm not sure how to draw on the canvas.
const FRAME_RATE = 10
const CAPTURE_DURATION = 5 // secs
let framesCaptured = []
let i = 0
function setup() {
frameRate(FRAME_RATE)
let video = createCapture(VIDEO)
video.hide()
createCanvas(640, 480)
saveFrames('frames', 'png', CAPTURE_DURATION, FRAME_RATE, (frames) => {
framesCaptured = frames.map(({ imageData }) => {
return imageData
})
})
}
function draw() {
if (i < framesCaptured.length) {
image(loadImage(framesCaptured[i]), 0, 0)
i = i === framesCaptured.length - 1 ? 0 : i+1
}
}
Btw, it's important that I have access to and loop through individual frame-images, because I will later be sending those to the server.
LoadImage can handle base64 encoded images, but you have to add "data:image/png;base64," in front of the string.
you could try loadImage("data:image/png;base64,"+framesCaptured[i])
( https://p5js.org/reference/#/p5/loadImage )
I'll show you my code
audio_music = new Audio();
var track = audioContext.createMediaElementSource(audio_music);
//Import music files from other sources into base64 form.
audio_music.src = "data:audio/ogg;base64,"+ data.music;
var splitter = audioContext.createChannelSplitter(6);
var merger = audioContext.createChannelMerger(2);
track.connect(splitter);
//omitted in addition to 0 and 1 due to repetition of the similar content
gainNode0 = audioContext.createGain(); gainNode0.gain.setValueAtTime((musicvolume*0.1), audioContext.currentTime);
gainNode1 = audioContext.createGain(); gainNode1.gain.setValueAtTime((musicvolume*0.1), audioContext.currentTime);
splitter.connect(gainNode0, 0);
splitter.connect(gainNode1, 1);
var pitchshift0 = new Tone.PitchShift(pitch);
var pitchshift1 = new Tone.PitchShift(pitch);
Tone.connect(gainNode0, pitchshift0);
Tone.connect(gainNode1, pitchshift1);
Tone.connect(pitchshift0, merger, 0, 0);
Tone.connect(pitchshift1, merger, 0, 1);
Tone.connect(merger, audioContext.destination);
I am not familiar with the use of audioContext and tone.js, so I don't know if I understand correctly, but my intention is to separate input sources with six channels and process them in the order of gain adjustment, pitch shift, and marge, respectively.
This will do everything else, but you can't change the value of the pitch shift during playback.
I want a way to function similar to the setValueAtTime used in GainNode in pitch shift.
What should I do?
You can change the pitch by setting the pitch parameter:
pitchshift0.pitch = -12 // Semitone to shift the pitch to.
If you want to set this at a specific time during playback, you can use the Transport class to schedule this:
Tone.Transport.schedule(() => pitchshift0.pitch = -12, time /* The transport time you want to schedule it at */);
I get a WAV file from user upload (basically a file input) and have to do some manipulation with that:
Validate is it's a valid .wav file
If user uploaded a stereo file, extract a single channel (left)
Add w fade out at the end (50 last samples of the file)
My first thought was hey, there's an api for that (web audio), so I did something similar to:
const source = audioContext.createBufferSource();
const splitter = audioContext.createChannelSplitter(audioBuffer.numberOfChannels);
const gainNode = audioContext.createGain();
source.buffer = audioBuffer;
source.connect(splitter);
gainNode.gain.linearRampToValueAtTime(0, audioBuffer.duration);
splitter.connect(gainNode, 0);
Which in my thinking is taking the first channel out of the source and adding linear fade out (not really on last 50 samples, but that's not a point for now).
But...
How do I extract the output of that into a file? I know how to play manipulated sound frontend side, but am I able to turn it back into a file?
So at some point I assumed there's no way to do that, so I came up with a different solution, which uses low level file manipulation, that goes as follows:
const audioContext = new AudioContext();
// const arrayBuffer = await toArrayBuffer(file);
const audioBuffer = await decodeAudio(audioContext, arrayBuffer);
const channels = 1;
const duration = audioBuffer.duration;
const rate = audioBuffer.sampleRate;
const length = Math.ceil(duration * rate * channels * 2 + 44);
const buffer = new ArrayBuffer(length);
const view = new DataView(buffer);
let position = 0;
let offset = 0;
const setUint16 = (data) => {
view.setUint16(position, data, true);
position += 2;
};
const setUint32 = (data) => {
view.setUint32(position, data, true);
position += 4;
};
setUint32(0x46464952); // RIFF
setUint32(length - 8); // file length
setUint32(0x45564157); // WAV
setUint32(0x20746d66); // fmt
setUint32(16); // data size
setUint16(1); // PCM
setUint16(channels);
setUint32(rate);
setUint32(rate * 16 * channels);
setUint16(channels * 2);
setUint16(16);
setUint32(0x61746164); // "data"
setUint32(length - position - 4);
const leftChannel = audioBuffer.getChannelData(0);
let sample;
console.log('left', leftChannel);
console.log('length', length);
while (position < length) {
sample = leftChannel[offset];
sample = sample < 0 ? sample * 0x8000 : sample * 0x7FFF;
view.setInt16(position, sample, true);
position += 2;
offset++;
}
console.log('buffer', buffer);
const blob = new Blob([buffer], { type: file.type });
but seems it has a lot of flows, output is distorted / has different sample rate and so on...
My question then would be:
How do I extract a file out of a web audio api, if that's even possible? Cause that the best way to do that imho
If (1) is not possible, what am I doing wrong on the second approach?
WebAudio has no way of saving audio to a file. You can use MediaRecorder as one way, but I don't think that that's required to support WAV files. Or you can do it by hand as you show above. At a glance I don't see anything wrong with what you've done. Might be helpful to look at what Chrome does to save files in its test suite; it does basically what you do.
I'm attempting to use opencv.js to align images to a baseline image. I'm following some basic python guidance that i've seen work (example: https://alexanderpacha.com/2018/01/29/aligning-images-an-engineers-solution/)
but i'm getting tripped up with an error that I don't quite understand. The error is "opencv.js:30 Uncaught TypeError: Cannot use 'in' operator to search for 'type' in 1e-10" and it seems to be caused by the "criteria" variable passed to "cv.findTransformECC();" see here.
any guidance as to what I'm doing wrong here?
function Align_img(){
let image_baseline = cv.imread(imgElement_Baseline);
let image = cv.imread('imageChangeup');
let im1_gray = new cv.Mat();
let im2_gray = new cv.Mat();
let im2_aligned = new cv.Mat();
//get size of baseline image
width1 = image_baseline.cols;
height1 = image_baseline.rows;
//resize image to baseline image
let dim1 = new cv.Size(width1, height1);
cv.resize(image, image, dim1, cv.INTER_AREA);
// Convert images to grayscale
cv.cvtColor(image_baseline, im1_gray, cv.COLOR_BGR2GRAY);
cv.cvtColor(image, im2_gray, cv.COLOR_BGR2GRAY);
// Find size of image1
let dsize = new cv.Size(image_baseline.rows, image_baseline.cols);
// Define the motion model
warp_mode = cv.MOTION_HOMOGRAPHY;
// Define 3x3 matrix and initialize the matrix to identity
let warp_matrix = cv.Mat.eye(3, 3, cv.CV_8U);
// Specify the number of iterations.
number_of_iterations = 5000;
// Specify the threshold of the increment in the correlation coefficient between two iterations
termination_eps = 0.0000000001; //1e-10;
// Define termination criteria
criteria = (cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, number_of_iterations, termination_eps);
//Run the ECC algorithm. The results are stored in warp_matrix.
cv.findTransformECC(im1_gray, im2_gray, warp_matrix, warp_mode, criteria, null, 5);
// Use warpPerspective for Homography
cv.warpPerspective (image, im2_aligned, warp_matrix, dsize, cv.INTER_LINEAR + cv.WARP_INVERSE_MAP);
cv.imshow('imageChangeup', im2_aligned);
im1_gray.delete();
im2_gray.delete();
im2_aligned.delete();
};
UPDATE: 2 things. 1. Found easy fix to error (code below) and 2. looks like a bug in the findTransformECC opencv.js API causing this method not to work. Here is current code.
The API has 2 optional parameters (inputMask and gaussFiltSize) but if you don't include them you get an error ("function findTransformECC called with 5 arguments, expected 7 args!").
The issue is what to use for inputMask - "null" does not work, there doesn't seem to be support for 'cv.noArray()' and I can't find a mask that doesn't lead to a 'uncaught exception' error.
I'll update again once I find a workaround. Let me know if anyone sees a work around.
function Align_img(){
let image_baseline = cv.imread(imgElement_Baseline);
let image = cv.imread('imageChangeup');
let im1_gray = new cv.Mat();
let im2_gray = new cv.Mat();
let im2_aligned = new cv.Mat();
//get size of baseline image
var width1 = image_baseline.cols;
var height1 = image_baseline.rows;
//resize image to baseline image
let dim1 = new cv.Size(width1, height1);
cv.resize(image, image, dim1, cv.INTER_AREA);
// Convert images to grayscale
cv.cvtColor(image_baseline, im1_gray, cv.COLOR_BGR2GRAY);
cv.cvtColor(image, im2_gray, cv.COLOR_BGR2GRAY);
// Find size of image1
let dsize = new cv.Size(image_baseline.rows, image_baseline.cols);
// Define the motion model
const warp_mode = cv.MOTION_HOMOGRAPHY;
// Define 3x3 matrix and initialize the matrix to identity
let warp_matrix = cv.Mat.eye(3, 3, cv.CV_8U);
// Specify the number of iterations.
const number_of_iterations = 5000;
// Specify the threshold of the increment in the correlation coefficient between two iterations
const termination_eps = 0.0000000001; //1e-10;
// Define termination criteria
//const criteria = (cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, number_of_iterations, termination_eps);
let criteria = new cv.TermCriteria(cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, number_of_iterations, termination_eps);
//Run the ECC algorithm. The results are stored in warp_matrix.
//let inputMask = new cv.Mat.ones(im1_gray.size(), cv.CV_8U); //uint8
cv.findTransformECC(im1_gray, im2_gray, warp_matrix, warp_mode, criteria, null, 5);
// Use warpPerspective for Homography
cv.warpPerspective (image, im2_aligned, warp_matrix, dsize, cv.INTER_LINEAR + cv.WARP_INVERSE_MAP);
getMatStats(im2_aligned, 1); //0 = baseline (srcMat), 1 = image (srcMat_compare)
cv.imshow('imageChangeup', im2_aligned);
im1_gray.delete();
im2_gray.delete();
im2_aligned.delete();
};
UPDATE 2 I verified code works fine in Python. code below. The issue at hand now is simply, how do you this in Javascript: "inputMask=None"
Python:
# Read the images to be aligned
im1 = cv2.imread(r"C:\temp\tcoin\69.jpg");
im2 = cv2.imread(r"C:\temp\tcoin\pic96_crop.jpg");
#resize image to compare
width1 = int(im1.shape[1])
height1 = int(im1.shape[0])
dim1 = (width1, height1)
im2 = cv2.resize(im2, dim1, interpolation = cv2.INTER_AREA)
# Convert images to grayscale
im1_gray = cv2.cvtColor(im1,cv2.COLOR_BGR2GRAY)
im2_gray = cv2.cvtColor(im2,cv2.COLOR_BGR2GRAY)
# Find size of image1
sz = im1.shape
# Define the motion model
warp_mode = cv2.MOTION_HOMOGRAPHY
# Define 2x3 or 3x3 matrices and initialize the matrix to identity
warp_matrix = np.eye(3, 3, dtype=np.float32)
# Specify the number of iterations.
number_of_iterations = 5000;
# Specify the threshold of the increment
# in the correlation coefficient between two iterations
termination_eps = 1e-10;
# Define termination criteria
criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, number_of_iterations, termination_eps)
# Run the ECC algorithm. The results are stored in warp_matrix.
(cc, warp_matrix) = cv2.findTransformECC (im1_gray,im2_gray,warp_matrix, warp_mode, criteria, inputMask=None, gaussFiltSize=1)
# Use warpPerspective for Homography
im2_aligned = cv2.warpPerspective (im2, warp_matrix, (sz[1],sz[0]), flags=cv2.INTER_LINEAR + cv2.WARP_INVERSE_MAP)
# Show final results
cv2.imshow("Aligned Image 2", im2_aligned)
cv2.imwrite(r"c:\temp\tcoin\output\pic96_cropB.jpg", im2_aligned)
cv2.waitKey(0)
I'm using Flash CS6 and I need to save a 32 bit PNG from a vector graphic in 9 different sizes.
16
32
36
48
72
114
128
150
480
How to write a batch export script for that in JSFL?
JSFL Docs (PDF)
I have a script that does what you are looking to do. It doesn't appear that you have really attempted to write any code or show any research attempt so if you do end up using this script I would appreciate the credit.
Select a movie clip on the stage then run this command.
Andrew Doll - Multiple Size PNG Exporter
Note: Before running this script export one PNG image using the desired PNG export settings.
fl.getDocumentDOM.exportPNG() accepts 3 paramaters. The first is the string for the file name. The second is a Boolean value that specifies whether to use the current PNG publish settings (true) or to display the Export PNG dialog box (false). The third is a Boolean value that specifies whether to export only the current frame (true) or to export all frames, with each frame as a separate PNG file (false).
Since this script sets the second paramater to true just be sure that the PNG export settings are already set to 32 bit PNG.
// Multiple Size PNG Exporter
// Copyright © 2014 Andrew Doll
// http://www.andrewdollanimation.com/
/* NOTE:Before running this script export one PNG image using the desired PNG export settings. fl.getDocumentDOM.exportPNG() accepts 3
** paramaters. The first is the string for the file name. The second is a Boolean value that specifies whether to use the current PNG
** publish settings (true) or to display the Export PNG dialog box (false). The third is a Boolean value that specifies whether to export
** only the current frame (true) or to export all frames, with each frame as a separate PNG file (false). Since this script sets the
** second paramater to true just be sure that the PNG export settings are already set to 32 bit PNG.
*/
// Check to see if there is a file open first.
var dom = fl.getDocumentDOM();
if (dom == null)
{
alert("Please open a file.");
}
else
{
var sel = [];
var exportSizeArray = [];
var folderURI = "";
var folderLocation = "";
var pngFileName = "";
var URI = "";
var selWidth;
var selHeight;
var sideToUse;
var scaleAmount;
function setupExportFolder()
{
// Create a folder and file name for the PNG files.
folderLocation = fl.browseForFolderURL("Select a folder.");
if(folderLocation != null)
{
folderURI = folderLocation + "/PNG Exports";
FLfile.createFolder(folderURI);
pngFileName = prompt("What would you like to name the png files?");
}
}
// Check if a movie clip on the stage is selected to export PNG images.
var selectionCheck = dom.selection;
if(!selectionCheck || !selectionCheck.length)
{
alert("Please select a movie clip on the stage.");
}
else
{
// Set up export sizes in this array.
exportSizeArray = [16, 32, 64, 128, 256, 512, 1024];
// Setup export folder
setupExportFolder();
if(folderLocation != null && pngFileName != null)
{
// Copy the selected artwork from the stage.
sel = dom.selection[0];
dom.clipCopy();
// Calculate the amount to scale the symbol by based on the longest side.
function calculateScaleAmount(selWidth, selHeight)
{
if(selWidth >= selHeight)
{
sideToUse = selWidth;
}
else
{
sideToUse = selHeight;
}
scaleAmount = exportSizeArray[i]/sideToUse;
return scaleAmount;
}
// Set the width and height of the symbol. Handle this with the size array.
for (var i = 0; i < exportSizeArray.length; i++)
{
// Create a new FLA document.
fl.createDocument();
dom = fl.getDocumentDOM();
// Resize the document to the current export size.
dom.width = exportSizeArray[i];
dom.height = exportSizeArray[i];
// Paste the artwork to the stage.
dom.clipPaste(true);
sel = dom.selection[0];
dom.setAlignToDocument(true);
selWidth = sel.width;
selHeight = sel.height;
calculateScaleAmount(selWidth, selHeight);
// Scale the artwork to the size of the stage based on the largest side.
dom.scaleSelection(scaleAmount, scaleAmount, "center");
// Align to the center of the stage.
dom.align("vertical center", true);
dom.align("horizontal center", true);
// Output the image.
URI = folderURI + "/" + pngFileName + "_" + exportSizeArray[i] + " x " + exportSizeArray[i] + "_";
dom.exportPNG(URI, true, true);
// Close the temporary FLA without saving.
dom.close(false);
}
}
}
}