Runtime differences between two machines in TF.js - javascript

I was working on a small-scale Fashion MNIST project and used the code below. When I first ran it on my primary machine, I got an unchanging loss of ~2; when I ran the same code on my secondary machine, the loss and accuracy metrics behaved exactly the way they should.
Here is my index.html
<html>
<head>
<script src="https://cdn.jsdelivr.net/npm/#tensorflow/tfjs#latest"></script>
<script src="https://cdn.jsdelivr.net/npm/#tensorflow/tfjs-vis"></script>
</head>
<body>
<h1>Fashion Classifier!</h1>
<canvas id="canvas" width="280" height="280" style="position:absolute;top:100;left:100;border:8px solid;"></canvas>
<img id="canvasimg" style="position:absolute;top:10%;left:52%;width=280;height=280;display:none;">
<input type="button" value="classify" id="sb" size="48" style="position:absolute;top:400;left:100;">
<input type="button" value="clear" id="cb" size="23" style="position:absolute;top:400;left:180;">
<script src="fashion-data.js" type="module"></script>
<script src="fashion-script_exercise.js" type="module"></script>
</body>
</html>
The JS code that loads the data (fashion-data.js):
const IMAGE_SIZE = 784;
const NUM_CLASSES = 10;
const NUM_DATASET_ELEMENTS = 70000;
const TRAIN_TEST_RATIO = 1 / 7;
const NUM_TRAIN_ELEMENTS = Math.floor(TRAIN_TEST_RATIO * NUM_DATASET_ELEMENTS);
const NUM_TEST_ELEMENTS = NUM_DATASET_ELEMENTS - NUM_TRAIN_ELEMENTS;
const MNIST_IMAGES_SPRITE_PATH =
'https://storage.googleapis.com/learnjs-data/model-builder/fashion_mnist_images.png';
const MNIST_LABELS_PATH =
'https://storage.googleapis.com/learnjs-data/model-builder/fashion_mnist_labels_uint8';
export class FMnistData {
constructor() {
this.shuffledTrainIndex = 0;
this.shuffledTestIndex = 0;
}
async load() {
// Make a request for the MNIST sprited image.
const img = new Image();
const canvas = document.createElement('canvas');
const ctx = canvas.getContext('2d');
const imgRequest = new Promise((resolve, reject) => {
img.crossOrigin = '';
img.onload = () => {
img.width = img.naturalWidth;
img.height = img.naturalHeight;
const datasetBytesBuffer =
new ArrayBuffer(NUM_DATASET_ELEMENTS * IMAGE_SIZE * 4);
const chunkSize = 5000;
canvas.width = img.width;
canvas.height = chunkSize;
for (let i = 0; i < NUM_DATASET_ELEMENTS / chunkSize; i++) {
const datasetBytesView = new Float32Array(
datasetBytesBuffer, i * IMAGE_SIZE * chunkSize * 4,
IMAGE_SIZE * chunkSize);
ctx.drawImage(
img, 0, i * chunkSize, img.width, chunkSize, 0, 0, img.width,
chunkSize);
const imageData = ctx.getImageData(0, 0, canvas.width, canvas.height);
for (let j = 0; j < imageData.data.length / 4; j++) {
// All channels hold an equal value since the image is grayscale, so
// just read the red channel.
datasetBytesView[j] = imageData.data[j * 4] / 255;
}
}
this.datasetImages = new Float32Array(datasetBytesBuffer);
resolve();
};
img.src = MNIST_IMAGES_SPRITE_PATH;
});
const labelsRequest = fetch(MNIST_LABELS_PATH);
const [imgResponse, labelsResponse] =
await Promise.all([imgRequest, labelsRequest]);
this.datasetLabels = new Uint8Array(await labelsResponse.arrayBuffer());
this.trainIndices = tf.util.createShuffledIndices(NUM_TRAIN_ELEMENTS);
this.testIndices = tf.util.createShuffledIndices(NUM_TEST_ELEMENTS);
this.trainImages =
this.datasetImages.slice(0, IMAGE_SIZE * NUM_TRAIN_ELEMENTS);
this.testImages = this.datasetImages.slice(IMAGE_SIZE * NUM_TRAIN_ELEMENTS);
this.trainLabels =
this.datasetLabels.slice(0, NUM_CLASSES * NUM_TRAIN_ELEMENTS);
this.testLabels =
this.datasetLabels.slice(NUM_CLASSES * NUM_TRAIN_ELEMENTS);
}
nextTrainBatch(batchSize) {
return this.nextBatch(
batchSize, [this.trainImages, this.trainLabels], () => {
this.shuffledTrainIndex =
(this.shuffledTrainIndex + 1) % this.trainIndices.length;
return this.trainIndices[this.shuffledTrainIndex];
});
}
nextTestBatch(batchSize) {
return this.nextBatch(batchSize, [this.testImages, this.testLabels], () => {
this.shuffledTestIndex =
(this.shuffledTestIndex + 1) % this.testIndices.length;
return this.testIndices[this.shuffledTestIndex];
});
}
nextBatch(batchSize, data, index) {
const batchImagesArray = new Float32Array(batchSize * IMAGE_SIZE);
const batchLabelsArray = new Uint8Array(batchSize * NUM_CLASSES);
for (let i = 0; i < batchSize; i++) {
const idx = index();
const image =
data[0].slice(idx * IMAGE_SIZE, idx * IMAGE_SIZE + IMAGE_SIZE);
batchImagesArray.set(image, i * IMAGE_SIZE);
const label =
data[1].slice(idx * NUM_CLASSES, idx * NUM_CLASSES + NUM_CLASSES);
batchLabelsArray.set(label, i * NUM_CLASSES);
}
const xs = tf.tensor2d(batchImagesArray, [batchSize, IMAGE_SIZE]);
const labels = tf.tensor2d(batchLabelsArray, [batchSize, NUM_CLASSES]);
return {xs, labels};
}
}
The implementation JS file (fashion-script_exercise.js):
import {FMnistData} from './fashion-data.js';
var canvas, ctx, saveButton, clearButton;
var pos = {x:0, y:0};
var rawImage;
var model;
function getModel() {
model = tf.sequential();
model.add(tf.layers.conv2d({inputShape: [28, 28, 1], kernelSize: 3, filters: 8, activation: 'relu'}));
model.add(tf.layers.maxPooling2d({poolSize: [2, 2]}));
model.add(tf.layers.conv2d({filters: 16, kernelSize: 3, activation: 'relu'}));
model.add(tf.layers.maxPooling2d({poolSize: [2, 2]}));
model.add(tf.layers.flatten());
model.add(tf.layers.dense({units: 128, activation: 'relu'}));
model.add(tf.layers.dense({units: 10, activation: 'softmax'}));
model.compile({optimizer: tf.train.adam(), loss: 'categoricalCrossentropy', metrics: ['accuracy']});
return model;
}
async function train(model, data) {
const metrics = ['loss', 'val_loss', 'acc', 'val_acc'];
const container = { name: 'Model Training', styles: { height: '1000px' } };
const fitCallbacks = tfvis.show.fitCallbacks(container, metrics);
const BATCH_SIZE = 512;
const TRAIN_DATA_SIZE = 6000;
const TEST_DATA_SIZE = 1000;
const [trainXs, trainYs] = tf.tidy(() => {
const d = data.nextTrainBatch(TRAIN_DATA_SIZE);
return [
d.xs.reshape([TRAIN_DATA_SIZE, 28, 28, 1]),
d.labels
];
});
const [testXs, testYs] = tf.tidy(() => {
const d = data.nextTestBatch(TEST_DATA_SIZE);
return [
d.xs.reshape([TEST_DATA_SIZE, 28, 28, 1]),
d.labels
];
});
return model.fit(trainXs, trainYs, {
batchSize: BATCH_SIZE,
validationData: [testXs, testYs],
epochs: 10,
shuffle: true,
callbacks: fitCallbacks
});
}
function setPosition(e){
pos.x = e.clientX-100;
pos.y = e.clientY-100;
}
function draw(e) {
if(e.buttons!=1) return;
ctx.beginPath();
ctx.lineWidth = 24;
ctx.lineCap = 'round';
ctx.strokeStyle = 'white';
ctx.moveTo(pos.x, pos.y);
setPosition(e);
ctx.lineTo(pos.x, pos.y);
ctx.stroke();
rawImage.src = canvas.toDataURL('image/png');
}
function erase() {
ctx.fillStyle = "black";
ctx.fillRect(0,0,280,280);
}
function save() {
var raw = tf.browser.fromPixels(rawImage,1);
var resized = tf.image.resizeBilinear(raw, [28,28]);
var tensor = resized.expandDims(0);
var prediction = model.predict(tensor);
var pIndex = tf.argMax(prediction, 1).dataSync();
var classNames = ["T-shirt/top", "Trouser", "Pullover",
"Dress", "Coat", "Sandal", "Shirt",
"Sneaker", "Bag", "Ankle boot"];
alert(classNames[pIndex]);
}
function init() {
canvas = document.getElementById('canvas');
rawImage = document.getElementById('canvasimg');
ctx = canvas.getContext("2d");
ctx.fillStyle = "black";
ctx.fillRect(0,0,280,280);
canvas.addEventListener("mousemove", draw);
canvas.addEventListener("mousedown", setPosition);
canvas.addEventListener("mouseenter", setPosition);
saveButton = document.getElementById('sb');
saveButton.addEventListener("click", save);
clearButton = document.getElementById('cb');
clearButton.addEventListener("click", erase);
}
async function run() {
const data = new FMnistData();
await data.load();
const model = getModel();
tfvis.show.modelSummary({name: 'Model Architecture'}, model);
await train(model, data);
await model.save('downloads://my_model');
init();
alert("Training is done, try classifying your drawings!");
}
document.addEventListener('DOMContentLoaded', run);
I used the same version of Chrome and the same web server extension to run the code on both machines. What could possibly be the problem? Note: I have also checked the console logs and there are no errors there either.

Your code does not specify any initialization for the kernel weights, so depending on the default implementation on each machine, the weights might be initialized to 0, which makes it very difficult for the optimizer to start converging.
In the implementation JS file, in the getModel function, try adding the option kernelInitializer: 'glorotUniform' to the layer definitions to see if there is any improvement.
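For example, a minimal sketch of the layer definitions with the initializer added (everything else in getModel stays as in the question; this is just the suggestion above made concrete, not something tested on your machines):
model.add(tf.layers.conv2d({
  inputShape: [28, 28, 1], kernelSize: 3, filters: 8,
  activation: 'relu',
  kernelInitializer: 'glorotUniform' // explicit init instead of relying on the backend default
}));
model.add(tf.layers.maxPooling2d({poolSize: [2, 2]}));
model.add(tf.layers.conv2d({
  filters: 16, kernelSize: 3,
  activation: 'relu',
  kernelInitializer: 'glorotUniform'
}));
model.add(tf.layers.maxPooling2d({poolSize: [2, 2]}));
model.add(tf.layers.flatten());
model.add(tf.layers.dense({units: 128, activation: 'relu', kernelInitializer: 'glorotUniform'}));
model.add(tf.layers.dense({units: 10, activation: 'softmax', kernelInitializer: 'glorotUniform'}));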

Related

Draw ASCII art version of image on canvas using canvas-sketch (Javascript)

The idea was for the code to draw an ASCII-art version of an image, but my initial code was drawing the glyphs on the background and not on the profile of the person.
(image: profile of Pastor Gospel)
I played around a bit and found out that the profile wasn't being drawn because the resolution of that part is low, so I included these 2 lines of code: if (v < 40) return "."; if (v < 50) return "/";.
It now looks pretty decent, but the thing is I can't see the eyes, nose and so on because they aren't clear. So my question is: how can I make them visible?
This is my code:
const canvasSketch = require("canvas-sketch");
const random = require("canvas-sketch-util/random");
const settings = {
dimensions: [1080, 1080],
};
let manager, image;
let text = "C";
let fontSize = 1200;
let fontFamily = "serif";
const typeCanvas = document.createElement("canvas");
const typeContext = typeCanvas.getContext("2d");
const sketch = ({ context, width, height }) => {
const cell = 20;
const cols = Math.floor(width / cell);
const rows = Math.floor(width / cell);
const numCells = cols * rows;
typeCanvas.width = cols;
typeCanvas.height = rows;
return ({ context, width, height }) => {
typeContext.fillStyle = "black";
typeContext.fillRect(0, 0, cols, rows);
typeContext.save();
typeContext.drawImage(image, 0, 0, cols, rows);
typeContext.restore();
const typeData = typeContext.getImageData(0, 0, cols, rows).data;
context.fillStyle = "black";
context.fillRect(0, 0, width, height);
for (let i = 0; i < numCells; i++) {
const col = i % cols;
const row = Math.floor(i / cols);
const x = col * cell + random.range(-cell, cell) * 0.5;
const y = row * cell + random.range(-cell, cell) * 0.5;
const r = typeData[i * 4 + 0];
const g = typeData[i * 4 + 1];
const b = typeData[i * 4 + 2];
const a = typeData[i * 4 + 3];
const glyph = getGlyph(r);
context.font = `${cell * 2}px ${fontFamily}`;
if (Math.random() < 0.1) context.font = `${cell * 6}px ${fontFamily}`;
context.fillStyle = `rgb(${r}, ${g}, ${b})`;
//context.fillStyle = "black";
context.save();
context.translate(x, y);
context.translate(cell * 0.5, cell * 0.5);
//context.fillRect(0, 0, cell, cell);
//context.fillStyle = "white";
context.fillText(glyph, 0, 0);
context.restore();
}
context.drawImage(typeCanvas, 0, 0);
};
};
const getGlyph = (v) => {
if (v < 40) return ".";
if (v < 50) return "/";
if (v < 100) return ".";
if (v < 150) return "-";
if (v < 200) return "+";
const glyphs = "_= /".split("");
return random.pick(glyphs);
};
const loadMeSomeImage = (url) => {
return new Promise((resolve, reject) => {
const img = new Image();
img.onload = () => resolve(img);
img.onerror = () => reject();
img.src = url;
});
};
const start = async () => {
const url = "./headshot-pstG.jpeg";
image = await loadMeSomeImage(url);
typeCanvas.width = image.width;
typeCanvas.height = image.height;
manager = await canvasSketch(sketch, settings);
};
start();

Implement 360 degree rotation audio using Web Audio API

What I'm currently trying to implement is 360-degree rotating audio (I don't know the exact term for it, maybe "8D audio"?) using PannerNode.
As far as I can tell, what I need to do is just rotate the position of the PannerNode around the y-axis, with the AudioListener at (0, 0, 0).
But the result sounds as if the audio hasn't changed at all. Below is my code.
const $fileInput = document.createElement('input');
$fileInput.setAttribute('type', 'file');
document.body.appendChild($fileInput);
const $audio = document.createElement('audio');
$audio.setAttribute('controls', true);
document.body.appendChild($audio);
$fileInput.addEventListener('change', async (e) => {
const file = $fileInput.files[0];
const arrayBuffered = await file.arrayBuffer();
const actx = new (window.AudioContext || window.webkitAudioContext)({ latencyHint: 'interactive', sampleRate: 44100 });
const decoded = await actx.decodeAudioData(arrayBuffered);
const oactx = new OfflineAudioContext({ numberOfChannels: 2, length: decoded.length, sampleRate: actx.sampleRate });
const absn = new AudioBufferSourceNode(oactx, { buffer: decoded });
const pn = new PannerNode(oactx, {
panningModel: 'equalpower',
distanceModel: 'inverse',
positionX: 0,
positionY: 0,
positionZ: 0,
orientationX: 1,
orientationY: 0,
orientationZ: 0,
refDistance: 1,
maxDistance: 10000,
rolloffFactor: 1,
coneInnerAngle: 360,
coneOuterAngle: 360,
coneOuterGain: 0
});
oactx.listener.positionX.value = 0;
oactx.listener.positionY.value = 0;
oactx.listener.positionZ.value = 0;
oactx.listener.forwardX.value = 0;
oactx.listener.forwardY.value = 0;
oactx.listener.forwardZ.value = -1;
oactx.listener.upX.value = 0;
oactx.listener.upY.value = 1;
oactx.listener.upZ.value = 0;
// rotation
for (let t = 0; t < decoded.duration; t++) {
const rad = t * Math.PI / 180;
const x = pn.positionX.value * Math.cos(rad) - pn.positionZ.value * Math.sin(rad);
const z = pn.positionX.value * Math.sin(rad) + pn.positionZ.value * Math.cos(rad);
pn.positionX.setValueAtTime(x, t);
pn.positionZ.setValueAtTime(z, t);
}
absn.connect(pn);
pn.connect(oactx.destination);
absn.start();
const resultBuffer = await oactx.startRendering();
const test = new AudioBufferSourceNode(actx, { buffer: resultBuffer });
test.connect(actx.destination);
test.start();
});
I made the following two changes.
// from
PannerNode.positionX: 0
// to
PannerNode.positionX: 1
// from
for (let t = 0; t < decoded.duration; t++) {
const rad = t * Math.PI / 180;
}
// to
for (let t = 0; t < decoded.duration; t += 0.01) {
const rad = 100 * t * Math.PI / 180;
}
I set PannerNode.positionX to 1 because, for the PannerNode to rotate around the AudioListener, it needs to have some distance from the AudioListener.
I changed the for statement because I want a fast, smoothly changing effect.
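Putting the two changes together, a minimal sketch of the scheduling loop (note that I compute the position from a fixed radius of 1 instead of re-reading the AudioParam values on every iteration, which is my simplification rather than the exact code above):
const radius = 1; // the panner must sit some distance away from the listener at (0, 0, 0)
for (let t = 0; t < decoded.duration; t += 0.01) {
  const rad = 100 * t * Math.PI / 180; // roughly 100 degrees of rotation per second
  pn.positionX.setValueAtTime(radius * Math.cos(rad), t);
  pn.positionZ.setValueAtTime(radius * Math.sin(rad), t);
}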

Reactjs recorder-js download

I am trying to make a React app for recording voice samples and saving them as wav files. I used recorder-js to make the recording, but I am still struggling with the part that saves it as a wav file, and I get "exportWAV is not a function" every time.
If you've been there, please help.
Sample code is here:
//create
var record_recorder;
record_recorder = new RecorderV2(input,{sampleRate:44100, numChannels:2});
//start
record_recorder.record();
//stop
record_recorder.stop();
//export
record_recorder.exportWAV(function(blob) {
var url = URL.createObjectURL(blob);
var li = document.createElement('div');
var au = document.createElement('audio');
var hf = document.createElement('a');
var br = document.createElement('br');
record_result_blob = blob;
au.controls = true;
au.src = url;
hf.href = url;
hf.download = 'record.wav';
hf.innerHTML = hf.download;
li.appendChild(au);
li.appendChild(br);
li.appendChild(hf);
if (result)
result(li);
});
See the example website:
https://voice-recorder-online.com
code:
https://voice-recorder-online.com/js/index_editor.js
Here's how you can draw waves based on your voice frequency, and record and save files in wav format.
(async() => {
let leftchannel = [];
let rightchannel = [];
let recorder = null;
let recording = false;
let recordingLength = 0;
let volume = null;
let audioInput = null;
let sampleRate = null;
let AudioContext = window.AudioContext || window.webkitAudioContext;
let context = null;
let analyser = null;
let canvas = document.querySelector("canvas");
let canvasCtx = canvas.getContext("2d");
let visualSelect = document.querySelector("#visSelect");
let micSelect = document.querySelector("#micSelect");
let stream = null;
let tested = false;
try {
window.stream = stream = await getStream();
console.log("Got stream");
} catch (err) {
alert("Issue getting mic", err);
}
const deviceInfos = await navigator.mediaDevices.enumerateDevices();
var mics = [];
for (let i = 0; i !== deviceInfos.length; ++i) {
let deviceInfo = deviceInfos[i];
if (deviceInfo.kind === "audioinput") {
mics.push(deviceInfo);
let label = deviceInfo.label || "Microphone " + mics.length;
console.log("Mic ", label + " " + deviceInfo.deviceId);
const option = document.createElement("option");
option.value = deviceInfo.deviceId;
option.text = label;
micSelect.appendChild(option);
}
}
function getStream(constraints) {
if (!constraints) {
constraints = {
audio: true,
video: false
};
}
return navigator.mediaDevices.getUserMedia(constraints);
}
setUpRecording();
function setUpRecording() {
context = new AudioContext();
sampleRate = context.sampleRate;
// creates a gain node
volume = context.createGain();
// creates an audio node from the microphone's incoming stream
audioInput = context.createMediaStreamSource(stream);
// Create analyser
analyser = context.createAnalyser();
// connect audio input to the analyser
audioInput.connect(analyser);
// connect analyser to the volume control
// analyser.connect(volume);
let bufferSize = 2048;
recorder = context.createScriptProcessor(bufferSize, 2, 2); // assign the outer 'recorder' instead of shadowing it with a new local
// we connect the volume control to the processor
// volume.connect(recorder);
analyser.connect(recorder);
// finally connect the processor to the output
recorder.connect(context.destination);
recorder.onaudioprocess = function(e) {
// Check
if (!recording) return;
// Do something with the data, i.e Convert this to WAV
console.log("recording");
let left = e.inputBuffer.getChannelData(0);
let right = e.inputBuffer.getChannelData(1);
if (!tested) {
tested = true;
// if this reduces to 0 we are not getting any sound
if (!left.reduce((a, b) => a + b)) {
alert("There seems to be an issue with your Mic");
// clean up;
stop();
stream.getTracks().forEach(function(track) {
track.stop();
});
context.close();
}
}
// we clone the samples
leftchannel.push(new Float32Array(left));
rightchannel.push(new Float32Array(right));
recordingLength += bufferSize;
};
visualize();
}
function mergeBuffers(channelBuffer, recordingLength) {
let result = new Float32Array(recordingLength);
let offset = 0;
let lng = channelBuffer.length;
for (let i = 0; i < lng; i++) {
let buffer = channelBuffer[i];
result.set(buffer, offset);
offset += buffer.length;
}
return result;
}
function interleave(leftChannel, rightChannel) {
let length = leftChannel.length + rightChannel.length;
let result = new Float32Array(length);
let inputIndex = 0;
for (let index = 0; index < length;) {
result[index++] = leftChannel[inputIndex];
result[index++] = rightChannel[inputIndex];
inputIndex++;
}
return result;
}
function writeUTFBytes(view, offset, string) {
let lng = string.length;
for (let i = 0; i < lng; i++) {
view.setUint8(offset + i, string.charCodeAt(i));
}
}
function start() {
recording = true;
document.querySelector("#msg").style.visibility = "visible";
// reset the buffers for the new recording
leftchannel.length = rightchannel.length = 0;
recordingLength = 0;
console.log("context: ", !!context);
if (!context) setUpRecording();
}
function stop() {
console.log("Stop");
recording = false;
document.querySelector("#msg").style.visibility = "hidden";
// we flat the left and right channels down
let leftBuffer = mergeBuffers(leftchannel, recordingLength);
let rightBuffer = mergeBuffers(rightchannel, recordingLength);
// we interleave both channels together
let interleaved = interleave(leftBuffer, rightBuffer);
///////////// WAV Encode /////////////////
// from http://typedarray.org/from-microphone-to-wav-with-getusermedia-and-web-audio/
//
// we create our wav file
let buffer = new ArrayBuffer(44 + interleaved.length * 2);
let view = new DataView(buffer);
// RIFF chunk descriptor
writeUTFBytes(view, 0, "RIFF");
view.setUint32(4, 44 + interleaved.length * 2, true);
writeUTFBytes(view, 8, "WAVE");
// FMT sub-chunk
writeUTFBytes(view, 12, "fmt ");
view.setUint32(16, 16, true);
view.setUint16(20, 1, true);
// stereo (2 channels)
view.setUint16(22, 2, true);
view.setUint32(24, sampleRate, true);
view.setUint32(28, sampleRate * 4, true);
view.setUint16(32, 4, true);
view.setUint16(34, 16, true);
// data sub-chunk
writeUTFBytes(view, 36, "data");
view.setUint32(40, interleaved.length * 2, true);
// write the PCM samples
let lng = interleaved.length;
let index = 44;
let volume = 1;
for (let i = 0; i < lng; i++) {
view.setInt16(index, interleaved[i] * (0x7fff * volume), true);
index += 2;
}
// our final binary blob
const blob = new Blob([view], {
type: "audio/wav"
});
const audioUrl = URL.createObjectURL(blob);
console.log("BLOB ", blob);
console.log("URL ", audioUrl);
document.querySelector("#audio").setAttribute("src", audioUrl);
const link = document.querySelector("#download");
link.setAttribute("href", audioUrl);
link.download = "output.wav";
}
// Visualizer function from
// https://webaudiodemos.appspot.com/AudioRecorder/index.html
//
function visualize() {
WIDTH = canvas.width;
HEIGHT = canvas.height;
CENTERX = canvas.width / 2;
CENTERY = canvas.height / 2;
let visualSetting = visualSelect.value;
console.log(visualSetting);
if (!analyser) return;
if (visualSetting === "sinewave") {
analyser.fftSize = 2048;
var bufferLength = analyser.fftSize;
console.log(bufferLength);
var dataArray = new Uint8Array(bufferLength);
canvasCtx.clearRect(0, 0, WIDTH, HEIGHT);
var draw = function() {
drawVisual = requestAnimationFrame(draw);
analyser.getByteTimeDomainData(dataArray);
canvasCtx.fillStyle = "rgb(200, 200, 200)";
canvasCtx.fillRect(0, 0, WIDTH, HEIGHT);
canvasCtx.lineWidth = 2;
canvasCtx.strokeStyle = "rgb(0, 0, 0)";
canvasCtx.beginPath();
var sliceWidth = (WIDTH * 1.0) / bufferLength;
var x = 0;
for (var i = 0; i < bufferLength; i++) {
var v = dataArray[i] / 128.0;
var y = (v * HEIGHT) / 2;
if (i === 0) {
canvasCtx.moveTo(x, y);
} else {
canvasCtx.lineTo(x, y);
}
x += sliceWidth;
}
canvasCtx.lineTo(canvas.width, canvas.height / 2);
canvasCtx.stroke();
};
draw();
} else if (visualSetting == "frequencybars") {
analyser.fftSize = 64;
var bufferLengthAlt = analyser.frequencyBinCount;
console.log(bufferLengthAlt);
var dataArrayAlt = new Uint8Array(bufferLengthAlt);
canvasCtx.clearRect(0, 0, WIDTH, HEIGHT);
var drawAlt = function() {
drawVisual = requestAnimationFrame(drawAlt);
analyser.getByteFrequencyData(dataArrayAlt);
canvasCtx.fillStyle = "rgb(0, 0, 0)";
canvasCtx.fillRect(0, 0, WIDTH, HEIGHT);
var barWidth = WIDTH / bufferLengthAlt;
var barHeight;
var x = 0;
for (var i = 0; i < bufferLengthAlt; i++) {
barHeight = dataArrayAlt[i];
canvasCtx.fillStyle = "rgb(" + (barHeight + 100) + ",50,50)";
canvasCtx.fillRect(
x,
HEIGHT - barHeight / 2,
barWidth,
barHeight / 2
);
x += barWidth + 1;
}
};
drawAlt();
} else if (visualSetting == "circle") {
analyser.fftSize = 32;
let bufferLength = analyser.frequencyBinCount;
console.log(bufferLength);
let dataArray = new Uint8Array(bufferLength);
canvasCtx.clearRect(0, 0, WIDTH, HEIGHT);
let draw = () => {
drawVisual = requestAnimationFrame(draw);
analyser.getByteFrequencyData(dataArray);
canvasCtx.fillStyle = "rgb(0, 0, 0)";
canvasCtx.fillRect(0, 0, WIDTH, HEIGHT);
// let radius = dataArray.reduce((a,b) => a + b) / bufferLength;
let radius = dataArray[2] / 2;
if (radius < 20) radius = 20;
if (radius > 100) radius = 100;
// console.log('Radius ', radius)
canvasCtx.beginPath();
canvasCtx.arc(CENTERX, CENTERY, radius, 0, 2 * Math.PI, false);
// canvasCtx.fillStyle = 'rgb(50,50,' + (radius+100) +')';
// canvasCtx.fill();
canvasCtx.lineWidth = 6;
canvasCtx.strokeStyle = "rgb(50,50," + (radius + 100) + ")";
canvasCtx.stroke();
};
draw();
}
}
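// Note: the snippet above never wires up the Record / Stop buttons from the markup below
// and leaves the async IIFE unclosed; an assumed minimal completion looks like this:
document.querySelector("#record").addEventListener("click", start);
document.querySelector("#stop").addEventListener("click", stop);
})();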
#msg {
visibility: hidden;
color: red;
font-weight: bold;
font-size: 22px;
font-family: Verdana;
}
button {
padding: 5px 10px;
border: 1px solid grey;
font-size: 18px;
background: white;
}
.audio-controls {
display: flex;
align-items: center;
padding-top: 20px;
justify-content: center;
}
.audio-controls button {
margin: 0px 5px;
}
canvas {
margin-top: 10px;
background-color: black;
}
select {
height: 25px;
margin: 0px 5px;
}
a {
margin-left: 20px;
}
.app {
text-align: center;
padding-top: 20px;
}
<div class="app">
<select name="" id="micSelect"></select>
<select id="visSelect">
<option value="frequencybars">Bar</option>
<option value="sinewave">Wave</option>
<option value="circle">Circle</option>
</select>
<a id="download">Download</a>
<div class="audio-controls">
<button id="record">Record</button>
<button id="stop">Stop</button>
<audio id="audio" controls></audio>
</div>
<div id="msg">Recording...</div>
<canvas width="500" height="300"></canvas>
<div>
Live Demo: https://codepen.io/furki911/pen/jOYpvMx

Plain Javascript not Working in Angular CLI Component

I'm developing a web app using Angular CLI.
I'm trying to apply something similar to this:
https://codepen.io/chrisdoble/pen/WQLLVp
Is there any way to write plain JavaScript in component.ts?
Here is my JS:
var image = document.getElementById('hero-bg');
var imageCanvas = document.createElement('canvas');
var imageCanvasContext = imageCanvas.getContext('2d');
var lineCanvas = document.createElement('canvas');
var lineCanvasContext = lineCanvas.getContext('2d');
var pointLifetime = 1000;
var points = [];
var newImage = document.getElementById('hero')
if (image.complete) {
start();
} else {
image.onload = start;
}
function start() {
document.addEventListener('mousemove', onMouseMove);
window.addEventListener('resize', resizeCanvases);
newImage.appendChild(imageCanvas);
resizeCanvases();
tick();
}
function onMouseMove(event) {
points.push({
time: Date.now(),
x: event.clientX,
y: event.clientY
});
}
function resizeCanvases() {
imageCanvas.width = lineCanvas.width = window.innerWidth;
imageCanvas.height = lineCanvas.height = window.innerHeight;
}
function tick() {
// Remove old points
points = points.filter(function(point) {
var age = Date.now() - point.time;
return age < pointLifetime;
});
drawLineCanvas();
drawImageCanvas();
requestAnimationFrame(tick);
}
function drawLineCanvas() {
var minimumLineWidth = 25;
var maximumLineWidth = 100;
var lineWidthRange = maximumLineWidth - minimumLineWidth;
var maximumSpeed = 50;
lineCanvasContext.clearRect(0, 0, lineCanvas.width, lineCanvas.height);
lineCanvasContext.lineCap = 'round';
lineCanvasContext.shadowBlur = 30;
lineCanvasContext.shadowColor = '#000';
for (var i = 1; i < points.length; i++) {
var point = points[i];
var previousPoint = points[i - 1];
// Change line width based on speed
var distance = getDistanceBetween(point, previousPoint);
var speed = Math.max(0, Math.min(maximumSpeed, distance));
var percentageLineWidth = (maximumSpeed - speed) / maximumSpeed;
lineCanvasContext.lineWidth = minimumLineWidth + percentageLineWidth * lineWidthRange;
// Fade points as they age
var age = Date.now() - point.time;
var opacity = (pointLifetime - age) / pointLifetime;
lineCanvasContext.strokeStyle = 'rgba(0, 0, 0, ' + opacity + ')';
lineCanvasContext.beginPath();
lineCanvasContext.moveTo(previousPoint.x, previousPoint.y);
lineCanvasContext.lineTo(point.x, point.y);
lineCanvasContext.stroke();
}
}
function getDistanceBetween(a, b) {
return Math.sqrt(Math.pow(a.x - b.x, 2) + Math.pow(a.y - b.y, 2));
}
function drawImageCanvas() {
// Emulate background-size: cover
var width = imageCanvas.width;
var height = imageCanvas.width / image.naturalWidth * image.naturalHeight;
if (height < imageCanvas.height) {
width = imageCanvas.height / image.naturalHeight * image.naturalWidth;
height = imageCanvas.height;
}
imageCanvasContext.clearRect(0, 0, imageCanvas.width, imageCanvas.height);
imageCanvasContext.globalCompositeOperation = 'source-over';
imageCanvasContext.drawImage(image, 0, 0, width, height);
imageCanvasContext.globalCompositeOperation = 'destination-in';
imageCanvasContext.drawImage(lineCanvas, 0, 0);
}
And here is my Component
import { Component } from '@angular/core';
import * as $ from 'jquery';
@Component({
selector: 'app-hero',
templateUrl: './hero.component.html',
styleUrls: ['./hero.component.scss']
})
export class HeroComponent {
}
The error I got is this:
message: 'Property 'complete' does not exist on type 'HTMLElement'.'
message: 'Property 'naturalWidth' does not exist on type 'HTMLElement'.'
message: 'Property 'naturalHeight' does not exist on type 'HTMLElement'.'
message: 'Argument of type 'HTMLElement' is not assignable to parameter of type 'HTMLCanvasElement | HTMLImageElement | HTMLVideoElement | ImageBitmap'.
Type 'HTMLElement' is not assignable to type 'ImageBitmap'.
Property 'width' is missing in type 'HTMLElement'.'
Everything works fine if I put the code inline in the index.html file, but I don't want to put it there; I want to apply it in my component.
Please help me find a way to implement this plain JavaScript in my component.ts.
Or maybe, if there is any TypeScript master here, kindly recode it for me. hehehe
Thank you guys a lot.
You do not have to use jQuery. Angular has everything in place to interact with the DOM.
You have to use https://angular.io/api/core/Renderer2 and https://angular.io/api/core/HostListener to adjust the code so that Angular can understand it.
I just converted the code from https://codepen.io/chrisdoble/pen/WQLLVp into the Angular way. Here is an example of what you are looking for:
import { HostListener, OnInit, Renderer2 } from '@angular/core';
export class NativeBlurComponent implements OnInit {
img = new Image();
imageCanvas = document.createElement('canvas');
imageCanvasContext = this.imageCanvas.getContext('2d');
lineCanvas = document.createElement('canvas');
lineCanvasContext = this.lineCanvas.getContext('2d');
pointLifetime = 1000;
points = [];
constructor(private renderer: Renderer2) { }
ngOnInit() {
this.img.src = document.querySelector('img').src;
if (this.img.complete) {
this.start();
} else {
this.img.onload = () => this.start(); // arrow function keeps 'this' bound to the component
}
}
start() {
this.renderer.appendChild(document.body, this.imageCanvas);
this.resizeCanvases();
this.tick();
}
@HostListener('document:mousemove', [ '$event' ])
onMouseMove(event) {
this.points.push({
time: Date.now(),
x: event.clientX,
y: event.clientY
});
}
@HostListener('window:resize', [ '$event' ])
resizeCanvases() {
this.imageCanvas.width = this.lineCanvas.width = window.innerWidth;
this.imageCanvas.height = this.lineCanvas.height = window.innerHeight;
}
tick() {
// Remove old points
this.points = this.points.filter((point) => { // arrow function so 'this.pointLifetime' below resolves to the component
const age = Date.now() - point.time;
return age < this.pointLifetime;
});
this.drawLineCanvas();
this.drawImageCanvas();
requestAnimationFrame(() => this.tick()); // keep 'this' bound for the next frame
}
drawLineCanvas() {
const minimumLineWidth = 25;
const maximumLineWidth = 100;
const lineWidthRange = maximumLineWidth - minimumLineWidth;
const maximumSpeed = 50;
this.lineCanvasContext.clearRect(0, 0, this.lineCanvas.width, this.lineCanvas.height);
this.lineCanvasContext.lineCap = 'round';
this.lineCanvasContext.shadowBlur = 30;
this.lineCanvasContext.shadowColor = '#000';
for (let i = 1; i < this.points.length; i++) {
const point = this.points[i];
const previousPoint = this.points[i - 1];
// Change line width based on speed
const distance = this.getDistanceBetween(point, previousPoint);
const speed = Math.max(0, Math.min(maximumSpeed, distance));
const percentageLineWidth = (maximumSpeed - speed) / maximumSpeed;
this.lineCanvasContext.lineWidth = minimumLineWidth + percentageLineWidth * lineWidthRange;
// Fade points as they age
const age = Date.now() - point.time;
const opacity = (this.pointLifetime - age) / this.pointLifetime;
this.lineCanvasContext.strokeStyle = 'rgba(0, 0, 0, ' + opacity + ')';
this.lineCanvasContext.beginPath();
this.lineCanvasContext.moveTo(previousPoint.x, previousPoint.y);
this.lineCanvasContext.lineTo(point.x, point.y);
this.lineCanvasContext.stroke();
}
}
getDistanceBetween(a, b) {
return Math.sqrt(Math.pow(a.x - b.x, 2) + Math.pow(a.y - b.y, 2));
}
drawImageCanvas() {
// Emulate background-size: cover
let width = this.imageCanvas.width;
let height = this.imageCanvas.width / this.img.naturalWidth * this.img.naturalHeight;
if (height < this.imageCanvas.height) {
width = this.imageCanvas.height / this.img.naturalHeight * this.img.naturalWidth;
height = this.imageCanvas.height;
}
this.imageCanvasContext.clearRect(0, 0, this.imageCanvas.width, this.imageCanvas.height);
this.imageCanvasContext.globalCompositeOperation = 'source-over';
this.imageCanvasContext.drawImage(this.img, 0, 0, width, height);
this.imageCanvasContext.globalCompositeOperation = 'destination-in';
this.imageCanvasContext.drawImage(this.lineCanvas, 0, 0);
}
}
Of course you can.
But you will have to deal with:
ElementRef: https://angular.io/api/core/ElementRef
Renderer: https://angular.io/api/core/Renderer
or Renderer2.
TypeScript needs you to give the element a more specific type than HTMLElement.
For example:
svg: SVGElement = ...
In your case, you have to say that your element has the type HTMLImageElement.
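For example, a minimal sketch using the element ids from the question (the two type assertions are the only additions; the rest is the original JS):
// tell the compiler these are an <img> and a generic element, not just HTMLElement
const image = document.getElementById('hero-bg') as HTMLImageElement;
const newImage = document.getElementById('hero') as HTMLElement;
// 'complete', 'naturalWidth' and 'naturalHeight' now type-check,
// and drawImage(image, ...) accepts the HTMLImageElement
if (image.complete) {
  start();
} else {
  image.onload = start;
}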

JSPdf with VueJS multi-page error

I'm having a problem trying to use jsPDF with Vue.js. I found a tutorial on the internet (this one) that shows how to print the entire contents of a div (since that isn't possible directly). The tutorial's test works perfectly, but in my adapted version the PDF comes out blank. Can anyone help?
My code, adapted to VueJS:
methods: {
GerarPdf(){
let source = $('#div')
let cache_width = source.width()
let a4 = [595.28, 990.89]
let canvasImage = ''
let winHeight = a4[1]
let formHeight = source.height()
let formWidth = source.width()
let imagePieces = []
imagePieces = [];
imagePieces.length = 0;
source.width((a4[0] * 1.33333) - 80).css('max-width', 'none');
return html2canvas(source, {
imageTimeout: 2000,
removeContainer: true
})
.then(canvas => {
console.log(canvas)
canvasImage = new Image();
canvasImage.src = canvas.toDataURL("image/png");
let totalImgs = Math.round(formHeight/winHeight);
for(let i = 0; i < totalImgs; i++) {
let canvas = document.createElement('canvas'),
ctx = canvas.getContext('2d');
canvas.width = formWidth;
canvas.height = winHeight;
ctx.drawImage(canvasImage, 0, i * winHeight, formWidth, winHeight, 0, 0, canvas.width, canvas.height);
imagePieces.push(canvas.toDataURL("image/png"));
}
console.log(imagePieces.length);
let totalPieces = imagePieces.length - 1;
let doc = new jsPDF({
unit: 'px',
format: 'a4'
});
imagePieces.forEach(function(img){
doc.addImage(img, 'JPEG', 20, 40);
if(totalPieces)
doc.addPage();
totalPieces--;
});
doc.save('techumber-html-to-pdf.pdf');
//source.width(cache_width);
})
}
}
