I can't see any output, and there's no error either. I'm trying to detect a rectangle shape in an image, cut it out, and save it with OpenCV.js.
onFilePicked(event) {
let imgElement = document.getElementById('imageSrc');
const files = event.target.files;
imgElement.src = URL.createObjectURL(files[0]);
var app = this
imgElement.onload = function () {
let mat = cv.imread(imgElement)
let dst = new cv.Mat();
cv.cvtColor(mat, mat, cv.COLOR_RGBA2GRAY); // cv.imread yields an RGBA Mat, so convert from RGBA
// gray = cv.bilateralFilter(gray, 11, 17, 17)
cv.Canny(mat, dst, 30, 200, 3, false);
let contours = new cv.MatVector();
let hierarchy = new cv.Mat();
var transformed = null
cv.findContours(dst, contours, hierarchy, cv.RETR_LIST, cv.CHAIN_APPROX_SIMPLE)
var sortableContours = []
for (let i = 0; i < contours.size(); i++) {
let cnt = contours.get(i);
let area = cv.contourArea(cnt, false);
let perim = cv.arcLength(cnt, false);
sortableContours.push({
areaSize: area,
perimiterSize: perim,
contour: cnt
});
let color = new cv.Scalar(255, 0, 0, 255);
cv.drawContours(mat, contours, -1, color, 3); // pass a cv.Scalar; (0, 255, 0) is a comma expression that evaluates to 0
}
cv.imshow('canvasOutput', mat);
let foundContour = null;
for (let sortableContour of sortableContours) {
let peri = cv.arcLength(sortableContour.contour, true);
let approx = new cv.Mat();
cv.approxPolyDP(sortableContour.contour, approx, 0.1 * peri, true);
if (approx.rows == 4) {
foundContour = approx
transformed = app.perspective_transform(mat, foundContour)
break;
} else {
approx.delete();
}
}
// guard: only rotate and display if a 4-point contour was actually found
if (transformed) {
let rotate = app.rotate_image(transformed, 90)
cv.imshow('canvasOutput', rotate)
}
};
},
Perspective transform:
perspective_transform(image, foundContour) {
let corner1 = new cv.Point(foundContour.data32S[0], foundContour.data32S[1]);
let corner2 = new cv.Point(foundContour.data32S[2], foundContour.data32S[3]);
let corner3 = new cv.Point(foundContour.data32S[4], foundContour.data32S[5]);
let corner4 = new cv.Point(foundContour.data32S[6], foundContour.data32S[7]);
//Order the corners
let cornerArray = [{
corner: corner1
}, {
corner: corner2
}, {
corner: corner3
}, {
corner: corner4
}];
//Sort by Y position (to get top-down)
cornerArray.sort((item1, item2) => {
return (item1.corner.y < item2.corner.y) ? -1 : (item1.corner.y > item2.corner.y) ? 1 : 0;
});
//Determine left/right based on x position of top and bottom 2
let tl = cornerArray[0].corner.x < cornerArray[1].corner.x ? cornerArray[0] : cornerArray[1];
let tr = cornerArray[0].corner.x > cornerArray[1].corner.x ? cornerArray[0] : cornerArray[1];
let bl = cornerArray[2].corner.x < cornerArray[3].corner.x ? cornerArray[2] : cornerArray[3];
let br = cornerArray[2].corner.x > cornerArray[3].corner.x ? cornerArray[2] : cornerArray[3];
//Calculate the max width/height
let widthBottom = Math.hypot(br.corner.x - bl.corner.x, br.corner.y - bl.corner.y);
let widthTop = Math.hypot(tr.corner.x - tl.corner.x, tr.corner.y - tl.corner.y);
let theWidth = (widthBottom > widthTop) ? widthBottom : widthTop;
let heightRight = Math.hypot(tr.corner.x - br.corner.x, tr.corner.y - br.corner.y);
let heightLeft = Math.hypot(tl.corner.x - bl.corner.x, tl.corner.y - bl.corner.y);
let theHeight = (heightRight > heightLeft) ? heightRight : heightLeft;
//Transform!
let finalDestCoords = cv.matFromArray(4, 1, cv.CV_32FC2, [0, 0, theWidth - 1, 0, theWidth - 1, theHeight - 1, 0, theHeight - 1]);
// corners
let srcCoords = cv.matFromArray(4, 1, cv.CV_32FC2, [tl.corner.x, tl.corner.y, tr.corner.x, tr.corner.y, br.corner.x, br.corner.y, bl.corner.x, bl.corner.y]);
let dsize = new cv.Size(theWidth, theHeight);
let M = cv.getPerspectiveTransform(srcCoords, finalDestCoords)
let dst = new cv.Mat();
cv.warpPerspective(image, dst, M, dsize);
return dst
},
Rotate image:
rotate_image(image, angle) {
let dst = new cv.Mat();
// cv.Size is (width, height); rows/cols are swapped so the canvas fits a 90-degree rotation
let dsize = new cv.Size(image.rows, image.cols);
let center = new cv.Point(image.cols / 2, image.rows / 2);
let M = cv.getRotationMatrix2D(center, angle, 1);
// re-center the rotated content in the swapped canvas (otherwise non-square images get clipped)
M.doublePtr(0, 2)[0] += (image.rows - image.cols) / 2;
M.doublePtr(1, 2)[0] += (image.cols - image.rows) / 2;
cv.warpAffine(image, dst, M, dsize, cv.INTER_LINEAR, cv.BORDER_CONSTANT, new cv.Scalar());
return dst
},
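As a debugging aside (not part of the original question): OpenCV.js often surfaces failures as raw numeric WASM exceptions, which is one way to end up with "no output and no error". Wrapping the onload body in a try/catch makes such failures visible; a minimal sketch, assuming the code above:
imgElement.onload = function () {
try {
// ... the processing code from above ...
} catch (err) {
// cv.exceptionFromPtr turns a numeric WASM exception into a readable message
console.error(typeof err === 'number' ? cv.exceptionFromPtr(err).msg : err);
}
};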
You are just grabbing the first contour in the entire array of sortableContours that has 4 points and running the transform on that one. You need to sort them so that the ones with the largest area come first.
//sorts the contours by largest area first
let slicer = Math.min(sortableContours.length, 4);
let sortedContours = sortableContours.sort((a,b) => (a.areaSize < b.areaSize) ? 1 : -1).slice(0, slicer);
Also, I would recommend removing this line of code from within the for loop, as it can be performed once outside the loop; inside it slows the process down quite a bit (it runs a few thousand times):
cv.drawContours(mat, contours, -1, color, 3);
My final note is that the following line might need to be tweaked from 0.1 to a smaller number like 0.02 if you are getting poor results. 0.1 is more forgiving, but 0.02 is more precise. Alternatively, you can do both, keep all the results in an array, and pick the one with the largest area when you are done, for the best of both worlds.
cv.approxPolyDP(sortableContour.contour, approx, 0.1 * peri, true);
Best of both worlds:
//iterates through the largest contours and creates transformed images if the contour's shape is a rectangle
let transformedOptions = [];
for (let sortedContour of sortedContours) {
let perimeter = cv.arcLength(sortedContour.contour, true);
let precisePoly = new cv.Mat();
let approxPoly = new cv.Mat();
cv.approxPolyDP(sortedContour.contour, precisePoly, 0.02 * perimeter, true); //the smaller number (0.02) is more precise
cv.approxPolyDP(sortedContour.contour, approxPoly, 0.1 * perimeter, true); //the larger number (0.1) is more forgiving
//if the polygon has 4 points (rectangle-ish)
if (precisePoly.rows == 4) {
transformedOptions.push(this.perspectiveTransform(originalImage, precisePoly, imageHeight, imageWidth))
}
if(approxPoly.rows == 4) {
transformedOptions.push(this.perspectiveTransform(originalImage, approxPoly, imageHeight, imageWidth))
}
precisePoly.delete();
approxPoly.delete();
}
let transformed = this.getLargestTransformation(transformedOptions);
//this could be optimized a bit
private getLargestTransformation(options) {
  let transformed = null;
  let largestArea = 0; // must persist across iterations, not reset inside the loop
  for (let option of options) {
    if (option == null) continue;
    let area = option.rows * option.cols;
    if (transformed == null || area > largestArea) {
      transformed = option;
      largestArea = area;
    }
  }
  return transformed;
}
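One caveat worth adding to the approach above: every entry in transformedOptions is a cv.Mat allocated by warpPerspective, and OpenCV.js Mats live in WASM memory that is not garbage-collected. A minimal sketch of freeing the losing candidates after the getLargestTransformation call, assuming the variable names used above:
for (let option of transformedOptions) {
  // free every losing candidate; only the winning Mat (transformed) is kept
  if (option != null && option !== transformed) {
    option.delete();
  }
}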
Related
I've got 3 images: 2 are visually identical but have different file names, and one is completely different. I load the images, put them on a canvas, get the image data, and compare them.
The 2 that are visually the same return true, which is correct.
When comparing 2 that are not visually the same, it also returns true, which is not correct.
UPDATED based on @obscure's answer below:
window.onload = function () {
setTimeout(process, 5000);
};
async function process() {
const img1 = document.getElementById("img1");
const img2 = document.getElementById("img2");
const img3 = document.getElementById("img3");
img1.crossOrigin = "Anonymous";
img2.crossOrigin = "Anonymous";
img3.crossOrigin = "Anonymous";
const canvas1 = document.createElement("canvas");
const ctx1 = canvas1.getContext("2d");
canvas1.width = img1.width;
canvas1.height = img1.height;
ctx1.drawImage(img1, 0, 0);
const pixData1 = ctx1.getImageData(0, 0, img1.width, img1.height).data;
const canvas2 = document.createElement("canvas");
const ctx2 = canvas2.getContext("2d");
canvas2.width = img2.width;
canvas2.height = img2.height;
ctx2.drawImage(img2, 0, 0);
const pixData2 = ctx2.getImageData(0, 0, img2.width, img2.height).data;
const canvas3 = document.createElement("canvas");
const ctx3 = canvas3.getContext("2d");
canvas3.width = img3.width;
canvas3.height = img3.height;
ctx3.drawImage(img3, 0, 0);
const pixData3 = ctx3.getImageData(0, 0, img3.width, img3.height).data;
const utf8A = new TextEncoder().encode(pixData1.toString());
let img1Hash = await crypto.subtle
.digest("SHA-256", utf8A)
.then((hashBuffer) => {
return Array.from(new Uint8Array(hashBuffer)).toString();
});
const utf8B = new TextEncoder().encode(pixData2.toString());
let img2Hash = await crypto.subtle
.digest("SHA-256", utf8B)
.then((hashBuffer) => {
return Array.from(new Uint8Array(hashBuffer)).toString();
});
const utf8C = new TextEncoder().encode(pixData3.toString());
let img3Hash = await crypto.subtle
.digest("SHA-256", utf8C)
.then((hashBuffer) => {
return Array.from(new Uint8Array(hashBuffer)).toString();
});
console.log(img1Hash);
console.log(img2Hash);
console.log(img3Hash);
console.log(img1Hash === img2Hash);
console.log(img1Hash === img3Hash); // Should be false
console.log(img2Hash === img3Hash); // Should be false
}
<!DOCTYPE html>
<html>
<head>
<script src="index.js"></script>
</head>
<body>
<img src="https://i.imgur.com/M0K21iS.jpg" id="img1" />
<img src="https://i.imgur.com/uNbsNAd.jpg" id="img2" />
<img src="https://i.imgur.com/QdqhGb9.jpg" id="img3" />
</body>
</html>
To compare two arrays for equality you can indeed use a hashing algorithm. Utilizing crypto.subtle is an easy solution, but I'm afraid you aren't aware of what the .digest() method does and returns.
From your code it seems you think it's a synchronous operation:
let img1Hash = "";
const utf8A = new TextEncoder().encode(pixData1.toString());
crypto.subtle.digest("SHA-256", utf8A).then((hashBuffer) => {
img1Hash = Array.from(new Uint8Array(hashBuffer));
});
console.log(img1Hash); // nothing logged
Well, it's an asynchronous operation, and digest() returns a promise. So if you simply log img1Hash after calling digest(), it will be an empty string, as the promise hasn't fulfilled yet. Likewise, a comparison like img1Hash === img2Hash will yield true, as both variables contain empty strings at that point in time.
So you need to wait until both promises are resolved. This can be done by wrapping your whole onload code block inside an async function process() and awaiting the results of calling digest(). Unfortunately this would still not return true if you do a comparison, because you make the result an array again:
Array.from(new Uint8Array(hashBuffer))
If you convert it to a String you can compare it for equality.
Here's the complete code:
window.onload = function() {
process();
};
async function process() {
const img1 = document.getElementById("img1");
const img2 = document.getElementById("img2");
img1.crossOrigin = "Anonymous";
img2.crossOrigin = "Anonymous";
const canvas1 = document.createElement("canvas");
const ctx1 = canvas1.getContext("2d");
canvas1.width = img1.width;
canvas1.height = img1.height;
ctx1.drawImage(img1, 0, 0);
const pixData1 = ctx1.getImageData(0, 0, img1.width, img1.height).data;
const canvas2 = document.createElement("canvas");
const ctx2 = canvas2.getContext("2d");
canvas2.width = img2.width;
canvas2.height = img2.height;
ctx2.drawImage(img2, 0, 0);
const pixData2 = ctx2.getImageData(0, 0, img2.width, img2.height).data;
const utf8A = new TextEncoder().encode(pixData1.toString());
let img1Hash = await crypto.subtle.digest("SHA-256", utf8A).then((hashBuffer) => {
return Array.from(new Uint8Array(hashBuffer)).toString();
});
const utf8B = new TextEncoder().encode(pixData2.toString());
let img2Hash = await crypto.subtle.digest("SHA-256", utf8B).then((hashBuffer) => {
return Array.from(new Uint8Array(hashBuffer)).toString();
});
console.log(img1Hash); // logs the hash string
console.log(img2Hash); // logs the hash string
console.log(img1Hash === img2Hash); // true
}
<img src="https://i.imgur.com/M0K21iS.jpg" id="img1" />
<img src="https://i.imgur.com/uNbsNAd.jpg" id="img2" />
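As an aside, the TextEncoder / toString() round-trip isn't strictly necessary: crypto.subtle.digest() accepts any BufferSource, so the pixel data can be hashed directly. A small sketch with a hypothetical helper name:
// pixData is the Uint8ClampedArray from getImageData(...).data;
// digest() accepts it directly as an ArrayBufferView
async function hashPixels(pixData) {
  const hashBuffer = await crypto.subtle.digest("SHA-256", pixData);
  return Array.from(new Uint8Array(hashBuffer)).toString();
}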
Edit
As you're struggling to get the correct hashes for each of your images, let's do things a bit differently. Instead of referencing a real HTML <img> element, let's create those dynamically and add 'em to the DOM when ready.
So the following snippet:
let sources = ['https://i.imgur.com/M0K21iS.jpg', 'https://i.imgur.com/uNbsNAd.jpg', 'https://i.imgur.com/QdqhGb9.jpg'];
let images = [];
let imageData = [];
let hashes = [];
let counter = 0;
function loaded(e) {
counter++;
if (counter == 3) {
process();
}
}
async function process() {
let utf8;
let canvas = document.createElement("canvas");
let ctx = canvas.getContext("2d");
canvas.width = images[0].width;
canvas.height = images[0].height;
for (let a = 0; a < images.length; a++) {
ctx.drawImage(images[a], 0, 0);
imageData.push(ctx.getImageData(0, 0, canvas.width, canvas.height).data);
utf8 = new TextEncoder().encode(imageData[a].toString());
hashes.push(await crypto.subtle
.digest("SHA-256", utf8)
.then((hashBuffer) => {
return Array.from(new Uint8Array(hashBuffer)).toString();
}));
}
console.log(hashes[0]);
console.log(hashes[1]);
console.log(hashes[2]);
}
let img;
for (let a = 0; a < sources.length; a++) {
img = new Image();
images.push(img);
img.crossOrigin = 'anonymous';
document.body.appendChild(img);
img.onload = loaded;
img.src = sources[a];
}
returns three unique, completely different hashes.
100,172,184,128,122,59,32,239,211,133,243,51,25,159,237,239,175,140,198,232,133,184,77,224,174,85,38,1,164,52,30,68
88,209,142,171,42,213,152,27,60,14,200,193,162,134,50,183,110,70,166,231,237,163,215,129,184,249,106,41,16,147,151,97
72,2,137,13,168,131,212,29,170,19,57,24,39,91,164,32,38,2,170,231,124,72,78,64,168,135,84,1,108,11,161,216
As you've surely guessed by now, using hashes for comparing two images visually isn't the way to go. What you could do instead is compare image A's color at x, y with image B's at the same position and sum up the differences. If the total difference is within a certain threshold, the images should be considered equal.
To do this we convert the RGB colors to the HSL color model, as it's better suited for a 'human' color comparison.
let sources = ['https://i.imgur.com/M0K21iS.jpg', 'https://i.imgur.com/uNbsNAd.jpg', 'https://i.imgur.com/QdqhGb9.jpg'];
let images = [];
let imageData = [];
let hashes = [];
let counter = 0;
function loaded(e) {
counter++;
if (counter == 3) {
process();
}
}
async function process() {
let canvas = document.createElement("canvas");
let ctx = canvas.getContext("2d");
canvas.width = images[0].width;
canvas.height = images[0].height;
for (let a = 0; a < images.length; a++) {
ctx.drawImage(images[a], 0, 0);
imageData.push(ctx.getImageData(0, 0, canvas.width, canvas.height).data);
}
compare(imageData[0], imageData[1]);
compare(imageData[0], imageData[2]);
}
function compare(imgDataA, imgDataB) {
let hslA, hslB, avgH, avgS, avgL;
let differences = 0;
for (let a = 0; a < imgDataA.length; a += 4) {
hslA = rgbToHsl(imgDataA[a], imgDataA[a + 1], imgDataA[a + 2]);
hslB = rgbToHsl(imgDataB[a], imgDataB[a + 1], imgDataB[a + 2]);
avgH = (hslA[0] + hslB[0]) / 2;
avgS = (hslA[1] + hslB[1]) / 2;
avgL = (hslA[2] + hslB[2]) / 2;
differences += (Math.abs(hslA[0] - avgH) + Math.abs(hslA[1] - avgS) + Math.abs(hslA[2] - avgL)) / 3;
}
console.log(differences / (imgDataA.length / 4));
}
let img;
for (let a = 0; a < sources.length; a++) {
img = new Image();
images.push(img);
img.crossOrigin = 'anonymous';
document.body.appendChild(img);
img.onload = loaded;
img.src = sources[a];
}
// taken from: https://gist.github.com/mjackson/5311256#file-color-conversion-algorithms-js
function rgbToHsl(r, g, b) {
r /= 255, g /= 255, b /= 255;
var max = Math.max(r, g, b),
min = Math.min(r, g, b);
var h, s, l = (max + min) / 2;
if (max == min) {
h = s = 0;
} else {
var d = max - min;
s = l > 0.5 ? d / (2 - max - min) : d / (max + min);
switch (max) {
case r:
h = (g - b) / d + (g < b ? 6 : 0);
break;
case g:
h = (b - r) / d + 2;
break;
case b:
h = (r - g) / d + 4;
break;
}
h /= 6;
}
return [h, s, l];
}
As a side note: the rgbToHsl() function above was taken from here. If you run the example you get a difference of 0.012553120747668494 between the first and the second image, and 0.02681219030137108 between the first and the third. So one could determine that two images are equal if their difference is less than or equal to, say, 0.018.
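To turn that observation into a reusable check, one could write something like the following sketch; it assumes compare() is changed to return its averaged difference instead of logging it, and 0.018 is just the illustrative cutoff from above:
function imagesLookEqual(imgDataA, imgDataB, threshold = 0.018) {
  // compare() is assumed to return differences / (imgDataA.length / 4)
  return compare(imgDataA, imgDataB) <= threshold;
}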
I am following this example from Satya Mallick
I hosted a test here https://icollect.money/opencv_align#
Problem: the findHomography() succeeds but the warpPerspective() fails with an 'unhandled exception'
I suspect that the homography is wrong, as it looks like it's an empty array:
h: Mat {$$: {…}}
cols: 0
data: Uint8Array(0)
data8S: Int8Array(0)
data16S: Int16Array(0)
data16U: Uint16Array(0)
data32F: Float32Array(0)
data64F: Float64Array(0)
matSize: Array(0)
rows: 0
I included the C++ code from the referenced article (above) as inline comments in the JavaScript code:
function Align_img() {
//im2 is the original reference image we are trying to align to
let im2 = cv.imread(image_A_element);
//im1 is the image we are trying to line up correctly
let im1 = cv.imread(image_B_element);
//17 Convert images to grayscale
//18 Mat im1Gray, im2Gray;
//19 cvtColor(im1, im1Gray, CV_BGR2GRAY);
//20 cvtColor(im2, im2Gray, CV_BGR2GRAY);
let im1Gray = new cv.Mat();
let im2Gray = new cv.Mat();
cv.cvtColor(im1, im1Gray, cv.COLOR_BGRA2GRAY);
cv.cvtColor(im2, im2Gray, cv.COLOR_BGRA2GRAY);
//22 Variables to store keypoints and descriptors
//23 std::vector<KeyPoint> keypoints1, keypoints2;
//24 Mat descriptors1, descriptors2;
let keypoints1 = new cv.KeyPointVector();
let keypoints2 = new cv.KeyPointVector();
let descriptors1 = new cv.Mat();
let descriptors2 = new cv.Mat();
//26 Detect ORB features and compute descriptors.
//27 Ptr<Feature2D> orb = ORB::create(MAX_FEATURES);
//28 orb->detectAndCompute(im1Gray, Mat(), keypoints1, descriptors1);
//29 orb->detectAndCompute(im2Gray, Mat(), keypoints2, descriptors2);
var orb = new cv.ORB(5000);
orb.detectAndCompute(im1Gray, new cv.Mat(), keypoints1, descriptors1);
orb.detectAndCompute(im2Gray, new cv.Mat(), keypoints2, descriptors2);
//31 Match features.
//32 std::vector<DMatch> matches;
//33 Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("BruteForce-Hamming");
//34 matcher->match(descriptors1, descriptors2, matches, Mat());
let bf = new cv.BFMatcher(cv.NORM_HAMMING, true);
let matches = new cv.DMatchVector();
bf.match(descriptors1, descriptors2, matches);
//36 Sort matches by score
//37 std::sort(matches.begin(), matches.end());
//39 Remove not so good matches
//40 const int numGoodMatches = matches.size() * GOOD_MATCH_PERCENT;
//41 matches.erase(matches.begin()+numGoodMatches, matches.end());
let good_matches = new cv.DMatchVector();
for (let i = 0; i < matches.size(); i++) {
if (matches.get(i).distance < 30) {
good_matches.push_back(matches.get(i));
}
}
//44 Draw top matches
//45 Mat imMatches;
//46 drawMatches(im1, keypoints1, im2, keypoints2, matches, imMatches);
//47 imwrite("matches.jpg", imMatches);
let imMatches = new cv.Mat();
let color = new cv.Scalar(0,255,0, 255);
cv.drawMatches(im1, keypoints1, im2, keypoints2, good_matches, imMatches, color);
cv.imshow('imageCompareMatches', imMatches);
//50 Extract location of good matches
//51 std::vector<Point2f> points1, points2;
//53 for( size_t i = 0; i < matches.size(); i++ )
//54 {
//55 points1.push_back( keypoints1[ matches[i].queryIdx ].pt );
//56 points2.push_back( keypoints2[ matches[i].trainIdx ].pt );
//57 }
let points1 = [];
let points2 = [];
for (let i = 0; i < good_matches.size(); i++) {
points1.push(keypoints1.get(good_matches.get(i).queryIdx ).pt );
points2.push(keypoints2.get(good_matches.get(i).trainIdx ).pt );
}
//59 Find homography
//60 h = findHomography( points1, points2, RANSAC );
//The first 2 arguments to findHomography need to be Mats, so points1 and points2 must be converted (e.g. via cv.matFromArray)
//reference: https://docs.opencv.org/3.4/d9/d0c/group__calib3d.html#ga4abc2ece9fab9398f2e560d53c8c9780
//*********** the issue seems to be here in how mat1 and mat2 are created *****
let mat1 = cv.matFromArray(points1.length, 2, cv.CV_32F, points1);
let mat2 = cv.matFromArray(points2.length, 2, cv.CV_32F, points2);
let h = cv.findHomography(mat1, mat2, cv.RANSAC);
//62 Use homography to warp image
//63 warpPerspective(im1, im1Reg, h, im2.size());
let image_B_final_result = new cv.Mat();
cv.warpPerspective(im1, image_B_final_result, h, im2.size());
cv.imshow('imageAligned', image_B_final_result);
matches.delete();
bf.delete();
orb.delete();
descriptors1.delete();
descriptors2.delete();
keypoints1.delete();
keypoints2.delete();
im1Gray.delete();
im2Gray.delete();
h.delete();
image_B_final_result.delete();
mat1.delete();
mat2.delete();
}
for (let i = 0; i < good_matches.size(); i++) {
points1.push(keypoints1.get(good_matches.get(i).queryIdx).pt);
points2.push(keypoints2.get(good_matches.get(i).trainIdx).pt);
}
should be the code below:
for (let i = 0; i < good_matches.size(); i++) {
points1.push(keypoints1.get(good_matches.get(i).queryIdx).pt.x);
points1.push(keypoints1.get(good_matches.get(i).queryIdx).pt.y);
points2.push(keypoints2.get(good_matches.get(i).trainIdx).pt.x);
points2.push(keypoints2.get(good_matches.get(i).trainIdx).pt.y);
}
I push the interleaved x/y data into the points arrays, and then it works!
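Note that once the arrays hold interleaved x/y values, each point contributes two entries, so the cv.matFromArray calls from the question presumably need points1.length / 2 rows rather than points1.length. A sketch:
// points1/points2 now look like [x0, y0, x1, y1, ...]
let mat1 = cv.matFromArray(points1.length / 2, 2, cv.CV_32F, points1);
let mat2 = cv.matFromArray(points2.length / 2, 2, cv.CV_32F, points2);
let h = cv.findHomography(mat1, mat2, cv.RANSAC);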
I'm trying to take a JPG photo, make it grayscale, and resize it to 48x48 for my model.
I tried this, but it doesn't work:
let image = require("../assets/angry.jpg");
const imageAssetPath = Image.resolveAssetSource(image);
const response = await fetch(imageAssetPath.uri, {}, { isBinary: true });
const imageData = await response.arrayBuffer();
let imageTensor = imageToTensor(imageData);
const imageResize = tf.image.resizeBilinear(imageTensor, [48, 48], true);
const imageToTensor = (rawData: ArrayBuffer) => {
const { width, height, data } = jpeg.decode(rawData, true);
const buffer = new Uint8Array(width * height * 3);
let offset = 0;
for (let i = 0; i < buffer.length; i += 3) {
buffer[i] = data[offset]; //red
buffer[i + 1] = data[offset + 1]; //green
buffer[i + 2] = data[offset + 2]; //blue
offset += 4; //skips Alpha value
}
return tf.tensor4d(buffer, [1, height, width, 3]);
};
The image is resized to 48x48, but how do I make it grayscale? In the imageToTensor function I tried changing the shape to [height, width, 1], but it only messed up the picture. Any suggestions?
I didn't know there were so many methods missing!
You can look at the source of rgb_to_grayscale in Python,
and you'll see how they convert RGB images to grayscale.
I tried to implement it the same way in JavaScript, but there is no function called tf.tensordot.
Here's how you can do it:
let image = tf.ones([224, 224, 3]);
const rgbWeights = [0.2989, 0.5870, 0.1140];
image = tf.mul(image, rgbWeights);   // weight each channel
image = tf.sum(image, -1);           // collapse the channel axis
image = tf.expandDims(image, -1);    // back to [224, 224, 1]
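Applied to the 48x48 pipeline from the question, that might look like the following sketch (imageTensor is the [1, height, width, 3] tensor from imageToTensor(); the keepDims flag of tf.sum preserves the channel axis):
const rgbWeights = tf.tensor1d([0.2989, 0.5870, 0.1140]);
// weight the channels, then collapse them: [1, h, w, 3] -> [1, h, w, 1]
const gray = tf.sum(tf.mul(imageTensor, rgbWeights), -1, true);
// resize the single-channel image for the model: [1, 48, 48, 1]
const imageResize = tf.image.resizeBilinear(gray, [48, 48], true);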
I'm trying to define a racecourse with a dynamic line that the user can draw on a canvas element. When the line has been drawn, the program should add sidelines for it, as shown in the picture below:
I have managed to mimic the idea already by using line normals, but I can't get it done correctly. At the moment I put a point at the midway of each line segment, in the direction of the line normals, and draw outlines using those points. While the generated line is relatively smooth on large turns, tight turns tend to produce loops.
As seen in the image below:
Here is the current code that generates points for the side lines above (I'm using the p5.js JavaScript library):
var sketch = function (p) {
with(p) {
let handpoints;
let walkhandpoints;
let collect;
let parsepath;
let shapes;
let step;
let tmp;
let dorender;
let lineoffset;
p.setup = function() {
createCanvas(600, 600);
handpoints = [];
walkhandpoints = 10;
collect = true;
parsepath = false;
shapes = [];
step = 2;
tmp = [];
dorender = true;
lineoffset = 15;
};
p.draw = function() {
if(dorender) {
background(220);
update();
for (let shape of shapes) {
shape.show();
}
}
};
function update() {
if (mouseIsPressed) {
if (collect) {
let mouse = createVector(mouseX, mouseY);
handpoints.push(mouse);
Shape.drawPath(handpoints);
parsepath = true;
}
} else if (parsepath) {
let tmp1 = Shape.cleanPath(handpoints, step);
let s1 = new Shape(tmp1, 1, 'line', color(175));
shapes.push(s1);
let tmp2 = Line.sidePoints(tmp1, lineoffset);
let s2 = new Shape(tmp2.sideA, 1, 'line', color(175,120,0));
let s3 = new Shape(tmp2.sideB, 1, 'line', color(175,0, 120));
shapes.push(s2);
shapes.push(s3);
handpoints = [];
parsepath = false;
//dorender = false;
}
}
class Shape {
constructor(points, mag, type = 'line', shader = color(200, 0, 100)) {
this.points = points.slice().map(item => item.copy());
this.type = type;
this.mag = mag;
this.shader = shader;
}
static cleanPath(points, step) {
let tmp = [];
let output = [];
for (let i = 1; i < points.length; i++) {
let prev = points[i - 1];
let curr = points[i];
if (!prev.equals(curr)) {
tmp.push(prev.copy())
if (i === points.length - 1) {
tmp.push(curr.copy())
}
}
}
for (let i = 0; i < tmp.length; i++) {
if(i % step === 0) {
output.push(tmp[i]);
}
}
output.push(output[0]);
return output;
}
static drawPath(points, mag = 1, type = 'line', shader = color(175)) {
let s = new Shape(points, mag, type, shader);
s.show();
}
show() {
for (let i = 0; i < this.points.length; i++) {
if (this.type === 'line' && i > 0) {
let prev = this.points[i - 1];
let curr = this.points[i];
strokeWeight(this.mag);
stroke(this.shader);
line(prev.x, prev.y, curr.x, curr.y);
} else if (this.type === 'point') {
noStroke();
fill(this.shader);
ellipse(this.points[i].x, this.points[i].y, this.mag * 2, this.mag * 2);
}
}
}
}
class Line {
static sidePoints(points, lineoffset) {
let sideA = [];
let sideB = [];
for(let i = 1; i < points.length; i++) {
// take consecutive points
let prev = points[i-1];
let curr = points[i];
// calculate normals
let dx = curr.x-prev.x;
let dy = curr.y-prev.y;
let a = createVector(-dy, dx).normalize();
let b = createVector(dy, -dx).normalize();
// calculate midway of the two points
let px = (prev.x+curr.x)/2;
let py = (prev.y+curr.y)/2;
let p = createVector(px,py);
// put created points back along drawed line
a.mult(lineoffset).add(p);
b.mult(lineoffset).add(p);
sideA.push(a);
sideB.push(b);
}
// close paths
if(!sideA[0].equals(sideA[sideA.length-1])) {
sideA.push(sideA[0]);
}
if(!sideB[0].equals(sideB[sideB.length-1])) {
sideB.push(sideB[0]);
}
return {sideA, sideB};
}
}
}
};
let node = document.createElement('div');
window.document.getElementById('p5-container').appendChild(node);
new p5(sketch, node);
body {
background-color:#ffffff;
}
<script src="https://cdnjs.cloudflare.com/ajax/libs/p5.js/1.1.9/p5.js"></script>
<div id="p5-container"></div>
Firstly, I'd like to find a way to place those points at the corresponding corner points of the drawn line, so that when the drawn line has only a few points the outlines retain the drawn shape.
Secondly, is there some good way to reduce points in the areas where there are several of them, to avoid those loops in small corners and other errors in the generated lines?
The idea is to get points for the lines, so it would be easy to detect with line intersection whether the race car's velocity vector crosses them.
Unfortunately I'm not very familiar with math notation, so please try to use an easy-to-understand version if there is some fancy math that would do the job.
In the future, please try to post a minimal example. I was not able to run your code as you posted it.
That being said, one option you could consider is using the strokeWeight() function to draw the path at different widths. Here's an example:
const path = [];
function setup() {
createCanvas(400, 400);
// Add some default points to the path.
path.push(createVector(0, 0));
path.push(createVector(width/4, height/4));
}
function draw() {
background(220);
// Draw the path with a thick gray line.
strokeWeight(50);
stroke(200);
for(let i = 1; i < path.length; i++){
const prevPoint = path[i-1];
const nextPoint = path[i];
line(prevPoint.x, prevPoint.y, nextPoint.x, nextPoint.y);
}
// Draw the path with a thin black line.
strokeWeight(1);
stroke(0);
for(let i = 1; i < path.length; i++){
const prevPoint = path[i-1];
const nextPoint = path[i];
line(prevPoint.x, prevPoint.y, nextPoint.x, nextPoint.y);
}
}
// Add a point to the path when the user clicks.
function mousePressed(){
path.push(createVector(mouseX, mouseY));
}
The trick here is to draw the path in two passes: first you draw the path using a thick line, and then you draw the path again, this time using a thin line.
I'm trying to use opencv.js to find a document in a provided image (detect edges, apply a perspective transform, etc.).
I've got a reasonable set of code that (occasionally) detects the edges of a document and grabs its bounding box. However, I'm struggling with the perspective transform steps. There are some helpers for this (not in JS) here and here.
Unfortunately I'm getting stuck on something simple. I can find the matching Mat that has 4 edges, and displaying it shows it to be accurate. However, I have no idea how to get some simple X/Y info out of that Mat. I thought minMaxLoc() would be a good option, but I keep getting an error when passing in my matching Mat. Any idea why I can draw foundContour and get bounding-box info from it, but I can't call minMaxLoc() on it?
Code:
//<Get Image>
//<Convert to Gray, do GaussianBlur, and do Canny edge detection>
let contours = new cv.MatVector();
cv.findContours(matDestEdged, contours, hierarchy, cv.RETR_LIST, cv.CHAIN_APPROX_SIMPLE);
//<Sort resulting contours by area to get largest>
let foundContour = null;
for (let sortableContour of sortableContours) {
let peri = cv.arcLength(sortableContour.contour, true);
let approx = new cv.Mat();
cv.approxPolyDP(sortableContour.contour, approx, 0.1 * peri, true);
if (approx.rows == 4) {
console.log('found it');
foundContour = approx
break;
}
else {
approx.delete();
}
}
//<Draw foundContour and a bounding box to ensure it's accurate>
//TODO: Do a perspective transform
let result = cv.minMaxLoc(foundContour);
The last line above results in a runtime error (Uncaught (in promise): 6402256 - Exception catching is disabled). I can run minMaxLoc() on other Mat objects.
For anyone else looking to do this in OpenCV.js, what I commented above still seems to be accurate: the contour found can't be used with minMaxLoc(), but the X/Y data can be pulled out of data32S[]. That should be all that's needed to do this perspective transform. Some code is below.
//Find all contours
let contours = new cv.MatVector();
let hierarchy = new cv.Mat();
cv.findContours(matDest, contours, hierarchy, cv.RETR_LIST, cv.CHAIN_APPROX_SIMPLE);
//Get area for all contours so we can find the biggest
let sortableContours: SortableContour[] = [];
for (let i = 0; i < contours.size(); i++) {
let cnt = contours.get(i);
let area = cv.contourArea(cnt, false);
let perim = cv.arcLength(cnt, false);
sortableContours.push(new SortableContour({ areaSize: area, perimiterSize: perim, contour: cnt }));
}
//Sort 'em
sortableContours = sortableContours.sort((item1, item2) => { return (item1.areaSize > item2.areaSize) ? -1 : (item1.areaSize < item2.areaSize) ? 1 : 0; }).slice(0, 5);
//Ensure the top area contour has 4 corners (NOTE: This is not a perfect science and likely needs more attention)
let approx = new cv.Mat();
cv.approxPolyDP(sortableContours[0].contour, approx, .05 * sortableContours[0].perimiterSize, true);
if (approx.rows == 4) {
console.log('Found a 4-corner approx');
foundContour = approx;
}
else{
console.log('No 4-corner large contour!');
return;
}
//Find the corners
//foundContour has 2 channels (seemingly x/y), a depth of 4, and a type of 12, which seems to indicate a CV_32S "type", so the valid data is in data32S
let corner1 = new cv.Point(foundContour.data32S[0], foundContour.data32S[1]);
let corner2 = new cv.Point(foundContour.data32S[2], foundContour.data32S[3]);
let corner3 = new cv.Point(foundContour.data32S[4], foundContour.data32S[5]);
let corner4 = new cv.Point(foundContour.data32S[6], foundContour.data32S[7]);
//Order the corners
let cornerArray = [{ corner: corner1 }, { corner: corner2 }, { corner: corner3 }, { corner: corner4 }];
//Sort by Y position (to get top-down)
cornerArray.sort((item1, item2) => { return (item1.corner.y < item2.corner.y) ? -1 : (item1.corner.y > item2.corner.y) ? 1 : 0; });
//Determine left/right based on x position of top and bottom 2
let tl = cornerArray[0].corner.x < cornerArray[1].corner.x ? cornerArray[0] : cornerArray[1];
let tr = cornerArray[0].corner.x > cornerArray[1].corner.x ? cornerArray[0] : cornerArray[1];
let bl = cornerArray[2].corner.x < cornerArray[3].corner.x ? cornerArray[2] : cornerArray[3];
let br = cornerArray[2].corner.x > cornerArray[3].corner.x ? cornerArray[2] : cornerArray[3];
//Calculate the max width/height
let widthBottom = Math.hypot(br.corner.x - bl.corner.x, br.corner.y - bl.corner.y);
let widthTop = Math.hypot(tr.corner.x - tl.corner.x, tr.corner.y - tl.corner.y);
let theWidth = (widthBottom > widthTop) ? widthBottom : widthTop;
let heightRight = Math.hypot(tr.corner.x - br.corner.x, tr.corner.y - br.corner.y);
let heightLeft = Math.hypot(tl.corner.x - bl.corner.x, tl.corner.y - bl.corner.y);
let theHeight = (heightRight > heightLeft) ? heightRight : heightLeft;
//Transform!
let finalDestCoords = cv.matFromArray(4, 1, cv.CV_32FC2, [0, 0, theWidth - 1, 0, theWidth - 1, theHeight - 1, 0, theHeight - 1]);
let srcCoords = cv.matFromArray(4, 1, cv.CV_32FC2, [tl.corner.x, tl.corner.y, tr.corner.x, tr.corner.y, br.corner.x, br.corner.y, bl.corner.x, bl.corner.y]);
let dsize = new cv.Size(theWidth, theHeight);
let M = cv.getPerspectiveTransform(srcCoords, finalDestCoords)
cv.warpPerspective(matDestTransformed, finalDest, M, dsize, cv.INTER_LINEAR, cv.BORDER_CONSTANT, new cv.Scalar());
For reference, here is the class definition I was using for SortableContour. The code above is meant as a guide, not as something that can run on its own, however.
export class SortableContour {
perimiterSize: number;
areaSize: number;
contour: any;
constructor(fields: Partial<SortableContour>) {
Object.assign(this, fields);
}
}
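One more caveat when adapting this guide: OpenCV.js Mats live in WASM memory and are not garbage-collected, so the intermediates should be freed once the warped result has been used. A sketch, assuming the variable names from the code above:
// free the intermediates after cv.warpPerspective has produced its output
contours.delete();
hierarchy.delete();
approx.delete();
srcCoords.delete();
finalDestCoords.delete();
M.delete();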