I am following this example from Satya Mallick
I hosted a test here https://icollect.money/opencv_align#
Problem: the findHomography() succeeds but the warpPerspective() fails with an 'unhandled exception'
I suspect that the homography is wrong as it looks like its an empty array:
h: Mat {$$: {…}}
cols: 0
data: Uint8Array(0)
data8S: Int8Array(0)
data16S: Int16Array(0)
data16U: Uint16Array(0)
data32F: Float32Array(0)
data64F: Float64Array(0)
matSize: Array(0)
rows: 0
I included the cpp code from the referenced article (above) inline with the javascript code:
function Align_img() {
  // Align image B onto reference image A using ORB features + homography.
  // im2 is the original reference image we are trying to align to.
  let im2 = cv.imread(image_A_element);
  // im1 is the image we are trying to line up correctly.
  let im1 = cv.imread(image_B_element);

  // Convert both images to grayscale (cv.imread yields RGBA, hence BGRA2GRAY).
  let im1Gray = new cv.Mat();
  let im2Gray = new cv.Mat();
  cv.cvtColor(im1, im1Gray, cv.COLOR_BGRA2GRAY);
  cv.cvtColor(im2, im2Gray, cv.COLOR_BGRA2GRAY);

  // Detect ORB keypoints and compute descriptors for both images.
  let keypoints1 = new cv.KeyPointVector();
  let keypoints2 = new cv.KeyPointVector();
  let descriptors1 = new cv.Mat();
  let descriptors2 = new cv.Mat();
  let orb = new cv.ORB(5000);
  orb.detectAndCompute(im1Gray, new cv.Mat(), keypoints1, descriptors1);
  orb.detectAndCompute(im2Gray, new cv.Mat(), keypoints2, descriptors2);

  // Match features with brute-force Hamming matching (crossCheck = true).
  let bf = new cv.BFMatcher(cv.NORM_HAMMING, true);
  let matches = new cv.DMatchVector();
  bf.match(descriptors1, descriptors2, matches);

  // Keep only strong matches (small Hamming distance).
  let good_matches = new cv.DMatchVector();
  for (let i = 0; i < matches.size(); i++) {
    if (matches.get(i).distance < 30) {
      good_matches.push_back(matches.get(i));
    }
  }

  // Draw the retained matches for visual inspection.
  let imMatches = new cv.Mat();
  let color = new cv.Scalar(0, 255, 0, 255);
  cv.drawMatches(im1, keypoints1, im2, keypoints2, good_matches, imMatches, color);
  cv.imshow('imageCompareMatches', imMatches);

  // Extract matched point coordinates as FLAT [x0, y0, x1, y1, ...] arrays.
  // BUG FIX: matFromArray needs plain numbers. The original pushed the
  // Point objects themselves, which produced a garbage/empty Mat, so
  // findHomography returned an empty homography (rows/cols 0) and
  // warpPerspective then failed with an unhandled exception.
  let points1 = [];
  let points2 = [];
  for (let i = 0; i < good_matches.size(); i++) {
    let m = good_matches.get(i);
    points1.push(keypoints1.get(m.queryIdx).pt.x);
    points1.push(keypoints1.get(m.queryIdx).pt.y);
    points2.push(keypoints2.get(m.trainIdx).pt.x);
    points2.push(keypoints2.get(m.trainIdx).pt.y);
  }

  // Build Nx1 two-channel float Mats — the layout findHomography expects —
  // and compute the homography with RANSAC.
  let mat1 = cv.matFromArray(points1.length / 2, 1, cv.CV_32FC2, points1);
  let mat2 = cv.matFromArray(points2.length / 2, 1, cv.CV_32FC2, points2);
  let h = cv.findHomography(mat1, mat2, cv.RANSAC);

  // Use the homography to warp image B into image A's frame.
  let image_B_final_result = new cv.Mat();
  cv.warpPerspective(im1, image_B_final_result, h, im2.size());
  cv.imshow('imageAligned', image_B_final_result);

  // Free all emscripten-side Mats/vectors to avoid leaking WASM memory
  // (the original also leaked imMatches, im1 and im2).
  matches.delete();
  bf.delete();
  orb.delete();
  descriptors1.delete();
  descriptors2.delete();
  keypoints1.delete();
  keypoints2.delete();
  im1Gray.delete();
  im2Gray.delete();
  h.delete();
  image_B_final_result.delete();
  mat1.delete();
  mat2.delete();
  imMatches.delete();
  im1.delete();
  im2.delete();
}
for (let i = 0; i < good_matches.size(); i++) {
points1.push(keypoints1.get(good_matches.get(i).queryIdx).pt);
points2.push(keypoints2.get(good_matches.get(i).trainIdx).pt);
}
should be the code below:
for (let i = 0; i < good_matches.size(); i++) {
points1.push(keypoints1.get(good_matches.get(i).queryIdx).pt.x);
points1.push(keypoints1.get(good_matches.get(i).queryIdx).pt.y);
points2.push(keypoints2.get(good_matches.get(i).trainIdx).pt.x);
points2.push(keypoints2.get(good_matches.get(i).trainIdx).pt.y);
}
I pushed the coordinate values into points[] interleaved (x, then y), and then it works!
Related
I'm trying to define racecourse with a dynamic line that user can draw on a canvas element. So when line has been drawn, program should add sidelines for it as shown in the picture below:
I have managed to approximate the idea already by using line normals, but I can't get it working correctly. At the moment I place a point at the midpoint of each segment, offset in the direction of the line normal, and draw the outlines through those points. While the generated line is relatively smooth on large turns, tight turns tend to produce loops.
As seen in image below:
Here is current code that generates points for side lines above (I'm using p5.js JavaScript library):
var sketch = function (p) {
with(p) {
let handpoints;
let walkhandpoints;
let collect;
let parsepath;
let shapse;
let step;
let tmp;
let dorender;
let lineoffset;
p.setup = function() {
createCanvas(600, 600);
handpoints = [];
walkhandpoints = 10;
collect = true;
parsepath = false;
shapes = [];
step = 2;
tmp = [];
dorender = true;
lineoffset = 15;
};
p.draw = function() {
if(dorender) {
background(220);
update();
for (let shape of shapes) {
shape.show();
}
}
};
function update() {
if (mouseIsPressed) {
if (collect) {
let mouse = createVector(mouseX, mouseY);
handpoints.push(mouse);
Shape.drawPath(handpoints);
parsepath = true;
}
} else if (parsepath) {
let tmp1 = Shape.cleanPath(handpoints, step);
let s1 = new Shape(tmp1, 1, 'line', color(175));
shapes.push(s1);
let tmp2 = Line.sidePoints(tmp1, lineoffset);
let s2 = new Shape(tmp2.sideA, 1, 'line', color(175,120,0));
let s3 = new Shape(tmp2.sideB, 1, 'line', color(175,0, 120));
shapes.push(s2);
shapes.push(s3);
handpoints = [];
parsepath = false;
//dorender = false;
}
}
class Shape {
constructor(points, mag, type = 'line', shader = color(200, 0, 100)) {
this.points = points.slice().map(item => item.copy());
this.type = type;
this.mag = mag;
this.shader = shader;
}
static cleanPath(points, step) {
let tmp = [];
let output = [];
for (let i = 1; i < points.length; i++) {
let prev = points[i - 1];
let curr = points[i];
if (!prev.equals(curr)) {
tmp.push(prev.copy())
if (i === points.length - 1) {
tmp.push(curr.copy())
}
}
}
for (let i = 0; i < tmp.length; i++) {
if(i % step === 0) {
output.push(tmp[i]);
}
}
output.push(output[0]);
return output;
}
static drawPath(points, mag = 1, type = 'line', shader = color(175)) {
let s = new Shape(points, mag, type, shader);
s.show();
}
show() {
for (let i = 0; i < this.points.length; i++) {
if (this.type === 'line' && i > 0) {
let prev = this.points[i - 1];
let curr = this.points[i];
strokeWeight(this.mag);
stroke(this.shader);
line(prev.x, prev.y, curr.x, curr.y);
} else if (this.type === 'point') {
noStroke();
fill(this.shader);
ellipse(this.points[i].x, this.points[i].y, this.mag * 2, this.mag * 2);
}
}
}
}
class Line {
static sidePoints(points, lineoffset) {
let sideA = [];
let sideB = [];
for(let i = 1; i < points.length; i++) {
// take consecutive points
let prev = points[i-1];
let curr = points[i];
// calculate normals
let dx = curr.x-prev.x;
let dy = curr.y-prev.y;
let a = createVector(-dy, dx).normalize();
let b = createVector(dy, -dx).normalize();
// calculate midway of the two points
let px = (prev.x+curr.x)/2;
let py = (prev.y+curr.y)/2;
let p = createVector(px,py);
// put created points back along drawed line
a.mult(lineoffset).add(p);
b.mult(lineoffset).add(p);
sideA.push(a);
sideB.push(b);
}
// close paths
if(!sideA[0].equals(sideA[sideA.length-1])) {
sideA.push(sideA[0]);
}
if(!sideB[0].equals(sideB[sideB.length-1])) {
sideB.push(sideB[0]);
}
return {sideA, sideB};
}
}
}
};
// Mount the sketch into a fresh <div> inside the #p5-container element.
const container = window.document.getElementById('p5-container');
let node = document.createElement('div');
container.appendChild(node);
new p5(sketch, node);
/* Plain white page background behind the p5 canvas. */
body {
background-color:#ffffff;
}
<!-- Load p5.js from the CDN and provide the container the sketch mounts into. -->
<script src="https://cdnjs.cloudflare.com/ajax/libs/p5.js/1.1.9/p5.js"></script>
<div id="p5-container"></div>
Firstly I'd like to find a way to draw those points in the corresponding corner points of the drawn line so when drawn line has only few points the outlines would retain the drawn shape.
Secondly is there some good way to reduce points on the areas where there are several of them to reduce those loops in small corners and other type errors in generated lines?
Idea is to get points for lines, so it would be easy to detect with line intersection if race car velocity vector crosses it.
Unfortunately I'm not very familiar with math notations so please try to use easy to understand version of them if there is some fancy math that would do the job.
In the future, please try to post a minimal example. I was not able to run your code as you posted it.
That being said, one option you could consider is using the strokeWeight() function to draw the path at different widths. Here's an example:
const path = [];

function setup() {
  createCanvas(400, 400);
  // Add some default points to the path.
  path.push(createVector(0, 0));
  path.push(createVector(width / 4, height / 4));
}

function draw() {
  background(220);
  // Draw the path twice: first as a thick gray band, then as a thin black
  // center line on top — this fakes a road with an outline.
  renderPath(50, 200);
  renderPath(1, 0);
}

// Draws the whole path as connected segments with the given stroke weight
// and grayscale shade. Extracted to remove the duplicated loop the
// original had in draw().
function renderPath(weight, shade) {
  strokeWeight(weight);
  stroke(shade);
  for (let i = 1; i < path.length; i++) {
    const prevPoint = path[i - 1];
    const nextPoint = path[i];
    line(prevPoint.x, prevPoint.y, nextPoint.x, nextPoint.y);
  }
}

// Add a point to the path when the user clicks.
function mousePressed() {
  path.push(createVector(mouseX, mouseY));
}
The trick here is to draw the path in two passes. First you draw the path using a thick line, and then you draw the path again, this time using a thin line.
I have an array and I am grabbing 3 random locations out of it. The third location cannot be a restaurant. This will make sense when you see the code.
What I would like to do is make sure each location is at least 1 mile away from the previous location.
CODE:
Here is my existing code. Currently it grabs three locations but they could be really close to each other.
// Fisher-Yates shuffle of the locations array `a` (in place).
for (let i = a.length - 1; i > 0; i--) {
  const j = Math.floor(Math.random() * (i + 1));
  const swap = a[i];
  a[i] = a[j];
  a[j] = swap;
}
// The third stop is simply whatever landed first after the shuffle.
let third = a[0];
// The other two stops must not be restaurants; drop the first candidate.
const nonRestaurants = a.filter((l) => !l.restaurant);
let [first, second] = nonRestaurants.slice(1);
let selectedLocations = [first, second, third];
// Great-circle (haversine) distance between two lat/lng points in degrees.
// Returns the distance in miles, rounded to 2 decimal places.
function calculateDistance(lat1, lon1, lat2, lon2) {
  const Rm = 3961; // Earth radius in miles
  lat1 = deg2rad(lat1);
  lon1 = deg2rad(lon1);
  lat2 = deg2rad(lat2);
  lon2 = deg2rad(lon2);
  const dlat = lat2 - lat1;
  const dlon = lon2 - lon1;
  // Haversine formula.
  const a = Math.pow(Math.sin(dlat / 2), 2) +
    Math.cos(lat1) * Math.cos(lat2) * Math.pow(Math.sin(dlon / 2), 2);
  const c = 2 * Math.atan2(Math.sqrt(a), Math.sqrt(1 - a)); // central angle in radians
  // NOTE: the km (R = 6373) and feet conversions in the original were
  // computed but never used, so they have been removed as dead code.
  return round(c * Rm);
}
// Convert an angle in degrees to radians.
function deg2rad(deg) {
  return deg * Math.PI / 180;
}
// Round a number to two decimal places.
function round(x) {
  const scaled = Math.round(x * 100);
  return scaled / 100;
}
Remember the (third) location is grabbed first. Once I have the third location I would like to go through the array until I find another location at least 1 mile away from the third location.
This new location will become the (second) location and then once I have that location (second) I need to go through the array again and find one at least 1 mile away from it. This will be the final (first) location.
UPDATE:
Here is something along the lines of what I am talking about but I know there has to be a cleaner way of writing it
// Fisher-Yates shuffle of the locations array `a` (in place).
for (let i = a.length - 1; i > 0; i--) {
const j = Math.floor(Math.random() * (i + 1));
[a[i], a[j]] = [a[j], a[i]];
}
// We get our third location.
// NOTE(review): `third` may itself be a restaurant; the requirement only
// forbids restaurants for the other two picks — confirm that is intended.
let third = a[0];
console.log(third)
// Build a new array removing the third location and
// any other locations that is a restaurant
// NOTE(review): slice(1) drops the FIRST non-restaurant entry, which is
// only equal to `third` when `third` happens not to be a restaurant — verify.
let newarray = a.filter(l => !l.restaurant).slice(1);
// filter our new array based on the location being at
// least 1 mile away from the third location
let second = newarray.filter(function (e) {
return calculateDistance(
third.geolocation.lat,
third.geolocation.lng,
e.geolocation.lat,
e.geolocation.lng
) >= 1
})
// We now have our second location at second[0]; note `second` itself is
// the whole filtered array, not a single location.
console.log(second[0]);
// build a new array removing the second location
let thirdarray = second.slice(1);
// filter our new array based on the location being at
// least 1 mile away from the second location
let first = thirdarray.filter(function (e) {
return calculateDistance(
second[0].geolocation.lat,
second[0].geolocation.lng,
e.geolocation.lat,
e.geolocation.lng
) >= 1
})
// we now have our first location
console.log(first[0]);
Based on the link I posted in comments, I've included a tiny library to accommodate your needs:
//<![CDATA[
// Tiny helper library: AJAX (get/post), DOM shorthands, haversine distance
// utilities, and a city-distance sorter. All helpers are assigned to the
// pre-declared globals below from inside the load handler.
let get, post, doc, htm, bod, nav, mobile, M, I, S, Q, rad, distance, withinOneMile, sortCityDists; // for use on other loads
addEventListener('load', function(){
// GET `url` and pass the parsed JSON response to `success`
// (called with `context` as `this` when provided).
get = (url, success, context)=>{
const x = new XMLHttpRequest;
const c = context || this;
x.open('GET', url);
x.onload = ()=>{
if(success)success.call(c, JSON.parse(x.responseText));
}
x.send();
}
// POST `send` (FormData, or a plain object serialized as form-urlencoded)
// to `url`; the parsed JSON response goes to `success`. Throws unless
// `send` is a non-array Object.
post = (url, send, success, context)=>{
const x = new XMLHttpRequest;
const c = context || this;
x.open('POST', url);
x.onload = ()=>{
if(success)success.call(c, JSON.parse(x.responseText));
}
if(typeof send === 'object' && send && !(send instanceof Array)){
if(typeof FormData !== 'undefined' && send instanceof FormData){
x.send(send);
}
else{
// Plain object: serialize to application/x-www-form-urlencoded,
// JSON-encoding any nested object values.
let s, r = [];
for(let p in send){
s = send[p];
if(typeof s === 'object')s = JSON.stringify(s);
r.push(encodeURIComponent(p)+'='+encodeURIComponent(s));
}
x.setRequestHeader('Content-type', 'application/x-www-form-urlencoded'); x.send(r.join('&'));
}
}
else{
throw new Error('send argument must be an Object');
}
return x;
}
// DOM shorthands: M = createElement, I = getElementById.
doc = document; htm = doc.documentElement; bod = doc.body; nav = navigator; M = tag=>doc.createElement(tag); I = id=>doc.getElementById(id);
mobile = nav.userAgent.match(/Mobi/i) ? true : false;
// S/Q = querySelector / querySelectorAll, optionally scoped to `within`.
S = (selector, within)=>{
const w = within || doc;
return w.querySelector(selector);
}
Q = (selector, within)=>{
const w = within || doc;
return w.querySelectorAll(selector);
}
// Degrees -> radians.
rad = n=>n*Math.PI/180;
// Haversine great-circle distance in METERS between two points given as
// [lat, lng] arrays or {lat, lng} / {lat, lon} objects.
distance = (latLng, toLatLng)=>{
let ll = latLng, lL = toLatLng;
if(ll instanceof Array){
ll = {lat:ll[0], lng:ll[1]};
}
if(lL instanceof Array){
lL = {lat:lL[0], lng:lL[1]};
}
const lng1 = ll.lng === undefined ? +ll.lon : +ll.lng;
const lng2 = lL.lng === undefined ? +lL.lon : +lL.lng;
const m = 6371e3, lat1 = +ll.lat, lat2 = +lL.lat, lat1R = rad(lat1);
const lat2R = rad(lat2), latD = rad(lat2-lat1), lngD = rad(lng2-lng1);
const n = Math.pow(Math.sin(latD/2), 2)+Math.cos(lat1R)*Math.cos(lat2R)*Math.pow(Math.sin(lngD/2), 2);
return m*2*Math.atan2(Math.sqrt(n), Math.sqrt(1-n));
}
// True when the two points are at most one mile apart
// (meters * 3.2808 ft/m / 5280 ft/mi = miles).
withinOneMile = (latLng, toLatLng)=>{
if(distance(latLng, toLatLng)*3.2808/5280 <= 1){
return true;
}
else{
return false;
}
}
// Returns [{city, meters}, ...] sorted nearest-first, measured from the
// city at `originIndex` (which is excluded from the result).
sortCityDists = (citiesArrayOfObjs, originIndex = 0)=>{
const cities = citiesArrayOfObjs.slice(), baseLatLng = cities.splice(originIndex, 1)[0].latLng, dists = [];
cities.forEach(o=>{
dists.push({city:o.city, meters:distance(baseLatLng, o.latLng)});
});
dists.sort((a, b)=>a.meters-b.meters);
return dists;
}
// you can put the below on another page - besides the end load and beyond
// Demo: distances from Seattle, farthest first, in meters then miles.
const cities = [{city:'Seattle', latLng:[47.6038321, -122.3300624]}, {city:'New York', latLng:[40.7127281, -74.0060152]}, {city:'Chicago', latLng:[41.8755616, -87.6244212]}, {city:'San Francisco', latLng:[37.7790262, -122.4199061]}];
const meterDists = sortCityDists(cities).reverse(), mileDists = [];
console.log(meterDists);
meterDists.forEach(o=>{
mileDists.push({city:o.city, miles:o.meters*3.2808/5280});
});
console.log(mileDists);
// Demo: walk consecutive location pairs, collecting them while each pair
// is within one mile of each other; stop at the first pair that is not.
const locs = [[47.6038321, -122.3300624], [47.6038321, -122.3500624], [47.6038321, -122.3800624]], res = [];
for(let i=0,n=1,loc1,loc2,l=locs.length; n<l; i++,n++){
loc1 = locs[i]; loc2 = locs[n];
if(withinOneMile(loc1, loc2)){
res.push(loc1, loc2);
}
else{
break;
}
}
console.log(res);
}); // end load
//]]>
As you can see, you pass an Array of Objects to sortCityDists and the result is an Array of Objects in order by distance from least to greatest, based on the first Element of the passed Array. Format can be seen by looking at the cities Array of Objects. Just .reverse() to flip the Array around.
I'm trying to convert one of the javascript functions into Swift 5 function. But even after so much of extensive search I couldn't find anything on it.
// Packs a telemetry sample into a 32-byte ArrayBuffer laid out as:
//   bytes  0-7  : Float64 timestamp (ts)
//   bytes  8-23 : four Float32 values (x, y, z, sc)
//   byte   24   : UInt8 message type
//   bytes 25-31 : up to 7 ASCII chars of the user id
// Returns null when no authorized user id is available.
function toArrayBuffer(type, ts, x, y, z, sc) {
  let userId = userID.getUserId();
  if (userId == "Unauthor") {
    // No authorized user -> nothing to send.
    return null;
  }
  const buffer = new ArrayBuffer(8 * 4);

  // Offset 0: 64-bit timestamp.
  const float64Bytes = new Float64Array(buffer);
  float64Bytes[0] = ts;

  // Offset 8: four 32-bit floats (indices 2..5 of the Float32 view).
  const bytes = new Float32Array(buffer);
  bytes[2] = x;
  bytes[3] = y;
  bytes[4] = z;
  bytes[5] = sc;

  // Offset 24: message type byte, then the user id characters.
  const uint8Bytes = new Uint8Array(buffer);
  uint8Bytes[24] = type;
  // BUG FIX: the original looped i < 8, writing up to index 32 on a
  // 32-byte buffer; the out-of-range write was silently dropped, so only
  // 7 characters ever fit. Loop to 7 and check the string length
  // explicitly (the old `if (userId.charCodeAt(i))` truthiness test would
  // also have skipped a legitimate NUL character).
  const idLen = Math.min(userId.length, 7);
  for (let i = 0; i < idLen; i++) {
    uint8Bytes[i + 25] = userId.charCodeAt(i);
  }
  return buffer;
}
Basically I tried to build the same function with `var byteArray = [UInt8](stringVal.utf8)`, but UInt8 can only store values up to 255, and I need to store an epoch timestamp, so that doesn't work either. Any help would be appreciated.
I can't see any output, and there is no error either. I am trying to detect a rectangular shape in an image, crop it out, and save it with opencv.js.
onFilePicked() {
let imgElement = document.getElementById('imageSrc');
const files = event.target.files;
imgElement.src = URL.createObjectURL(files[0]);
var app = this
imgElement.onload = function () {
let mat = cv.imread(imgElement)
let dst = new cv.Mat();
cv.cvtColor(mat, mat, cv.COLOR_RGB2GRAY);
// gray = cv.bilateralFilter(gray, 11, 17, 17)
cv.Canny(mat, dst, 30, 200, 3, false);
let contours = new cv.MatVector();
let hierarchy = new cv.Mat();
var transformed = null
cv.findContours(dst, contours, hierarchy, cv.RETR_LIST, cv.CHAIN_APPROX_SIMPLE)
var sortableContours = []
for (let i = 0; i < contours.size(); i++) {
let cnt = contours.get(i);
let area = cv.contourArea(cnt, false);
let perim = cv.arcLength(cnt, false);
sortableContours.push({
areaSize: area,
perimiterSize: perim,
contour: cnt
});
let color = new cv.Scalar(255, 0, 0, 255);
let hierarchy2 = new cv.Mat();
cv.drawContours(mat, contours, -1, (0, 255, 0), 3);
}
cv.imshow('canvasOutput', mat);
let foundContour = null;
for (let sortableContour of sortableContours) {
let peri = cv.arcLength(sortableContour.contour, true);
let approx = new cv.Mat();
cv.approxPolyDP(sortableContour.contour, approx, 0.1 * peri, true);
if (approx.rows == 4) {
foundContour = approx
transformed = app.perspective_transform(mat, foundContour)
break;
} else {
approx.delete();
}
}
let rotate = app.rotate_image(transformed, 90)
cv.imshow('canvasOutput', rotate)
};
},
transform
perspective_transform(image, foundContour) {
let corner1 = new cv.Point(foundContour.data32S[0], foundContour.data32S[1]);
let corner2 = new cv.Point(foundContour.data32S[2], foundContour.data32S[3]);
let corner3 = new cv.Point(foundContour.data32S[4], foundContour.data32S[5]);
let corner4 = new cv.Point(foundContour.data32S[6], foundContour.data32S[7]);
//Order the corners
let cornerArray = [{
corner: corner1
}, {
corner: corner2
}, {
corner: corner3
}, {
corner: corner4
}];
//Sort by Y position (to get top-down)
cornerArray.sort((item1, item2) => {
return (item1.corner.y < item2.corner.y) ? -1 : (item1.corner.y > item2.corner.y) ? 1 : 0;
}).slice(0, 5);
//Determine left/right based on x position of top and bottom 2
let tl = cornerArray[0].corner.x < cornerArray[1].corner.x ? cornerArray[0] : cornerArray[1];
let tr = cornerArray[0].corner.x > cornerArray[1].corner.x ? cornerArray[0] : cornerArray[1];
let bl = cornerArray[2].corner.x < cornerArray[3].corner.x ? cornerArray[2] : cornerArray[3];
let br = cornerArray[2].corner.x > cornerArray[3].corner.x ? cornerArray[2] : cornerArray[3];
//Calculate the max width/height
let widthBottom = Math.hypot(br.corner.x - bl.corner.x, br.corner.y - bl.corner.y);
let widthTop = Math.hypot(tr.corner.x - tl.corner.x, tr.corner.y - tl.corner.y);
let theWidth = (widthBottom > widthTop) ? widthBottom : widthTop;
let heightRight = Math.hypot(tr.corner.x - br.corner.x, tr.corner.y - br.corner.y);
let heightLeft = Math.hypot(tl.corner.x - bl.corner.x, tr.corner.y - bl.corner.y);
let theHeight = (heightRight > heightLeft) ? heightRight : heightLeft;
//Transform!
let finalDestCoords = cv.matFromArray(4, 1, cv.CV_32FC2, [0, 0, theWidth - 1, 0, theWidth - 1, theHeight - 1, 0, theHeight - 1]);
// corners
let srcCoords = cv.matFromArray(4, 1, cv.CV_32FC2, [tl.corner.x, tl.corner.y, tr.corner.x, tr.corner.y, br.corner.x, br.corner.y, bl.corner.x, bl.corner.y]);
let dsize = new cv.Size(theWidth, theHeight);
let M = cv.getPerspectiveTransform(srcCoords, finalDestCoords)
let dst = new cv.Mat();
cv.warpPerspective(image, dst, M, dsize);
return dst
},
rotate image
// Rotates `image` by `angle` degrees about its center and returns a new
// Mat (the caller is responsible for deleting it).
rotate_image(image, angle) {
let dst = new cv.Mat();
// NOTE(review): cv.Size takes (width, height) = (cols, rows); this passes
// (rows, cols), i.e. a swapped canvas. That only lines up for 90-degree
// rotations of centered content — confirm this is intentional.
let dsize = new cv.Size(image.rows, image.cols);
let center = new cv.Point(image.cols / 2, image.rows / 2);
// You can try more different parameters
let M = cv.getRotationMatrix2D(center, angle, 1);
cv.warpAffine(image, dst, M, dsize, cv.INTER_LINEAR, cv.BORDER_CONSTANT, new cv.Scalar());
return dst
},
You are just grabbing the first contour in the entire array of sortableContours that has 4 points, and running the transform on that one. You need to sort them to those with the largest area first.
// Order the contours by area, largest first, and keep at most four of them.
let slicer = Math.min(sortableContours.length, 4);
const byAreaDesc = (a, b) => (a.areaSize < b.areaSize) ? 1 : -1;
let sortedContours = sortableContours.sort(byAreaDesc).slice(0, slicer);
Also, I would recommend removing this line of code from within the for loop as it can be performed once outside the loop and slows the process down quite a bit (runs a few thousand times)
cv.drawContours(mat, contours, -1, (0, 255, 0), 3);
My final note would be that the following line might need to be tweaked from .1 to a smaller number like .02 if you are getting poor results. .1 is more forgiving but .02 is more precise. Alternatively, you can do both, keep all the results in an array, and pick the one with the largest area when you are done for the best of both worlds.
cv.approxPolyDP(sortableContour.contour, approx, 0.1 * peri, true);
Best of both worlds:
// Walk the largest contours and collect a transformed image for every
// contour whose polygon approximation is 4-sided. Two approximation
// tolerances are tried: 0.02 (precise) and 0.1 (forgiving).
let transformedOptions = [];
for (let sortedContour of sortedContours) {
  const perimeter = cv.arcLength(sortedContour.contour, true);
  const precisePoly = new cv.Mat();
  const approxPoly = new cv.Mat();
  cv.approxPolyDP(sortedContour.contour, precisePoly, 0.02 * perimeter, true);
  cv.approxPolyDP(sortedContour.contour, approxPoly, 0.1 * perimeter, true);
  // A 4-row approximation means the contour is rectangle-ish.
  for (const poly of [precisePoly, approxPoly]) {
    if (poly.rows == 4) {
      transformedOptions.push(this.perspectiveTransform(originalImage, poly, imageHeight, imageWidth));
    }
  }
  precisePoly.delete();
  approxPoly.delete();
}
let transformed = this.getLargestTransformation(transformedOptions);
//this could be optimized a bit
// Returns the option Mat with the largest pixel area (rows * cols), or
// null when every option is null / the list is empty.
private getLargestTransformation(options) {
  let transformed = null;
  // BUG FIX: `largestArea` was declared INSIDE the loop, so it reset to 0
  // on every iteration and the LAST non-null option always replaced
  // `transformed` instead of the largest one.
  let largestArea = 0;
  for (const option of options) {
    if (option == null) continue;
    const area = option.rows * option.cols;
    if (transformed == null || area > largestArea) {
      transformed = option;
      largestArea = area;
    }
  }
  return transformed;
}
I'm trying to use opencv.js to find a document in a provided image (detect edges, apply perspective transform, etc.).
I've got a reasonable set of code that (occasionally) detects edges of a document and grabs the bounding box for that. However, I'm struggling to do the perspective transform steps. There are some helpers for this (not in JS) here and here.
Unfortunately I'm getting stuck on something simple. I can find the matching Mat that has 4 edges. Displaying that shows it to be accurate. However, I have no idea how to get some simple X/Y info out of that Mat. I thought minMaxLoc() would be a good option, but I keep getting an error passing in my matching Mat. Any idea why I can draw foundContour and get bounding box info from it, but I can't call minMaxLoc on it?
Code:
//<Get Image>
//<Convert to Gray, do GaussianBlur, and do Canny edge detection>
// Find every contour in the edge image.
let contours = new cv.MatVector();
cv.findContours(matDestEdged, contours, hierarchy, cv.RETR_LIST, cv.CHAIN_APPROX_SIMPLE);
//<Sort resulting contours by area to get largest>
// Take the first contour whose polygon approximation has exactly 4
// corners — assumed to be the document outline.
let foundContour = null;
for (let sortableContour of sortableContours) {
let peri = cv.arcLength(sortableContour.contour, true);
let approx = new cv.Mat();
cv.approxPolyDP(sortableContour.contour, approx, 0.1 * peri, true);
if (approx.rows == 4) {
console.log('found it');
foundContour = approx
break;
}
else {
// Not a quadrilateral; free the approximation Mat.
approx.delete();
}
}
//<Draw foundContour and a bounding box to ensure it's accurate>
//TODO: Do a perspective transform
// NOTE(review): this line is what throws — minMaxLoc expects a
// single-channel Mat, but approxPolyDP output is a 2-channel CV_32S point
// list; read the corner coordinates from foundContour.data32S instead.
let result = cv.minMaxLoc(foundContour);
The last line above results in a runtime error (Uncaught (in promise): 6402256 - Exception catching is disabled). I can run minMaxLoc() on other Mat objects.
For anyone else looking to do this in OpenCV.JS, what I commented above seems to still be accurate. The contour found can't be used with minMaxLoc, but the X/Y data can be pulled out of data32S[]. That should be all that's needed to do this perspective transform. Some code is below.
// Find all contours in the edge-detected image.
let contours = new cv.MatVector();
let hierarchy = new cv.Mat();
cv.findContours(matDest, contours, hierarchy, cv.RETR_LIST, cv.CHAIN_APPROX_SIMPLE);

// Record area/perimeter for every contour so we can find the biggest.
let sortableContours: SortableContour[] = [];
for (let i = 0; i < contours.size(); i++) {
  let cnt = contours.get(i);
  let area = cv.contourArea(cnt, false);
  let perim = cv.arcLength(cnt, false);
  sortableContours.push(new SortableContour({ areaSize: area, perimiterSize: perim, contour: cnt }));
}

// Sort by area, largest first, and keep the top five candidates.
sortableContours = sortableContours.sort((item1, item2) => { return (item1.areaSize > item2.areaSize) ? -1 : (item1.areaSize < item2.areaSize) ? 1 : 0; }).slice(0, 5);

// Ensure the top-area contour approximates to a quadrilateral.
// (NOTE: This is not a perfect science and likely needs more attention.)
let approx = new cv.Mat();
cv.approxPolyDP(sortableContours[0].contour, approx, .05 * sortableContours[0].perimiterSize, true);
if (approx.rows == 4) {
  console.log('Found a 4-corner approx');
  foundContour = approx;
}
else {
  console.log('No 4-corner large contour!');
  return;
}

// Read the corners straight out of the contour data: approxPolyDP output
// is a 2-channel CV_32S Mat, so the x/y pairs live in data32S.
let corner1 = new cv.Point(foundContour.data32S[0], foundContour.data32S[1]);
let corner2 = new cv.Point(foundContour.data32S[2], foundContour.data32S[3]);
let corner3 = new cv.Point(foundContour.data32S[4], foundContour.data32S[5]);
let corner4 = new cv.Point(foundContour.data32S[6], foundContour.data32S[7]);

// Order the corners: sort by Y so the top pair comes first. sort() works
// in place, so the original's dangling `.slice(0, 5)` was a no-op and has
// been removed.
let cornerArray = [{ corner: corner1 }, { corner: corner2 }, { corner: corner3 }, { corner: corner4 }];
cornerArray.sort((item1, item2) => { return (item1.corner.y < item2.corner.y) ? -1 : (item1.corner.y > item2.corner.y) ? 1 : 0; });

// Determine left/right based on the x position of the top and bottom pairs.
let tl = cornerArray[0].corner.x < cornerArray[1].corner.x ? cornerArray[0] : cornerArray[1];
let tr = cornerArray[0].corner.x > cornerArray[1].corner.x ? cornerArray[0] : cornerArray[1];
let bl = cornerArray[2].corner.x < cornerArray[3].corner.x ? cornerArray[2] : cornerArray[3];
let br = cornerArray[2].corner.x > cornerArray[3].corner.x ? cornerArray[2] : cornerArray[3];

// Output size: the larger of each pair of opposing edge lengths.
let widthBottom = Math.hypot(br.corner.x - bl.corner.x, br.corner.y - bl.corner.y);
let widthTop = Math.hypot(tr.corner.x - tl.corner.x, tr.corner.y - tl.corner.y);
let theWidth = (widthBottom > widthTop) ? widthBottom : widthTop;
let heightRight = Math.hypot(tr.corner.x - br.corner.x, tr.corner.y - br.corner.y);
// BUG FIX: the left-edge height mistakenly used tr.corner.y; the left edge
// runs from tl to bl, so it must use tl.corner.y.
let heightLeft = Math.hypot(tl.corner.x - bl.corner.x, tl.corner.y - bl.corner.y);
let theHeight = (heightRight > heightLeft) ? heightRight : heightLeft;

// Map the source corners onto an axis-aligned rectangle and warp.
let finalDestCoords = cv.matFromArray(4, 1, cv.CV_32FC2, [0, 0, theWidth - 1, 0, theWidth - 1, theHeight - 1, 0, theHeight - 1]);
let srcCoords = cv.matFromArray(4, 1, cv.CV_32FC2, [tl.corner.x, tl.corner.y, tr.corner.x, tr.corner.y, br.corner.x, br.corner.y, bl.corner.x, bl.corner.y]);
let dsize = new cv.Size(theWidth, theHeight);
let M = cv.getPerspectiveTransform(srcCoords, finalDestCoords)
cv.warpPerspective(matDestTransformed, finalDest, M, dsize, cv.INTER_LINEAR, cv.BORDER_CONSTANT, new cv.Scalar());
For reference, here is the class definition I was using for SortableContour. The code above is meant as a guide, not as something that can run on its own, however.
// Pairs a raw OpenCV contour with its precomputed area and perimeter so a
// list of contours can be sorted without recomputing the metrics.
export class SortableContour {
// Contour perimeter from cv.arcLength (field name keeps the original
// "perimiter" spelling used throughout the calling code).
perimiterSize: number;
// Contour area from cv.contourArea.
areaSize: number;
// The underlying contour Mat from cv.findContours.
contour: any;
constructor(fields: Partial<SortableContour>) {
Object.assign(this, fields);
}
}