Does a getColorBoundsRect() equivalent exist in javascript? - javascript

just wanted to know if anybody has already done this in Javascript or if I have to do it myself - if latter: How would I do it? (not asking for a piece of code, only curious which approach you would use)

I have a better solution. It is not necessary to iterate through all the pixels, only through the ones outside the bounding box. Think of it this way, if you wanted to do the same thing in 1D: finding the first and last position of a value in an array, would you walk through the entire array? It would be better to walk through from the start till you find the first value, then walk from the end till you find the last value. The following code does the same for 2D. I haven't tested it thoroughly (either for correctness or speed), but it seems to work, and common sense say it's faster.
/**
 * Port of ActionScript's BitmapData.getColorBoundsRect().
 * Returns the smallest rectangle (as a module.Rect) enclosing every pixel for
 * which (pixelARGB & mask) equals `color` (findColor === true) or differs from
 * it (findColor === false), or null when no such pixel exists.
 * Only pixels outside the growing bounding box are visited: one row-major scan
 * from the top, one from the bottom, then column scans from the left and right.
 *
 * @param {Number} mask       ARGB bit mask applied to each pixel value.
 * @param {Number} color      ARGB value compared against the masked pixel.
 * @param {Boolean} [findColor=true] true: bounds of matching pixels; false: bounds of non-matching pixels.
 * @param {Object} [rect]     Search region {x, y, w, h}; defaults to the whole bitmap.
 * @returns {Object|null}     Bounding module.Rect in bitmap coordinates, or null.
 */
BitmapData.prototype.getColorBoundsRect = function(mask, color, findColor, rect){
    findColor = typeof findColor !== 'undefined' ? findColor : true;
    rect = typeof rect !== 'undefined' ? rect : new module.Rect(0, 0, this.width, this.height);
    // Start with an "inverted" box so the first hit snaps it to a single pixel.
    var l = rect.w - 1;
    var r = 0;
    var t = rect.h - 1;
    var b = 0;
    var data = this.context.getImageData(rect.x, rect.y, rect.w, rect.h).data;
    // Pass 1: row-major scan from the top-left for the FIRST matching pixel.
    // Its row is the top edge; rows above it can never contain a match.
    for (var i = 0; i < data.length; i += 4){
        var val = module.RGBToHex({r:data[i], g:data[i+1], b:data[i+2], a:data[i+3]});
        if ((findColor && ((val & mask) >>> 0 == color)) || (!findColor && ((val & mask) >>> 0 != color))){
            l = r = ((i / 4) % rect.w);
            t = b = Math.floor(i / 4 / rect.w);
            break;
        }
    }
    // We found nothing.
    if (i >= data.length) {
        return null;
    }
    // Pass 2: scan backwards from the bottom-right for the LAST matching pixel;
    // its row fixes the bottom edge.
    for (var j = data.length - 4; j > i; j -= 4){
        var val = module.RGBToHex({r:data[j], g:data[j+1], b:data[j+2], a:data[j+3]});
        if ((findColor && ((val & mask) >>> 0 == color)) || (!findColor && ((val & mask) >>> 0 != color))){
            l = Math.min(l, ((j / 4) % rect.w));
            r = Math.max(r, ((j / 4) % rect.w));
            b = Math.floor(j / 4 / rect.w);
            break;
        }
    }
    // Pass 3: only columns left of the current left edge can still move it.
    // Row t is skipped because pass 1 guarantees no match before column l in it.
    for (var x = 0; x < l; x ++){
        for (var y = t + 1; y <= b; y ++){
            i = (y * rect.w + x) * 4;
            var val = module.RGBToHex({r:data[i], g:data[i+1], b:data[i+2], a:data[i+3]});
            if ((findColor && ((val & mask) >>> 0 == color)) || (!findColor && ((val & mask) >>> 0 != color))){
                l = Math.min(l, x);
                break;
            }
        }
    }
    // Pass 4: only columns right of the current right edge can still move it.
    // Row b is skipped because pass 2 guarantees no match after its hit in it.
    for (var x = rect.w - 1; x > r; x --){
        for (var y = t; y < b; y ++){
            i = (y * rect.w + x) * 4;
            var val = module.RGBToHex({r:data[i], g:data[i+1], b:data[i+2], a:data[i+3]});
            if ((findColor && ((val & mask) >>> 0 == color)) || (!findColor && ((val & mask) >>> 0 != color))){
                r = Math.max(r, x);
                break;
            }
        }
    }
    // NOTE: keeps the author's size convention (r - l, b - t), i.e. the
    // right/bottom pixel is not counted in the width/height.
    return new module.Rect(l + rect.x, t + rect.y, (r - l), (b - t));
};
In this code BitmapData just wraps a canvas object and its context2d, and Rect is an {x: , y: , w: , h: } object. I had to do some screwing around with RGBToHex to make sure I was getting positive numbers (uint's) too:
// Packs an {r, g, b, a} color object into a single unsigned 32-bit ARGB integer.
module.RGBToHex = function (rgb) {
    var argb = (rgb.a << 24) | (rgb.r << 16) | (rgb.g << 8) | rgb.b;
    return argb >>> 0; // >>> 0 forces the signed result into uint32 range
};

here's my quick'n'dirty solution, maybe somebody'll find it useful ;)
/**
* get a rectangle around color
* #param {...} ctx 2dCanvasObject to be scanned
* #return {Object} object storing the rectangle's data (x, y, w(idth), h(eight))
*/
/**
 * Find the bounding rectangle of all non-black pixels in an image.
 *
 * Fixes over the previous revision:
 *  - `w` and `h` were read from undeclared outer-scope variables (implicit
 *    globals); they are now taken from the ImageData-style argument itself
 *    (`ctx.width` / `ctx.height`).
 *  - color confined to a single row or column used to return `false` because
 *    of the strict `>` comparisons; a `found` flag now reports any colored pixel.
 *
 * @param {Object} ctx ImageData-like object exposing data, width and height.
 *   (NOTE(review): the original doc said "2dCanvasObject", but a plain 2D
 *   context has no `.data` property — an ImageData matches the code; confirm
 *   against the caller.)
 * @return {Object|Boolean} {x, y, w(idth), h(eight)} rectangle, or false when
 *   the image is entirely black. Keeps the original size convention
 *   w = right - left (the rightmost pixel column is not counted).
 */
function getColorBoundsRect(ctx) {
    var data = ctx.data,
        w = ctx.width,   // was an undeclared free variable before
        h = ctx.height,  // was an undeclared free variable before
        found = false,   // set as soon as any non-black pixel is seen
        // extremes start inverted so the first colored pixel snaps them
        outerLeftPixel = w - 1,
        outerRightPixel = 0,
        outerTopPixel = h - 1,
        outerBottomPixel = 0,
        i = 0, x, y;
    // Walk every pixel once; i is the pixel index (0 = top-left, row-major).
    while (i < (data.length / 4)) {
        // any RGB channel > 0 means the pixel is not pure black
        if ((data[i * 4] + data[i * 4 + 1] + data[i * 4 + 2]) > 0) {
            found = true;
            x = i % w;              // column within the row
            y = Math.floor(i / w);  // row
            if (x < outerLeftPixel) { outerLeftPixel = x; }
            if (x > outerRightPixel) { outerRightPixel = x; }
            if (y < outerTopPixel) { outerTopPixel = y; }
            if (y > outerBottomPixel) { outerBottomPixel = y; }
        }
        ++i;
    }
    if (found) {
        return {
            x: outerLeftPixel,
            y: outerTopPixel,
            w: outerRightPixel - outerLeftPixel,
            h: outerBottomPixel - outerTopPixel
        };
    }
    // no color on the canvas: there is no rectangle
    return false;
}

Related

Generic function to draw parametric shapes in vanilla JavaScript/WebGL

I am learning WebGL with vanilla javascript. I have been able to follow along my tutorial to draw a simple bell shape using gl.LINES with the following function to create the vertices:
// Builds vertex positions and line indices for a surface of revolution
// (a "bell": y = cos(r*PI) swept around the Y axis by angle t).
// NOTE(review): assigns `vertices` and `indices` without declaring them, so
// they must exist in an enclosing/global scope — confirm against the caller.
function createVertexData() {
var n = 20; // across
var m = 10; // down
// Positions.
vertices = new Float32Array(3 * (n + 1) * (m + 1));
// Index data for Linestrip.
indices = new Uint16Array(2 * 2 * n * m);
// step sizes: dt for the sweep angle, dr for the radius
var dt = 2 * Math.PI / n;
var dr = 1 / m;
// Counter for entries in index array.
var iIndex = 0;
// Loop angle t.
for (var i = 0, t = Math.PI; i <= n; i++, t += dt) {
// Loop radius r.
for (var j = 0, r = 0; j <= m; j++, r += dr) {
// row-major vertex number: i selects the ring, j the position along it
var iVertex = i * (m + 1) + j;
// polar-to-cartesian: r is distance from the Y axis, t the angle around it
var x = r * Math.cos(t);
var y = Math.cos(r * Math.PI);
var z = r * Math.sin(t);
// Set vertex positions.
vertices[iVertex * 3] = x;
vertices[iVertex * 3 + 1] = y;
vertices[iVertex * 3 + 2] = z;
// Set index.
// Line on beam.
if (j > 0 && i > 0) {
indices[iIndex++] = iVertex - 1;
indices[iIndex++] = iVertex;
}
// Line on ring.
// NOTE(review): same guard as the "beam" case above; the index buffer size
// 2*2*n*m exactly matches both blocks requiring j > 0 && i > 0.
if (j > 0 && i > 0) {
indices[iIndex++] = iVertex - (m + 1);
indices[iIndex++] = iVertex;
}
}
}
}
Now I want to change the createVertexData() function to let's say draw a pillow (only the mesh, not the texture).
The formula for that pillow is (source):
x = cos(u)
y = cos(v)
z = a sin(u) sin(v)
Where:
- a is a constant
- u is an element from [0, pi]
- v is an element from [-pi, pi]
All my attempts at changing the createVertexData() miserably failed: I calculated x, y, z according to the new formula and set the loops to run according to the domains of u and v. Also renamed variables of course. The resulting shape always looked very off and not even close to the pillow.
How does it need to look like?
Thanks in advance.

Canvas - floodfill leaves white pixels at edges for PNG images with transparent

Now, I tried to perform a flood fill on transparent PNG images using the flood fill algorithm from the article How can I avoid exceeding the max call stack size during a flood fill algorithm?, which uses a non-recursive method along with a Uint32Array to handle the color stack, and it works quite well.
However, this flood fill algorithm has left the white (actually the light grey edge or anti-alias edges) which remain unfilled. Here is my code:
var BrushColorString = '#F3CDA6'; // skin color
// Convert the click position to canvas-local coordinates and, when the tool
// mode is 'flood-fill', fill starting from the clicked pixel.
// NOTE(review): `mode`, `context` and `canvas` are defined elsewhere in the page.
canvas.addEventListener('mousedown', function(e) {
const rect = canvas.getBoundingClientRect()
CanvasMouseX = e.clientX - rect.left;
CanvasMouseY = e.clientY - rect.top;
if (mode === 'flood-fill')
{
// test flood fill algorithm
paintAt(context, CanvasMouseX,CanvasMouseY,hexToRgb(BrushColorString));
}
});
// Non-recursive scan-line (span) flood fill: replaces the connected region of
// pixels whose packed 32-bit RGBA value exactly equals the color under
// (startX, startY) with curColor. Uses a Uint32Array view over the image
// buffer so each pixel is a single compare/assign.
function paintAt(ContextOutput,startX, startY,curColor) {
//function paintAt(ctx,startX, startY,curColor) {
// read the pixels in the canvas
const width = ContextOutput.canvas.width,
height = ContextOutput.canvas.height,pixels = width*height;
const imageData = ContextOutput.getImageData(0, 0, width, height);
var data1 = imageData.data;
// 32-bit view over the same buffer: one element per pixel
const p32 = new Uint32Array(data1.buffer);
const stack = [startX + (startY * width)]; // add starting pos to stack
const targetColor = p32[stack[0]];
var SpanLeft = true, SpanRight = true; // logic for spanning left right
var leftEdge = false, rightEdge = false;
// proper conversion of color to Uint32Array
const newColor = new Uint32Array((new Uint8ClampedArray([curColor.r,curColor.g, curColor.b, curColor.a])).buffer)[0];
// need proper comparison of target color and new Color
if (targetColor === newColor || targetColor === undefined) { return } // avoid endless loop
while (stack.length){
let idx = stack.pop();
while(idx >= width && p32[idx - width] === targetColor) { idx -= width }; // move to top edge
SpanLeft = SpanRight = false; // not going left right yet
leftEdge = (idx % width) === 0;
rightEdge = ((idx +1) % width) === 0;
// walk down the column, filling, and queue new columns to the left/right
while (p32[idx] === targetColor) {
p32[idx] = newColor;
if(!leftEdge) {
if (p32[idx - 1] === targetColor) { // check left
if (!SpanLeft) {
stack.push(idx - 1); // found new column to left
SpanLeft = true; //
} else if (SpanLeft) {
// NOTE(review): toggling the flag here re-seeds every second matching
// row — redundant pushes but the fill result stays correct.
SpanLeft = false;
}
}
}
if(!rightEdge) {
if (p32[idx + 1] === targetColor) {
if (!SpanRight) {
stack.push(idx + 1); // new column to right
SpanRight = true;
}else if (SpanRight) {
SpanRight = false;
}
}
}
idx += width;
}
}
// putImageData overwrites pixels wholesale, so clear first then write back
clearCanvas(ContextOutput);
ContextOutput.putImageData(imageData,0, 0);
};
// Parses a "#rrggbb" (or "rrggbb") hex string into {r, g, b, a} with a fixed
// opaque alpha of 255; returns null when the string is not valid hex.
function hexToRgb(hex) {
    var match = /^#?([a-f\d]{2})([a-f\d]{2})([a-f\d]{2})$/i.exec(hex);
    if (!match) {
        return null;
    }
    return {
        r: parseInt(match[1], 16),
        g: parseInt(match[2], 16),
        b: parseInt(match[3], 16),
        a: 255
    };
}
So far, I have tried using the following suggestion:
using matchOutlineColor function using RGBA value mentioned in Canvas - floodfill leaves white pixels at edges
When I tried to implemented "Restrict fill area based on intensity gradient changes instead of simple threshold " mentioned in Canvas - floodfill leaves white pixels at edges which is considered as the most promising algorithm, I still have no clue how to implement this algorithm with minimum change on existing algorithm to handle the anti-alias edge issue for the cases of images with transparent.
When I take a look at the example on how to apply a tolerance and a toleranceFade mentioned in Canvas flood fill not filling to edge, I still have no clue how implement such a tolerance and a toleranceFade in my case.
Color Difference method (colorDiff function) within mentioned tolerance in Canvas Javascript FloodFill algorithm left white pixels without color and so far still not working. Similar thing can be said to colorsMatch function to be within Range Square (rangeSq) mentioned in How can I perform flood fill with HTML Canvas? which still unable to solve the anti-alias edge problem.
If you have any idea on how to deal with anti-alias edge problems of the flood-fill algorithm, please response as soon as possible.
Updated:
Here is the revised code for the paintAt function, following the suggestion that takes tolerance into account:
<div id="container"><canvas id="control" >Does Not Support Canvas Element</canvas></div>
<div><label for="tolerance">Tolerance</label>
<input id="tolerance" type="range" min="0" max="255" value="32" step="1" oninput="this.nextElementSibling.value = this.value"><output>32</output></div>
// Grab the canvas/context and track the last mouse position in canvas-local
// coordinates; clicking in flood-fill mode fills from the clicked pixel.
var canvas = document.getElementById("control");
var context = canvas.getContext('2d');
var CanvasMouseX = -1; var CanvasMouseY = -1;
var BrushColorString = '#F3CDA6'; // skin color
canvas.addEventListener('mousedown', function(e) {
const rect = canvas.getBoundingClientRect()
CanvasMouseX = e.clientX - rect.left;
CanvasMouseY = e.clientY - rect.top;
// testing
// NOTE(review): `mode` and `tolerance` (the range input) are globals from the page.
if (mode === 'flood-fill')
{
// test flood fill algorithm
// tolerance.value is a string; paintAt coerces it during the tolerance math
paintAt(context,CanvasMouseX,CanvasMouseY,
hexToRgb(BrushColorString),tolerance.value);
}
});
// "#rrggbb" / "rrggbb" string -> {r, g, b, a} (alpha fixed at 255), or null.
function hexToRgb(hex) {
    var HEX_PATTERN = /^#?([a-f\d]{2})([a-f\d]{2})([a-f\d]{2})$/i;
    var parts = HEX_PATTERN.exec(hex);
    return parts === null ? null : {
        r: parseInt(parts[1], 16),
        g: parseInt(parts[2], 16),
        b: parseInt(parts[3], 16),
        a: 255
    };
}
// Erases the entire drawing surface of the given 2D context.
function clearCanvas(ctx) {
    var surface = ctx.canvas;
    ctx.clearRect(0, 0, surface.width, surface.height);
}
// Euclidean distance in 4D RGBA space between the reference color
// (R00, G00, B00, A00) and the pixel at `index` in the byte array `data0`.
function colorDistance(index, R00, G00, B00, A00, data0) {
    var base = index << 2; // 4 bytes (RGBA) per pixel
    var dR = R00 - data0[base];
    var dG = G00 - data0[base + 1];
    var dB = B00 - data0[base + 2];
    var dA = A00 - data0[base + 3];
    return Math.sqrt((dR * dR) + (dB * dB) + (dG * dG) + (dA * dA));
}
/**
 * Tolerance-based, double-pass flood fill with edge blending.
 * Pass 1 scan-line fills, recording each filled pixel's 4D (RGBA) color
 * distance from the fill origin (top bit 0x8000 marks "filled").
 * Pass 2 writes the fill color, blending at the fill boundary in proportion
 * to the recorded distance to soften anti-aliased edges.
 *
 * Fixes over the previous revision:
 *  - the trailing "memory leak" cleanup reassigned the const bindings
 *    DistancesArray, newColor and p32, throwing
 *    "TypeError: Assignment to constant variable" on every call right after
 *    drawing; the assignments were unnecessary (locals are collected when the
 *    function returns) and have been removed.
 *  - removed the unused locals R, G, B, A.
 *
 * @param {CanvasRenderingContext2D} ContextOutput target context.
 * @param {Number} startX fill origin x in canvas pixels.
 * @param {Number} startY fill origin y in canvas pixels.
 * @param {Object} curColor fill color {r, g, b, a}, channels 0-255.
 * @param {Number|String} tolerance per-channel tolerance 0-255 (range-input
 *   strings are coerced by the tolerance math).
 */
function paintAt(ContextOutput,startX, startY,curColor,tolerance) {
    // read the pixels in the canvas
    const width = ContextOutput.canvas.width,
        height = ContextOutput.canvas.height, pixels = width*height;
    const rightEdgeNum = width - 1, bottomEdgeNum = height - 1;
    const imageData = ContextOutput.getImageData(0, 0, width, height);
    var data1 = imageData.data;
    const p32 = new Uint32Array(data1.buffer); // one uint32 per pixel
    const stack = [startX + (startY * width)]; // add starting pos to stack
    const targetColor = p32[stack[0]];
    var SpanLeft = true, SpanRight = true; // logic for spanning left right
    var leftEdge = false, rightEdge = false, IsBlend = false;
    const DistancesArray = new Uint16Array(pixels); // per-pixel distance + fill marker
    var idx = 0, Distance = 0;
    // RGBA channels of the pixel at the fill origin.
    var R0 = data1[(4*(startX + (startY * width)))+0],
        G0 = data1[(4*(startX + (startY * width)))+1],
        B0 = data1[(4*(startX + (startY * width)))+2],
        A0 = data1[(4*(startX + (startY * width)))+3];
    // Per-channel tolerance -> distance in 4D RGBA space.
    var CalculatedTolerance = Math.sqrt(tolerance * tolerance * 4);
    const BlendR = curColor.r |0, BlendG = curColor.g |0,
        BlendB = curColor.b |0, BlendA = curColor.a|0;
    // color variable for blending
    const newColor = new Uint32Array((new Uint8ClampedArray([BlendR,BlendG,BlendB,BlendA])).buffer)[0];
    if (targetColor === newColor || targetColor === undefined) { return }
    // avoid endless loop
    while (stack.length){
        idx = stack.pop();
        while (idx >= width &&
            colorDistance(idx - width,R0,G0,B0,A0,data1) <= CalculatedTolerance) { idx -= width }; // move to top edge
        SpanLeft = SpanRight = false; // not going left right yet
        leftEdge = (idx % width) === 0;
        rightEdge = ((idx +1) % width) === 0;
        while ((Distance = colorDistance(idx,R0,G0,B0,A0,data1)) <= CalculatedTolerance) {
            // 0x8000 marks the pixel as filled; low byte keeps the normalized distance
            DistancesArray[idx] = (Distance / CalculatedTolerance) * 255 | 0x8000;
            p32[idx] = newColor;
            if(!leftEdge) {
                if (colorDistance(idx - 1,R0,G0,B0,A0,data1) <= CalculatedTolerance) { // check left
                    if (!SpanLeft) {
                        stack.push(idx - 1); // found new column to left
                        SpanLeft = true;
                    } else if (SpanLeft) {
                        SpanLeft = false;
                    }
                }
            }
            if(!rightEdge) {
                if (colorDistance(idx + 1,R0,G0,B0,A0,data1) <= CalculatedTolerance) {
                    if (!SpanRight) {
                        stack.push(idx + 1); // new column to right
                        SpanRight = true;
                    }else if (SpanRight) {
                        SpanRight = false;
                    }
                }
            }
            idx += width;
        }
    }
    // Second pass: blend pixels on the fill boundary, plain-fill the rest.
    idx = 0;
    while (idx <= pixels-1) {
        Distance = DistancesArray[idx];
        if (Distance !== 0) {
            if (Distance === 0x8000) {
                p32[idx] = newColor; // exact match: plain fill
            } else {
                IsBlend = false;
                const x = idx % width;
                const y = idx / width | 0;
                // boundary pixel = at least one unfilled 4-neighbour inside the canvas
                if (x >= 1 && DistancesArray[idx - 1] === 0) { IsBlend = true }
                else if (x <= rightEdgeNum -1 && DistancesArray[idx + 1] === 0) { IsBlend = true }
                else if (y >=1 && DistancesArray[idx - width] === 0) { IsBlend = true }
                else if (y <=bottomEdgeNum-1 && DistancesArray[idx + width] === 0) { IsBlend = true }
                if (IsBlend) {
                    // blending at the edge
                    Distance &= 0xFF;          // strip the "filled" bit
                    Distance = Distance / 255; // normalize to 0-1
                    const invDist = 1 - Distance;
                    const idx1 = idx << 2;
                    // simple linear blend (not the 2D API's compositing)
                    data1[idx1 + 0] = data1[idx1 + 0] * Distance + BlendR * invDist;
                    data1[idx1 + 1] = data1[idx1 + 1] * Distance + BlendG * invDist;
                    data1[idx1 + 2] = data1[idx1 + 2] * Distance + BlendB * invDist;
                    data1[idx1 + 3] = data1[idx1 + 3] * Distance + BlendA * invDist;
                } else {
                    p32[idx] = newColor;
                }
            }
        }
        idx++;
    }
    clearCanvas(ContextOutput);
    ContextOutput.putImageData(imageData,0, 0);
}
However, the results of the flood fill have been found wanting, as shown in the tolerance transition here:
How can I deal with this kind problem when tolerance has become too much. Any alternative algorithm would be appreciated.
Double pass Flood fill in the 4th dimension
I am the Author of the accepted answers for How can I avoid exceeding the max call stack size during a flood fill algorithm? and Canvas flood fill not filling to edge
Unfortunately there is no perfect solution.
The following method has problems.
Setting the tolerance so that it gets all edge aliasing will often fill unwanted areas.
Setting the tolerance too low can make the edges look even worse than the standard fill.
Repeated fills will result in harder edge aliasing.
Uses a simple blend function. The correct blend function can be found at W3C Compositing and Blending Level "blending normal" Sorry I am out of time to complete this answer.
Does not easily convert to gradients or pattern fills.
There is a much better solution but it is 1000+ lines long and the code alone would not fit in the 32K answer limit.
This answer is a walk through of how to change your function to reduce edge aliasing using a tolerance and simple edge blending.
Note
The various snippets in the answer may have typos or wrong names. For the correct working code see example at bottom.
Tolerance
The simplest method to detect edges is to use a tolerance and fill pixels that are within the tolerance of the pixel color at the fill origin.
This lets the fill overlap the aliased edges that can then be detected and blended to reduce the artifacts due to anti-aliasing.
The problem is that to get a good coverage of the aliasing requires a large tolerance and this ends up filling areas you intuitively would not want colored.
Calculating color distance
A color can be represented by the 3 values red, green, blue. If one substitutes the names with x, y, z it is easy to see how each color has a unique position in 3D space.
Even better is that the distance between any two colors in this 3D space directly correlates to the perceived difference in color. We can thus use simple math to calculate the difference (Pythagoras).
As we need to also consider the alpha channel we need to step up one dimension. Each color and its alpha part have a unique point in 4D space. The distance between any of these 4D colors directly correlates to perceived difference in color and transparency.
Lucky we do not need to imagine 4D space, all we do is extend the math (Pythagoras works in all euclidean dimensions).
Thus we get the function and prep code you can add to your flood fill function.
var idx = stack[0] << 2; // remove let first line inside while (stack.length){
// channels of the pixel at the fill origin
const r = data1[idx];
const g = data1[idx + 1];
const b = data1[idx + 2];
const a = data1[idx + 3];
// Returns the 4D (RGBA) euclidean distance from the fill-origin color to the
// pixel at `idx`.
// FIX: the body previously indexed data1[i] (an unrelated/undefined variable)
// instead of data1[idx]; the corrected indexing matches the working example below.
function colorDist(idx) {
    idx <<= 2;
    const R = r - data1[idx];
    const G = g - data1[idx + 1];
    const B = b - data1[idx + 2];
    const A = a - data1[idx + 3];
    return (R * R + B * B + G * G + A * A) ** 0.5;
}
To the function declaration we add an argument tolerance specified as a value 0 to 255
The function declaration changes from
function paintAt(contextOutput, startX, startY, curColor) {
To
function paintAt(contextOutput, startX, startY, curColor, tolerance = 0) {
With tolerance as an optional argument.
A tolerance of 0 only fills the targetColor
A tolerance of 255 should fill all pixels
We need to convert the tolerance from a channel value to a 4D distance value so that the 255 covers the greatest distance between two colors in the 4D color space.
Add the following line to the top of the function paintAt
tolerance = (tolerance * tolerance * 4) ** 0.5; // normalize to 4D RGBA space
We now need to change the pixel match statements to use the tolerance. Anywhere you have
p32[idx] === targetColor or similar needs to be replaced with colorDist(idx) <= tolerance. The exception is the inner while loop as we need to use the 4D color distance
while (checkPixel(ind)) {
becomes
// declare variable dist at top of function
while ((dist = colorDist(idx)) <= tolerance) {
Double pass solution
To combat the aliasing we need to blend the fill color by an amount proportional to the color distance.
Doing this for all pixels means that pixels away from the edge of the fill will get the wrong color if the color distance is not 0 and less than tolerance.
We only want to blend pixels if they are at the edge of the fill, excluding those at the edge of the canvas. For many of the pixels there is no way of knowing if a pixel is at the edge of the fill as we come across them. We can only know when we have found all filled pixels.
First pass the flood fill
Thus we must keep an array that holds the color distance for all pixels filled
At the top of the function create a buffer to hold pixel color distances.
const distances = new Uint16Array(width*height);
Then in the inner loop along with setting the pixel color set the matching locations distance.
while ((dist = colorDist(idx)) <= tolerance) {
//Must not fill color here do in second pass p32[idx] = newColor;
distances[idx] = (dist / tolerance) * 255 | 0x8000;
To track which pixels are filled we set the top bit of the distance value. That means that distances will hold a non zero value for all pixels to fill and zero for pixels to ignore. This is done with the | 0x8000
The main part of the fill is now done. We let the fill do its thing before we start on the next pass.
Second pass edge detect and blend
After the outer loop has exited we step over each pixel one at a time. Check if it needs to be filled.
If it needs filling we extract the color distance. If zero set that pixels color in the p32 array. If the distance is not zero we then check the 4 pixels around it. If any of the 4 neighboring pixels is marked as do not fill distances[idx] === 0 and that pixel is not outside the canvas bounds we know it is an edge and needs to be blended.
// declare at top of function
var blend, dist, rr, gg, bb, aa;
// need fill color's channels for quickest possible access.
const fr = curColor.r | 0;
const fg = curColor.g | 0;
const fb = curColor.b | 0;
const fa = curColor.a | 0;
// after main fill loop.
idx = 0;
const rightEdge = width - 1, bottomEdge = height - 1;
while (idx < width * height){
dist = distances[idx];
if (dist !== 0) {
if (dist === 0x8000) {
p32[idx] = newColor;
} else {
blend = false;
const x = idx % width;
const y = idx / width | 0;
if (x > 0 && distances[idx - 1] === 0) { blend = true }
else if (x < rightEdge && distances[idx + 1] === 0) { blend = true }
else if (y > 0 && distances[idx - width] === 0) { blend = true }
else if (y < bottomEdge && distances[idx + width] === 0) { blend = true }
if (blend) { // pixels is at fill edge an needs to blend
dist &= 0xFF; // remove fill bit
dist = dist / 255; // normalize to range 0-1
const invDist = 1 - dist; // invert distance
// get index in byte array
const idx1 = idx << 2; // same as idx * 4
// simple blend function (not the same as used by 2D API)
data[idx1] = data[idx1 ] * dist + fr * invDist;
data[idx1 + 1] = data[idx1 + 1] * dist + fg * invDist;
data[idx1 + 2] = data[idx1 + 2] * dist + fb * invDist;
data[idx1 + 3] = data[idx1 + 3] * dist + fa * invDist;
} else {
p32[idx] = newColor;
}
}
}
idx++;
}
And now just put the new pixel array onto the canvas.
Example
This example is a bare bones wrapper around a modified version of your code. It is there to make sure I did not make any algorithmic mistakes and to highlight the quality or lack of quality when using this method.
Click first button to add random circle.
Use slider to set tolerance 0 - 255
Click clear to clear canvas.
Click canvas to fill random color at mouse pos.
Canvas has been scaled by 2 to make artifacts more visible.
The function floodFill replaces your paintAt and is too big and should be broken into two parts, one for the fill pass, and another for edge detect and blend.
// Demo wiring: the toggle button arms "add a circle"; clicking the canvas
// flood-fills a random color at the mouse position; clear wipes the canvas.
const ctx = canvas.getContext("2d");
var circle = true; // true: next test() stamps a circle, false: next click fills
test();
canvas.addEventListener("click", e => {circle = false; test(e)});
toggleFill.addEventListener("click",e => {circle = true; test(e)});
clear.addEventListener("click",()=>ctx.clearRect(0,0,500,500));
// Strokes a black circle of random radius (25-50) at a random position in the
// top-left 100x100 area and returns its center.
function randomCircle() {
    ctx.beginPath();
    ctx.strokeStyle = "black";
    ctx.lineWidth = 4;
    var cx = Math.random() * 100 | 0;
    var cy = Math.random() * 100 | 0;
    ctx.arc(cx, cy, Math.random() * 25 + 25, 0, Math.PI * 2);
    ctx.stroke();
    return { x: cx, y: cy };
}
/**
 * Demo driver: stamps a random circle or flood-fills at the click position
 * with a random color, depending on the `circle` toggle.
 * Fix: use the event parameter `e` (previously declared but unused) instead
 * of the deprecated non-standard global `event`.
 * @param {MouseEvent} [e] click event; only read in fill mode.
 */
function test(e) {
    if (circle) {
        toggleFill.textContent = "Click canvas to fill";
        randomCircle();
    } else {
        toggleFill.textContent = "Click button add random circle";
        const col = {
            r: Math.random() * 255 | 0,
            g: Math.random() * 255 | 0,
            b: Math.random() * 255 | 0,
            a: Math.random() * 255 | 0,
        };
        // the canvas is CSS-scaled by 2, so halve the offset to get canvas pixels
        floodFill(ctx, (e.offsetX - 1) / 2 | 0, (e.offsetY - 1) / 2 | 0, col, tolerance.value);
    }
}
// Original function from SO question https://stackoverflow.com/q/65359146/3877726
// Original function from SO question https://stackoverflow.com/q/65359146/3877726
/**
 * Double-pass scan-line flood fill with tolerance and edge blending.
 * Pass 1 flood-fills, recording each filled pixel's 4D (RGBA) color distance
 * from the fill origin in `distances` (top bit 0x8000 = "filled" marker).
 * Pass 2 writes the fill color, blending at the fill boundary proportionally
 * to the recorded distance to soften anti-aliased outlines.
 *
 * Fixes over the previous revision:
 *  - newColor is now packed unsigned via `| ... >>> 0`; the old
 *    `(fa << 24) + ...` form went negative whenever alpha >= 128, so the
 *    `targetColor === newColor` no-op guard could never match an opaque fill
 *    color against the unsigned values read from p32.
 *  - removed the unused locals rr, gg, bb, aa.
 *
 * @param {CanvasRenderingContext2D} ctx target context.
 * @param {Number} startX fill origin x in canvas pixels.
 * @param {Number} startY fill origin y in canvas pixels.
 * @param {Object} curColor fill color {r, g, b, a}, channels 0-255.
 * @param {Number|String} [tolerance=0] 0 fills exact matches only, 255 everything.
 */
function floodFill(ctx, startX, startY, curColor, tolerance = 0) {
    var idx, blend, dist, spanLeft = true, spanRight = true, leftEdge = false, rightEdge = false;
    const width = ctx.canvas.width, height = ctx.canvas.height, pixels = width*height;
    const imageData = ctx.getImageData(0, 0, width, height);
    const data = imageData.data;
    const p32 = new Uint32Array(data.buffer); // one uint32 per pixel
    const stack = [startX + (startY * width)];
    const targetColor = p32[stack[0]];
    const fr = curColor.r | 0;
    const fg = curColor.g | 0;
    const fb = curColor.b | 0;
    const fa = curColor.a | 0;
    // FIX: force unsigned so the comparison with p32 values below can succeed.
    const newColor = ((fa << 24) | (fb << 16) | (fg << 8) | fr) >>> 0;
    if (targetColor === newColor || targetColor === undefined) { return }
    idx = stack[0] << 2;
    const rightE = width - 1, bottomE = height - 1;
    const distances = new Uint16Array(width*height);
    tolerance = (tolerance * tolerance * 4) ** 0.5; // channel tolerance -> 4D distance
    // channels of the pixel at the fill origin
    const r = data[idx];
    const g = data[idx + 1];
    const b = data[idx + 2];
    const a = data[idx + 3]
    // 4D euclidean distance between the origin color and pixel idx.
    // Already-visited pixels report Infinity so they are never refilled.
    function colorDist(idx) {
        if (distances[idx]) { return Infinity }
        idx <<= 2;
        const R = r - data[idx];
        const G = g - data[idx + 1];
        const B = b - data[idx + 2];
        const A = a - data[idx + 3];
        return (R * R + B * B + G * G + A * A) ** 0.5;
    }
    // Pass 1: scan-line fill, marking distances only (colors written in pass 2).
    while (stack.length) {
        idx = stack.pop();
        while (idx >= width && colorDist(idx - width) <= tolerance) { idx -= width }; // move to top edge
        spanLeft = spanRight = false; // not going left right yet
        leftEdge = (idx % width) === 0;
        rightEdge = ((idx + 1) % width) === 0;
        while ((dist = colorDist(idx)) <= tolerance) {
            distances[idx] = (dist / tolerance) * 255 | 0x8000; // top bit marks "filled"
            if (!leftEdge) {
                if (colorDist(idx - 1) <= tolerance) {
                    if (!spanLeft) {
                        stack.push(idx - 1); // found new column to the left
                        spanLeft = true;
                    } else if (spanLeft) {
                        spanLeft = false;
                    }
                }
            }
            if (!rightEdge) {
                if (colorDist(idx + 1) <= tolerance) {
                    if (!spanRight) {
                        stack.push(idx + 1); // new column to the right
                        spanRight = true;
                    }else if (spanRight) {
                        spanRight = false;
                    }
                }
            }
            idx += width;
        }
    }
    // Pass 2: write colors; blend where a filled pixel touches an unfilled one.
    idx = 0;
    while (idx < pixels) {
        dist = distances[idx];
        if (dist !== 0) {
            if (dist === 0x8000) {
                p32[idx] = newColor; // exact match: plain fill
            } else {
                blend = false;
                const x = idx % width;
                const y = idx / width | 0;
                // boundary pixel = any in-bounds 4-neighbour is unfilled
                if (x > 0 && distances[idx - 1] === 0) { blend = true }
                else if (x < rightE && distances[idx + 1] === 0) { blend = true }
                else if (y > 0 && distances[idx - width] === 0) { blend = true }
                else if (y < bottomE && distances[idx + width] === 0) { blend = true }
                if (blend) {
                    dist &= 0xFF;      // strip the "filled" bit
                    dist = dist / 255; // normalize to 0-1
                    const invDist = 1 - dist;
                    const idx1 = idx << 2;
                    // simple linear blend (not the 2D API's compositing)
                    data[idx1] = data[idx1 ] * dist + fr * invDist;
                    data[idx1 + 1] = data[idx1 + 1] * dist + fg * invDist;
                    data[idx1 + 2] = data[idx1 + 2] * dist + fb * invDist;
                    data[idx1 + 3] = data[idx1 + 3] * dist + fa * invDist;
                } else {
                    p32[idx] = newColor;
                }
            }
        }
        idx++;
    }
    ctx.putImageData(imageData,0, 0);
}
canvas {
width: 200px;
height: 200px;
border: 1px solid black;
}
<label for="tolerance">Tolerance</label>
<input id="tolerance" type="range" min="0" max="255" value="32" step="1"></input>
<button id ="toggleFill" >Click add random circle</button>
<button id ="clear" >Clear</button><br>
<canvas id="canvas" width="100" height="100"></canvas>

Programmatically determine best foreground color to be placed onto an image

I'm working on a node module that will return the color that will look best onto a background image which of course will have multiple colors.
Here's what I have so far:
'use strict';
var randomcolor = require('randomcolor');
var tinycolor = require('tinycolor2');
module.exports = function(colors, tries) {
var topColor, data = {};
if (typeof colors == 'string') { colors = [colors]; }
if (!tries) { tries = 10000; }
for (var t = 0; t < tries; t++) {
var score = 0, color = randomcolor(); //tinycolor.random();
for (var i = 0; i < colors.length; i++) {
score += tinycolor.readability(colors[i], color);
}
data[color] = (score / colors.length);
if (!topColor || data[color] > data[topColor]) {
topColor = color;
}
}
return tinycolor(topColor);
};
So the way it works is first I provide this script with the 6 most dominant colors in an image like this:
[ { r: 44, g: 65, b: 54 },
{ r: 187, g: 196, b: 182 },
{ r: 68, g: 106, b: 124 },
{ r: 126, g: 145, b: 137 },
{ r: 147, g: 176, b: 169 },
{ r: 73, g: 138, b: 176 } ]
and then it will generate 10,000 different random colors and then pick the one that has the best average contrast ratio with the 6 given colors.
The problem is that depending on which script I use to generate the random colors, I'll basically get the same results regardless of the image given.
With tinycolor2 I'll always end up with either a very dark gray (almost black) or a very light gray (almost white). And with randomcolor I'll either end up with a dark blue or a light peach color.
My script might not be the best way of going about this but does anybody have any ideas?
Thank you
Finding dominant hue.
The provided snippet show an example of how to find a dominant colour. It works by breaking the image into its Hue, saturation and luminance components.
The image reduction
To speed up the process the image is reduced to a smaller image (in this case 128 by 128 pixels). Part of the reduction process also trims some of the outside pixels from the image.
const IMAGE_WORK_SIZE = 128;
const ICOUNT = IMAGE_WORK_SIZE * IMAGE_WORK_SIZE;
if(event.type === "load"){
rImage = imageTools.createImage(IMAGE_WORK_SIZE, IMAGE_WORK_SIZE); // reducing image
c = rImage.ctx;
// This is where you can crop the image. In this example I only look at the center of the image
c.drawImage(this,-16,-16,IMAGE_WORK_SIZE + 32, IMAGE_WORK_SIZE + 32); // reduce image size
Find mean luminance
Once reduced I scan the pixels converting them to hsl values and get the mean luminance.
Note that luminance is a logarithmic scale so the mean is the square root of the sum of the squares divided by the count.
pixels = imageTools.getImageData(rImage).data;
l = 0;
for(i = 0; i < pixels.length; i += 4){
hsl = imageTools.rgb2hsl(pixels[i],pixels[i + 1],pixels[i + 2]);
l += hsl.l * hsl.l;
}
l = Math.sqrt(l/ICOUNT);
Hue histograms for luminance and saturation ranges.
The code can find the dominant colour in a range of saturation and luminance extents. In the example I only use one extent, but you can use as many as you wish. Only pixels that are inside the lum (luminance) and sat (saturation) ranges are used. I record a histogram of the hue for pixels that pass.
Example of hue ranges (one of)
hues = [{ // lum and sat have extent 0-100. high test is no inclusive hence high = 101 if you want the full range
lum : {
low :20, // low limit lum >= this.lum.low
high : 60, // high limit lum < this.lum.high
tot : 0, // sum of lum values
},
sat : { // all saturations from 0 to 100
low : 0,
high : 101,
tot : 0, // sum of sat
},
count : 0, // count of pixels that passed
histo : new Uint16Array(360), // hue histogram
}]
In the example I use the mean Luminance to automatically set the lum range.
hues[0].lum.low = l - 30;
hues[0].lum.high = l + 30;
Once the range is set I get the hue histogram for each range (one in this case)
for(i = 0; i < pixels.length; i += 4){
hsl = imageTools.rgb2hsl(pixels[i],pixels[i + 1],pixels[i + 2]);
for(j = 0; j < hues.length; j ++){
hr = hues[j]; // hue range
if(hsl.l >= hr.lum.low && hsl.l < hr.lum.high){
if(hsl.s >= hr.sat.low && hsl.s < hr.sat.high){
hr.histo[hsl.h] += 1;
hr.count += 1;
hr.lum.tot += hsl.l * hsl.l;
hr.sat.tot += hsl.s;
}
}
}
}
Weighted mean hue from hue histogram.
Then using the histogram I find the weighted mean hue for the range
// get weighted hue for image
// just to simplify code hue 0 and 1 (reds) can combine
for(j = 0; j < hues.length; j += 1){
hr = hues[j];
wHue = 0;
hueCount = 0;
hr.histo[1] += hr.histo[0];
for(i = 1; i < 360; i ++){
wHue += (i) * hr.histo[i];
hueCount += hr.histo[i];
}
h = Math.floor(wHue / hueCount);
s = Math.floor(hr.sat.tot / hr.count);
l = Math.floor(Math.sqrt(hr.lum.tot / hr.count));
hr.rgb = imageTools.hsl2rgb(h,s,l);
hr.rgba = imageTools.hex2RGBA(imageTools.rgba2Hex4(hr.rgb));
}
And that is about it. The rest is just display and stuff. The above code requires the imageTools interface (provided) that has tools for manipulating images.
The ugly complement
What you do with the colour/s found is up to you. If you want the complementary colour just convert the rgb to hsl imageTools.rgb2hsl and rotate the hue 180 deg, then convert back to rgb.
// Convert to HSL, rotate the hue halfway around the colour wheel, then
// convert back to RGB to get the complementary colour.
var hsl = imageTools.rgb2hsl(rgb.r, rgb.g, rgb.b);
hsl.h += 180; // hsl2rgb normalizes the hue internally, so no explicit % 360 is needed
// BUG FIX: the conversion back to RGB must use hsl2rgb; the original called
// rgb2hsl a second time, which does not "convert back" at all.
var complementRgb = imageTools.hsl2rgb(hsl.h, hsl.s, hsl.l);
Personally, only some colours work well with their complement. Adding to a palette is risky, and doing it via code is just crazy. Stick with colours in the image. Reduce the lum and sat range if you wish to find accented colours. Each range will have a count of the number of pixels found; use that to find the extent of pixels using the colors in the associated histogram.
Demo "Border the birds"
The demo finds the dominant hue around the mean luminance and uses that hue and mean saturation and luminance to create a border.
The demo uses images from Wikipedia's image-of-the-day collection, as they allow cross-site access.
// Demo image URLs (Wikimedia Commons thumbnails that permit cross-site access).
// BUG FIX: the last entry previously ended with a double comma (",,"), which
// creates a sparse hole in the array; images.length was 7 and the final
// loadImage() call received an undefined URL.
var images = [
// "https://upload.wikimedia.org/wikipedia/commons/f/fe/Goldcrest_1.jpg",
"https://upload.wikimedia.org/wikipedia/commons/thumb/2/22/Cistothorus_palustris_CT.jpg/450px-Cistothorus_palustris_CT.jpg",
"https://upload.wikimedia.org/wikipedia/commons/thumb/3/37/Black-necked_Stilt_%28Himantopus_mexicanus%29%2C_Corte_Madera.jpg/362px-Black-necked_Stilt_%28Himantopus_mexicanus%29%2C_Corte_Madera.jpg",
"https://upload.wikimedia.org/wikipedia/commons/thumb/c/cc/Daurian_redstart_at_Daisen_Park_in_Osaka%2C_January_2016.jpg/573px-Daurian_redstart_at_Daisen_Park_in_Osaka%2C_January_2016.jpg",
"https://upload.wikimedia.org/wikipedia/commons/thumb/d/da/Myioborus_torquatus_Santa_Elena.JPG/675px-Myioborus_torquatus_Santa_Elena.JPG",
"https://upload.wikimedia.org/wikipedia/commons/thumb/e/ef/Great_tit_side-on.jpg/645px-Great_tit_side-on.jpg",
"https://upload.wikimedia.org/wikipedia/commons/thumb/5/55/Sarcoramphus_papa_%28K%C3%B6nigsgeier_-_King_Vulture%29_-_Weltvogelpark_Walsrode_2013-01.jpg/675px-Sarcoramphus_papa_%28K%C3%B6nigsgeier_-_King_Vulture%29_-_Weltvogelpark_Walsrode_2013-01.jpg",
];
// Pops one URL off `images`, finds its dominant hue near the mean luminance,
// and appends the image to the document wrapped in a gradient border made
// from that colour. Re-schedules itself until `images` is empty.
// NOTE(review): only the "load" branch re-arms the timer, so a single image
// that fires "error" silently stops the whole chain — confirm this is intended.
function loadImageAddBorder(){
if(images.length === 0){
return ; // all done
}
var imageSrc = images.shift();
imageTools.loadImage(
imageSrc,true,
function(event){
var pixels, topRGB, c, rImage, wImage, botRGB, grad, i, hsl, h, s, l, hues, hslMap, wHue, hueCount, j, hr, gradCols, border;
const IMAGE_WORK_SIZE = 128;
const ICOUNT = IMAGE_WORK_SIZE * IMAGE_WORK_SIZE;
if(event.type === "load"){
rImage = imageTools.createImage(IMAGE_WORK_SIZE, IMAGE_WORK_SIZE); // reducing image
c = rImage.ctx;
// This is where you can crop the image. In this example I only look at the center of the image
// (the image is drawn 16px larger on every side, so the outer edge falls off the canvas)
c.drawImage(this,-16,-16,IMAGE_WORK_SIZE + 32, IMAGE_WORK_SIZE + 32); // reduce image size
pixels = imageTools.getImageData(rImage).data;
h = 0;
s = 0;
l = 0;
// these are the colour ranges you wish to look at; each entry filters pixels
// by luminance/saturation and accumulates a 360-bucket hue histogram
hues = [{
lum : {
low :20,
high : 60,
tot : 0,
},
sat : { // all saturations
low : 0,
high : 101,
tot : 0,
},
count : 0,
histo : new Uint16Array(360),
}]
// First pass: root-mean-square luminance of the reduced image
// (luminance values are squared before averaging, then square-rooted).
for(i = 0; i < pixels.length; i += 4){
hsl = imageTools.rgb2hsl(pixels[i],pixels[i + 1],pixels[i + 2]);
l += hsl.l * hsl.l;
}
l = Math.sqrt(l/ICOUNT);
// Centre the single luminance window on the mean just computed.
hues[0].lum.low = l - 30;
hues[0].lum.high = l + 30;
// Second pass: histogram the hue of every pixel that falls inside a range.
for(i = 0; i < pixels.length; i += 4){
hsl = imageTools.rgb2hsl(pixels[i], pixels[i + 1], pixels[i + 2]);
for(j = 0; j < hues.length; j ++){
hr = hues[j]; // hue range
if(hsl.l >= hr.lum.low && hsl.l < hr.lum.high){
if(hsl.s >= hr.sat.low && hsl.s < hr.sat.high){
hr.histo[hsl.h] += 1;
hr.count += 1;
hr.lum.tot += hsl.l * hsl.l;
hr.sat.tot += hsl.s;
}
}
}
}
// get weighted hue for image
// just to simplify code hue 0 and 1 (reds) can combine
for(j = 0; j < hues.length; j += 1){
hr = hues[j];
wHue = 0;
hueCount = 0;
hr.histo[1] += hr.histo[0];
for(i = 1; i < 360; i ++){
wHue += (i) * hr.histo[i];
hueCount += hr.histo[i];
}
// Weighted-mean hue, mean saturation, RMS luminance for this range.
h = Math.floor(wHue / hueCount);
s = Math.floor(hr.sat.tot / hr.count);
l = Math.floor(Math.sqrt(hr.lum.tot / hr.count));
hr.rgb = imageTools.hsl2rgb(h,s,l);
hr.rgba = imageTools.hex2RGBA(imageTools.rgba2Hex4(hr.rgb));
}
gradCols = hues.map(h=>h.rgba);
if(gradCols.length === 1){
gradCols.push(gradCols[0]); // this is a quick fix if only one colour the gradient needs more than one
}
// Border width: a tenth of the smaller image dimension, capped at 64px.
border = Math.floor(Math.min(this.width / 10,this.height / 10, 64));
wImage = imageTools.padImage(this,border,border);
// NOTE(review): the gradient is created on `c` (the reduced image's context)
// but used as wImage.ctx's fillStyle — gradients carry their own geometry so
// this renders, but wImage.ctx was presumably intended; confirm.
wImage.ctx.fillStyle = imageTools.createGradient(
c, "linear", 0, 0, 0, wImage.height,gradCols
);
wImage.ctx.fillRect(0, 0, wImage.width, wImage.height);
// Black inset frame, then the original image drawn inside the border.
wImage.ctx.fillStyle = "black";
wImage.ctx.fillRect(border - 2, border - 2, wImage.width - border * 2 + 4, wImage.height - border * 2 + 4);
wImage.ctx.drawImage(this,border,border);
wImage.style.width = (innerWidth -64) + "px";
document.body.appendChild(wImage);
setTimeout(loadImageAddBorder,1000);
}
}
)
}
// Kick off the processing chain on the next tick.
setTimeout(loadImageAddBorder,0);
/** ImageTools.js begin **/
// Small canvas/colour utility bundle used by the demo above.
var imageTools = (function () {
// This interface is as is.
// No warranties, no guarantees, and
/*******************************/
/* NOT to be used commercially */
/*******************************/
var workImg,workImg1,keep; // for internal use
keep = false;
// One colour channel (0-255) as a two-digit lowercase hex string.
const toHex = v => (v < 0x10 ? "0" : "") + Math.floor(v).toString(16);
var tools = {
// Create a blank <canvas> of the given size (no 2D context attached).
canvas(width, height) { // create a blank image (canvas)
var c = document.createElement("canvas");
c.width = width;
c.height = height;
return c;
},
// Create a canvas with its 2D context attached as `.ctx`.
createImage (width, height) {
var i = this.canvas(width, height);
i.ctx = i.getContext("2d");
return i;
},
// Load an image; `cb` receives both "load" and "error" events —
// check event.type in the callback for status.
loadImage (url, crossSite, cb) { // cb is callback. Check first argument for status
var i = new Image();
if(crossSite){
i.setAttribute('crossOrigin', 'anonymous');
}
i.src = url;
i.addEventListener('load', cb);
i.addEventListener('error', cb);
return i;
},
// Copy an existing image/canvas onto a fresh canvas (with `.ctx`).
image2Canvas(img) {
var i = this.canvas(img.width, img.height);
i.ctx = i.getContext("2d");
i.ctx.drawImage(img, 0, 0);
return i;
},
// RGB (each 0-255) -> {h, s, l} with h floored to 0-359 degrees and
// s, l floored to integer percentages 0-100.
rgb2hsl(r,g,b){ // integers in the range 0-255
var min, max, dif, h, l, s;
h = l = s = 0;
r /= 255; // normalize channels
g /= 255;
b /= 255;
min = Math.min(r, g, b);
max = Math.max(r, g, b);
if(min === max){ // no colour so early exit
return {
h, s,
l : Math.floor(min * 100), // Note there is loss in this conversion
}
}
dif = max - min;
l = (max + min) / 2;
if (l > 0.5) { s = dif / (2 - max - min) }
else { s = dif / (max + min) }
// Hue sector depends on which channel is the maximum.
if (max === r) {
if (g < b) { h = (g - b) / dif + 6.0 }
else { h = (g - b) / dif }
} else if(max === g) { h = (b - r) / dif + 2.0 }
else {h = (r - g) / dif + 4.0 }
h = Math.floor(h * 60);
s = Math.floor(s * 100);
l = Math.floor(l * 100);
return {h, s, l};
},
// Inverse of rgb2hsl; returns {r, g, b} floored to integers 0-255.
hsl2rgb (h, s, l) { // h in range integer 0-360 (cyclic) and s,l 0-100 both integers
var p, q;
const hue2Channel = (h) => {
h = h < 0.0 ? h + 1 : h > 1 ? h - 1 : h;
if (h < 1 / 6) { return p + (q - p) * 6 * h }
if (h < 1 / 2) { return q }
if (h < 2 / 3) { return p + (q - p) * (2 / 3 - h) * 6 }
return p;
}
s = Math.floor(s)/100;
l = Math.floor(l)/100;
if (s <= 0){ // no colour: all channels equal the luminance
return {
r : Math.floor(l * 255),
g : Math.floor(l * 255),
b : Math.floor(l * 255),
}
}
h = (((Math.floor(h) % 360) + 360) % 360) / 360; // normalize (handles negative and >360 hues)
if (l < 1 / 2) { q = l * (1 + s) }
else { q = l + s - l * s }
p = 2 * l - q;
return {
r : Math.floor(hue2Channel(h + 1 / 3) * 255),
g : Math.floor(hue2Channel(h) * 255),
b : Math.floor(hue2Channel(h - 1 / 3) * 255),
}
},
// Channels (0-255) or a single {r,g,b[,a]} object -> "#rrggbbaa" string.
rgba2Hex4(r,g,b,a=255){
if(typeof r === "object"){
g = r.g;
b = r.b;
a = r.a !== undefined ? r.a : a;
r = r.r;
}
return `#${toHex(r)}${toHex(g)}${toHex(b)}${toHex(a)}`;
},
// Hex string (4, 5, 7 or 9 chars incl. '#') -> "rgba(r,g,b,a)" CSS string.
// Note: the shorthand forms multiply each digit by 16, so "F" maps to 240,
// not 255; alpha shorthand is divided by 16 (max 0.9375).
hex2RGBA(hex){ // Not CSS colour as can have extra 2 or 1 chars for alpha
// #FFFF & #FFFFFFFF last F and FF are the alpha range 0-F & 00-FF
if(typeof hex === "string"){
var str = "rgba(";
if(hex.length === 4 || hex.length === 5){
str += (parseInt(hex.substr(1,1),16) * 16) + ",";
str += (parseInt(hex.substr(2,1),16) * 16) + ",";
str += (parseInt(hex.substr(3,1),16) * 16) + ",";
if(hex.length === 5){
str += (parseInt(hex.substr(4,1),16) / 16);
}else{
str += "1";
}
return str + ")";
}
if(hex.length === 7 || hex.length === 9){
str += parseInt(hex.substr(1,2),16) + ",";
str += parseInt(hex.substr(3,2),16) + ",";
str += parseInt(hex.substr(5,2),16) + ",";
if(hex.length === 9){
str += (parseInt(hex.substr(7,2),16) / 255).toFixed(3);
}else{
str += "1";
}
return str + ")";
}
// Unrecognized length: fall through to transparent black.
return "rgba(0,0,0,0)";
}
},
// Build a linear or radial CanvasGradient with evenly spaced stops.
createGradient(ctx, type, x, y, xx, yy, colours){ // Colours MUST be array of hex colours NOT CSS colours
// See this.hex2RGBA for details of format
var i,g,c;
var len = colours.length;
if(type.toLowerCase() === "linear"){
g = ctx.createLinearGradient(x,y,xx,yy);
}else{
g = ctx.createRadialGradient(x,y,xx,x,y,yy);
}
for(i = 0; i < len; i++){
c = colours[i];
if(typeof c === "string"){
if(c[0] === "#"){
c = this.hex2RGBA(c);
}
g.addColorStop(Math.min(1,i / (len -1)),c); // need to clamp top to 1 due to floating point errors causes addColorStop to throw rangeError when number over 1
}
}
return g;
},
// Centre `img` on a canvas padded by `amount` pixels on every side.
// NOTE(review): callers above pass a third argument; it is ignored here —
// padding is always symmetric. Confirm that is acceptable.
padImage(img,amount){
var image = this.canvas(img.width + amount * 2, img.height + amount * 2);
image.ctx = image.getContext("2d");
image.ctx.drawImage(img, amount, amount);
return image;
},
// Return ImageData for an image/canvas; uses a cached ctx.imageData if one
// was attached, otherwise draws onto a canvas as needed and reads it back.
getImageData(image, w = image.width, h = image.height) { // cut down version to prevent integration
if(image.ctx && image.ctx.imageData){
return image.ctx.imageData;
}
return (image.ctx || (this.image2Canvas(image).ctx)).getImageData(0, 0, w, h);
},
};
return tools;
})();
/** ImageTools.js end **/
Sounds like an interesting problem to have!
Each algorithm you're using to generate colors likely has a bias toward certain colors in their respective random color algorithms.
What you're likely seeing is the end result of that bias for each. Both are selecting darker and lighter colors independently.
It may make more sense to keep a hash of common colors and use that hash as opposed to using randomly generated colors.
Either way your 'fitness' check, the algorithm that checks to see which color has the best average contrast is picking lighter and darker colors for both color sets. This makes sense, lighter images should have darker backgrounds and darker images should have lighter backgrounds.
Although you don't explicitly say, I'd bet my bottom dollar you're getting dark background for lighter average images and brighter backgrounds on darker images.
Alternatively rather than using a hash of colors, you could generate multiple random color palettes and combine the result sets to average them out.
Or rather than taking the 6 most commonly occurring colors, why not take the overall color gradient and try against that?
I've put together an example where I get the most commonly occurring color and invert it to get the complementary color. This in theory at least should provide a good contrast ratio for the image as a whole.
Using the most commonly occurring color in the image seems to work quite well. as outlined in my example below. This is a similar technique that Blindman67 uses without the massive bloating of including libraries and performing un-necessary steps, I borrowed the same images that Blindman67 uses for a fair comparison of the result set.
See Get average color of image via Javascript for getting average color (getAverageRGB() function written by James).
// Demo image URLs (Wikimedia Commons thumbnails that permit cross-site access).
var images = [
"https://upload.wikimedia.org/wikipedia/commons/thumb/2/22/Cistothorus_palustris_CT.jpg/450px-Cistothorus_palustris_CT.jpg",
"https://upload.wikimedia.org/wikipedia/commons/thumb/3/37/Black-necked_Stilt_%28Himantopus_mexicanus%29%2C_Corte_Madera.jpg/362px-Black-necked_Stilt_%28Himantopus_mexicanus%29%2C_Corte_Madera.jpg",
"https://upload.wikimedia.org/wikipedia/commons/thumb/c/cc/Daurian_redstart_at_Daisen_Park_in_Osaka%2C_January_2016.jpg/573px-Daurian_redstart_at_Daisen_Park_in_Osaka%2C_January_2016.jpg",
"https://upload.wikimedia.org/wikipedia/commons/thumb/d/da/Myioborus_torquatus_Santa_Elena.JPG/675px-Myioborus_torquatus_Santa_Elena.JPG",
"https://upload.wikimedia.org/wikipedia/commons/thumb/e/ef/Great_tit_side-on.jpg/645px-Great_tit_side-on.jpg",
"https://upload.wikimedia.org/wikipedia/commons/thumb/5/55/Sarcoramphus_papa_%28K%C3%B6nigsgeier_-_King_Vulture%29_-_Weltvogelpark_Walsrode_2013-01.jpg/675px-Sarcoramphus_papa_%28K%C3%B6nigsgeier_-_King_Vulture%29_-_Weltvogelpark_Walsrode_2013-01.jpg",
];
// append images: one <img> per URL, each wrapped in a <div> whose background
// is set to the image's average colour once the image loads.
for (var i = 0; i < images.length; i++) {
var img = document.createElement('img'),
div = document.createElement('div');
img.crossOrigin = "Anonymous";
img.style.border = '1px solid black';
img.style.margin = '5px';
div.appendChild(img);
document.body.appendChild(div);
// IIFE captures this iteration's img/div — `var` is function-scoped, so the
// load handlers would otherwise all see the last pair.
(function(img, div) {
img.addEventListener('load', function() {
var avg = getAverageRGB(img);
div.style = 'background: rgb(' + avg.r + ',' + avg.g + ',' + avg.b + ')';
img.style.height = '128px';
img.style.width = '128px';
});
// Set src after the listener so a cached image cannot fire "load" early.
img.src = images[i];
}(img, div));
}
/**
 * Average RGB colour of an image element, sampled every 5th pixel.
 * Returns {r:0,g:0,b:0} when canvas is unsupported or the canvas is
 * tainted by a cross-origin image.
 * (Not my work — see http://jsfiddle.net/xLF38/818/.)
 */
function getAverageRGB(imgEl) {
  const SAMPLE_STRIDE = 5; // only visit every 5th pixel
  const fallback = { r: 0, g: 0, b: 0 }; // for non-supporting envs
  const canvas = document.createElement('canvas');
  const context = canvas.getContext && canvas.getContext('2d');
  if (!context) {
    return fallback;
  }
  const height = canvas.height = imgEl.offsetHeight || imgEl.height;
  const width = canvas.width = imgEl.offsetWidth || imgEl.width;
  context.drawImage(imgEl, 0, 0);
  let data;
  try {
    data = context.getImageData(0, 0, width, height);
  } catch (e) {
    // Security error: cross-origin image without CORS taints the canvas.
    return fallback;
  }
  const bytes = data.data;
  const sum = { r: 0, g: 0, b: 0 };
  let count = 0;
  // The original walked `i` from -4 in steps of stride*4, so the first
  // sampled byte index is stride*4 - 4; keep that exact sampling pattern.
  for (let i = SAMPLE_STRIDE * 4 - 4; i < bytes.length; i += SAMPLE_STRIDE * 4) {
    count += 1;
    sum.r += bytes[i];
    sum.g += bytes[i + 1];
    sum.b += bytes[i + 2];
  }
  // `| 0` truncates like the original's `~~` (and maps NaN to 0 identically).
  return {
    r: (sum.r / count) | 0,
    g: (sum.g / count) | 0,
    b: (sum.b / count) | 0,
  };
}
It depends on where the text is that is overlayed on the background image. If the background has some large feature on part of it, the text will likely be placed away from that, so must contrast with that part of the image, but you may also want to pick up a certain color or complement the other colors in the image. I think practically speaking you will need to create a widget for people to easily slide/adjust the foreground color interactively. Or you will need to create a deep learning system in order to do this really effectively.

Get the length of a SVG line,rect,polygon and circle tags

I managed to find the length of the paths in svg, but now i want to find the length for the line, rect, polygon and circle tags from SVG, I am really lost right now, and clues ? or are there already some functions like there is for path?
In case anyone else wants to find the length of these tags, I made some functions for each of them and tested them; I'd say they work pretty well, and this was what I needed.
// Path-length helpers for SVG shapes. Each function takes a jQuery-wrapped
// element (anything exposing .attr(name), plus .get(0) for paths).
var tools = {
    /**
     * Perimeter of a <rect>.
     *
     * @param el the rect element, e.g. $('.rect')
     * @return the length of the rect in px
     */
    getRectLength: function (el) {
        var w = el.attr('width');
        var h = el.attr('height');
        return (w * 2) + (h * 2);
    },
    /**
     * Perimeter of a <polygon>.
     *
     * Sums the distance between each pair of consecutive points, then adds
     * the implicit closing segment from the last point back to the first.
     *
     * BUG FIX: the original whitespace-stripping assigned coords[1] into
     * coords[0] (overwriting x with y) and only added the closing segment
     * when i == points.length-2, producing wrong perimeters (e.g. 61.798...
     * instead of 56.004 for a 14.001-sided square).
     *
     * @param el the polygon element, e.g. $('.polygon')
     * @return the length of the polygon in px
     */
    getPolygonLength: function (el) {
        var raw = (el.attr('points') || '').trim().split(/\s+/);
        var coords = [];
        for (var i = 0; i < raw.length; i++) {
            var pair = raw[i].split(',');
            var x = parseFloat(pair[0]);
            var y = parseFloat(pair[1]);
            if (!isNaN(x) && !isNaN(y)) {
                coords.push([x, y]);
            }
        }
        if (coords.length < 2) {
            return 0;
        }
        var lineLength = 0;
        for (var j = 1; j < coords.length; j++) {
            lineLength += Math.sqrt(
                Math.pow(coords[j][0] - coords[j - 1][0], 2) +
                Math.pow(coords[j][1] - coords[j - 1][1], 2));
        }
        // Close the shape (a two-point list is a plain segment, not a loop).
        if (coords.length > 2) {
            var first = coords[0], last = coords[coords.length - 1];
            lineLength += Math.sqrt(
                Math.pow(first[0] - last[0], 2) +
                Math.pow(first[1] - last[1], 2));
        }
        return lineLength;
    },
    /**
     * Length of a <line> (Pythagorean distance between its endpoints).
     *
     * @param el the line element, e.g. $('.line')
     * @return the length of the line in px
     */
    getLineLength: function (el) {
        var x1 = el.attr('x1');
        var x2 = el.attr('x2');
        var y1 = el.attr('y1');
        var y2 = el.attr('y2');
        return Math.sqrt(Math.pow((x2 - x1), 2) + Math.pow((y2 - y1), 2));
    },
    /**
     * Circumference of a <circle> (2 * PI * r).
     *
     * @param el the circle element
     * @return the length of the circle in px
     */
    getCircleLength: function (el) {
        var r = el.attr('r');
        return 2 * Math.PI * r;
    },
    /**
     * Length of a <path>, via the DOM's native getTotalLength().
     *
     * @param el the path element
     * @return the length of the path in px
     */
    getPathLength: function (el) {
        var pathCoords = el.get(0);
        return pathCoords.getTotalLength();
    }
}
I think you are looking at the problem incorrectly :
length of rectangle = 2 * (width + height)
length of line ( use pythagorean theorem for any non vertical line c^2 = a^2 + b^2 ) or use ( x1 to x2 ) for horizontal , ( y1 to y2 ) for vertical
length of circle = 2 × π × radius ... etc
I tried to use the answer specified by ZetCoby for polygons, but in testing I found that the path length it returns is wrong.
Example:
<polygon points="10.524,10.524 10.524,24.525 24.525,24.525 24.525,10.524" style="fill:none;stroke-width:0.2;stroke:black"></polygon>
The above polygon should have a length of 56.004, but the getPolygonLength(el) function returns a value of 61.79898987322332.
I wrote an algorithm to correctly calculate the path length of an SVG polygon, so I thought I should contribute it back since this is the first hit on google when searching for this problem.
Here is my function. Enjoy...
/**
 * Path length of an SVG <polygon> (or <polyline>-style point list).
 * Sums every consecutive edge, then adds the closing segment back to the
 * first point; a two-point list is measured as a single line segment.
 * Malformed coordinate pairs contribute a length of 0.
 */
function polygon_length(el) {
    // Parse "x,y" into [x, y]; undefined when the pair is malformed.
    const parsePair = (raw) => {
        const parts = raw.split(',');
        if (parts.length !== 2) {
            return undefined;
        }
        if (isNaN(parts[0]) || isNaN(parts[1])) {
            return undefined;
        }
        return [parseFloat(parts[0]), parseFloat(parts[1])];
    };
    // Euclidean distance between two parsed pairs; 0 when either is missing.
    const segment = (p, q) =>
        p !== undefined && q !== undefined
            ? Math.sqrt(Math.pow(q[0] - p[0], 2) + Math.pow(q[1] - p[1], 2))
            : 0;
    const points = el.attr('points').split(' ');
    if (points.length <= 1) {
        return 0;
    }
    let total = 0;
    // Consecutive edges of the polygon.
    if (points.length > 2) {
        for (let i = 0; i < points.length - 1; i++) {
            total += segment(parsePair(points[i]), parsePair(points[i + 1]));
        }
    }
    // Measure a plain line, or close the polygon.
    total += segment(parsePair(points[0]), parsePair(points[points.length - 1]));
    return total;
}
In SVG 2 all geometry elements will have a pathLength property but as of May 2017 this is still to be implemented in most browsers.
See https://developer.mozilla.org/en-US/docs/Web/API/SVGGeometryElement for more info.
We can future proof #zetcoby 's answer with:
if( el.pathLength ) {
return el.pathLength;
}
else {
// rest of code...
}

Fixing every other row is displaced in Value Noise algorithm implementation

Below is my Value Noise implementation, which I'm using for terrain generation. It creates strange artifacts when the terrain's length (Y size) is longer than its width (X size), but not otherwise.
I've been staring at this for hours. Any idea what's causing this?
(Screenshots from the demo. You can mess with the code in your browser console and see the results immediately by putting THREE.Terrain.Value = ValueNoise; rebuild(); after the code below.)
1:1 Aspect Ratio:
1:1.1 Aspect Ratio:
/**
* Generate a heightmap using white noise.
*
* #param {Vector3[]} g The terrain vertices.
* #param {Object} options Settings
* #param {Number} scale The resolution of the resulting heightmap.
* #param {Number} segments The width of the target heightmap.
* #param {Number} range The altitude of the noise.
* #param {Number[]} data The target heightmap.
*/
function WhiteNoise(g, options, scale, segments, range, data) {
if (scale > segments) return;
var i = 0,
j = 0,
xl = segments,
yl = segments,
inc = Math.floor(segments / scale),
k;
// Walk over the target. For a target of size W and a resolution of N,
// set every W/N points (in both directions).
// NOTE(review): `data` is written below with row stride xl (= segments),
// but the copy-back loop at the bottom reassigns xl to options.xSegments+1
// and reads data with THAT stride. Whenever segments != options.xSegments+1
// the rows are read back misaligned — every other row appears displaced,
// which matches the artifact described in the question.
for (i = 0; i <= xl; i += inc) {
for (j = 0; j <= yl; j += inc) {
k = j * xl + i;
data[k] = Math.random() * range;
/* c b *
 * l t */
// Fetch the three previously-set neighbours of this lattice point.
// NOTE(review): `|| t` treats a legitimately-zero height the same as a
// missing one, substituting t — confirm that is intended.
var t = data[k],
l = data[ j * xl + (i-inc)] || t, // left
b = data[(j-inc) * xl + i ] || t, // bottom
c = data[(j-inc) * xl + (i-inc)] || t; // corner
// Interpolate between adjacent points to set the height of
// higher-resolution target data.
// Bilinear interpolation over the cell [i-inc, i) x [j-inc, j).
for (var lastX = i-inc, x = lastX; x < i; x++) {
for (var lastY = j-inc, y = lastY; y < j; y++) {
if (x === lastX && y === lastY) continue;
var px = ((x-lastX) / inc),
py = ((y-lastY) / inc),
r1 = px * b + (1-px) * c,
r2 = px * t + (1-px) * l;
data[y * xl + x] = py * r2 + (1-py) * r1;
}
}
}
}
// Assign the temporary data back to the actual terrain heightmap.
// Accumulate additively across multiple calls to WhiteNoise.
// (xl/yl are repurposed here as the real heightmap dimensions — see the
// stride-mismatch note above.)
for (i = 0, xl = options.xSegments + 1; i < xl; i++) {
for (j = 0, yl = options.ySegments + 1; j < yl; j++) {
k = j * xl + i;
g[k].z += data[k] || 0;
}
}
}
/**
* Generate random terrain using value noise.
*
* The basic approach of value noise is to generate white noise at a
* smaller octave than the target and then interpolate to get a higher-
* resolution result. This is then repeated at different resolutions.
*
* #param {Vector3[]} g The terrain vertices.
* #param {Object} options Settings
*/
ValueNoise = function(g, options) {
// Set the segment length to the smallest power of 2 that is greater
// than the number of vertices in either dimension of the plane
var segments = Math.max(options.xSegments, options.ySegments) + 1, n;
for (n = 1; Math.pow(2, n) < segments; n++) {}
segments = Math.pow(2, n);
// Store the array of white noise outside of the WhiteNoise function to
// avoid allocating a bunch of unnecessary arrays; we can just
// overwrite old data each time WhiteNoise() is called.
// NOTE(review): WhiteNoise writes lattice points up to index
// segments*segments + segments, so (segments+1)*(segments+1) may be the
// safer size here — confirm against the answer below.
var data = new Array(segments*(segments+1));
// Layer white noise at different resolutions.
// Each octave doubles the lattice resolution and roughly halves the
// amplitude (range * 2^(2.4 - 1.2i)).
var range = options.maxHeight - options.minHeight;
for (var i = 2; i < 7; i++) {
WhiteNoise(g, options, Math.pow(2, i), segments, range * Math.pow(2, 2.4-i*1.2), data);
}
// Clamp and stretch the results to the requested height interval.
// (Relies on the THREE.Terrain library being loaded.)
THREE.Terrain.Clamp(g, {
maxHeight: options.maxHeight,
minHeight: options.minHeight,
stretch: true,
});
};
When you assign the height change of the temporary data field, you really have two different indices, because you have two different map sizes: the original map and the temporary map inflated to the next power of 2. So:
for (i = 0, xl = options.xSegments + 1; i < xl; i++) {
for (j = 0, yl = options.ySegments + 1; j < yl; j++) {
var kg = j * xl + i;
var kd = j * segments + i;
g[kg] += data[kd];
}
}
I also think that you might have an off-by-one error in your data index. The size of data should be (segments + 1) * (segments + 1), because you need the outer cells in both dimensions and your xl and yl should be segments + 1.

Categories

Resources