I am required to build a simple application that converts a color or grayscale image into a black and white one. My idea was to loop through each pixel, check the RGB values, and if all of them are below a specific value (say 20) redraw the pixel as black, otherwise redraw it as white. Something like this:
function blackWhite(context, canvas) {
    var imgData = context.getImageData(0, 0, canvas.width, canvas.height);
    var pixels = imgData.data;
    for (var i = 0, n = pixels.length; i < n; i += 4) {
        if (pixels[i] <= 20 || pixels[i + 1] <= 20 || pixels[i + 2] <= 20) {
            pixels[i    ] = 0;   // red
            pixels[i + 1] = 0;   // green
            pixels[i + 2] = 0;   // blue
        } else {
            pixels[i    ] = 255; // red
            pixels[i + 1] = 255; // green
            pixels[i + 2] = 255; // blue
        }
    }
    // redraw the image in black & white
    context.putImageData(imgData, 0, 0);
}
The big question is: what is the correct combination of red, green and blue to decide that a pixel should be black, taking into account that colors are perceived differently by the human eye? For example, green matters more to our eyes than red or blue. I have tried some values experimentally, but I don't get close to the kind of black and white image you get when digitizing a sheet with a scanner in black and white mode.
Of course if there is a much faster way to do this, I will totally appreciate it.
I believe what you're looking for is the relative luminance. While not the most advanced method of thresholding, it better follows the way humans perceive light, which is what I think you want.
https://en.wikipedia.org/wiki/Relative_luminance
From the wikipedia article the luminance can be calculated as follows:
let lum = .2126 * red + .7152 * green + .0722 * blue
If the red, green and blue values are normalized to the range 0-1, the resulting luminance will also be a fraction of one, so to split right in the middle use a threshold of 0.5 (for 0-255 canvas data the equivalent threshold is about 128).
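As a minimal sketch (assuming a 2D context ctx whose pixel data is in the usual 0-255 range, so the mid-point threshold becomes 128):
function luminanceThreshold(ctx, width, height) {
    var imgData = ctx.getImageData(0, 0, width, height);
    var d = imgData.data;
    for (var i = 0; i < d.length; i += 4) {
        // relative luminance of this pixel, still in the 0-255 range
        var lum = 0.2126 * d[i] + 0.7152 * d[i + 1] + 0.0722 * d[i + 2];
        var v = lum >= 128 ? 255 : 0; // mid-point threshold (0.5 of full range)
        d[i] = d[i + 1] = d[i + 2] = v;
    }
    ctx.putImageData(imgData, 0, 0);
}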
EDIT
The real issue comes with selecting the threshold. Not all images are lit the same way, and a picture with many low-luminosity pixels (i.e., more blacks) will benefit from a lower threshold.
There are a couple of techniques you can consider, such as analyzing the histogram of the image.
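One very simple histogram-based choice (a sketch only, not necessarily what the author has in mind, and the helper name is hypothetical) is to build a luma histogram and use its mean as the threshold:
// Hypothetical helper: returns the mean luma of the image, usable as a threshold.
function meanThreshold(ctx, width, height) {
    var d = ctx.getImageData(0, 0, width, height).data;
    var histogram = new Uint32Array(256);
    for (var i = 0; i < d.length; i += 4) {
        var luma = Math.round(0.2126 * d[i] + 0.7152 * d[i + 1] + 0.0722 * d[i + 2]);
        histogram[luma]++;
    }
    var sum = 0, count = 0;
    for (var l = 0; l < 256; l++) {
        sum += l * histogram[l];
        count += histogram[l];
    }
    return Math.round(sum / count); // mean luma of all pixels
}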
(I'm adding this for future visitors.) For converting images to black & white where characteristics such as luma dominance, gamma etc. are unknown, Otsu's method tends to provide good results.
It's a fairly simple algorithm which uses a luma histogram of the image combined with pixel count to find an optimal cluster-based threshold value.
The main steps are (source: ibid):
Building a Histogram
So the first thing we need to do is build a histogram. This requires converting RGB to luminance using either a flat 33.3% factor or, as in Khauri's answer, the Rec. 709 (for HD) formula (Rec. 601 can be used as well). Note that the Rec.* factors assume the RGB is in linear format; modern browsers typically apply gamma (a non-linear encoding) to the image used for the canvas, but let's ignore that here.
A flat conversion can be beneficial performance-wise but provides a less accurate result:
var luma = Math.round((r + g + b) * 0.3333);
while Rec.709 will give a better result (with linear data):
var luma = Math.round(r * 0.2126 + g * 0.7152 + b * 0.0722);
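If you did want to take the gamma mentioned above into account, a rough sketch (assuming the canvas holds sRGB-encoded data; the helper names here are my own, not part of the answer) would linearize each channel before applying the Rec. 709 weights:
// Approximate sRGB -> linear conversion for a single 0-255 channel value
function srgbToLinear(c) {
    c /= 255;
    return c <= 0.04045 ? c / 12.92 : Math.pow((c + 0.055) / 1.055, 2.4);
}
// Rec. 709 luma computed on linearized channels, scaled back to 0-255
function linearLuma(r, g, b) {
    var y = 0.2126 * srgbToLinear(r) + 0.7152 * srgbToLinear(g) + 0.0722 * srgbToLinear(b);
    return Math.round(y * 255);
}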
So, convert each pixel to an integer luma value, use that value as an index into a 256-entry array, and increment the count at that index:
var data = ctx.getImageData(0, 0, width, height).data;
var histogram = new Uint16Array(256); // assuming smaller images here, otherwise use Uint32Array
// build the histogram using Rec. 709 for luma
for (var i = 0; i < data.length; i += 4) {
var luma = Math.round(data[i] * 0.2126 + data[i + 1] * 0.7152 + data[i + 2] * 0.0722);
histogram[luma]++; // increment for this luma value
}
Find Optimal Cluster-based Threshold value
Now that we have a histogram we can feed it to Otsu's method and obtain a black & white version of the image.
Translated into JavaScript we would do (source for method part based on variant 2 from ibid):
// Otsu's method, from: https://en.wikipedia.org/wiki/Otsu%27s_Method#Variant_2
//
// The input argument pixelsNumber is the number of pixels in the given image. The
// input argument histogram is a 256-element histogram of the grayscale image's
// different gray levels.
// This function outputs the threshold for the image.
function otsu(histogram, pixelsNumber) {
var sum = 0, sumB = 0, wB = 0, wF = 0, mB, mF, max = 0, between, threshold = 0;
// total weighted sum over the whole histogram (needed for the foreground mean below)
for (var i = 0; i < 256; i++) sum += i * histogram[i];
for (i = 0; i < 256; i++) {
wB += histogram[i];
if (wB === 0) continue;
wF = pixelsNumber - wB;
if (wF === 0) break;
sumB += i * histogram[i];
mB = sumB / wB;
mF = (sum - sumB) / wF;
between = wB * wF * Math.pow(mB - mF, 2);
if (between > max) {
max = between;
threshold = i;
}
}
return threshold;
}
// Build luma histogram
var c = document.createElement("canvas"),
ctx = c.getContext("2d"),
img = new Image();
img.crossOrigin = "";
img.onload = go;
img.src = "//i.imgur.com/tbRxrWA.jpg";
function go() {
c.width = this.width;
c.height = this.height;
ctx.drawImage(this, 0, 0);
var idata = ctx.getImageData(0, 0, c.width, c.height);
var data = idata.data;
var histogram = new Uint16Array(256);
// build the histogram using Rec. 709 luma factors
for(var i = 0; i < data.length; i += 4) {
// note: here we also store luma to red-channel for reuse later.
var luma = data[i] = Math.round(data[i]*.2126+data[i+1]*.7152+data[i+2]*.0722);
histogram[luma]++;
}
// Get threshold
var threshold = otsu(histogram, c.width * c.height);
console.log("Threshold:", threshold);
// convert image
for(i = 0; i < data.length; i += 4) {
// remember we stored luma to red channel.. or use a separate array for luma values
data[i] = data[i+1] = data[i+2] = data[i] >= threshold ? 255 : 0;
}
// show result
ctx.putImageData(idata, 0, 0);
document.body.appendChild(c); // b&w version
document.body.appendChild(this); // original image below
}
Also see the improvements section.
UPDATE
I did not read the question correctly, so I have updated the answer to reflect it. I will leave the old answer in as a point of interest for those who are interested.
To create the simplest threshold filter, just sum the RGB channels and, if the sum is over the threshold value, make the pixel white, else black.
// assumes canvas and ctx defined;
// image to process, threshold level range 0 - 255
function twoTone(image, threshold) {
    ctx.drawImage(image, 0, 0);
    const imgD = ctx.getImageData(0, 0, canvas.width, canvas.height);
    const d = imgD.data;
    var v, i = 0;
    while (i < d.length) {
        // sum R, G and B and compare against threshold * 3
        v = (d[i++] + d[i++] + d[i]) < (threshold * 3) ? 0 : 255;
        i -= 2;                       // step back to the red channel
        d[i++] = d[i++] = d[i++] = v; // write the result to R, G and B
        i++;                          // skip alpha
    }
    ctx.putImageData(imgD, 0, 0);
}
But there are other ways. A modification of the above lets you create a gradient at the threshold, which softens the hard boundary the method above can produce.
Sometimes you need the function to be quick, or you may not be able to access the pixel data due to cross-origin security restrictions. In that case you can use a stacked composite operation approach that creates a threshold by layering the image in a succession of "multiply" and "lighter" globalCompositeOperations. Though this method can produce high quality results, the input values are a little fuzzy, as presented in the example below. You would have to calibrate it if you wanted to match a specific threshold and cutoff width.
Demo
UPDATE
As there is more information in the form of answers, I have updated the code to keep the comparisons fair.
I have updated the demo to include K3N's image of the bear and have provided 3 methods for finding the threshold via a mean. (I have modified the code in K3N's answer to fit the demo; it is functionally the same.) The buttons at the top let you select from the two images and the display size, and the last three find and apply the threshold value using three methods.
Use the sliders to change the threshold, cutoff and amount values where applicable.
const image = new Image;
const imageSrcs = ["https://upload.wikimedia.org/wikipedia/en/2/24/Lenna.png", "//i.imgur.com/tbRxrWA.jpg"];
var scaleFull = false;
var imageBWA;
var imageBWB;
var imageBWC;
var amountA = -1;
var thresholdA = -1;
var thresholdB = -1;
var cutoffC = -1;
var thresholdC = -1;
start();
//Using stacked global composite operations.
function twoTone(bw, amount, threshold) {
bw.ctx.save();
bw.ctx.globalCompositeOperation = "saturation";
bw.ctx.fillStyle = "#888"; // no saturation
bw.ctx.fillRect(0, 0, bw.width, bw.height);
amount /= 16;
threshold = 255 - threshold;
while (amount-- > 0) {
bw.ctx.globalAlpha = 1;
bw.ctx.globalCompositeOperation = "multiply";
bw.ctx.drawImage(bw, 0, 0);
const a = (threshold / 127);
bw.ctx.globalAlpha = a > 1 ? 1 : a;
bw.ctx.globalCompositeOperation = "lighter";
bw.ctx.drawImage(bw, 0, 0);
if (a > 1) {
bw.ctx.globalAlpha = a - 1 > 1 ? 1 : a - 1;
bw.ctx.drawImage(bw, 0, 0);
bw.ctx.drawImage(bw, 0, 0);
}
}
bw.ctx.restore();
}
// Using per pixel processing simple threshold.
function twoTonePixelP(bw, threshold) {
const imgD = bw.ctx.getImageData(0, 0, bw.width, bw.height);
const d = imgD.data;
var i = 0;
var v;
while (i < d.length) {
v = (d[i++] + d[i++] + d[i]) < (threshold * 3) ? 0 : 255;
i -= 2;
d[i++] = d[i++] = d[i++] = v;
i++;
}
bw.ctx.putImageData(imgD, 0, 0);
}
//Using per pixel processing with cutoff width
function twoTonePixelCutoff(bw, cutoff, threshold) {
if (cutoff === 0) {
twoTonePixelP(bw, threshold);
return;
}
const eCurve = (v, p) => {
var vv;
return (vv = Math.pow(v, 2)) / (vv + Math.pow(1 - v, 2))
}
const imgD = bw.ctx.getImageData(0, 0, bw.width, bw.height);
const d = imgD.data;
var i = 0;
var v;
const mult = 255 / cutoff;
const offset = -(threshold * mult) + 127;
while (i < d.length) {
v = ((d[i++] + d[i++] + d[i]) / 3) * mult + offset;
v = v < 0 ? 0 : v > 255 ? 255 : eCurve(v / 255) * 255;
i -= 2;
d[i++] = d[i++] = d[i++] = v;
i++;
}
bw.ctx.putImageData(imgD, 0, 0);
}
function OtsuMean(image, type) {
// Otsu's method, from: https://en.wikipedia.org/wiki/Otsu%27s_Method#Variant_2
//
// The input argument pixelsNumber is the number of pixels in the given image. The
// input argument histogram is a 256-element histogram of the grayscale image's
// different gray levels.
// This function outputs the threshold for the image.
function otsu(histogram, pixelsNumber) {
var sum = 0, sumB = 0, wB = 0, wF = 0, mB, mF, max = 0, between, threshold = 0;
// total weighted sum over the whole histogram (needed for the foreground mean below)
for (var i = 0; i < 256; i++) sum += i * histogram[i];
for (i = 0; i < 256; i++) {
wB += histogram[i];
if (wB === 0) continue;
wF = pixelsNumber - wB;
if (wF === 0) break;
sumB += i * histogram[i];
mB = sumB / wB;
mF = (sum - sumB) / wF;
between = wB * wF * Math.pow(mB - mF, 2);
if (between > max) {
max = between;
threshold = i;
}
}
return threshold;
}
const imgD = image.ctx.getImageData(0, 0, image.width, image.height);
const d = imgD.data;
var histogram = new Uint16Array(256);
if(type == 2){
for(var i = 0; i < d.length; i += 4) {
histogram[Math.round(d[i]*.2126+d[i+1]*.7152+d[i+2]*.0722)]++;
}
}else{
for(var i = 0; i < d.length; i += 4) {
histogram[Math.round(Math.sqrt(d[i]*d[i]*.2126+d[i+1]*d[i+1]*.7152+d[i+2]*d[i+2]*.0722))]++;
}
}
return otsu(histogram, image.width * image.height);
}
// finds mean via the perceptual 2,7,1 approximation rule
function calcMean(image, rule = 0){
if(rule == 2 || rule == 3){
return OtsuMean(image, rule);
}
const imgD = image.ctx.getImageData(0, 0, image.width, image.height);
const d = imgD.data;
var i = 0;
var sum = 0;
var count = 0
while (i < d.length) {
if(rule == 0){
sum += d[i++] * 0.2 + d[i++] * 0.7 + d[i++] * 0.1;
count += 1;
}else{
sum += d[i++] + d[i++] + d[i++];
count += 3;
}
i++;
}
return (sum / count) | 0;
}
// creates a canvas copy of an image.
function makeImageEditable(image) {
const c = document.createElement("canvas");
c.width = (image.width / 2) | 0;
c.height = (image.height / 2) | 0;
c.ctx = c.getContext("2d");
c.ctx.drawImage(image, 0, 0, c.width, c.height);
return c;
}
function updateEditableImage(image,editable) {
editable.width = (image.width / (scaleFull ? 1 : 2)) | 0;
editable.height = (image.height / (scaleFull ? 1 : 2)) | 0;
editable.ctx.drawImage(image, 0, 0, editable.width, editable.height);
}
// load test image and when loaded start UI
function start() {
image.crossOrigin = "anonymous";
image.src = imageSrcs[0];
imageStatus.textContent = "Loading image 1";
image.onload = ()=>{
imageBWA = makeImageEditable(image);
imageBWB = makeImageEditable(image);
imageBWC = makeImageEditable(image);
canA.appendChild(imageBWA);
canB.appendChild(imageBWB);
canC.appendChild(imageBWC);
imageStatus.textContent = "Loaded image 1.";
startUI();
}
}
function selectImage(idx){
imageStatus.textContent = "Loading image " + idx;
image.src = imageSrcs[idx];
image.onload = ()=>{
updateEditableImage(image, imageBWA);
updateEditableImage(image, imageBWB);
updateEditableImage(image, imageBWC);
thresholdC = thresholdB = thresholdA = -1; // force update
imageStatus.textContent = "Loaded image " + idx;
}
}
function toggleScale(){
scaleFull = !scaleFull;
imageStatus.textContent = scaleFull ? "Image full scale." : "Image half scale";
updateEditableImage(image, imageBWA);
updateEditableImage(image, imageBWB);
updateEditableImage(image, imageBWC);
thresholdC = thresholdB = thresholdA = -1; // force update
}
function findMean(e){
imageBWB.ctx.drawImage(image, 0, 0, imageBWB.width, imageBWB.height);
var t = inputThresholdB.value = inputThresholdC.value = calcMean(imageBWB,e.target.dataset.method);
imageStatus.textContent = "New threshold calculated " + t + ". Method : "+ e.target.dataset.name;
thresholdB = thresholdC = -1;
};
// start the UI
function startUI() {
imageControl.className = "imageSel";
selImage1Btn.addEventListener("click",(e)=>selectImage(0));
selImage2Btn.addEventListener("click",(e)=>selectImage(1));
togFullsize.addEventListener("click",toggleScale);
findMean1.addEventListener("click",findMean);
findMean2.addEventListener("click",findMean);
findMean3.addEventListener("click",findMean);
// updates top image
function update1() {
if (amountA !== inputAmountA.value || thresholdA !== inputThresholdA.value) {
amountA = inputAmountA.value;
thresholdA = inputThresholdA.value;
inputAmountValueA.textContent = amountA;
inputThresholdValueA.textContent = thresholdA;
imageBWA.ctx.drawImage(image, 0, 0, imageBWA.width, imageBWA.height);
twoTone(imageBWA, amountA, thresholdA);
}
requestAnimationFrame(update1);
}
requestAnimationFrame(update1);
// updates center image
function update2() {
if (thresholdB !== inputThresholdB.value) {
thresholdB = inputThresholdB.value;
inputThresholdValueB.textContent = thresholdB;
imageBWB.ctx.drawImage(image, 0, 0, imageBWB.width, imageBWB.height);
twoTonePixelP(imageBWB, thresholdB);
}
requestAnimationFrame(update2);
}
requestAnimationFrame(update2);
// updates bottom image
function update3() {
if (cutoffC !== inputCutoffC.value || thresholdC !== inputThresholdC.value) {
cutoffC = inputCutoffC.value;
thresholdC = inputThresholdC.value;
inputCutoffValueC.textContent = cutoffC;
inputThresholdValueC.textContent = thresholdC;
imageBWC.ctx.drawImage(image, 0, 0, imageBWC.width, imageBWC.height);
twoTonePixelCutoff(imageBWC, cutoffC, thresholdC);
}
requestAnimationFrame(update3);
}
requestAnimationFrame(update3);
}
.imageIso {
border: 2px solid black;
padding: 5px;
margin: 5px;
font-size : 12px;
}
.imageSel {
border: 2px solid black;
padding: 5px;
margin: 5px;
}
#imageStatus {
margin: 5px;
font-size: 12px;
}
.btn {
margin: 2px;
font-size : 12px;
border: 1px solid black;
background : white;
padding: 5px;
cursor : pointer;
}
.btn:hover {
background : #DDD;
}
body {
font-family: arial;
font-size: 12px;
}
canvas {
border: 2px solid black;
padding: 5px;
}
.hide {
display: none;
}
<div class="imageSel hide" id="imageControl">
<input class="btn" id="selImage1Btn" type="button" value="Image 1"></input>
<input class="btn" id="selImage2Btn" type="button" value="Image 2"></input>
<input class="btn" id="togFullsize" type="button" value="Toggle fullsize"></input>
<input class="btn" id="findMean1" type="button" value="Mean M1" data-method=0 data-name="perceptual mean approximation" title="Get the image mean to use as threshold value using perceptual mean approximation"></input>
<input class="btn" id="findMean2" type="button" value="Mean M2" data-method=1 data-name="Pixel RGB sum mean" title="Get threshold value using RGB sum mean"></input>
<input class="btn" id="findMean3" type="button" value="Mean Otsu" data-method=2 data-name="Otsu's method" title="Get threshold value using Otsu's method"></input>
<div id="imageStatus"></div>
</div>
<div class="imageIso">
Using per pixel processing simple threshold. Quick in terms of pixel processing but produces a hard boundary at the threshold value.<br>
<div id="canB"></div>
Threshold<input id="inputThresholdB" type="range" min="1" max="255" step="1" value="128"></input><span id="inputThresholdValueB"></span>
</div>
<div class="imageIso">
Using per pixel processing with cutoff width. This softens the cutoff boundary by gray scaling the values at the threshold.<br>
<div id="canC"></div>
Cutoff width<input id="inputCutoffC" type="range" min="0" max="64" step="0.1" value="8"></input><span id="inputCutoffValueC"></span><br> Threshold
<input id="inputThresholdC" type="range" min="1" max="255" step="1" value="128"></input><span id="inputThresholdValueC"></span>
</div>
<div class="imageIso">
<h2>Means not applied to this image</h2>
Using stacked global composite operations. The quickest method and does not require secure pixel access. Though threshold and cutoff are imprecise.<br>
<div id="canA"></div>
Amount<input id="inputAmountA" type="range" min="1" max="100" step="1" value="75"></input><span id="inputAmountValueA"></span><br> Threshold
<input id="inputThresholdA" type="range" min="1" max="255" step="1" value="127"></input><span id="inputThresholdValueA"></span>
</div>
old answer
The quickest color to black&white using the 2D API
The quickest method for color to BW is as follows
ctx.drawImage(image,0,0);
ctx.globalCompositeOperation = "saturation";
ctx.fillStyle = "#888"; // no saturation
ctx.fillRect(0,0,image.width,image.height);
and provides a good result.
As there is always debate about which way is the right way, the rest of the answer and its snippets let you compare the various methods and see which you prefer.
BW conversion comparison.
Many people tend to use the perceptual conversion, either as a linear RGB->BW or a logarithmic RGB->BW, and will swear by it. Personally I think it is overrated and it takes an experienced eye to detect the difference.
There is technically no correct method, as the right conversion depends on so many factors: overall image brightness and contrast, the viewing environment's ambient lighting, individual preferences, media type (display, print, other), any existing image processing, origin image source (jpg, png, etc.), camera settings, the author's intent, viewing context (is it full screen, a thumbnail, on a bright blue border, etc.).
The demo shows some methods of conversion, including the common perceptual linear and log. Some methods use direct pixel processing via ctx.getImageData and others use the GPU via the 2D API to do the processing (up to 100 times quicker).
Start the snippet and the image will load and then be processed. Once all versions are complete they will be displayed alongside the original. Click on an image to see what function was used and the time it took to process the image.
Image source: served by Wiki. Attribution : public domain.
const methods = {quickBW, quickPerceptualBW, PerceptualLinear, PerceptualLog, directLum, directLumLog}
const image = new Image;
status("Loading test image.");
setTimeout(start,0);
function status(text){
const d = document.createElement("div");
d.textContent = text;
info.appendChild(d);
}
function makeImageEditable(image){
const c = document.createElement("canvas");
c.width = image.width;
c.height = image.height;
c.ctx = c.getContext("2d");
c.ctx.drawImage(image,0,0);
return c;
}
function makeImageSideBySide(image,image1){
const c = document.createElement("canvas");
c.width = image.width + image1.width;
c.height = image.height;
c.ctx = c.getContext("2d");
c.ctx.drawImage(image,0,0);
c.ctx.drawImage(image1,image.width,0);
return c;
}
function text(ctx, text, y = ctx.canvas.height / 2){
ctx.font= "32px arial";
ctx.textAlign = "center";
ctx.fillStyle = "black";
ctx.globalCompositeOperation = "source-over";
ctx.globalAlpha = 1;
ctx.setTransform(1,0,0,1,0,0);
ctx.fillText(text,ctx.canvas.width / 2, y+2);
ctx.fillStyle = "white";
ctx.fillText(text,ctx.canvas.width / 2, y);
}
function quickBW(bw){
bw.ctx.save();
bw.ctx.globalCompositeOperation = "saturation";
bw.ctx.fillStyle = "#888"; // no saturation
bw.ctx.fillRect(0,0,bw.width,bw.height);
bw.ctx.restore();
return bw;
}
function quickPerceptualBW(bw){
bw.ctx.save();
bw.ctx.globalCompositeOperation = "multiply";
var col = "rgb(";
col += ((255 * 0.2126 * 1.392) | 0) + ",";
col += ((255 * 0.7152 * 1.392) | 0) + ",";
col += ((255 * 0.0722 * 1.392) | 0) + ")";
bw.ctx.fillStyle = col;
bw.ctx.fillRect(0,0,bw.width,bw.height);
bw.ctx.globalCompositeOperation = "saturation";
bw.ctx.fillStyle = "#888"; // no saturation
bw.ctx.fillRect(0,0,bw.width,bw.height);
bw.ctx.globalCompositeOperation = "lighter";
bw.ctx.globalAlpha = 0.5;
bw.ctx.drawImage(bw,0,0);
bw.ctx.restore();
return bw;
}
function PerceptualLinear(bw){
const imgD = bw.ctx.getImageData(0,0,bw.width, bw.height);
const d = imgD.data;
var i = 0;
var v;
while(i < d.length){
v = d[i++] * 0.2126 + d[i++] * 0.7152 + d[i] * 0.0722;
i -= 2;
d[i++] = d[i++] = d[i++] = v;
i++;
}
bw.ctx.putImageData(imgD,0,0);
return bw;
}
function PerceptualLog(bw){
const imgD = bw.ctx.getImageData(0,0,bw.width, bw.height);
const d = imgD.data;
var i = 0;
var v;
while(i < d.length){
v = Math.sqrt(d[i] * d[i++] * 0.2126 + d[i] * d[i++] * 0.7152 + d[i] *d[i] * 0.0722);
i -= 2;
d[i++] = d[i++] = d[i++] = v;
i++;
}
bw.ctx.putImageData(imgD,0,0);
return bw;
}
function directLum(bw){
const imgD = bw.ctx.getImageData(0,0,bw.width, bw.height);
const d = imgD.data;
var i = 0;
var r,g,b,v;
while(i < d.length){
r = d[i++];
g = d[i++];
b = d[i];
v = (Math.min(r, g, b) + Math.max(r, g, b)) / 2.2;
i -= 2;
d[i++] = d[i++] = d[i++] = v;
i++;
}
bw.ctx.putImageData(imgD,0,0);
return bw;
}
function directLumLog(bw){
const imgD = bw.ctx.getImageData(0,0,bw.width, bw.height);
const d = imgD.data;
var i = 0;
var r,g,b,v;
while(i < d.length){
r = d[i] * d[i++];
g = d[i] * d[i++];
b = d[i] * d[i];
v = Math.pow((Math.min(r, g, b) + Math.max(r, g, b)/2) ,1/2.05);
i -= 2;
d[i++] = d[i++] = d[i++] = v;
i++;
}
bw.ctx.putImageData(imgD,0,0);
return bw;
}
function start(){
image.crossOrigin = "Anonymous"
image.src = "https://upload.wikimedia.org/wikipedia/en/2/24/Lenna.png";
status("Image loaded, pre processing.");
image.onload = ()=>setTimeout(process,0);
}
function addImageToDOM(element,data){
element.style.width = "512px"
element.style.height = "256px"
element.addEventListener("click",()=>{
text(element.ctx,"Method : " + data.name + " Time : " + data.time.toFixed(3) + "ms",36);
});
document.body.appendChild(element);
}
function process(){
const pKeys = Object.keys(methods);
const images = pKeys.map(()=>makeImageEditable(image));
const results = {};
status("Convert to BW");
setTimeout(()=>{
pKeys.forEach((key,i)=>{
const now = performance.now();
methods[key](images[i]);
results[key] = {};
results[key].time = performance.now() - now;
results[key].name = key;
});
pKeys.forEach((key,i)=>{
addImageToDOM(makeImageSideBySide(images[i],image),results[key]);
})
status("Complete!");
status("Click on image that you think best matches");
status("The original luminance to see which method best suits your perception.");
status("The function used and the time to process in ms 1/1000th sec");
},1000);
}
canvas {border : 2px solid black;}
body {font-family : arial; font-size : 12px; }
<div id="info"></div>
I tried to perform a flood fill on transparent PNG images using the non-recursive algorithm from How can I avoid exceeding the max call stack size during a flood fill algorithm?, which uses a Uint32Array to handle the color stack and works quite well.
However, this flood fill algorithm leaves the white (actually light grey, anti-aliased) edges unfilled. Here is my code:
var BrushColorString = '#F3CDA6'; // skin color
canvas.addEventListener('mousedown', function(e) {
const rect = canvas.getBoundingClientRect()
CanvasMouseX = e.clientX - rect.left;
CanvasMouseY = e.clientY - rect.top;
if (mode === 'flood-fill')
{
// test flood fill algorithm
paintAt(context, CanvasMouseX,CanvasMouseY,hexToRgb(BrushColorString));
}
});
function paintAt(ContextOutput,startX, startY,curColor) {
//function paintAt(ctx,startX, startY,curColor) {
// read the pixels in the canvas
const width = ContextOutput.canvas.width,
height = ContextOutput.canvas.height,pixels = width*height;
const imageData = ContextOutput.getImageData(0, 0, width, height);
var data1 = imageData.data;
const p32 = new Uint32Array(data1.buffer);
const stack = [startX + (startY * width)]; // add starting pos to stack
const targetColor = p32[stack[0]];
var SpanLeft = true, SpanRight = true; // logic for spanning left and right
var leftEdge = false, rightEdge = false;
// proper conversion of color to Uint32Array
const newColor = new Uint32Array((new Uint8ClampedArray([curColor.r,curColor.g, curColor.b, curColor.a])).buffer)[0];
// need proper comparison of target color and new Color
if (targetColor === newColor || targetColor === undefined) { return } // avoid endless loop
while (stack.length){
let idx = stack.pop();
while(idx >= width && p32[idx - width] === targetColor) { idx -= width }; // move to top edge
SpanLeft = SpanRight = false; // not going left right yet
leftEdge = (idx % width) === 0;
rightEdge = ((idx +1) % width) === 0;
while (p32[idx] === targetColor) {
p32[idx] = newColor;
if(!leftEdge) {
if (p32[idx - 1] === targetColor) { // check left
if (!SpanLeft) {
stack.push(idx - 1); // found new column to left
SpanLeft = true; //
} else if (SpanLeft) {
SpanLeft = false;
}
}
}
if(!rightEdge) {
if (p32[idx + 1] === targetColor) {
if (!SpanRight) {
stack.push(idx + 1); // new column to right
SpanRight = true;
}else if (SpanRight) {
SpanRight = false;
}
}
}
idx += width;
}
}
clearCanvas(ContextOutput);
ContextOutput.putImageData(imageData,0, 0);
};
function hexToRgb(hex) {
var result = /^#?([a-f\d]{2})([a-f\d]{2})([a-f\d]{2})$/i.exec(hex);
return result ? {
r: parseInt(result[1], 16),
g: parseInt(result[2], 16),
b: parseInt(result[3], 16),
a: 255
} : null;
};
So far, I have tried the following suggestions:
Using the matchOutlineColor function with RGBA values, mentioned in Canvas - floodfill leaves white pixels at edges.
When I tried to implement "Restrict fill area based on intensity gradient changes instead of simple threshold", also mentioned in Canvas - floodfill leaves white pixels at edges and considered the most promising approach, I still had no clue how to implement it with minimal changes to the existing algorithm so it handles the anti-aliased edges of images with transparency.
Looking at the example of how to apply a tolerance and a toleranceFade mentioned in Canvas flood fill not filling to edge, I still have no clue how to implement such a tolerance and toleranceFade in my case.
The color difference method (colorDiff function) within a given tolerance, mentioned in Canvas Javascript FloodFill algorithm left white pixels without color, is so far still not working. The same goes for keeping the colorsMatch function within a range square (rangeSq), mentioned in How can I perform flood fill with HTML Canvas?, which still does not solve the anti-aliased edge problem.
If you have any ideas on how to deal with the anti-aliased edge problem of the flood-fill algorithm, please respond.
Updated:
Here is the revised code for the paintAt function, following the suggestion that takes tolerance into account:
<div id="container"><canvas id="control" >Does Not Support Canvas Element</canvas></div>
<div><label for="tolerance">Tolerance</label>
<input id="tolerance" type="range" min="0" max="255" value="32" step="1" oninput="this.nextElementSibling.value = this.value"><output>32</output></div>
var canvas = document.getElementById("control");
var context = canvas.getContext('2d');
var CanvasMouseX = -1; var CanvasMouseY = -1;
var BrushColorString = '#F3CDA6'; // skin color
canvas.addEventListener('mousedown', function(e) {
const rect = canvas.getBoundingClientRect()
CanvasMouseX = e.clientX - rect.left;
CanvasMouseY = e.clientY - rect.top;
// testing
if (mode === 'flood-fill')
{
// test flood fill algorithm
paintAt(context,CanvasMouseX,CanvasMouseY,
hexToRgb(BrushColorString),tolerance.value);
}
});
function hexToRgb(hex) {
var result = /^#?([a-f\d]{2})([a-f\d]{2})([a-f\d]{2})$/i.exec(hex);
return result ? {
r: parseInt(result[1], 16),
g: parseInt(result[2], 16),
b: parseInt(result[3], 16),
a: 255
} : null;
};
function clearCanvas(ctx) {
ctx.clearRect(0, 0,ctx.canvas.width,ctx.canvas.height);
};
function colorDistance(index, R00,G00,B00,A00, data0)
{
var index1 = index << 2; // multiplied by 4
const R = R00 - data0[index1 + 0];
const G = G00 - data0[index1 + 1];
const B = B00 - data0[index1 + 2];
const A = A00 - data0[index1 + 3];
return Math.sqrt((R * R) + (B * B) + (G * G) + (A * A));
}
function paintAt(ContextOutput,startX, startY,curColor,tolerance) {
// read the pixels in the canvas
const width = ContextOutput.canvas.width,
height = ContextOutput.canvas.height, pixels = width*height;
const rightEdgeNum = width - 1, bottomEdgeNum = height - 1;
const imageData = ContextOutput.getImageData(0, 0, width, height);
var data1 = imageData.data;
const p32 = new Uint32Array(data1.buffer);
const stack = [startX + (startY * width)]; // add starting pos to stack
const targetColor = p32[stack[0]];
var SpanLeft = true, SpanRight = true; // logic for spanning left right
var leftEdge = false, rightEdge = false, IsBlend = false;
const DistancesArray = new Uint16Array(pixels); // array distance value
var R=-1,G=-1,B=-1,A = -1,idx =0,Distance=0;
var R0 = data1[(4*(startX + (startY * width)))+0],
G0 = data1[(4*(startX + (startY * width)))+1],
B0 = data1[(4*(startX + (startY * width)))+2],
A0 = data1[(4*(startX + (startY * width)))+3];
var CalculatedTolerance = Math.sqrt(tolerance * tolerance * 4);
const BlendR = curColor.r |0, BlendG = curColor.g |0,
BlendB = curColor.b |0, BlendA = curColor.a|0;
// color variable for blending
const newColor = new Uint32Array((new Uint8ClampedArray([BlendR,BlendG,BlendB,BlendA])).buffer)[0];
if (targetColor === newColor || targetColor === undefined) { return }
// avoid endless loop
while (stack.length){
idx = stack.pop();
while (idx >= width &&
colorDistance(idx - width,R0,G0,B0,A0,data1) <= CalculatedTolerance) { idx -= width }; // move to top edge
SpanLeft = SpanRight = false; // not going left right yet
leftEdge = (idx % width) === 0;
rightEdge = ((idx +1) % width) === 0;
while ((Distance = colorDistance(idx,R0,G0,B0,A0,data1)) <= CalculatedTolerance) {
DistancesArray[idx] = (Distance / CalculatedTolerance) * 255 | 0x8000;
p32[idx] = newColor;
if(!leftEdge) {
if (colorDistance(idx - 1,R0,G0,B0,A0,data1) <= CalculatedTolerance) { // check left
if (!SpanLeft) {
stack.push(idx - 1); // found new column to left
SpanLeft = true; //
} else if (SpanLeft) {
SpanLeft = false;
}
}
}
if(!rightEdge) {
if (colorDistance(idx + 1,R0,G0,B0,A0,data1) <= CalculatedTolerance) {
if (!SpanRight) {
stack.push(idx + 1); // new column to right
SpanRight = true;
}else if (SpanRight) {
SpanRight = false;
}
}
}
idx += width;
}
}
idx = 0;
while (idx <= pixels-1) {
Distance = DistancesArray[idx];
if (Distance !== 0) {
if (Distance === 0x8000) {
p32[idx] = newColor;
} else {
IsBlend = false;
const x = idx % width;
const y = idx / width | 0;
if (x >= 1 && DistancesArray[idx - 1] === 0) { IsBlend = true }
else if (x <= rightEdgeNum -1 && DistancesArray[idx + 1] === 0) { IsBlend = true }
else if (y >=1 && DistancesArray[idx - width] === 0) { IsBlend = true }
else if (y <=bottomEdgeNum-1 && DistancesArray[idx + width] === 0) { IsBlend = true }
if (IsBlend) {
// blending at the edge
Distance &= 0xFF;
Distance = Distance / 255;
const invDist = 1 - Distance;
const idx1 = idx << 2;
data1[idx1 + 0] = data1[idx1 + 0] * Distance + BlendR * invDist;
data1[idx1 + 1] = data1[idx1 + 1] * Distance + BlendG * invDist;
data1[idx1 + 2] = data1[idx1 + 2] * Distance + BlendB * invDist;
data1[idx1 + 3] = data1[idx1 + 3] * Distance + BlendA * invDist;
} else {
p32[idx] = newColor;
}
}
}
idx++;
}
// note: this version is iterative (stack-array based), so there is no recursion or call-stack overflow here
clearCanvas(ContextOutput);
ContextOutput.putImageData(imageData,0, 0);
// no manual cleanup needed: DistancesArray, newColor and p32 are consts local to this
// function (reassigning them would throw) and are garbage collected when it returns
};
However, the results of the flood fill have been found wanting, as shown in the transition at the tolerance boundary:
How can I deal with this kind of problem when the tolerance becomes too large? Any alternative algorithm would be appreciated.
Double pass Flood fill in the 4th dimension
I am the Author of the accepted answers for How can I avoid exceeding the max call stack size during a flood fill algorithm? and Canvas flood fill not filling to edge
Unfortunately there is no perfect solution.
The following method has problems.
Setting the tolerance so that it gets all edge aliasing will often fill unwanted areas.
Setting the tolerance too low can make the edges look even worse than the standard fill.
Repeated fills will result in harder edge aliasing.
Uses a simple blend function. The correct blend function can be found in the W3C Compositing and Blending Level 1 spec ("normal" blend mode). Sorry, I am out of time to complete this answer.
Does not easily convert to gradients or pattern fills.
There is a much better solution but it is 1000+ lines long and the code alone would not fit in the 32K answer limit.
This answer is a walk through of how to change your function to reduce edge aliasing using a tolerance and simple edge blending.
Note
The various snippets in the answer may have typos or wrong names. For the correct working code see the example at the bottom.
Tolerance
The simplest method to detect edges is to use a tolerance and fill pixels that are within the tolerance of the pixel color at the fill origin.
This lets the fill overlap the aliased edges that can then be detected and blended to reduce the artifacts due to anti-aliasing.
The problem is that to get a good coverage of the aliasing requires a large tolerance and this ends up filling areas you intuitively would not want colored.
Calculating color distance
A color can be represented by the 3 values red, green, blue. If one substitutes the names with x, y, z it is easy to see how each color has a unique position in 3D space.
Even better, the distance between any two colors in this 3D space directly correlates to the perceived difference in color. We can thus use simple math to calculate the difference (Pythagoras).
As we also need to consider the alpha channel, we need to step up one dimension. Each color and its alpha part have a unique point in 4D space. The distance between any two of these 4D colors directly correlates to the perceived difference in color and transparency.
Luckily we do not need to imagine 4D space; all we do is extend the math (Pythagoras works in all euclidean dimensions).
Thus we get the function and prep code you can add to your flood fill function.
var idx = stack[0] << 2; // also remove the `let` from the first line inside while (stack.length) {
const r = data1[idx];
const g = data1[idx + 1];
const b = data1[idx + 2];
const a = data1[idx + 3];
function colorDist(idx) { // returns the spatial distance from the target color of the pixel at idx
    idx <<= 2;
    const R = r - data1[idx];
    const G = g - data1[idx + 1];
    const B = b - data1[idx + 2];
    const A = a - data1[idx + 3];
    return (R * R + B * B + G * G + A * A) ** 0.5;
}
To the function declaration we add an argument tolerance specified as a value 0 to 255
The function declaration changes from
function paintAt(contextOutput, startX, startY, curColor) {
To
function paintAt(contextOutput, startX, startY, curColor, tolerance = 0) {
With tolerance as an optional argument.
A tolerance of 0 only fills the targetColor
A tolerance of 255 should fill all pixels
We need to convert the tolerance from a channel value to a 4D distance value so that the 255 covers the greatest distance between two colors in the 4D color space.
Add the following line to the top of the function paintAt
tolerance = (tolerance * tolerance * 4) ** 0.5; // normalize to 4D RGBA space
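For example, a tolerance of 255 becomes (255 * 255 * 4) ** 0.5 = 510, which is exactly the largest possible distance between two RGBA colors in the 4D space (all four channels differing by the full 255).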
We now need to change the pixel match statements to use the tolerance. Anywhere you have
p32[idx] === targetColor or similar needs to be replaced with colorDist(idx) <= tolerance. The exception is the inner while loop as we need to use the 4D color distance
while (checkPixel(ind)) {
becomes
// declare variable dist at top of function
while ((dist = colorDist(idx)) <= tolerance) {
Double pass solution
To combat the aliasing we need to blend the fill color by an amount proportional to the color distance.
Doing this for all pixels means that pixels away from the edge of the fill will get the wrong color if the color distance is not 0 and less than tolerance.
We only want to blend pixels if they are at the edge of the fill, excluding those at the edge of the canvas. For many of the pixels there is no way of knowing if a pixel is at the edge of the fill as we come across them. We can only know when we have found all filled pixels.
First pass the flood fill
Thus we must keep an array that holds the color distance for all pixels filled
At the top of the function create a buffer to hold pixel color distances.
const distances = new Uint16Array(width*height);
Then in the inner loop along with setting the pixel color set the matching locations distance.
while ((dist = colorDist(idx)) <= tolerance) {
// must not fill the colour here (p32[idx] = newColor); that is done in the second pass
distances[idx] = (dist / tolerance) * 255 | 0x8000;
To track which pixels are filled we set the top bit of the distance value. That means that distances will hold a non zero value for all pixels to fill and zero for pixels to ignore. This is done with the | 0x8000
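For example, a pixel whose colour distance is half the tolerance stores (0.5 * 255) | 0x8000 = 0x807F; in the second pass dist &= 0xFF recovers 127, while a pixel that matches the fill origin exactly stores just 0x8000.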
The main part of the fill is now done. We let the fill do its thing before we start on the next pass.
Second pass edge detect and blend
After the outer loop has exited we step over each pixel one at a time and check if it needs to be filled.
If it needs filling we extract the stored color distance. If the distance part (ignoring the fill bit) is zero, we set that pixel's color in the p32 array directly. If the distance is not zero we then check the 4 pixels around it. If any of the 4 neighboring pixels is marked as do-not-fill (distances[idx] === 0) and that pixel is not outside the canvas bounds, we know it is an edge and needs to be blended.
// declare at top of function
var blend, dist, rr, gg, bb, aa;
// need fill color's channels for quickest possible access.
const fr = curColor.r | 0;
const fg = curColor.g | 0;
const fb = curColor.b | 0;
const fa = curColor.a | 0;
// after main fill loop.
idx = 0;
const rightEdge = width - 1, bottomEdge = height - 1;
while (idx < width * height){
dist = distances[idx];
if (dist !== 0) {
if (dist === 0x8000) {
p32[idx] = newColor;
} else {
blend = false;
const x = idx % width;
const y = idx / width | 0;
if (x > 0 && distances[idx - 1] === 0) { blend = true }
else if (x < rightEdge && distances[idx + 1] === 0) { blend = true }
else if (y > 0 && distances[idx - width] === 0) { blend = true }
else if (y < bottomEdge && distances[idx + width] === 0) { blend = true }
if (blend) { // pixel is at the fill edge and needs to blend
dist &= 0xFF; // remove fill bit
dist = dist / 255; // normalize to range 0-1
const invDist = 1 - dist; // invert distance
// get index in byte array
const idx1 = idx << 2; // same as idx * 4
// simple blend function (not the same as used by 2D API)
data[idx1] = data[idx1 ] * dist + fr * invDist;
data[idx1 + 1] = data[idx1 + 1] * dist + fg * invDist;
data[idx1 + 2] = data[idx1 + 2] * dist + fb * invDist;
data[idx1 + 3] = data[idx1 + 3] * dist + fa * invDist;
} else {
p32[idx] = newColor;
}
}
}
idx++;
}
And now just put the new pixel array onto the canvas.
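Concretely that is a single call (using the names from the full example below, where the ImageData object is imageData):
ctx.putImageData(imageData, 0, 0); // write the filled and blended pixels back to the canvas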
Example
This example is a bare bones wrapper around a modified version of your code. It is there to make sure I did not make any algorithmic mistakes and to highlight the quality or lack of quality when using this method.
Click first button to add random circle.
Use slider to set tolerance 0 - 255
Click clear to clear canvas.
Click canvas to fill random color at mouse pos.
Canvas has been scaled by 2 to make artifacts more visible.
The function floodFill replaces your paintAt. It is too big and should really be broken into two parts: one for the fill pass, and another for the edge detect and blend.
const ctx = canvas.getContext("2d");
var circle = true;
test();
canvas.addEventListener("click", e => {circle = false; test(e)});
toggleFill.addEventListener("click",e => {circle = true; test(e)});
clear.addEventListener("click",()=>ctx.clearRect(0,0,500,500));
function randomCircle() {
ctx.beginPath();
ctx.strokeStyle = "black";
ctx.lineWidth = 4;
const x = Math.random() * 100 | 0;
const y = Math.random() * 100 | 0;
ctx.arc(x, y, Math.random() * 25 + 25, 0 , Math.PI * 2);
ctx.stroke();
return {x,y};
}
function test(e) {
if (circle) {
toggleFill.textContent = "Click canvas to fill";
randomCircle();
} else {
toggleFill.textContent = "Click button add random circle";
const col = {
r: Math.random() * 255 | 0,
g: Math.random() * 255 | 0,
b: Math.random() * 255 | 0,
a: Math.random() * 255 | 0,
};
floodFill(ctx, (e.offsetX - 1) / 2 | 0, (e.offsetY - 1) / 2 | 0, col, tolerance.value);
}
}
// Original function from SO question https://stackoverflow.com/q/65359146/3877726
function floodFill(ctx, startX, startY, curColor, tolerance = 0) {
var idx, blend, dist, rr, gg, bb, aa, spanLeft = true, spanRight = true, leftEdge = false, rightEdge = false;
const width = ctx.canvas.width, height = ctx.canvas.height, pixels = width*height;
const imageData = ctx.getImageData(0, 0, width, height);
const data = imageData.data;
const p32 = new Uint32Array(data.buffer);
const stack = [startX + (startY * width)];
const targetColor = p32[stack[0]];
const fr = curColor.r | 0;
const fg = curColor.g | 0;
const fb = curColor.b | 0;
const fa = curColor.a | 0;
const newColor = (fa << 24) + (fb << 16) + (fg << 8) + fr;
if (targetColor === newColor || targetColor === undefined) { return }
idx = stack[0] << 2;
const rightE = width - 1, bottomE = height - 1;
const distances = new Uint16Array(width*height);
tolerance = (tolerance * tolerance * 4) ** 0.5;
const r = data[idx] ;
const g = data[idx + 1] ;
const b = data[idx + 2];
const a = data[idx + 3]
function colorDist(idx) {
if (distances[idx]) { return Infinity }
idx <<= 2;
const R = r - data[idx];
const G = g - data[idx + 1];
const B = b - data[idx + 2];
const A = a - data[idx + 3];
return (R * R + B * B + G * G + A * A) ** 0.5;
}
while (stack.length) {
idx = stack.pop();
while (idx >= width && colorDist(idx - width) <= tolerance) { idx -= width }; // move to top edge
spanLeft = spanRight = false; // not going left right yet
leftEdge = (idx % width) === 0;
rightEdge = ((idx + 1) % width) === 0;
while ((dist = colorDist(idx)) <= tolerance) {
distances[idx] = (dist / tolerance) * 255 | 0x8000;
if (!leftEdge) {
if (colorDist(idx - 1) <= tolerance) {
if (!spanLeft) {
stack.push(idx - 1);
spanLeft = true;
} else if (spanLeft) {
spanLeft = false;
}
}
}
if (!rightEdge) {
if (colorDist(idx + 1) <= tolerance) {
if (!spanRight) {
stack.push(idx + 1);
spanRight = true;
}else if (spanRight) {
spanRight = false;
}
}
}
idx += width;
}
}
idx = 0;
while (idx < pixels) {
dist = distances[idx];
if (dist !== 0) {
if (dist === 0x8000) {
p32[idx] = newColor;
} else {
blend = false;
const x = idx % width;
const y = idx / width | 0;
if (x > 0 && distances[idx - 1] === 0) { blend = true }
else if (x < rightE && distances[idx + 1] === 0) { blend = true }
else if (y > 0 && distances[idx - width] === 0) { blend = true }
else if (y < bottomE && distances[idx + width] === 0) { blend = true }
if (blend) {
dist &= 0xFF;
dist = dist / 255;
const invDist = 1 - dist;
const idx1 = idx << 2;
data[idx1] = data[idx1 ] * dist + fr * invDist;
data[idx1 + 1] = data[idx1 + 1] * dist + fg * invDist;
data[idx1 + 2] = data[idx1 + 2] * dist + fb * invDist;
data[idx1 + 3] = data[idx1 + 3] * dist + fa * invDist;
} else {
p32[idx] = newColor;
}
}
}
idx++;
}
ctx.putImageData(imageData,0, 0);
}
canvas {
width: 200px;
height: 200px;
border: 1px solid black;
}
<label for="tolerance">Tolerance</label>
<input id="tolerance" type="range" min="0" max="255" value="32" step="1"></input>
<button id ="toggleFill" >Click add random circle</button>
<button id ="clear" >Clear</button><br>
<canvas id="canvas" width="100" height="100"></canvas>
var stars = function() {
this.x = Math.floor(Math.random()* 1000) ;
this.y = Math.floor(Math.random()* 900) ;
this.radius = 2 ;
this.starColour = "gold";
}
var starNum = 20;
var starry = new Array(starNum);
for(var s = 0 ; s < 100 ; s++){
starry[s] = new stars()
}
var starDraw = function() {
var starCanvas = document.getElementById("stars");
var starCtx = starCanvas.getContext("2d");
starCtx.clearRect(0, 0, 1000, 900);
for(i = 0; i < 100 ; i++){
var star = starry[i];
starCtx.fillStyle= "white";
starCtx.shadowBlur = 5;
starCtx.shadowColor = "white";
starCtx.beginPath();
// draw it
starCtx.arc(star.x, star.y, star.radius, 0, Math.PI * 2);
starCtx.stroke();
starCtx.fill();
}
}
function starLoop(){
starDraw();
requestAnimationFrame(starLoop);
}
requestAnimationFrame(starLoop);
So I am trying to create a twinkling effect for the stars using only JavaScript and I can't figure out how to do it.
I have searched around and found no real answers so far, so I would appreciate one here. I am very new to coding, so please take it easy on me.
A random star field. A little exaggerated, but easy to tone down (or up) if needed.
The important part is to avoid direct random values, as most things in nature are not uniformly random but tend to fall close to a fixed point. This is called a gaussian distribution. There are several ways to generate such random values.
// gRandom is far more likely to be near 0.5 than 1 or zero
var gRandom = (Math.random()+Math.random()+Math.random()+Math.random()) / 4;
// or
// gRandom is more likely to be near zero than near 1
var gRandom = Math.random() * Math.random();
I use these methods to set the sizes of the stars (far more small stars than big) and to create the colour and movement.
To try to get a more realistic effect I also move the stars by less than a pixel. This changes the brightness without looking like movement.
Code has plenty of comments
const ctx = canvas.getContext("2d");
// function calls a callback count times. Saves typing out for loops all the time
const doFor = (count, callback) => {
var i = 0;
while (i < count) {
callback(i++)
}
};
// creates a random integer between min and max. If min only given the between 0 and the value
const randI = (min, max = min + (min = 0)) => (Math.random() * (max - min) + min) | 0;
// same as above but as floats.
const rand = (min, max = min + (min = 0)) => Math.random() * (max - min) + min;
// creates a 2d point at x,y. If only x is a point than set to that point
const point = (x = 0, y) => {
if (x.x && y === undefined) {return { x: x.x,y: x.y} }
return {x,y: y === undefined ? 0 : y }
};
function ease (time, amount = 2) { return Math.pow(time % 1,amount) };
const clamp = (v, min = 1,max = min + (min = 0)) => v < min ? min : v > max ? max : v;
// stuff for stars
const skyColour = [10,30,50];
const density = 1000; // number of star per every density pixels
const colourChangeRate = 16; // Time in frames to change a colour
const stars = [];
const star = { // define a star
draw() {
this.count += 1; // integer counter used to trigger a colour change every 16 frames
if (this.count % colourChangeRate === 0) { // change colour ?
// colour is a gaussian-like distribution (NOT uniform random) centered at #888
var c = (Math.random() + Math.random() + Math.random() + Math.random()) * 4;
var str = "#";
str += Math.floor(c * this.red).toString(16); // change color
str += Math.floor(c * this.green).toString(16); // change color
str += Math.floor(c * this.blue).toString(16); // change color
this.col = str;
}
ctx.fillStyle = this.col;
// move star around a pixel. Again it's not uniform random
// but a gaussian-like distribution. The movement is sub-pixel and will only
// make the star's brightness vary, not look like it's moving
var ox = (Math.random() + Math.random() + Math.random() + Math.random()) / 4;
var oy = (Math.random() + Math.random() + Math.random() + Math.random()) / 4;
ctx.fillRect(this.pos.x + ox, this.pos.y + oy, this.size, this.size);
}
}
// create a random star
// the size is calculated to produce many more smaller stars than big ones
function createStar(pos) {
stars.push(Object.assign({}, star, {
pos,
col: "#ccc",
count: randI(colourChangeRate),
size: rand(1) * rand(1) * 2 + 0.5,
red: 1-(rand(1) * rand(1) *rand(1)), // reduces colour channels
green: 1-(rand(1) * rand(1) *rand(1)), // but only by a very small amount
blue: 1-(rand(1) * rand(1) *rand(1)), // most of the time but occasional
// star will have a distinct colour
}));
}
var starCount;
var skyGrad;
// render the stars
function mainLoop(time) {
// resize canvas if page size changes
if (canvas.width !== innerWidth || canvas.height !== innerHeight) {
canvas.width = innerWidth;
canvas.height = innerHeight;
// create a new set of stars
stars.length = 0;
// density is the number of pixels on the canvas per star
starCount = Math.floor((canvas.width * canvas.height) / density);
// create the random stars;
doFor(starCount, () => createStar(point(randI(canvas.width), randI(canvas.height))));
skyGrad = ctx.createLinearGradient(0,0,0,canvas.height);
skyGrad.addColorStop(0,"black");
doFor(100,(i)=>{
var pos = clamp(i/100,0,1);
var col = ease(pos);
skyGrad.addColorStop(
pos,
"rgb(" +
Math.floor(skyColour[0] * col) + "," +
Math.floor(skyColour[1] * col) + "," +
Math.floor(skyColour[2] * col) + ")"
);
});
// floating point error can cause problems if we don't set the top
// colour stop at exactly 1
skyGrad.addColorStop(1,"rgb("+skyColour[0]+","+skyColour[1]+","+skyColour[2]+")");
}
ctx.fillStyle = skyGrad;
ctx.fillRect(0, 0, canvas.width, canvas.height);
doFor(starCount, (i) => stars[i].draw());
requestAnimationFrame(mainLoop);
}
requestAnimationFrame(mainLoop);
canvas {
position: absolute;
top: 0px;
left: 0px;
}
<canvas id="canvas"></canvas>
I'm working on a node module that will return the color that looks best on top of a background image, which of course will contain multiple colors.
Here's what I have so far:
'use strict';
var randomcolor = require('randomcolor');
var tinycolor = require('tinycolor2');
module.exports = function(colors, tries) {
var topColor, data = {};
if (typeof colors == 'string') { colors = [colors]; }
if (!tries) { tries = 10000; }
for (var t = 0; t < tries; t++) {
var score = 0, color = randomcolor(); //tinycolor.random();
for (var i = 0; i < colors.length; i++) {
score += tinycolor.readability(colors[i], color);
}
data[color] = (score / colors.length);
if (!topColor || data[color] > data[topColor]) {
topColor = color;
}
}
return tinycolor(topColor);
};
So the way it works is: first I provide this script with the 6 most dominant colors in an image, like this:
[ { r: 44, g: 65, b: 54 },
{ r: 187, g: 196, b: 182 },
{ r: 68, g: 106, b: 124 },
{ r: 126, g: 145, b: 137 },
{ r: 147, g: 176, b: 169 },
{ r: 73, g: 138, b: 176 } ]
and then it generates 10,000 different random colors and picks the one that has the best average contrast ratio with the 6 given colors.
The problem is that depending on which script I use to generate the random colors, I'll basically get the same results regardless of the image given.
With tinycolor2 I'll always end up with either a very dark gray (almost black) or a very light gray (almost white). And with randomcolor I'll either end up with a dark blue or a light peach color.
My script might not be the best way of going about this but does anybody have any ideas?
Thank you
Finding dominant hue.
The provided snippet shows an example of how to find a dominant colour. It works by breaking the image into its hue, saturation and luminance components.
The image reduction
To speed up the process the image is reduced to a smaller image (in this case 128 by 128 pixels). Part of the reduction process also trims some of the outside pixels from the image.
const IMAGE_WORK_SIZE = 128;
const ICOUNT = IMAGE_WORK_SIZE * IMAGE_WORK_SIZE;
if(event.type === "load"){
rImage = imageTools.createImage(IMAGE_WORK_SIZE, IMAGE_WORK_SIZE); // reducing image
c = rImage.ctx;
// This is where you can crop the image. In this example I only look at the center of the image
c.drawImage(this,-16,-16,IMAGE_WORK_SIZE + 32, IMAGE_WORK_SIZE + 32); // reduce image size
Find mean luminance
Once reduced I scan the pixels converting them to hsl values and get the mean luminance.
Note that luminance is not perceived on a linear scale, so the mean here is a root mean square: the square root of the sum of the squares divided by the count.
pixels = imageTools.getImageData(rImage).data;
l = 0;
for(i = 0; i < pixels.length; i += 4){
hsl = imageTools.rgb2hsl(pixels[i],pixels[i + 1],pixels[i + 2]);
l += hsl.l * hsl.l;
}
l = Math.sqrt(l/ICOUNT);
Hue histograms for luminance and saturation ranges.
The code can find the dominant colour in a range of saturation and luminance extents. In the example I only use one extent, but you can use as many as you wish. Only pixels that are inside the lum (luminance) and sat (saturation) ranges are used. I record a histogram of the hue for pixels that pass.
Example of hue ranges (one of)
hues = [{ // lum and sat have extent 0-100. The high test is not inclusive, hence high = 101 if you want the full range
lum : {
low :20, // low limit lum >= this.lum.low
high : 60, // high limit lum < this.lum.high
tot : 0, // sum of lum values
},
sat : { // all saturations from 0 to 100
low : 0,
high : 101,
tot : 0, // sum of sat
},
count : 0, // count of pixels that passed
histo : new Uint16Array(360), // hue histogram
}]
In the example I use the mean Luminance to automatically set the lum range.
hues[0].lum.low = l - 30;
hues[0].lum.high = l + 30;
Once the range is set I get the hue histogram for each range (one in this case)
for(i = 0; i < pixels.length; i += 4){
hsl = imageTools.rgb2hsl(pixels[i],pixels[i + 1],pixels[i + 2]);
for(j = 0; j < hues.length; j ++){
hr = hues[j]; // hue range
if(hsl.l >= hr.lum.low && hsl.l < hr.lum.high){
if(hsl.s >= hr.sat.low && hsl.s < hr.sat.high){
hr.histo[hsl.h] += 1;
hr.count += 1;
hr.lum.tot += hsl.l * hsl.l;
hr.sat.tot += hsl.s;
}
}
}
}
Weighted mean hue from hue histogram.
Then using the histogram I find the weighted mean hue for the range
// get weighted hue for image
// just to simplify code hue 0 and 1 (reds) can combine
for(j = 0; j < hues.length; j += 1){
hr = hues[j];
wHue = 0;
hueCount = 0;
hr.histo[1] += hr.histo[0];
for(i = 1; i < 360; i ++){
wHue += (i) * hr.histo[i];
hueCount += hr.histo[i];
}
h = Math.floor(wHue / hueCount);
s = Math.floor(hr.sat.tot / hr.count);
l = Math.floor(Math.sqrt(hr.lum.tot / hr.count));
hr.rgb = imageTools.hsl2rgb(h,s,l);
hr.rgba = imageTools.hex2RGBA(imageTools.rgba2Hex4(hr.rgb));
}
And that is about it. The rest is just display and stuff. The above code requires the imageTools interface (provided) that has tools for manipulating images.
The ugly complement
What you do with the colour(s) found is up to you. If you want the complementary colour, just convert the RGB to HSL with imageTools.rgb2hsl, rotate the hue 180 degrees, then convert back to RGB.
var hsl = imageTools.rgb2hsl(rgb.r, rgb.g, rgb.b);
hsl.h = (hsl.h + 180) % 360;
var complementRgb = imageTools.hsl2rgb(hsl.h, hsl.s, hsl.l);
Personally, I find only some colours work well with their complement. Adding to a palette is risky, and doing it via code is just crazy. Stick with colours in the image. Reduce the lum and sat range if you wish to find accented colours. Each range will have a count of the number of pixels found; use that to gauge how widespread the colours in the associated histogram are.
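For example, a second entry pushed onto the hues array (with purely illustrative limits) could target strongly saturated accent colours:
hues.push({
    lum : { low : 20, high : 81, tot : 0 },  // mid luminance only
    sat : { low : 60, high : 101, tot : 0 }, // only strongly saturated pixels
    count : 0,
    histo : new Uint16Array(360)
});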
Demo "Border the birds"
The demo finds the dominant hue around the mean luminance and uses that hue and mean saturation and luminance to create a border.
The demo uses images from Wikipedia's picture of the day collection, as they allow cross-site access.
var images = [
// "https://upload.wikimedia.org/wikipedia/commons/f/fe/Goldcrest_1.jpg",
"https://upload.wikimedia.org/wikipedia/commons/thumb/2/22/Cistothorus_palustris_CT.jpg/450px-Cistothorus_palustris_CT.jpg",
"https://upload.wikimedia.org/wikipedia/commons/thumb/3/37/Black-necked_Stilt_%28Himantopus_mexicanus%29%2C_Corte_Madera.jpg/362px-Black-necked_Stilt_%28Himantopus_mexicanus%29%2C_Corte_Madera.jpg",
"https://upload.wikimedia.org/wikipedia/commons/thumb/c/cc/Daurian_redstart_at_Daisen_Park_in_Osaka%2C_January_2016.jpg/573px-Daurian_redstart_at_Daisen_Park_in_Osaka%2C_January_2016.jpg",
"https://upload.wikimedia.org/wikipedia/commons/thumb/d/da/Myioborus_torquatus_Santa_Elena.JPG/675px-Myioborus_torquatus_Santa_Elena.JPG",
"https://upload.wikimedia.org/wikipedia/commons/thumb/e/ef/Great_tit_side-on.jpg/645px-Great_tit_side-on.jpg",
"https://upload.wikimedia.org/wikipedia/commons/thumb/5/55/Sarcoramphus_papa_%28K%C3%B6nigsgeier_-_King_Vulture%29_-_Weltvogelpark_Walsrode_2013-01.jpg/675px-Sarcoramphus_papa_%28K%C3%B6nigsgeier_-_King_Vulture%29_-_Weltvogelpark_Walsrode_2013-01.jpg",,
];
function loadImageAddBorder(){
if(images.length === 0){
return ; // all done
}
var imageSrc = images.shift();
imageTools.loadImage(
imageSrc,true,
function(event){
var pixels, topRGB, c, rImage, wImage, botRGB, grad, i, hsl, h, s, l, hues, hslMap, wHue, hueCount, j, hr, gradCols, border;
const IMAGE_WORK_SIZE = 128;
const ICOUNT = IMAGE_WORK_SIZE * IMAGE_WORK_SIZE;
if(event.type === "load"){
rImage = imageTools.createImage(IMAGE_WORK_SIZE, IMAGE_WORK_SIZE); // reducing image
c = rImage.ctx;
// This is where you can crop the image. In this example I only look at the center of the image
c.drawImage(this,-16,-16,IMAGE_WORK_SIZE + 32, IMAGE_WORK_SIZE + 32); // reduce image size
pixels = imageTools.getImageData(rImage).data;
h = 0;
s = 0;
l = 0;
// these are the colour ranges you wish to look at
hues = [{
lum : {
low :20,
high : 60,
tot : 0,
},
sat : { // all saturations
low : 0,
high : 101,
tot : 0,
},
count : 0,
histo : new Uint16Array(360),
}]
for(i = 0; i < pixels.length; i += 4){
hsl = imageTools.rgb2hsl(pixels[i],pixels[i + 1],pixels[i + 2]);
l += hsl.l * hsl.l;
}
l = Math.sqrt(l/ICOUNT);
hues[0].lum.low = l - 30;
hues[0].lum.high = l + 30;
for(i = 0; i < pixels.length; i += 4){
hsl = imageTools.rgb2hsl(pixels[i], pixels[i + 1], pixels[i + 2]);
for(j = 0; j < hues.length; j ++){
hr = hues[j]; // hue range
if(hsl.l >= hr.lum.low && hsl.l < hr.lum.high){
if(hsl.s >= hr.sat.low && hsl.s < hr.sat.high){
hr.histo[hsl.h] += 1;
hr.count += 1;
hr.lum.tot += hsl.l * hsl.l;
hr.sat.tot += hsl.s;
}
}
}
}
// get weighted hue for image
// just to simplify code hue 0 and 1 (reds) can combine
for(j = 0; j < hues.length; j += 1){
hr = hues[j];
wHue = 0;
hueCount = 0;
hr.histo[1] += hr.histo[0];
for(i = 1; i < 360; i ++){
wHue += (i) * hr.histo[i];
hueCount += hr.histo[i];
}
h = Math.floor(wHue / hueCount);
s = Math.floor(hr.sat.tot / hr.count);
l = Math.floor(Math.sqrt(hr.lum.tot / hr.count));
hr.rgb = imageTools.hsl2rgb(h,s,l);
hr.rgba = imageTools.hex2RGBA(imageTools.rgba2Hex4(hr.rgb));
}
gradCols = hues.map(h=>h.rgba);
if(gradCols.length === 1){
gradCols.push(gradCols[0]); // quick fix: a gradient needs at least two colour stops
}
border = Math.floor(Math.min(this.width / 10,this.height / 10, 64));
wImage = imageTools.padImage(this,border,border);
wImage.ctx.fillStyle = imageTools.createGradient(
c, "linear", 0, 0, 0, wImage.height,gradCols
);
wImage.ctx.fillRect(0, 0, wImage.width, wImage.height);
wImage.ctx.fillStyle = "black";
wImage.ctx.fillRect(border - 2, border - 2, wImage.width - border * 2 + 4, wImage.height - border * 2 + 4);
wImage.ctx.drawImage(this,border,border);
wImage.style.width = (innerWidth -64) + "px";
document.body.appendChild(wImage);
setTimeout(loadImageAddBorder,1000);
}
}
)
}
setTimeout(loadImageAddBorder,0);
/** ImageTools.js begin **/
var imageTools = (function () {
// This interface is as is.
// No warranties, no guarantees, and
/*****************************/
/* NOT to be used commercially */
/*****************************/
var workImg,workImg1,keep; // for internal use
keep = false;
const toHex = v => (v < 0x10 ? "0" : "") + Math.floor(v).toString(16);
var tools = {
canvas(width, height) { // create a blank image (canvas)
var c = document.createElement("canvas");
c.width = width;
c.height = height;
return c;
},
createImage (width, height) {
var i = this.canvas(width, height);
i.ctx = i.getContext("2d");
return i;
},
loadImage (url, crossSite, cb) { // cb is the callback. Check the first argument's event type for load status
var i = new Image();
if(crossSite){
i.setAttribute('crossOrigin', 'anonymous');
}
i.src = url;
i.addEventListener('load', cb);
i.addEventListener('error', cb);
return i;
},
image2Canvas(img) {
var i = this.canvas(img.width, img.height);
i.ctx = i.getContext("2d");
i.ctx.drawImage(img, 0, 0);
return i;
},
rgb2hsl(r,g,b){ // integers in the range 0-255
var min, max, dif, h, l, s;
h = l = s = 0;
r /= 255; // normalize channels
g /= 255;
b /= 255;
min = Math.min(r, g, b);
max = Math.max(r, g, b);
if(min === max){ // no colour so early exit
return {
h, s,
l : Math.floor(min * 100), // Note there is loss in this conversion
}
}
dif = max - min;
l = (max + min) / 2;
if (l > 0.5) { s = dif / (2 - max - min) }
else { s = dif / (max + min) }
if (max === r) {
if (g < b) { h = (g - b) / dif + 6.0 }
else { h = (g - b) / dif }
} else if(max === g) { h = (b - r) / dif + 2.0 }
else {h = (r - g) / dif + 4.0 }
h = Math.floor(h * 60);
s = Math.floor(s * 100);
l = Math.floor(l * 100);
return {h, s, l};
},
hsl2rgb (h, s, l) { // h in range integer 0-360 (cyclic) and s,l 0-100 both integers
var p, q;
const hue2Channel = (h) => {
h = h < 0.0 ? h + 1 : h > 1 ? h - 1 : h;
if (h < 1 / 6) { return p + (q - p) * 6 * h }
if (h < 1 / 2) { return q }
if (h < 2 / 3) { return p + (q - p) * (2 / 3 - h) * 6 }
return p;
}
s = Math.floor(s)/100;
l = Math.floor(l)/100;
if (s <= 0){ // no colour
return {
r : Math.floor(l * 255),
g : Math.floor(l * 255),
b : Math.floor(l * 255),
}
}
h = (((Math.floor(h) % 360) + 360) % 360) / 360; // normalize
if (l < 1 / 2) { q = l * (1 + s) }
else { q = l + s - l * s }
p = 2 * l - q;
return {
r : Math.floor(hue2Channel(h + 1 / 3) * 255),
g : Math.floor(hue2Channel(h) * 255),
b : Math.floor(hue2Channel(h - 1 / 3) * 255),
}
},
rgba2Hex4(r,g,b,a=255){
if(typeof r === "object"){
g = r.g;
b = r.b;
a = r.a !== undefined ? r.a : a;
r = r.r;
}
return `#${toHex(r)}${toHex(g)}${toHex(b)}${toHex(a)}`;
},
hex2RGBA(hex){ // Not a CSS colour, as it can have an extra 1 or 2 chars for alpha
// #FFFF and #FFFFFFFF: the last F / FF is the alpha in the range 0-F / 00-FF
if(typeof hex === "string"){
var str = "rgba(";
if(hex.length === 4 || hex.length === 5){
str += (parseInt(hex.substr(1,1),16) * 16) + ",";
str += (parseInt(hex.substr(2,1),16) * 16) + ",";
str += (parseInt(hex.substr(3,1),16) * 16) + ",";
if(hex.length === 5){
str += (parseInt(hex.substr(4,1),16) / 16);
}else{
str += "1";
}
return str + ")";
}
if(hex.length === 7 || hex.length === 9){
str += parseInt(hex.substr(1,2),16) + ",";
str += parseInt(hex.substr(3,2),16) + ",";
str += parseInt(hex.substr(5,2),16) + ",";
if(hex.length === 9){
str += (parseInt(hex.substr(7,2),16) / 255).toFixed(3);
}else{
str += "1";
}
return str + ")";
}
return "rgba(0,0,0,0)";
}
},
createGradient(ctx, type, x, y, xx, yy, colours){ // Colours MUST be array of hex colours NOT CSS colours
// See this.hex2RGBA for details of format
var i,g,c;
var len = colours.length;
if(type.toLowerCase() === "linear"){
g = ctx.createLinearGradient(x,y,xx,yy);
}else{
g = ctx.createRadialGradient(x,y,xx,x,y,yy);
}
for(i = 0; i < len; i++){
c = colours[i];
if(typeof c === "string"){
if(c[0] === "#"){
c = this.hex2RGBA(c);
}
g.addColorStop(Math.min(1, i / (len - 1)), c); // clamp to 1; floating point error can push the value above 1, which makes addColorStop throw a RangeError
}
}
return g;
},
padImage(img,amount){
var image = this.canvas(img.width + amount * 2, img.height + amount * 2);
image.ctx = image.getContext("2d");
image.ctx.drawImage(img, amount, amount);
return image;
},
getImageData(image, w = image.width, h = image.height) { // cut down version to prevent integration
if(image.ctx && image.ctx.imageData){
return image.ctx.imageData;
}
return (image.ctx || (this.image2Canvas(image).ctx)).getImageData(0, 0, w, h);
},
};
return tools;
})();
/** ImageTools.js end **/
Sounds like an interesting problem to have!
Each algorithm you're using to generate colors likely has a bias toward certain colors in its random selection.
What you're likely seeing is the end result of that bias for each. Both are selecting darker and lighter colors independently.
It may make more sense to keep a hash of common colors and use that hash as opposed to using randomly generated colors.
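A minimal sketch of that idea (the function name and bucket size are mine, not from the question): quantise each pixel into a coarse bucket, count occurrences in a plain object, and take the most frequent buckets as your common colors.
function commonColors(data, levels) { // data = ImageData.data, levels = buckets per channel
    var counts = {};
    var step = 256 / levels;
    for (var i = 0; i < data.length; i += 4) {
        var key = Math.floor(data[i] / step) + "," +
                  Math.floor(data[i + 1] / step) + "," +
                  Math.floor(data[i + 2] / step);
        counts[key] = (counts[key] || 0) + 1; // count this quantised colour
    }
    // keys sorted by frequency, most common first
    return Object.keys(counts).sort(function(a, b){ return counts[b] - counts[a]; });
}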
Either way, your 'fitness' check (the algorithm that checks which color has the best average contrast) is picking lighter and darker colors for both color sets. This makes sense: lighter images should get darker backgrounds and darker images should get lighter backgrounds.
Although you don't explicitly say, I'd bet my bottom dollar you're getting dark background for lighter average images and brighter backgrounds on darker images.
Alternatively rather than using a hash of colors, you could generate multiple random color palettes and combine the result sets to average them out.
Or rather than taking the 6 most commonly occurring colors, why not take the overall color gradient and try against that?
I've put together an example where I get the most commonly occurring color and invert it to get the complementary color. This in theory at least should provide a good contrast ratio for the image as a whole.
Using the most commonly occurring color in the image seems to work quite well, as outlined in my example below. This is a similar technique to the one Blindman67 uses, without the bloat of including libraries and performing unnecessary steps. I borrowed the same images that Blindman67 uses for a fair comparison of the result set.
See Get average color of image via Javascript for getting average color (getAverageRGB() function written by James).
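As an aside (not part of the snippet below), the "invert" step mentioned above is just flipping each channel, a minimal sketch assuming 8-bit values:
function invertRGB(rgb) {
    return { r: 255 - rgb.r, g: 255 - rgb.g, b: 255 - rgb.b };
}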
var images = [
"https://upload.wikimedia.org/wikipedia/commons/thumb/2/22/Cistothorus_palustris_CT.jpg/450px-Cistothorus_palustris_CT.jpg",
"https://upload.wikimedia.org/wikipedia/commons/thumb/3/37/Black-necked_Stilt_%28Himantopus_mexicanus%29%2C_Corte_Madera.jpg/362px-Black-necked_Stilt_%28Himantopus_mexicanus%29%2C_Corte_Madera.jpg",
"https://upload.wikimedia.org/wikipedia/commons/thumb/c/cc/Daurian_redstart_at_Daisen_Park_in_Osaka%2C_January_2016.jpg/573px-Daurian_redstart_at_Daisen_Park_in_Osaka%2C_January_2016.jpg",
"https://upload.wikimedia.org/wikipedia/commons/thumb/d/da/Myioborus_torquatus_Santa_Elena.JPG/675px-Myioborus_torquatus_Santa_Elena.JPG",
"https://upload.wikimedia.org/wikipedia/commons/thumb/e/ef/Great_tit_side-on.jpg/645px-Great_tit_side-on.jpg",
"https://upload.wikimedia.org/wikipedia/commons/thumb/5/55/Sarcoramphus_papa_%28K%C3%B6nigsgeier_-_King_Vulture%29_-_Weltvogelpark_Walsrode_2013-01.jpg/675px-Sarcoramphus_papa_%28K%C3%B6nigsgeier_-_King_Vulture%29_-_Weltvogelpark_Walsrode_2013-01.jpg",
];
// append images
for (var i = 0; i < images.length; i++) {
var img = document.createElement('img'),
div = document.createElement('div');
img.crossOrigin = "Anonymous";
img.style.border = '1px solid black';
img.style.margin = '5px';
div.appendChild(img);
document.body.appendChild(div);
(function(img, div) {
img.addEventListener('load', function() {
var avg = getAverageRGB(img);
div.style = 'background: rgb(' + avg.r + ',' + avg.g + ',' + avg.b + ')';
img.style.height = '128px';
img.style.width = '128px';
});
img.src = images[i];
}(img, div));
}
function getAverageRGB(imgEl) { // not my work, see http://jsfiddle.net/xLF38/818/
var blockSize = 5, // only visit every 5 pixels
defaultRGB = {
r: 0,
g: 0,
b: 0
}, // for non-supporting envs
canvas = document.createElement('canvas'),
context = canvas.getContext && canvas.getContext('2d'),
data, width, height,
i = -4,
length,
rgb = {
r: 0,
g: 0,
b: 0
},
count = 0;
if (!context) {
return defaultRGB;
}
height = canvas.height = imgEl.offsetHeight || imgEl.height;
width = canvas.width = imgEl.offsetWidth || imgEl.width;
context.drawImage(imgEl, 0, 0);
try {
data = context.getImageData(0, 0, width, height);
} catch (e) {
return defaultRGB;
}
length = data.data.length;
while ((i += blockSize * 4) < length) {
++count;
rgb.r += data.data[i];
rgb.g += data.data[i + 1];
rgb.b += data.data[i + 2];
}
// ~~ used to floor values
rgb.r = ~~(rgb.r / count);
rgb.g = ~~(rgb.g / count);
rgb.b = ~~(rgb.b / count);
return rgb;
}
It depends on where the text is that is overlayed on the background image. If the background has some large feature on part of it, the text will likely be placed away from that, so must contrast with that part of the image, but you may also want to pick up a certain color or complement the other colors in the image. I think practically speaking you will need to create a widget for people to easily slide/adjust the foreground color interactively. Or you will need to create a deep learning system in order to do this really effectively.
Hi I want to make a blur effect particle like this:
Can I use shadowBlur and shadowOffsetX/shadowOffsetY to do this? The actual shine should glow and fade a little bit, repeatedly, so if I have to write some kind of animation, how can I achieve this?
I have tried this code (jsfiddle example) but it doesn't look like the effect, so I wonder: how do I blur and glow the particle at the same time?
const canvas = document.getElementById('canvas');
const ctx = canvas.getContext('2d');
canvas.width = window.innerWidth;
canvas.height = window.innerHeight;
const ra = window.requestAnimationFrame
|| window.webkitRequestAnimationFrame
|| window.mozRequestAnimationFrame
|| window.oRequestAnimationFrame
|| window.msRequestAnimationFrame
|| function(callback) {
window.setTimeout(callback, 1000 / 60);
};
class Particle {
constructor(options) {
this.ctx = options.context;
this.x = options.x;
this.y = options.y;
this.radius = options.radius;
this.lightSize = this.radius;
this.color = options.color;
this.lightDirection = true;
}
glow() {
const lightSpeed = 0.5;
this.lightSize += this.lightDirection ? lightSpeed : -lightSpeed;
if (this.lightSize > this.radius || this.lightSize < this.radius) {
this.lightDirection = !this.lightDirection;
}
}
render() {
this.ctx.clearRect(0, 0, canvas.width, canvas.height);
this.glow();
this.ctx.globalAlpha = 0.5;
this.ctx.fillStyle = this.color;
this.ctx.beginPath();
this.ctx.arc(this.x, this.y, this.lightSize,
0, Math.PI * 2
);
this.ctx.fill();
this.ctx.globalAlpha = 0.62;
this.ctx.beginPath();
this.ctx.arc(this.x, this.y, this.radius * 0.7, 0, Math.PI * 2);
this.ctx.shadowColor = this.color;
this.ctx.shadowBlur = 6;
this.ctx.shadowOffsetX = 0;
this.ctx.shadowOffsetY = 0;
this.ctx.fill();
}
}
var particle = new Particle({
context: ctx,
x: 60,
y: 80,
radius: 12,
color: '#4d88ff'
});
function run() {
particle.render();
ra(run);
}
run();
<canvas id='canvas'></canvas>
There are several ways to do this. For a particle system my option is to pre-render the blur using a blur filter. A common filter is the convolution filter: it uses a small array (the kernel) to determine how much each neighbouring pixel contributes to the pixel being filtered. You are best to look up convolution functions to understand it.
Wiki Convolution and Wiki Gaussian blur for more info.
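To make the idea concrete, here is a minimal sketch (not the demo code below) of applying a 3x3 box-blur kernel to the red channel of one pixel; each neighbour contributes 1/9 of its value. It assumes data and width come from getImageData and that (x, y) is not on the image border.
// 3x3 box blur kernel: every neighbour weighted equally
var kernel = [1/9, 1/9, 1/9,
              1/9, 1/9, 1/9,
              1/9, 1/9, 1/9];
function blurredRed(data, width, x, y) {
    var sum = 0;
    for (var ky = -1; ky <= 1; ky++) {
        for (var kx = -1; kx <= 1; kx++) {
            var idx = ((y + ky) * width + (x + kx)) * 4;     // red channel of the neighbour
            sum += data[idx] * kernel[(ky + 1) * 3 + (kx + 1)]; // weighted sum
        }
    }
    return sum;
}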
I am not much of a fan of the standard Gaussian blur, or of the usual convolution filter used, so in the demo snippet below you can find my version, which I think creates a much better blur. The convolution blur filter is procedurally created and lives in the imageTools object.
To use it, create a filter by passing an object with the properties size (the blur amount in pixels) and power (the strength); a lower power means less spread in the blur.
// image must be loaded or created
var blurFilter = imageTools.createBlurConvolutionArray({size:17,power:1}); // size must be greater than 2 and must be odd eg 3,5,7,9...
// apply the convolution filter on the image. The returned image may be a new
//image if the input image does not have a ctx property pointing to a 2d canvas context
image = imageTools.applyConvolutionFilter(image,blurFilter);
In the demo I create an image, draw a circle on it, copy it and pad it so that there is room for the blur, then create a blur filter and apply it to the image.
When I render the particles I first draw all the unblurred images, then draw the blurred copies with ctx.globalCompositeOperation = "screen" so that they have a shine. To vary the amount of shine I use ctx.globalAlpha to vary the intensity of the rendered blurred image. To improve the FX I draw the blurred image twice, once with an oscillating scale and once at a fixed scale and alpha, as sketched below.
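Stripped down, the per-particle draw order looks like this (a sketch of the approach, not the demo's exact code; sharpImage and blurredImage are assumed pre-rendered canvases, pad is the blur padding, time drives the oscillation):
function drawGlowingParticle(ctx, sharpImage, blurredImage, pad, x, y, time) {
    ctx.globalCompositeOperation = "source-over";
    ctx.drawImage(sharpImage, x, y);                  // the solid particle
    ctx.globalCompositeOperation = "screen";          // lightening blend gives the shine
    ctx.globalAlpha = 0.4 + 0.6 * (Math.sin(time / 50) + 1) / 2; // oscillate intensity
    ctx.drawImage(blurredImage, x - pad, y - pad);    // blurred copy is padded, so offset it
    ctx.globalAlpha = 1;                              // restore defaults
    ctx.globalCompositeOperation = "source-over";
}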
The demo is simple: image tools can be found at the top, then there is some stuff to set up the canvas and handle the resize event, then the code that creates the images and applies the filters. Finally the render starts, adds some particles, and renders everything.
Look in the function drawParticles for how I draw everything.
imageTools has all the image functions you will need. imageTools.applyConvolutionFilter will apply any filter (sharpen, outline, and many more); you just need to create the appropriate filter array. The apply step works on squared channel values (a photon-count style colour model), which gives a very high quality result, especially for blur-type effects (though for a sharpen filter you may want to go in and change the squaring of the RGB values; I personally like it, others do not).
The blur filter is not fast, so if you apply it to larger images it would be best to break the work up so you do not block the page, as in the sketch below.
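A minimal sketch of that (doRow and done are assumed callbacks: doRow filters one row of the image, done runs when everything is finished):
function processInChunks(height, doRow, done) {
    var y = 0, rowsPerChunk = 8;
    (function step() {
        var end = Math.min(y + rowsPerChunk, height);
        for (; y < end; y++) { doRow(y); }        // do a slice of the work
        if (y < height) { setTimeout(step, 0); }  // yield so the page stays responsive
        else { done(); }
    }());
}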
A cheap way to get a blur is to copy the image into a smaller canvas, e.g. 1/4 size, then render it scaled back up to normal size; the canvas applies bilinear filtering to the image, giving a blur effect. It is not the best quality, but for most situations it is indistinguishable from the more sophisticated blur I have presented.
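That trick is only a few lines (a sketch, assuming img is a loaded image or canvas and ctx is the destination 2D context):
function cheapBlur(ctx, img) {
    var small = document.createElement("canvas");
    small.width = Math.max(1, Math.floor(img.width / 4));
    small.height = Math.max(1, Math.floor(img.height / 4));
    small.getContext("2d").drawImage(img, 0, 0, small.width, small.height); // shrink
    ctx.imageSmoothingEnabled = true; // bilinear filtering blurs on the way back up
    ctx.drawImage(small, 0, 0, small.width, small.height, 0, 0, img.width, img.height);
}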
UPDATE
Changed the code so that the particles have a bit of a 3D FX, to show that the blur also works at larger scales. The blue particles are a 32 by 32 image and the blur is 9 pixels, with the blurred image being 50 by 50 pixels.
var imageTools = (function () {
var tools = {
canvas : function (width, height) { // create a blank image (canvas)
var c = document.createElement("canvas");
c.width = width;
c.height = height;
return c;
},
createImage : function (width, height) {
var image = this.canvas(width, height);
image.ctx = image.getContext("2d");
return image;
},
image2Canvas : function (img) {
var image = this.canvas(img.width, img.height);
image.ctx = image.getContext("2d");
image.ctx.drawImage(img, 0, 0);
return image;
},
padImage : function(img,amount){
var image = this.canvas(img.width + amount * 2, img.height + amount * 2);
image.ctx = image.getContext("2d");
image.ctx.drawImage(img, amount, amount);
return image;
},
getImageData : function (image) {
return (image.ctx || (this.image2Canvas(image).ctx)).getImageData(0, 0, image.width, image.height);
},
putImageData : function (image, imgData){
(image.ctx || (this.image2Canvas(image).ctx)).putImageData(imgData,0, 0);
return image;
},
createBlurConvolutionArray : function(options){
var i, j, d; // misc vars
var filterArray = []; // the array to create
var size = options.size === undefined ? 3: options.size; // array size
var center = Math.floor(size / 2); // center of array
// the power ? needs descriptive UI options
var power = options.power === undefined ? 1: options.power;
// dist to corner
var maxDist = Math.sqrt(center * center + center * center);
var dist = 0; // distance sum
var sum = 0; // weight sum
var centerWeight; // center calculated weight
var totalDistance; // calculated total distance from center
// first pass get the total distance
for(i = 0; i < size; i++){
for(j = 0; j < size; j++){
d = (maxDist-Math.sqrt((center-i)*(center-i)+(center-j)*(center-j)));
d = Math.pow(d,power)
dist += d;
}
}
totalDistance = dist; // total distance to all points;
// second pass get the total weight of all but center
for(i = 0; i < size; i++){
for(j = 0; j < size; j++){
d = (maxDist-Math.sqrt((center-i)*(center-i)+(center-j)*(center-j)));
d = Math.pow(d,power)
d = d/totalDistance;
sum += d;
}
}
var scale = 1/sum;
sum = 0; // used to check
for(i = 0; i < size; i++){
for(j = 0; j < size; j++){
d = (maxDist-Math.sqrt((center-i)*(center-i)+(center-j)*(center-j)));
d = Math.pow(d,power)
d = d/totalDistance;
filterArray.push(d*scale);
}
}
return filterArray;
},
applyConvolutionFilter : function(image,filter){
var imageData = this.getImageData(image);
var imageDataResult = this.getImageData(image);
var w = imageData.width;
var h = imageData.height;
var data = imageData.data;
var data1 = imageDataResult.data;
var side = Math.round(Math.sqrt(filter.length));
var halfSide = Math.floor(side/2);
var r,g,b,a,c;
for(var y = 0; y < h; y++){
for(var x = 0; x < w; x++){
var ind = y*4*w+x*4;
r = 0;
g = 0;
b = 0;
a = 0;
for (var cy=0; cy<side; cy++) {
for (var cx=0; cx<side; cx++) {
var scy = y + cy - halfSide;
var scx = x + cx - halfSide;
if (scy >= 0 && scy < h && scx >= 0 && scx < w) {
var srcOff = (scy*w+scx)*4;
var wt = filter[cy*side+cx];
r += data[srcOff+0] * data[srcOff+0] * wt;
g += data[srcOff+1] * data[srcOff+1] * wt;
b += data[srcOff+2] * data[srcOff+2] * wt;
a += data[srcOff+3] * data[srcOff+3] * wt;
}
}
}
data1[ind+0] = Math.sqrt(Math.max(0,r));
data1[ind+1] = Math.sqrt(Math.max(0,g));
data1[ind+2] = Math.sqrt(Math.max(0,b));
data1[ind+3] = Math.sqrt(Math.max(0,a));
}
}
return this.putImageData(image,imageDataResult);
}
};
return tools;
})();
/** SimpleFullCanvasMouse.js begin **/
const CANVAS_ELEMENT_ID = "canv";
const U = undefined;
var w, h, cw, ch; // short cut vars
var canvas, ctx;
var globalTime = 0;
var createCanvas, resizeCanvas, setGlobals;
var L = typeof log === "function" ? log : function(d){ console.log(d); }
createCanvas = function () {
var c,cs;
cs = (c = document.createElement("canvas")).style;
c.id = CANVAS_ELEMENT_ID;
cs.position = "absolute";
cs.top = cs.left = "0px";
cs.zIndex = 1000;
document.body.appendChild(c);
return c;
}
resizeCanvas = function () {
if (canvas === U) { canvas = createCanvas(); }
canvas.width = window.innerWidth;
canvas.height = window.innerHeight;
ctx = canvas.getContext("2d");
if (typeof setGlobals === "function") { setGlobals(); }
}
setGlobals = function(){
cw = (w = canvas.width) / 2; ch = (h = canvas.height) / 2;
if(particles && particles.length > 0){
particles.length = 0;
}
}
resizeCanvas(); // create and size canvas
window.addEventListener("resize",resizeCanvas); // add resize event
const IMAGE_SIZE = 32;
const IMAGE_SIZE_HALF = 16;
const GRAV = 2001;
const NUM_PARTICLES = 90;
var background = imageTools.createImage(8,8);
var grad = ctx.createLinearGradient(0,0,0,8);
grad.addColorStop(0,"#000");
grad.addColorStop(1,"#048");
background.ctx.fillStyle = grad;
background.ctx.fillRect(0,0,8,8);
var circle = imageTools.createImage(IMAGE_SIZE,IMAGE_SIZE);
circle.ctx.fillStyle = "#5BF";
circle.ctx.arc(IMAGE_SIZE_HALF, IMAGE_SIZE_HALF, IMAGE_SIZE_HALF -2,0, Math.PI * 2);
circle.ctx.fill();
var blurFilter = imageTools.createBlurConvolutionArray({size:9,power:1}); // size must be greater than 2 and must be odd eg 3,5,7,9...
var blurCircle = imageTools.padImage(circle,9);
blurCircle = imageTools.applyConvolutionFilter(blurCircle,blurFilter)
var sun = imageTools.createImage(64,64);
grad = ctx.createRadialGradient(32,32,0,32,32,32);
grad.addColorStop(0,"#FF0");
grad.addColorStop(1,"#A40");
sun.ctx.fillStyle = grad;
sun.ctx.arc(32,32,32 -2,0, Math.PI * 2);
sun.ctx.fill();
var sunBlur = imageTools.padImage(sun,17);
blurFilter = imageTools.createBlurConvolutionArray({size:17,power:1}); // size must be greater than 2 and must be odd eg 3,5,7,9...
sunBlur = imageTools.applyConvolutionFilter(sunBlur,blurFilter);
var particles = [];
var createParticle = function(x,y,dx,dy){
var dir = Math.atan2(y-ch,x-cw);
var dist = Math.sqrt(Math.pow(y-ch,2)+Math.pow(x-cw,2));
var v = Math.sqrt(GRAV / dist); // get apporox orbital speed
return {
x : x,
y : y,
dx : dx + Math.cos(dir + Math.PI/2) * v, // set orbit speed at tangent
dy : dy + Math.sin(dir + Math.PI/2) * v,
s : (Math.random() + Math.random() + Math.random())/4 + 0.5, // scale
v : (Math.random() + Math.random() + Math.random()) / 3 + 2, // glow vary rate
};
}
var depthSort = function(a,b){
return b.y - a.y;
}
var updateParticles = function(){
var i,p,f,dist,dir;
for(i = 0; i < particles.length; i ++){
p = particles[i];
dist = Math.sqrt(Math.pow(cw-p.x,2)+Math.pow(ch-p.y,2));
dir = Math.atan2(ch-p.y,cw-p.x);
f = GRAV * 1 / (dist * dist);
p.dx += Math.cos(dir) * f;
p.dy += Math.sin(dir) * f;
p.x += p.dx;
p.y += p.dy;
p.rx = ((p.x - cw ) / (p.y + h)) * h + cw;
p.ry = ((p.y - ch ) / (p.y + h)) * h * -0.051+ ch;
//p.ry = ((h-p.y) - ch) * 0.1 + ch;
p.rs = (p.s / (p.y + h)) * h
}
particles.sort(depthSort)
}
var drawParticles = function(){
var i,j,p,f,dist,dir;
// draw behind the sun
for(i = 0; i < particles.length; i ++){
p = particles[i];
if(p.y - ch < 0){
break;
}
ctx.setTransform(p.rs,0,0,p.rs,p.rx,p.ry);
ctx.drawImage(circle,-IMAGE_SIZE_HALF,-IMAGE_SIZE_HALF);
}
// draw glow for behind the sun
ctx.globalCompositeOperation = "screen";
var iw = -blurCircle.width/2;
for(j = 0; j < i; j ++){
p = particles[j];
ctx.globalAlpha = ((Math.sin(globalTime / (50 * p.v)) + 1) / 2) * 0.6 + 0.4;
var scale = (1-(Math.sin(globalTime / (50 * p.v)) + 1) / 2) * 0.6 + 0.6;
ctx.setTransform(p.rs * 1.5 * scale,0,0,p.rs * 1.5* scale,p.rx,p.ry);
ctx.drawImage(blurCircle,iw,iw);
// second pass to intensify the glow
ctx.globalAlpha = 0.7;
ctx.setTransform(p.rs * 1.1,0,0,p.rs * 1.1,p.rx,p.ry);
ctx.drawImage(blurCircle,iw,iw);
}
// draw the sun
ctx.globalCompositeOperation = "source-over";
ctx.globalAlpha = 1;
ctx.setTransform(1,0,0,1,cw,ch);
ctx.drawImage(sun,-sun.width/2,-sun.height/2);
ctx.globalAlpha = 1;
ctx.globalCompositeOperation = "screen";
ctx.setTransform(1,0,0,1,cw,ch);
ctx.drawImage(sunBlur,-sunBlur.width/2,-sunBlur.height/2);
var scale = Math.sin(globalTime / 100) *0.5 + 1;
ctx.globalAlpha = (Math.cos(globalTime / 100) + 1) * 0.2 + 0.4;
ctx.setTransform(1 + scale,0,0,1 + scale,cw,ch);
ctx.drawImage(sunBlur,-sunBlur.width/2,-sunBlur.height/2);
ctx.globalAlpha = 1;
ctx.globalCompositeOperation = "source-over";
// draw in front the sun
for(j = i; j < particles.length; j ++){
p = particles[j];
if(p.y > -h){ // don't draw past the near view plane
ctx.setTransform(p.rs,0,0,p.rs,p.rx,p.ry);
ctx.drawImage(circle,-IMAGE_SIZE_HALF,-IMAGE_SIZE_HALF);
}
}
ctx.globalCompositeOperation = "screen";
var iw = -blurCircle.width/2;
for(j = i; j < particles.length; j ++){
p = particles[j];
if(p.y > -h){ // don't draw past the near view plane
ctx.globalAlpha = ((Math.sin(globalTime / (50 * p.v)) + 1) / 2) * 0.6 + 0.4;
var scale = (1-(Math.sin(globalTime / (50 * p.v)) + 1) / 2) * 0.6 + 0.6;
ctx.setTransform(p.rs * 1.5 * scale,0,0,p.rs * 1.5* scale,p.rx,p.ry);
ctx.drawImage(blurCircle,iw,iw);
// second pass to intensify the glow
ctx.globalAlpha = 0.7;
ctx.setTransform(p.rs * 1.1,0,0,p.rs * 1.1,p.rx,p.ry);
ctx.drawImage(blurCircle,iw,iw);
}
}
ctx.globalCompositeOperation = "source-over";
}
var addParticles = function(count){
var ww = (h-10)* 2;
var cx = cw - ww/2;
var cy = ch - ww/2;
for(var i = 0; i < count; i ++){
particles.push(createParticle(cx + Math.random() * ww,cy + Math.random() * ww, Math.random() - 0.5, Math.random() - 0.5));
}
}
function display(){ // put code in here
if(particles.length === 0){
addParticles(NUM_PARTICLES);
}
ctx.setTransform(1,0,0,1,0,0); // reset transform
ctx.globalAlpha = 1; // reset alpha
ctx.drawImage(background,0,0,w,h)
updateParticles();
drawParticles();
ctx.globalAlpha = 1;
ctx.globalCompositeOperation = "source-over";
}
function update(timer){ // Main update loop
globalTime = timer;
display(); // call demo code
requestAnimationFrame(update);
}
requestAnimationFrame(update);
/** SimpleFullCanvasMouse.js end **/
I'm looking for a way to change the color of text to either #000 or #fff depending on the main color of a background image within a div called "banner". The background images are chosen at random on each page so I need to be able to do this automatically. I came across JavaScript color contraster but I'm struggling to understand how to use it properly. I notice the link I've posted gives a solution in javascript and I've also read about a possible solution in jquery.
I'm clueless with functions, so if anyone could explain clearly how I could achieve this, where to place the functions and how to "call them" (if that's the right term!), I'd be really grateful.
Thanks for any help.
You could do something like this (using Colours.js and this answer).
Note, this will only work with images on the same domain and in browsers that support HTML5 canvas.
'use strict';
var getAverageRGB = function(imgEl) {
var rgb = {
b: 0,
g: 0,
r: 0
};
var canvas = document.createElement('canvas');
var context = canvas.getContext && canvas.getContext('2d');
if (Boolean(context) === false) {
return rgb;
}
var height = imgEl.naturalHeight || imgEl.offsetHeight || imgEl.height;
var width = imgEl.naturalWidth || imgEl.offsetWidth || imgEl.width;
canvas.height = height;
canvas.width = width;
context.drawImage(imgEl, 0, 0);
var data;
try {
data = context.getImageData(0, 0, width, height).data;
} catch (e) {
console.error('security error, img on diff domain');
return rgb;
}
var count = 0;
var length = data.length;
// only visit every 5 pixels
var blockSize = 5;
var step = (blockSize * 4) - 4;
for (var i = step; i < length; i += step) {
count += 1;
rgb.r += data[i];
rgb.g += data[i + 1];
rgb.b += data[i + 2];
}
rgb.r = Math.floor(rgb.r / count);
rgb.g = Math.floor(rgb.g / count);
rgb.b = Math.floor(rgb.b / count);
return rgb;
};
var rgb = getAverageRGB(document.getElementById('image'));
var avgComplement = Colors.complement(rgb.r, rgb.g, rgb.b);
var avgComplementHex = Colors.rgb2hex.apply(null, avgComplement.a);
var complement = parseInt(avgComplementHex.slice(1), 16);
document.body.style.backgroundColor = 'rgb(' + [
rgb.r,
rgb.g,
rgb.b
].join(',') + ')';
var maxColors = 0xFFFFFF;
var midPoint = Math.floor(maxColors / 2);
document.getElementById('text').style.color = complement > midPoint ? '#000' : '#fff';
<script src="https://cdnjs.cloudflare.com/ajax/libs/Colors.js/1.2.3/colors.min.js"></script>
<div id="text">Setting the BODY's background to the average color in the following image and this text to a complimentary colour of black or white:</div>
<img id="image" src="data:image/jpeg;base64,/9j/4AAQSkZJRgABAQEAYABgAAD/4QBKRXhpZgAASUkqAAgAAAADABoBBQABAAAAMgAAABsBBQABAAAAOgAAACgBAwABAAAAAgAAAAAAAAAAcDg5gJaYAABwODmAlpgA/9sAQwAFAwQEBAMFBAQEBQUFBgcMCAcHBwcPCwsJDBEPEhIRDxERExYcFxMUGhURERghGBodHR8fHxMXIiQiHiQcHh8e/9sAQwEFBQUHBgcOCAgOHhQRFB4eHh4eHh4eHh4eHh4eHh4eHh4eHh4eHh4eHh4eHh4eHh4eHh4eHh4eHh4eHh4eHh4e/8AAEQgAqgDiAwEiAAIRAQMRAf/EAB8AAAEFAQEBAQEBAAAAAAAAAAABAgMEBQYHCAkKC//EALUQAAIBAwMCBAMFBQQEAAABfQECAwAEEQUSITFBBhNRYQcicRQygZGhCCNCscEVUtHwJDNicoIJChYXGBkaJSYnKCkqNDU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6g4SFhoeIiYqSk5SVlpeYmZqio6Slpqeoqaqys7S1tre4ubrCw8TFxsfIycrS09TV1tfY2drh4uPk5ebn6Onq8fLz9PX29/j5+v/EAB8BAAMBAQEBAQEBAQEAAAAAAAABAgMEBQYHCAkKC//EALURAAIBAgQEAwQHBQQEAAECdwABAgMRBAUhMQYSQVEHYXETIjKBCBRCkaGxwQkjM1LwFWJy0QoWJDThJfEXGBkaJicoKSo1Njc4OTpDREVGR0hJSlNUVVZXWFlaY2RlZmdoaWpzdHV2d3h5eoKDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uLj5OXm5+jp6vLz9PX29/j5+v/aAAwDAQACEQMRAD8A8f0TSNMfRbFm060bdBGSTApLEqMknHJq8NH0r/oGWX/fhP8ACk0A/wDEjsOn/HtH/wCgCtEHpX6FQw9P2cfdWy6H5jjMXWjWklJ7vqVF0XScf8gqx/8AAdP8KeNF0f8A6Bdl/wB+E/wq4pqVSOOP1rb6vT/lX3HDLG4j+d/eyiNE0c/8wqx/78J/hS/2JpH/AECrH/wHT/CtJAp6Ej8KeE96f1el/KvuMnj8Qv8Al4/vZljRdH/6BNh/4Dp/hSjRdHP/ADCLD/wHT/CtQJTlWj6vR/lX3CePxH87+9mYuh6OOTpGnn/t3T/CrI0HRQoI0XTiPT7NHn+ValssII8xM+9asMdsccD24qJ0qUfsr7jejisTP/l4/vZzcegaEyZ/sXTf/AWP/CnNoGghf+QJpufa0j/wrqlsVIyoo+wMD7fSsOSi/sr7jsVTFW+N/ecqnh/Qd2Doum89/ssf+FWYvDGgN00TTD/26x/4V0JsARyuakisivTj2pOFHpFfcXCeJT1k/vZhJ4W0AAZ0HSz/ANukf+FTp4V8OdT4e0r/AMA4/wD4mt1Y2XqKeF4IxWbp0/5V9x1RrV19p/eYR8M+GAP+Rc0j/wAAYv8A4mmHw54Y/wChc0j/AMAov/ia3vLDdRThbKe1T7On/KvuH9Yr/wAz+858eGPDZH/Iv6T/AOAcX/xNRt4Y8O7uNB0n/wAA4/8A4mukNqaha2YNnP4U/ZU+y+4HWr2+J/eYJ8L+HQP+QBpX/gHH/wDE1G/hnw7jjQdL/wDASP8AwrofLPekZARwDT9lT/lX3EuvWt8T+85v/hGvD2cHQtL56f6JH/hTX8MaARxoemfhax/4V0LQOeik03yZCcBTVclP+VfcZOtW/mf3s5S88M6KgLLo+nj/ALd0/wAKpr4c0l+V0qxH/bBf8K7Z7RmGGj3fUVEbErgCMCtYqlb4V9xzSeJcrqbt6s5JfC+k5/5BVh/4Dp/hTj4Y0ntpOn/+Ayf4V1RtnTotQyRyDOAPxpShTa+FfcilKunrOX3s+cPESJb+INRgjhiVI7uVFCpwAHIAFFL4u3f8JXq//X9P/wCjGor5x7n3sY6I9Z8OxqdA044JP2WLp/uCtFYB6/gRVbwxFG2g6ZuYrm1i7f7Aret9O342XKYP95SK+roSSpRv2R+a4zmliJqPd/mZv2c9cZHsacsXqCK34dFmZdyxRXC9f3b4P61ZTRYiMslzEe+WHFW60EYLC1pdDnI4kJHzYOasCBl5BR/901uS6DtXKSvj/bi3fqKgNpYxnbLLET9HQ0KrGWxM8NUh8a/EoxTBeGi/WtKwaCRsLGpPcbajSHSySGe7j/2l2uP6VoafHYxMDDqMLc8CRShP86mo1bYrDN89pNWL0NtCVAMAz9Kmj02BjxHtFXfNkMYkS1WZcdYnDZ/Knx3sH3WjkRvT0rz3Kdj36caJDHpyL91iaX7Oqdc/lV5LmOVP3M8ef9oVHFdESlZWgK5+UhwCfzqE5Gz9lHYqhIScZFSraI3pWlHbWt6AGt292Q9KtR6HCMBLiUY6A81lKqlubwpOT01MU6eMdKkg0R7gnyyBj1rWks5YlJEyYHr2qG3v2tZQ48tyvXb3pKcmvdG6cIv39DLutAu4RuCBl9VOaqGzuFGPLYY9q7GDxPphX97MU9QYyD/9eppNa0KSEyedGSeAMHNZ+2qx+KJfsaEvhkcG3mIMyAgfSo3YMMjIz3xXZz32iSDDOgU+qZP5Vm3cmhuhjU4J43BTW0aze8TGpQS2mjlJy2PlG76Gqs0l3Eu5rd9vrXR3lnZGPMNz/Ks+eS5j+RCsijp8tdUKifQ86tRkr629NTHTVEU7Ssin3qxHqUA5EhGfWprmRZkxc2yEj1XFZs1va5ysIHsGPFbqMJdDilKvT1Uk19xq29+jNkup+p61eWSObPyp9BXK+QFOYgQfc5qzbSXcYG3kD0zUSorozajjJrSaOkfT4miEn2hWP9wrgis67slbPlqfwFVhqtxEMFGyfUnFMbV5nJAk8sd6z5Jo7FiKc3Y+ZPGcRXxhrQ9NQnH/AJEaik8ZTFvF+snzDzfzn/yI1FfPvc+zjzWR7P4SBPh7TdoB/wBEi4I/2BW9aF/M2lUX14xVDwLAz+HNKxH1tIev/XNa7zSfDdzdOgmQordGIGAPXNfRxxEIUo37I+DqYCpVxE3Du/zMyztS2Jrd2L+qsR/OtmMaoItzLJIg5+VQT+WKv29ha2tyYYr4sU+9sjB/Wu30ldKSziD3k0kjY3boxtX1rgr4yMNUrnsYXLpPRuzPM01u0DkSoyt33w4x+VTrqGjyozSxW0gPUMSD+tdX4ztfDltdxsJWkMmdzxL8uB354PNYhfw9DCFe3jkBGRJswxrSFenOKai0c1TC4iMnGUotL+uhWtovCd4Mf2VKhPVon4H68U+bQvCsnHmXlse2GyD+dRBtOZz9lVsnt2H1qWOCCTgIVcjuMjNXz2ekmjJYZTjaUE36FKbwxYAq1nq7c9C0fT8Qas2XhjXPMT7Je7g//TQj8SD2q7BYxopbcGJPCrW/pdzBY2ojidQ3fJOazq4qUY6
O/qbUMqpzleUeX0bKFh4P1S6eSOa4t3KnCMVwX9T7VT1XwrLZQt5zo0gPyrxj/wCtW8uq3CIyvICp7qKyrm5SRXj3Fw3ZxyK5KeIquV29D0pYGgocqWvmYNvHKrB1mKFem1q6LS9TaA4mYyL79ayZbB1BfyJVU9GVSQKrNHKjf6x1/wB5DW85xqLUxo0JUHdHXx6tbS7luEQL0XIzn61WuxYXCkRogftgYH4Vzi+ag3GQEd+Kf9slXjeRnuBWShZ6M3lJNe8jYaPRY9iG0lkkJ+ZmlwoqjqcVnJctJbuFU/whDhAOnPeqE8wdcqzHPUYqsZATklsDjit4KS1uctRQasolkQE/PvDH2qSNLtBuiZCvowFR2xm4MSSNv+6ccfWr7xXcUfmTW8iKOpI4pyq20Ijh01dEmnQSXMxku/JRc9FQZNWb2yjX97Gij2rNtr9vMIjRnbvtFXPtJlUo0Mgz1G081jJz5rnXTpwUbGbFZRyXDzXG1sn5UJ4FRTeGUnl3C4ARufpWskEZJZ4nOehXtU0Vs2wsrlD6GtPbzWqZg8JSmrSjcwJPDBjBWKVSB0BqleaXLbABsN9D0rpmguSf9cAOnAzVae1jx88jOauNefVmUsHTS92NjkZ4JO6A/jVSWGQDlABXTXlvEDhU/EmqE0ixn5YYm9dy5roVVtHK8MlI+U/GH/I26x/1/wA//oxqKk8avnxlrZ8uPnULj/0Y1FeA9z7KOyPoPwRqEUPgvRoVjV3FlBlhGcjEa11mn+IriNs+W0qnHDA4ArhfB5b/AIRfSij4xZQ5wP8ApmtdHZ313CMR3qY/uyR7hXvwoxlSV10R8XVxk4VpWfV/mdXFqdvcFZZNNkEmMZTo1X7fVZbcsq2dwsLDBV1LZ/GuatNXuVOWhsJh7oV/lWjDr06HK6fY/hIwrmnhl2O2ljXu3+At7El44d7S4iC8YRiePYGj7JZPGI3t9RO3oduKtweJLxetlbYP/TVqmh8S3iykm0hKHoqyHI/GpcZrRIr2tNu7f4GUNOVWL2z3cZ7ZSrFvZ67vV4ZBIpOMPHj/AD9a1INXu5JQyQzAE/PtwSR6A9qsXF04AJgvhETlt+D+o9qlzl1NIxi9U/uILfSvE6uWOmJJn+6cA1bW28SIAW0AEZxU9lrNnGqhoroupypLY47Zq5daxNeQgQOE/u78kj17iuWTm3rFfiddPkS0k/wKP2TxS8Zkj0iVMepQ/pmrFlJq3lg3ltA2DgrJFj9RSWt9PCGM10cEYGwkf1pzancsSsMkjEkHczAj8jWbUnpZfibqUVrd/gbelQ6tdSGOCwtoyvVC5GB2NacWi67KrO0FmuD91iMn8xWPpt1deQ8q6jKk4bJYoAMelXIdU1i4QlLwvEvD74uM/UGuGpCpf3bHXTnBrW5Nc6ZcLCy3NrZsOpygx+dZSaJY3MjD7NbgjunSrP2rcH3zoGGSfvBSKikurQELFLFI6n5uCMGqgqiQSdN7j10yztk8tI7dl6f6sH681ImkaZMu2O0t3PcDgipIbhZCqJbq4x8x80KKuWEnlxuCLJQvKmRgSx9Biic6iW+oRhT6LQrW9nZwt5KssW3jbjP4VcuLXTmi2GRZUx8wZOM1havLby3TM1t5chbJkiZgGP4darQGYuI1kkyegINTySl71x88VpY2m0qxjQtDDAoPLYTGaotFpxkYG2LbB1VuvtT7N7Asy3GoKjA8jkmrivZsuyK7giz3KFgT9aXNJPVsqyeyRjSCw3Ex2z7R2BJOaq3DWy4IjKg/3jzXTfakt0PmX1iQOnlqf5Vh3+pWSZZniZ8/xJxW9Obk7JGNSCirtmdLd6ZH96dG9Qp6VRv9SsSAFCRKRjOOfrWj/b+mtGY54bN19MLgH8qTUvEdjd24t/s1h5fvjgflXTBTT2ZzSlBrSSOQvrm2ckCcAdsJk1lTurALEGY/7teh6ZP4Ygg3TSxmU/3MKB+FSHXdEt9yrMsi/wAKiIcfU45ro+staKDZyvDRlq5o+CPHIYeNtdBHP9pXH/oxqKn+J0sdx8SfFE67NsmsXbjj1mc0V4rqSufQLY9U8OX1/FoemxxkMPssRUEDpsFaaatqifNj/wAhisPw/dXKeHtPVkdo/ssQBCjpsFWWu1TO5JEJ56YzX2WHn7kb9kfnmNpuNaXKurN2DxHfIo8xIjjqSuD+lXIvErfxxAt6A/41yyzwuNreeccjbyR+FT2zwLjfBdSqOuY+vt61rKUOxzxp1JK6Z1cPiYEgGzkbnHBBq4niu1QgSQOpz0IH+NcldT2TRKbcTRkDBXyiB+eM1TDo2cOVz6g1CVOa2HN1qTtc9EXxfp8QX5ZH9kI4qxH47sQMJY3ZHf8AeCvNFgV3H79RnpngVajsGLALLERnqJBUypUTSGJr/Zf4HoY8c2iZY2dyB26HH61OPiHZ7VHky+mSvb3rkLWzjFrIk26bA2r5RCH8SAc/Wq08SQopgsl3AE72nbfj6YGKwUaUnax1yq4iEOZSO9Xx7ZkfPGzcYb5B/hVwfEjRhDsGkncOd6xHcfzNeXyazeCPy40tomGPmAOR+dV4dVvo5t7LFLySQ5OD+RpvCUpL4fxIWYVYvSf4Hra/EvTIYmEMEg38Nkf/AF6b/wAJ4JD5sdrqC55ysZII9etebWviAxE+Zpdu2/hgh4b8Dnmp01vS3Rku9Jfj7gRVxk9c57VnLBwjtD8TeOPqy+2vuO+bx3prORMXjdf70G0/pUsHjDQ5Pme7CsTyTuUivNZbuykDNHbRorfLtZwBilt7L7XKEhhtiPVbgAD6mqWGo21TRj9fxCdk036M9Ph8RaXLIGh1OHHYGTH860LfVoiQ4vFfHTbIuR+NeQ6pYPbOkbWxDMu7Ky7ww/Liq8Wm30214ocRucK7fKv4mj6rRkrp6FLM8RCXK43fzR7j/bLMAd7SAZ4eQMtImsiMhkgBOOhbivGHtNT08BmEiM+Qjwzbs47cGo4NUvhKUbUL2N+25/6Gs/qFJrSRt/a1ZO0oanuUeuRbwz2SFvVTg/rUEeqw7901n5p9C5x+mK8plvb6CNSL7UZCVzvV0Kn6DFV4/EWoIQXmupc8EMoA/MYqY4CL2ZpLN5xa5kext4jgTj+z7ZEH8LKTn8cjiqc2u6YWYrptsXbqSxJ/DPSvK31m5lYGSxkkU85WRs/rVrz4Mbh56/XII/SqWXwXUzlm9SWyR6GdcslUB9JsZMZOXjG4/jisq9vNOuJmddJsY938KIVx+Vca90jZ23Fznv8ANVeW9OzYLy5x7rmtoYKKd0znq5rJqzS/A6ueazDBo9PhiH+xI3581H/aFtEOYWxnqpzXHSXXpdSk+6kVA11J/wA/L/rW31VW3OZZo+b4Twrx0YpfG2uyrGdr6lcMPoZWopvidmbxLqjb+t5Mf/HzRXzbhqffRk7I9A0Qr/Y9l0z9nj7f7ArQVx6n8aydDP8AxJ7L/r3T/wBAFXgxBr6ig70o+iPiMXH99L1ZejlA7kfSrEVyycpI6/RiKzQx9RUilvWt9GcMotGl9smwR50pB7bzT476dD
lZX655wf51nA/7WaXP1pWXYXvd2aS3sobcJOfXaP8AClF7MCT5gyeD8q/4Vmgj+9TunVgKi0eyFae12ag1K42qvmnC4wNo/wAKe+q3bghp+CeflA/pVCOOVvuozfRSacY5UOHjZSOuUIxR+67IGqy6v8SxLcvNkSYfPqopEI27di49MVGgJOMdOvFTxwyllVY2Zm+6Ap5pqcF2MHGpfqPik8tgVjjz/u1fg1S5jjCoqgDp8o/qKgt7SVpGj8pi6/eXByv1FTCAdMj3qZVKcmVCFeCurofcavezJ5c0ztHjG0ouP5VHbanc2rbraVoTnOVRf6ike2Hduc1XltwCcOaajTatYmU6q15n95dl1zUpFZJb2RlbqCq8/pSwa1eQj93NgYA+6Mfl0rL2MD0FSxxxjmRhj2NN0qdtkKOIqXvzP7zUXXLjzNxkwx6lUA/lT11JHcu6Rsx6lkyarafHpBlxdzzqhGMpjI/PrWobDRryNn0+8gDDpE0ojYjp0bqe/BrjrVqdF2cXb00PVw9KtXjdST8m9RkeoRggjyxj04H5VML+PHIj/Oo4/Dt3KR+6WFcfNIW3IeM59qL3w7LawI7XdqXdQQiuCfp9fwrGGMw0pJKWp0PC4uKu4aIbPfxKPvMv0rOmvYWHDv8AlVO+jEL7WfnPcmqjt/tCu+HJa6Z5NWdXm5Wi688eciaQfhURnUniZvxFUy5HOR0znNNZmHcVskczi2XHljP/AC2yfdaiaRf74P4VVZz6UxpPpSb0KjTvI8k8Qt/xP9R6f8fUv/oZoqPxAf8Aifah/wBfUv8A6EaK+Vb1P0uC91Hc6Hk6RZ8f8sE/9BFaEaM3AUsfQCrnhTT4bnQNMNuyzSG0h3ID82SgJxn0NXk8/TnmSRXthwxQjBOOtd0c0UaaUY6pHjVsslOq25WTY2y0WZ4vOuUmijzgAINx/MjFaMelaZBsMrzTsTyv2iOPPt3qpMftYEgLPyGLc/d7/Wke5lgLGOSdQOqqBwPr1rgq5hiJ7St6HTHLqMI6xv5s0LiwsAwzafZlI4BnLZ/XioLKyjR/tEN3C4B5VogcD0G7rWdHexKcqznJ4JJz/KrllMs9+EcmNWUBmkJ+X6DuTXPLE11H3pMPZYdyVoq5o3VzpspJMtoCRjEcKj+lT2UNgDiynjeTPDDlgfbjpWZdxRrK6xx28rIxCsFxkUgupLYFmaNTt+XCfoKwdepy6SZvaFN3lBW72/U15IZwSz6hcRgn+JeCfXgdPbio7iG6cvK99BcZHJkUrkemarW11PEEuXusiSMHkkH6Uy+1AkhZ5ZllU5w+GUn0zis41al7Dq06TpuW1/Msx2GqXHl5iXyWJKkyAYPvjn8+tQvY6yhaPyWDDHzBxz9D/hUum6zAI/tASSYgZZWyQPp61eg8T28MY/0YneQepyO+Rn8utYPEYqEtk0cP1Ok0nOVrlaxg1vdv8qSR88lsZ+oNXGh1JiYnsiFHRhIp/TNWk8WxmIEzXcO7jARSB+XWqV54hgEZMd/dykqcDaUA9M1n9axF7qy+9Gs6GHpw0qXXnYeNKv3jYtFEjDgASbc/UZ61HcaQY9yCaF26s28ZX2xnmoTci704yz20sjooO+Iln/3vUfrUNnGk+1JradQx4Z5Mb+/Qe1d9HNMTDXn09Dmll2FqyVldssx6IrqNl2jM3RFGT/OrR8H6iASXtVAx96YZ/Q1qWunppkEtysiefx+5CHAHO7BxnPbFUrTWbaZbY+eFyMt/eX2OO/tW888xF7K1vQ6VkOCg1z6P1GR+GNkFw0wlYoud6ttVBnBOOScHtUljbPYMy213ZwYwC8tuHLe+49j2x3rpZtWtLvSruwjEcZSJW3J0J7j1HHNcksiJLK8LmSEAlyefk4Hf3rkljq2IT9pJ27Hp0cvwlFr2cVfv1NOO2ub2VfImW7ZRu2Rs8TsvqpYlc9sVFqCW9vqNul82pWsuD5ZmZXGO5U9+ao2jOtxF5Uj7AGMRXnnrn3rd06aTUbX7JPDdK2xslo1beOeV+uPrjpXLJKL0On2cWrLd/MprcxpfMBbRyxhciQz7CwHquCAT7Vl6xcCa43xaa6wKw86JdrhvQj+IH8D9K1Nat4rGa1eO6WRrmEOoK4xjgjHcdqzr+UveiN4wLiNs4I2ZwBgqTwe4rWlUlFqzf3swrUKduV6X8jM1B45fLuIYoAFGDmMDB9Dgc/jWbILZtzNJGrhvmEPGPcDofpXQzRT3bN9sADGIFhtBYZJC571WaB55FiEZAxwuwcH0rupYurT2ZxVMsjU0aX3GY2kTzpvsZo7wHoqkK5+ik8/TrVRtPvBCZTbPhSQwH3lI65XqK2HtcLlLa3yzd4wVPuO4/A1prZ3TWhkFuLhmTBK53lc9M+o7E89q7IZrUi/e1OV5DTv29GfM/iB8a9qA/wCnqX/0M0UzxjHMvi7WVKPkX84/1eP+WjUV5jxOp76pJI9B0O5ni0ayVZCUWFDsbkH5R+OK6W28SzhI/PeR5gcF2wy7fTB61yGkt/xKbT/rgn/oIq3vPtX1EMPSqU43XQ+VqYmrSqy5X1Z3Nl4msJZpZbq0iExGQ8Z8qMD0AP8Ak0o1PR7wxKq3LzAH93EgYIOvH0OPauGD1LFLIjb4pZI3H8SMQfzFYSyui9m0Wszq7SSsd3pOnaFfSFDJdQTqP+WjD5fr6fjVk+GL54Ens5Xntw+VMTbwpHf+leem4kLM7SSMz/fJc5b6+taEHiLWYLcQQalcJGDuwD3+vf6Vx1spqP4JmkcbRkvfi16HaDRblyC1nqLkFmZQmT75OOntS2/h+WfEUGmXrADJ80N9ew9q5WPxp4nUof7ZuW2NuG7HX39RVxviH4ue3EB1qZVDZBAAI56Z9K43k+Jf2kdH1/DPdP7kdI+g30MQURPHIqbVdoGyB25rPi8Nap5z+dHMyMOTDASG79SODVSH4k+Kkt5IW1BpWd96ytncjdiO3HpVSPxp4m3szazcsW67jweMDIHpVQyfFL7SJqYzBycW76GrNpkOnlbi4s5YA53IZpvL3EdcA1Pplot0+RYJdwkj5UkV3Q/TI61y+saxd6v5RvmSQxLhTt5P1p9lrVxbW6W6ooiTnCDG5vU/rV1cmqyim5amH12hKpZaR9EdzPY2sK7H0sICf4vlz/h+FYt0lm115UNteO4GPLUBlVj0Ofb0p2meMLZI1ivIpJ7dWJ8iTJGSMblPY+1d54Qi0zUtEM9lYXlpAm4K0qjDt/EQepOPWvGqYKph9amx6sKVDFPlg0/lqcdpCppl4kkthdAhMCaRGAUnqQMdPrU8GmatrHiCOG00QyFtzKzS+UCFHJJ6Ad8da9F02WO0u47i0iiLwtjbdAsrnHU5+8OxqzqEr3vnXkdnBbTEBjHFlYy+ACw9D+nNcqmot26nZHK4xjyqWl77IzrPwudMkCXevG3mchvKKGTHH3dzdK5fxV/wjOk38sureVNdAAyW6XoWU+hKKMZx26074o2niKw0KfUP7WY2ckirPCwwyhuMbvQGvIZd7szMSxbqSck/jXuZbl0MR
Hncjzcxxqws+RR+873UvG/hciJLfw2ZBEMLJ5ro34461TtfHejW24xeG4UYtncC2f1NcM0ZPfFJ5ZX3r1llNJaWPL/tere6S+49Ei+IeiK7uvhoRlnVjscgEg5zjoOfSrun/FPSbPYbXwt5bpu2hpyVBPXGeleXBPYCl20PKaEt0ylnVZbW+49Suvirot3LFNe+FfNljkMqETZ2vjBPTp7dKp3HxJ0SVEVvC6SERhDukPIHQe1eclMnrml2gdqFk9HswedV32+476b4h6BKJSfDTQF12syzsSw/pRF8RNJQu8XhvDNGE3mUsfqR6+9efsinqKAi1aymkujB5zW8vuO7fx9pW1Vg8LxxFZCykyliF/u/TrU0PxAjjtZVtdMEEzDakhYttUnOAOmfc158Fp4JAwDVrLKXVC/tevsmvuPOfEks1z4i1K4aWQtLdyuSzc5Lk80VHrJ/4m95/wBfD/8AoRorw3Tjc+iUm0dhpBH9lWoP/PFP/QRVrj1P51S0k/8AEqtf+uCf+girQNfSUH+7j6I+YxC/ey9WSAehP504fU1GGxTg9bXOZofmnDNRg0u6nchoeM5p+KjDUu407ktEy49TTht65JqEMKXfmnzEtXJRMFOCM0ry/KAeAM4Pfn1NQ8elKDjpU3CyJzdOIVjyAqnIwoDZ+vWuh0Px3q+iQlbd3IzuJSQrn3I5GffFcv8Auz2prhccVz18NTqxtNXOqhiZ0ZXg2j0C1+LmrC4L3FvLIPaddwPrkoaRvi74gKTR+UxV87SJyCPQcDGK89CKD0xTq4FlFC97He85xFrJnfQfFXxA8C2U0SXECsCVkkJBxjjpyDjvXI312bm7nuFs4oBKSRHGTtTJzxVNDin7hiuzD4SnQ+BHDisbUxHx6iEv1wB9aMtn7wH40hakzXYcY9WbuVP4U7dUO6lDUJisSlj60m6oy1JmnzAokhIxTc+9MzSE+9HMPlJN1IWqMmm7sUSkaQj7x5/q5/4mt5/13f8A9CNFN1Y/8TS7/wCu7/8AoRor5t7n2kVojrdNP/Ettv8Arkv/AKCKsgnFUtOP/Eut/wDrkn/oIq0G+WvWoS9xeh85XX7x+pKCaUH3qLdQDWykc9iYMaduPrUINLup3JcSYN607dVfdTt1PmJcSYNShqgDUu40+YXKThjnrS7qg3UobFFxOJYLYFNLGow1SWsdzd3cNlY2l1eXU7FYoLWB5pXIBY4RAScAE9OgNTOairydkOFNydkrsUE56UZ4xmktoL+50z+1LfStUm0/az/bEsJmgCqwRmMgXaArMqk5wCQDTWS4NjJfpZXz2Uc4tnuo7SR4RMV3CLzAu3eQQQucnPSsViaVr8y+9GzwlZOzg/uZJuo31Y1DR9f0/UrPTb/w34gs7++JFnaz6TPHNckYyI0ZAz4yM7QcZpq6ZrLa1/Ya6BrTavkg6eumzG6GFDHMOzeBtIOcdDmmsVReqmvvQng6y3g/uZBu96N1XbXQfEt3eXtlaeFPElxc2DKt5DFo9y72xYblEiiPKEjkBsZHNRJo+vSaMNaTw34gbSzE0wvRpNz9nMagln8zZt2gAknOBij65Q/nX3opYGv/ACP7mVgaXcaSC31GdkWDSNXmaSzN+gj0+di1qOs4wnMX+3933rRtPDXiq9sINQsvB/im5s7hVeC4h0S6eOVWxtKsI8MDkYI65pfW6P8AMvvQvqVf+R/czP3Uhc1Zu9K1yz1WHSLzw9rttqU4UwWU2mTpcTBiwBSMoGYEq3IB6GntoPiX/T/+KU8S/wDEu/4/v+JNdf6L8gf95+7+T5SG+bHBB6U3i6KV3Nfeio4Ku3ZQf3Mpbj60hY460k0N3DcQ2s2nX8NxP5Xk28lpKksvmY8sohXc27I24Bzniia01KJL15dH1eNbGZbe9ZtPmAtpWbascnyfI5PAVsEnoKbxVJbyX3r0BYSq/sv7gLUwv71ZGj6+bG+v/wDhG/EAstPkkjvbk6VcCK2eP/WLI+zCFf4gxGO+Kig03VrnRG1630TWJtIRirajHp8zWoIbaczBdn3uOvWo+t0X9tfei1g6yesH9zPPNV/5Cl3/ANdn/wDQjRSamR/aV1z/AMtn/wDQjRXgvc+ojsdVp5/0G3H/AEyX/wBBFWQfeuMt7m5WJFW4lAC8AOalW7us/wDHzN/32a7qWIailY8urhFKTdzr80oNcd9su/8An6n/AO/hpn2y8/5+5/8Av4a1+svsZfUV3O23e9APvXCfb77/AJ/Lj/v63+NH2++/5/Lj/v63+NX9YfYf9nr+b8DvQfek4rhPt17/AM/lx/39P+NO+333/P5cf9/W/wAaFiX2F/Z6/m/A7vd70ZrhPt99/wA/lx/39b/Gj7fff8/lx/39b/Gj60+wv7P/AL34Hd5p273rgvt99/z+XH/f1v8AGj7fff8AP5cf9/W/xpfWpdg/s/8Avfgd7u96s6Rql7ous6fremEm9026juoAG27mRgdpPowyp9mNecC/vs/8ftz/AN/W/wAaU39//wA/tz/39b/GnOtzxs0VDA8krqX4H0brPxJ8L6hqHiHS7LRde0fwteaG2m6bFDbwTzQTSXAuZpXjM6LhnLAAOThV6dBBp/j/AMI6d8OD8P10LxLdWBikuXvz9liaW/8AtImjlNuJGwAERN3nkhcja3WvntNQvw3F9c/9/W/xp0mpai5bff3TfWZj/WvF+r0lor/f/wAA9J1qjfT7vRdz6A8b/EHw/rOpXP2K38TPY6j4rh8QXX2y2twbFYkCiO3jFyQ7uSdzlo+FUYNZ2leN9I0z4y6142XStSvNL1B9SdLWSGKOcm6jbCyBZsbQzbSVfO0ZAB4Hhn2++3t/ptz/AN/W/wAaVr++/wCf25/7+t/jVwwtKEXFX2tv6eXkE61WTWq0s9v+D5n0T4a+L/2ZC3iTw2b+SO90t7GOwYxx2VvaRSIrRtLMzvMpYMPN8xXOQ2BjFWw8f6PpJ8KPajxLqn9m6vqU+rrd2Ntbi+tL8gTLhLlhvCjIUgKWPVdoz8/Nf33/AD+3P/f1v8acL++/5/bn/v63+NEsJR53o+nX/geQ4160IqzX3f8AB8z6S8MfF7QtB8Wa1rcPhPWpIrl7HTNPt1uLeP7Lo9su3yix37nkIBaMAA/89B1rl/BPjHRPDMei2UGj6wbLSfG765F+7gDtZeSkSJjzced8uSM7f9qvFBf33/P5cf8Af1v8aFv77P8Ax+XH/f1v8aSw1K/Xp1/4ASrVGnt93y7nqWqajptz4+t9bMlzJp63cFxKYfDtjYTIElaQqsNvP5cjfdHmO6scnPQZ73UviT4UvIPEcN7pesarZ315f3+nW91pVvBcWM9wWb91eRXYkhUsUDjZJuCdgxUfOBv77Y/+mXHb/lq3+NKL6+2n/TLj/v6f8acsLSklF32fX08hRr1YtyTW66eXqfQP/CdeDx480HxvJYeJbm60jSre3TTDZWscTXUNsYo5PtH2hyFDkMP3JIwDg4wdtfjToNvd6heweFdYuG1+bT312zvHt3jnWKGSGfEqupZyDG6t
sT51yQtfMq3t55pH2ufH/XQ0i3t5hv8AS5/+/hpvDU2+v3/PsCqzj227fLufSFn8V/D0OojWfsHiiC7svFWpa9aW8EduqXaXK4jgmk8/MYPAfCSDGQM1z91440S/+GI8P6npNxeaxDaLb6fcLpsNq1j+8DNH9qiuN01sMyFYng5JG4kgGvD/ALbebW/0uf8A7+GkN7ebj/pc/T/noazWEpLv06+i7F/WKt3qvu+ffzK+qE/2ndf9dn/9CNFNkkfzG+dup70VnzM3Uj//2Q==" />
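If the hex-value threshold feels arbitrary, an alternative (a sketch only, using the relative luminance discussed earlier in this thread; rgb and the 'text' element are the ones from the snippet above) is to pick the text colour directly from the average colour's luminance:
var lum = 0.2126 * rgb.r + 0.7152 * rgb.g + 0.0722 * rgb.b; // 0-255
document.getElementById('text').style.color = lum > 127 ? '#000' : '#fff';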