I'm trying to create a thumbnail image on the client side using JavaScript and a canvas element, but when I shrink the image down, it looks terrible. It looks as if it was downsized in Photoshop with the resampling set to 'Nearest Neighbor' instead of Bicubic. I know it's possible to get this to look right, because this site can do it just fine using a canvas as well. I've tried using the same code they do, as shown in the "[Source]" link, but it still looks terrible. Is there something I'm missing, some setting that needs to be set?
EDIT:
I'm trying to resize a jpg. I have tried resizing the same jpg on the linked site and in photoshop, and it looks fine when downsized.
Here is the relevant code:
reader.onloadend = function(e)
{
var img = new Image();
var ctx = canvas.getContext("2d");
var canvasCopy = document.createElement("canvas");
var copyContext = canvasCopy.getContext("2d");
img.onload = function()
{
var ratio = 1;
if(img.width > maxWidth)
ratio = maxWidth / img.width;
else if(img.height > maxHeight)
ratio = maxHeight / img.height;
canvasCopy.width = img.width;
canvasCopy.height = img.height;
copyContext.drawImage(img, 0, 0);
canvas.width = img.width * ratio;
canvas.height = img.height * ratio;
ctx.drawImage(canvasCopy, 0, 0, canvasCopy.width, canvasCopy.height, 0, 0, canvas.width, canvas.height);
};
img.src = reader.result;
}
EDIT2:
Seems I was mistaken, the linked website wasn't doing any better of a job of downsizing the image. I tried the other methods suggested and none of them look any better. This is what the different methods resulted in:
Photoshop:
Canvas:
Image with image-rendering: optimizeQuality set and scaled with width/height:
Image with image-rendering: optimizeQuality set and scaled with -moz-transform:
Canvas resize on pixastic:
I guess this means Firefox isn't using bicubic sampling like it's supposed to. I'll just have to wait until they actually add it.
EDIT3:
Original Image
So what do you do if all the browsers (actually, Chrome 5 gave me quite good one) won't give you good enough resampling quality? You implement them yourself then! Oh come on, we're entering the new age of Web 3.0, HTML5 compliant browsers, super optimized JIT javascript compilers, multi-core(†) machines, with tons of memory, what are you afraid of? Hey, there's the word java in javascript, so that should guarantee the performance, right? Behold, the thumbnail generating code:
// returns a function that calculates lanczos weight
function lanczosCreate(lobes) {
return function(x) {
if (x > lobes)
return 0;
x *= Math.PI;
if (Math.abs(x) < 1e-16)
return 1;
var xx = x / lobes;
return Math.sin(x) * Math.sin(xx) / x / xx;
};
}
// elem: canvas element, img: image element, sx: scaled width, lobes: kernel radius
function thumbnailer(elem, img, sx, lobes) {
this.canvas = elem;
elem.width = img.width;
elem.height = img.height;
elem.style.display = "none";
this.ctx = elem.getContext("2d");
this.ctx.drawImage(img, 0, 0);
this.img = img;
this.src = this.ctx.getImageData(0, 0, img.width, img.height);
this.dest = {
width : sx,
height : Math.round(img.height * sx / img.width),
};
this.dest.data = new Array(this.dest.width * this.dest.height * 3);
this.lanczos = lanczosCreate(lobes);
this.ratio = img.width / sx;
this.rcp_ratio = 2 / this.ratio;
this.range2 = Math.ceil(this.ratio * lobes / 2);
this.cacheLanc = {};
this.center = {};
this.icenter = {};
setTimeout(this.process1, 0, this, 0);
}
thumbnailer.prototype.process1 = function(self, u) {
self.center.x = (u + 0.5) * self.ratio;
self.icenter.x = Math.floor(self.center.x);
for (var v = 0; v < self.dest.height; v++) {
self.center.y = (v + 0.5) * self.ratio;
self.icenter.y = Math.floor(self.center.y);
var a, r, g, b;
a = r = g = b = 0;
for (var i = self.icenter.x - self.range2; i <= self.icenter.x + self.range2; i++) {
if (i < 0 || i >= self.src.width)
continue;
var f_x = Math.floor(1000 * Math.abs(i - self.center.x));
if (!self.cacheLanc[f_x])
self.cacheLanc[f_x] = {};
for (var j = self.icenter.y - self.range2; j <= self.icenter.y + self.range2; j++) {
if (j < 0 || j >= self.src.height)
continue;
var f_y = Math.floor(1000 * Math.abs(j - self.center.y));
if (self.cacheLanc[f_x][f_y] == undefined)
self.cacheLanc[f_x][f_y] = self.lanczos(Math.sqrt(Math.pow(f_x * self.rcp_ratio, 2)
+ Math.pow(f_y * self.rcp_ratio, 2)) / 1000);
var weight = self.cacheLanc[f_x][f_y];
if (weight > 0) {
var idx = (j * self.src.width + i) * 4;
a += weight;
r += weight * self.src.data[idx];
g += weight * self.src.data[idx + 1];
b += weight * self.src.data[idx + 2];
}
}
}
var idx = (v * self.dest.width + u) * 3;
self.dest.data[idx] = r / a;
self.dest.data[idx + 1] = g / a;
self.dest.data[idx + 2] = b / a;
}
if (++u < self.dest.width)
setTimeout(self.process1, 0, self, u);
else
setTimeout(self.process2, 0, self);
};
thumbnailer.prototype.process2 = function(self) {
self.canvas.width = self.dest.width;
self.canvas.height = self.dest.height;
self.ctx.drawImage(self.img, 0, 0, self.dest.width, self.dest.height);
self.src = self.ctx.getImageData(0, 0, self.dest.width, self.dest.height);
var idx, idx2;
for (var i = 0; i < self.dest.width; i++) {
for (var j = 0; j < self.dest.height; j++) {
idx = (j * self.dest.width + i) * 3;
idx2 = (j * self.dest.width + i) * 4;
self.src.data[idx2] = self.dest.data[idx];
self.src.data[idx2 + 1] = self.dest.data[idx + 1];
self.src.data[idx2 + 2] = self.dest.data[idx + 2];
}
}
self.ctx.putImageData(self.src, 0, 0);
self.canvas.style.display = "block";
};
...with which you can produce results like these!
so anyway, here is a 'fixed' version of your example:
img.onload = function() {
var canvas = document.createElement("canvas");
new thumbnailer(canvas, img, 188, 3); //this produces lanczos3
// but feel free to raise it up to 8. Your client will appreciate
// that the program makes full use of his machine.
document.body.appendChild(canvas);
};
Now it's time to pit your best browsers out there and see which one will least likely increase your client's blood pressure!
Umm, where's my sarcasm tag?
(since many parts of the code are based on Anrieff Gallery Generator, is it also covered under GPL2? I don't know)
† Actually, due to limitations of JavaScript, multi-core is not supported.
Fast image resize/resample algorithm using the Hermite filter with JavaScript. Supports transparency and gives good quality. Preview:
Update: version 2.0 added on GitHub (faster, web workers + transferable objects). Finally I got it working!
Git: https://github.com/viliusle/Hermite-resize
Demo: http://viliusle.github.io/miniPaint/
/**
* Hermite resize - fast image resize/resample using Hermite filter. 1 cpu version!
*
* @param {HtmlElement} canvas
* @param {int} width
* @param {int} height
* @param {boolean} resize_canvas if true, canvas will be resized. Optional.
*/
function resample_single(canvas, width, height, resize_canvas) {
var width_source = canvas.width;
var height_source = canvas.height;
width = Math.round(width);
height = Math.round(height);
var ratio_w = width_source / width;
var ratio_h = height_source / height;
var ratio_w_half = Math.ceil(ratio_w / 2);
var ratio_h_half = Math.ceil(ratio_h / 2);
var ctx = canvas.getContext("2d");
var img = ctx.getImageData(0, 0, width_source, height_source);
var img2 = ctx.createImageData(width, height);
var data = img.data;
var data2 = img2.data;
for (var j = 0; j < height; j++) {
for (var i = 0; i < width; i++) {
var x2 = (i + j * width) * 4;
var weight = 0;
var weights = 0;
var weights_alpha = 0;
var gx_r = 0;
var gx_g = 0;
var gx_b = 0;
var gx_a = 0;
var center_y = (j + 0.5) * ratio_h;
var yy_start = Math.floor(j * ratio_h);
var yy_stop = Math.ceil((j + 1) * ratio_h);
for (var yy = yy_start; yy < yy_stop; yy++) {
var dy = Math.abs(center_y - (yy + 0.5)) / ratio_h_half;
var center_x = (i + 0.5) * ratio_w;
var w0 = dy * dy; //pre-calc part of w
var xx_start = Math.floor(i * ratio_w);
var xx_stop = Math.ceil((i + 1) * ratio_w);
for (var xx = xx_start; xx < xx_stop; xx++) {
var dx = Math.abs(center_x - (xx + 0.5)) / ratio_w_half;
var w = Math.sqrt(w0 + dx * dx);
if (w >= 1) {
//pixel too far
continue;
}
//hermite filter
weight = 2 * w * w * w - 3 * w * w + 1;
var pos_x = 4 * (xx + yy * width_source);
//alpha
gx_a += weight * data[pos_x + 3];
weights_alpha += weight;
//colors
if (data[pos_x + 3] < 255)
weight = weight * data[pos_x + 3] / 250;
gx_r += weight * data[pos_x];
gx_g += weight * data[pos_x + 1];
gx_b += weight * data[pos_x + 2];
weights += weight;
}
}
data2[x2] = gx_r / weights;
data2[x2 + 1] = gx_g / weights;
data2[x2 + 2] = gx_b / weights;
data2[x2 + 3] = gx_a / weights_alpha;
}
}
//clear and resize canvas
if (resize_canvas === true) {
canvas.width = width;
canvas.height = height;
} else {
ctx.clearRect(0, 0, width_source, height_source);
}
//draw
ctx.putImageData(img2, 0, 0);
}
Try pica - that's a highly optimized resizer with selectable algorithms. See the demo.
For example, the original image from the first post is resized in 120 ms with the Lanczos filter and a 3px window, or 60 ms with the Box filter and a 0.5px window. For a huge 17 MB image at 5000x3000px, the resize takes ~1 s on desktop and 3 s on mobile.
All the resize principles were described very well in this thread, and pica does not add rocket science. But it's optimized very well for modern JITs, and it's ready to use out of the box (via npm or bower). It also uses web workers when available to avoid interface freezes.
I also plan to add unsharp mask support soon, because it's very useful after downscale.
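For reference, here is a rough usage sketch (check pica's README for the exact current API; treat the method and option usage below as assumptions based on the version available at the time):
// assumes <img id="src"> is loaded and <canvas id="dst"> already has its target width/height set
var pica = require('pica')(); // pica exports a factory
var src = document.getElementById('src');
var dst = document.getElementById('dst');
pica.resize(src, dst) // resample src into dst, returns a Promise
  .then(function (result) {
    return pica.toBlob(result, 'image/jpeg', 0.90); // optionally re-encode the result
  })
  .then(function (blob) {
    console.log('resized to', blob.size, 'bytes');
  });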
I know this is an old thread, but it might be useful for people like me who are hitting this issue for the first time months (or years) later.
Here is some code that resizes the image every time you reload the image. I am aware this is not optimal at all, but I provide it as a proof of concept.
Also, sorry for using jQuery for simple selectors but I just feel too comfortable with the syntax.
$(document).on('ready', createImage);
$(window).on('resize', createImage);
function createImage(){
var canvas = document.getElementById('myCanvas');
canvas.width = window.innerWidth || $(window).width();
canvas.height = window.innerHeight || $(window).height();
var ctx = canvas.getContext('2d');
var img = new Image();
img.addEventListener('load', function () {
ctx.drawImage(this, 0, 0, canvas.width, canvas.height);
});
img.src = 'http://www.ruinvalor.com/Telanor/images/original.jpg';
};
html, body{
height: 100%;
width: 100%;
margin: 0;
padding: 0;
background: #000;
}
canvas{
position: absolute;
left: 0;
top: 0;
z-index: 0;
}
<script src="https://ajax.googleapis.com/ajax/libs/jquery/2.1.1/jquery.min.js"></script>
<html>
<head>
<meta charset="utf-8" />
<title>Canvas Resize</title>
</head>
<body>
<canvas id="myCanvas"></canvas>
</body>
</html>
My createImage function is called once when the document is loaded and after that it is called every time the window receives a resize event.
I tested it in Chrome 6 and Firefox 3.6, both on the Mac. This "technique" eats processor as if it was ice cream in the summer, but it does the trick.
I've put up some algorithms to do image interpolation on html canvas pixel arrays that might be useful here:
https://web.archive.org/web/20170104190425/http://jsperf.com:80/pixel-interpolation/2
These can be copy/pasted and can be used inside of web workers to resize images (or any other operation that requires interpolation - I'm using them to defish images at the moment).
I haven't added the lanczos stuff above, so feel free to add that as a comparison if you'd like.
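If it helps, here is a minimal sketch of the worker plumbing (resizeWorker.js and bilinearResize are placeholder names; ctx/dstCtx and the dimensions are assumed to be set up already):
// main thread: ship the pixels to a worker as a transferable ArrayBuffer
var worker = new Worker('resizeWorker.js');
var srcData = ctx.getImageData(0, 0, srcW, srcH);
worker.postMessage(
  { buffer: srcData.data.buffer, srcW: srcW, srcH: srcH, dstW: dstW, dstH: dstH },
  [srcData.data.buffer] // transfer, don't copy
);
worker.onmessage = function (e) {
  var out = new ImageData(new Uint8ClampedArray(e.data.buffer), dstW, dstH);
  dstCtx.putImageData(out, 0, 0);
};

// resizeWorker.js: run the interpolation off the UI thread
self.onmessage = function (e) {
  var d = e.data;
  var result = bilinearResize(new Uint8ClampedArray(d.buffer), d.srcW, d.srcH, d.dstW, d.dstH); // your pasted function
  self.postMessage({ buffer: result.buffer }, [result.buffer]);
};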
This is a JavaScript function adapted from @Telanor's code. When passing an image as base64 in the first argument, it returns the base64 of the resized image. maxWidth and maxHeight are optional.
function thumbnail(base64, maxWidth, maxHeight) {
// Max size for thumbnail
if(typeof(maxWidth) === 'undefined') var maxWidth = 500;
if(typeof(maxHeight) === 'undefined') var maxHeight = 500;
// Create and initialize two canvas
var canvas = document.createElement("canvas");
var ctx = canvas.getContext("2d");
var canvasCopy = document.createElement("canvas");
var copyContext = canvasCopy.getContext("2d");
// Create original image
var img = new Image();
img.src = base64;
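// NOTE: this assumes the data URL is decoded synchronously so img.width/img.height are available below;
// to be safe, wrap the rest of the function in img.onload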
// Determine new ratio based on max size
var ratio = 1;
if(img.width > maxWidth)
ratio = maxWidth / img.width;
else if(img.height > maxHeight)
ratio = maxHeight / img.height;
// Draw original image in second canvas
canvasCopy.width = img.width;
canvasCopy.height = img.height;
copyContext.drawImage(img, 0, 0);
// Copy and resize second canvas to first canvas
canvas.width = img.width * ratio;
canvas.height = img.height * ratio;
ctx.drawImage(canvasCopy, 0, 0, canvasCopy.width, canvasCopy.height, 0, 0, canvas.width, canvas.height);
return canvas.toDataURL();
}
I'd highly suggest you check out this link and make sure it is set to true.
Controlling image scaling behavior
Introduced in Gecko 1.9.2 (Firefox 3.6 / Thunderbird 3.1 / Fennec 1.0)
Gecko 1.9.2 introduced the mozImageSmoothingEnabled property to the canvas element; if this Boolean value is false, images won't be smoothed when scaled. This property is true by default.
cx.mozImageSmoothingEnabled = false;
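For what it's worth, the unprefixed successors of this flag are imageSmoothingEnabled and imageSmoothingQuality; a minimal sketch, assuming ctx is a 2D context and img is loaded:
ctx.imageSmoothingEnabled = true;   // standard replacement for mozImageSmoothingEnabled
ctx.imageSmoothingQuality = 'high'; // 'low' | 'medium' | 'high', where supported
ctx.drawImage(img, 0, 0, canvas.width, canvas.height);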
If you're simply trying to resize an image, I'd recommend setting width and height of the image with CSS. Here's a quick example:
.small-image {
width: 100px;
height: 100px;
}
Note that the height and width can also be set using JavaScript. Here's quick code sample:
var img = document.getElementById("my-image");
img.style.width = 100 + "px"; // Make sure you add the "px" to the end,
img.style.height = 100 + "px"; // otherwise you'll confuse IE
Also, to ensure that the resized image looks good, add the following CSS rules to the image selector:
-ms-interpolation-mode: bicubic: introduced in IE7
image-rendering: optimizeQuality: introduced in Firefox 3.6
As far as I can tell, all browsers except IE use a bicubic algorithm to resize images by default, so your resized images should look good in Firefox and Chrome.
If setting the css width and height doesn't work, you may want to play with a css transform:
-moz-transform: scale(sx[, sy])
-webkit-transform:scale(sx[, sy])
If for whatever reason you need to use a canvas, please note that there are two ways an image can be resize: by resizing the canvas with css or by drawing the image at a smaller size.
See this question for more details.
I got this image by right-clicking the canvas element in Firefox and saving it.
var img = new Image();
img.onload = function () {
console.debug(this.width,this.height);
var canvas = document.createElement('canvas'), ctx;
canvas.width = 188;
canvas.height = 150;
document.body.appendChild(canvas);
ctx = canvas.getContext('2d');
ctx.drawImage(img,0,0,188,150);
};
img.src = 'original.jpg';
so anyway, here is a 'fixed' version of your example:
var img = new Image();
// added because it wasn't defined
var canvas = document.createElement("canvas");
document.body.appendChild(canvas);
var ctx = canvas.getContext("2d");
var canvasCopy = document.createElement("canvas");
// adding it to the body
document.body.appendChild(canvasCopy);
var copyContext = canvasCopy.getContext("2d");
img.onload = function()
{
var ratio = 1;
// defining these because they weren't
var maxWidth = 188,
maxHeight = 150;
if(img.width > maxWidth)
ratio = maxWidth / img.width;
else if(img.height > maxHeight)
ratio = maxHeight / img.height;
canvasCopy.width = img.width;
canvasCopy.height = img.height;
copyContext.drawImage(img, 0, 0);
canvas.width = img.width * ratio;
canvas.height = img.height * ratio;
// the line to change
// ctx.drawImage(canvasCopy, 0, 0, canvasCopy.width, canvasCopy.height, 0, 0, canvas.width, canvas.height);
// the method signature you are using is for slicing
ctx.drawImage(canvasCopy, 0, 0, canvas.width, canvas.height);
};
// changed for example
img.src = 'original.jpg';
For resizing to an image with a width smaller than the original, I use:
function resize2(i) {
var cc = document.createElement("canvas");
cc.width = i.width / 2;
cc.height = i.height / 2;
var ctx = cc.getContext("2d");
ctx.drawImage(i, 0, 0, cc.width, cc.height);
return cc;
}
var cc = img;
while (cc.width > 64 * 2) {
cc = resize2(cc);
}
// .. than drawImage(cc, .... )
and it works =).
I have a feeling the module I wrote will produce similar results to Photoshop, as it preserves color data by averaging it rather than applying an algorithm. It's kind of slow, but to me it is the best, because it preserves all the color data.
https://github.com/danschumann/limby-resize/blob/master/lib/canvas_resize.js
It doesn't take the nearest neighbor and drop other pixels, or sample a group and take a random average. It takes the exact proportion each source pixel should output into the destination pixel. The average pixel color in the source will be the average pixel color in the destination, which, I think, these other formulas will not preserve.
An example of how to use it is at the bottom of
https://github.com/danschumann/limby-resize
UPDATE OCT 2018: These days my example is more academic than anything else. WebGL support is pretty much at 100%, so you'd be better off resizing with that to produce similar results, but faster. PICA.js does this, I believe.
The problem with some of these solutions is that they access the pixel data directly and loop through it to perform the downsampling. Depending on the size of the image, this can be very resource intensive, and it would be better to use the browser's internal algorithms.
The drawImage() function uses a linear-interpolation, nearest-neighbor resampling method. That works well when you are not resizing down by more than half the original size.
If you loop to only resize a maximum of one half at a time, the results are quite good, and much faster than accessing pixel data.
This function downsamples by half at a time until reaching the desired size:
function resize_image( src, dst, type, quality ) {
var tmp = new Image(),
canvas, context, cW, cH;
type = type || 'image/jpeg';
quality = quality || 0.92;
cW = src.naturalWidth;
cH = src.naturalHeight;
tmp.src = src.src;
tmp.onload = function() {
canvas = document.createElement( 'canvas' );
cW /= 2;
cH /= 2;
if ( cW < src.width ) cW = src.width;
if ( cH < src.height ) cH = src.height;
canvas.width = cW;
canvas.height = cH;
context = canvas.getContext( '2d' );
context.drawImage( tmp, 0, 0, cW, cH );
dst.src = canvas.toDataURL( type, quality );
if ( cW <= src.width || cH <= src.height )
return;
tmp.src = dst.src;
}
}
// The images sent as parameters can be in the DOM or be image objects
resize_image( $( '#original' )[0], $( '#smaller' )[0] );
Credits to this post
So something interesting that I found a while ago while working with canvas that might be helpful:
To resize the canvas element itself, you need to use the height="" and width="" attributes (or the canvas.width/canvas.height properties). If you use CSS to resize the canvas, it will actually stretch (i.e. resize) the content of the canvas to fit the full canvas (rather than simply increasing or decreasing the area of the canvas).
It'd be worth a shot to try drawing the image into a canvas control with the height and width attributes set to the size of the image and then using CSS to resize the canvas to the size you're looking for. Perhaps this would use a different resizing algorithm.
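A minimal sketch of that idea (assuming img is already loaded; the 200x150 display size is just an example):
var canvas = document.createElement('canvas');
canvas.width = img.width;   // intrinsic pixel size = the image's own size
canvas.height = img.height;
canvas.getContext('2d').drawImage(img, 0, 0);
// let CSS do the visual scaling down to thumbnail size
canvas.style.width = '200px';
canvas.style.height = '150px';
document.body.appendChild(canvas);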
It should also be noted that canvas behaves differently in different browsers (and even different versions of the same browser). The algorithms and techniques used in the browsers are likely to change over time (especially with Firefox 4 and Chrome 6 coming out so soon, which will place heavy emphasis on canvas rendering performance).
In addition, you may want to give SVG a shot, too, as it likely uses a different algorithm as well.
Best of luck!
Fast and simple Javascript image resizer:
https://github.com/calvintwr/blitz-hermite-resize
const blitz = Blitz.create()
/* Promise */
blitz({
source: DOM Image/DOM Canvas/jQuery/DataURL/File,
width: 400,
height: 600
}).then(output => {
// handle output
}).catch(error => {
// handle error
})
/* Await */
let resized = await blitz({...})
/* Old school callback */
const blitz = Blitz.create('callback')
blitz({...}, function(output) {
// run your callback.
})
History
This is really after many rounds of research, reading and trying.
The resizer algorithm uses @ViliusL's Hermite script (the Hermite resizer is really the fastest and gives reasonably good output), extended with the features you need.
It forks 1 worker to do the resizing so that it doesn't freeze your browser while resizing, unlike other JS resizers out there.
I converted @syockit's answer as well as the step-down approach into a reusable Angular service for anyone who's interested: https://gist.github.com/fisch0920/37bac5e741eaec60e983
I included both solutions because they both have their own pros / cons. The lanczos convolution approach is higher quality at the cost of being slower, whereas the step-wise downscaling approach produces reasonably antialiased results and is significantly faster.
Example usage:
angular.module('demo').controller('ExampleCtrl', function (imageService) {
// EXAMPLE USAGE
// NOTE: it's bad practice to access the DOM inside a controller,
// but this is just to show the example usage.
// resize by lanczos-sinc filter
imageService.resize($('#myimg')[0], 256, 256)
.then(function (resizedImage) {
// do something with resized image
})
// resize by stepping down image size in increments of 2x
imageService.resizeStep($('#myimg')[0], 256, 256)
.then(function (resizedImage) {
// do something with resized image
})
})
Thanks @syockit for an awesome answer. However, I had to reformat it a little as follows to make it work, perhaps due to DOM scanning issues:
$(document).ready(function () {
$('img').on("load", clickA);
function clickA() {
var img = this;
var canvas = document.createElement("canvas");
new thumbnailer(canvas, img, 50, 3);
document.body.appendChild(canvas);
}
function thumbnailer(elem, img, sx, lobes) {
this.canvas = elem;
elem.width = img.width;
elem.height = img.height;
elem.style.display = "none";
this.ctx = elem.getContext("2d");
this.ctx.drawImage(img, 0, 0);
this.img = img;
this.src = this.ctx.getImageData(0, 0, img.width, img.height);
this.dest = {
width: sx,
height: Math.round(img.height * sx / img.width)
};
this.dest.data = new Array(this.dest.width * this.dest.height * 3);
this.lanczos = lanczosCreate(lobes);
this.ratio = img.width / sx;
this.rcp_ratio = 2 / this.ratio;
this.range2 = Math.ceil(this.ratio * lobes / 2);
this.cacheLanc = {};
this.center = {};
this.icenter = {};
setTimeout(process1, 0, this, 0);
}
//returns a function that calculates lanczos weight
function lanczosCreate(lobes) {
return function (x) {
if (x > lobes)
return 0;
x *= Math.PI;
if (Math.abs(x) < 1e-16)
return 1
var xx = x / lobes;
return Math.sin(x) * Math.sin(xx) / x / xx;
}
}
process1 = function (self, u) {
self.center.x = (u + 0.5) * self.ratio;
self.icenter.x = Math.floor(self.center.x);
for (var v = 0; v < self.dest.height; v++) {
self.center.y = (v + 0.5) * self.ratio;
self.icenter.y = Math.floor(self.center.y);
var a, r, g, b;
a = r = g = b = 0;
for (var i = self.icenter.x - self.range2; i <= self.icenter.x + self.range2; i++) {
if (i < 0 || i >= self.src.width)
continue;
var f_x = Math.floor(1000 * Math.abs(i - self.center.x));
if (!self.cacheLanc[f_x])
self.cacheLanc[f_x] = {};
for (var j = self.icenter.y - self.range2; j <= self.icenter.y + self.range2; j++) {
if (j < 0 || j >= self.src.height)
continue;
var f_y = Math.floor(1000 * Math.abs(j - self.center.y));
if (self.cacheLanc[f_x][f_y] == undefined)
self.cacheLanc[f_x][f_y] = self.lanczos(Math.sqrt(Math.pow(f_x * self.rcp_ratio, 2) + Math.pow(f_y * self.rcp_ratio, 2)) / 1000);
var weight = self.cacheLanc[f_x][f_y];
if (weight > 0) {
var idx = (j * self.src.width + i) * 4;
a += weight;
r += weight * self.src.data[idx];
g += weight * self.src.data[idx + 1];
b += weight * self.src.data[idx + 2];
}
}
}
var idx = (v * self.dest.width + u) * 3;
self.dest.data[idx] = r / a;
self.dest.data[idx + 1] = g / a;
self.dest.data[idx + 2] = b / a;
}
if (++u < self.dest.width)
setTimeout(process1, 0, self, u);
else
setTimeout(process2, 0, self);
};
process2 = function (self) {
self.canvas.width = self.dest.width;
self.canvas.height = self.dest.height;
self.ctx.drawImage(self.img, 0, 0, self.dest.width, self.dest.height);
self.src = self.ctx.getImageData(0, 0, self.dest.width, self.dest.height);
var idx, idx2;
for (var i = 0; i < self.dest.width; i++) {
for (var j = 0; j < self.dest.height; j++) {
idx = (j * self.dest.width + i) * 3;
idx2 = (j * self.dest.width + i) * 4;
self.src.data[idx2] = self.dest.data[idx];
self.src.data[idx2 + 1] = self.dest.data[idx + 1];
self.src.data[idx2 + 2] = self.dest.data[idx + 2];
}
}
self.ctx.putImageData(self.src, 0, 0);
self.canvas.style.display = "block";
}
});
I wanted some well-defined functions out of the answers here, so I ended up with these, which I hope will be useful for others too:
function getImageFromLink(link) {
return new Promise(function (resolve) {
var image = new Image();
image.onload = function () { resolve(image); };
image.src = link;
});
}
function resizeImageToBlob(image, width, height, mime) {
return new Promise(function (resolve) {
var canvas = document.createElement('canvas');
canvas.width = width;
canvas.height = height;
canvas.getContext('2d').drawImage(image, 0, 0, width, height);
return canvas.toBlob(resolve, mime);
});
}
getImageFromLink(location.href).then(function (image) {
// calculate these based on the original size
var width = image.width / 4;
var height = image.height / 4;
return resizeImageToBlob(image, width, height, 'image/jpeg');
}).then(function (blob) {
// Do something with the result Blob object
document.querySelector('img').src = URL.createObjectURL(blob);
});
Just for the sake of testing, run this on an image opened in a tab.
I just ran a page of side-by-side comparisons and, unless something has changed recently, I could see no better downsizing (scaling) using canvas vs. simple CSS. I tested in FF6 on Mac OS X 10.7. Still slightly soft vs. the original.
I did however stumble upon something that did make a huge difference and that was using image filters in browsers that support canvas. You can actually manipulate images much like you can in Photoshop with blur, sharpen, saturation, ripple, grayscale, etc.
I then found an awesome jQuery plug-in which makes application of these filters a snap:
http://codecanyon.net/item/jsmanipulate-jquery-image-manipulation-plugin/428234
I simply apply the sharpen filter right after resizing the image which should give you the desired effect. I didn't even have to use a canvas element.
Looking for another great simple solution?
var img=document.createElement('img');
img.src=canvas.toDataURL();
$(img).css("background", backgroundColor);
$(img).width(settings.width);
$(img).height(settings.height);
This solution will use the resize algorithm of the browser! :)
Related
I use HTML5 canvas elements to resize images in my browser. It turns out that the quality is very low. I found this: Disable Interpolation when Scaling a <canvas> but it does not help to increase the quality.
Below are my CSS and JS code, as well as the image scaled with Photoshop and scaled with the canvas API.
What do I have to do to get optimal quality when scaling an image in the browser?
Note: I want to scale down a large image to a small one, modify color in a canvas and send the result from the canvas to the server.
CSS:
canvas, img {
image-rendering: optimizeQuality;
image-rendering: -moz-crisp-edges;
image-rendering: -webkit-optimize-contrast;
image-rendering: optimize-contrast;
-ms-interpolation-mode: nearest-neighbor;
}
JS:
var $img = $('<img>');
var $originalCanvas = $('<canvas>');
$img.load(function() {
var originalContext = $originalCanvas[0].getContext('2d');
originalContext.imageSmoothingEnabled = false;
originalContext.webkitImageSmoothingEnabled = false;
originalContext.mozImageSmoothingEnabled = false;
originalContext.drawImage(this, 0, 0, 379, 500);
});
The image resized with photoshop:
The image resized on canvas:
Edit:
I tried to do the downscaling in more than one step as proposed in:
Resizing an image in an HTML5 canvas and
Html5 canvas drawImage: how to apply antialiasing
This is the function I have used:
function resizeCanvasImage(img, canvas, maxWidth, maxHeight) {
var imgWidth = img.width,
imgHeight = img.height;
var ratio = 1, ratio1 = 1, ratio2 = 1;
ratio1 = maxWidth / imgWidth;
ratio2 = maxHeight / imgHeight;
// Use the smallest ratio that the image best fit into the maxWidth x maxHeight box.
if (ratio1 < ratio2) {
ratio = ratio1;
}
else {
ratio = ratio2;
}
var canvasContext = canvas.getContext("2d");
var canvasCopy = document.createElement("canvas");
var copyContext = canvasCopy.getContext("2d");
var canvasCopy2 = document.createElement("canvas");
var copyContext2 = canvasCopy2.getContext("2d");
canvasCopy.width = imgWidth;
canvasCopy.height = imgHeight;
copyContext.drawImage(img, 0, 0);
// init
canvasCopy2.width = imgWidth;
canvasCopy2.height = imgHeight;
copyContext2.drawImage(canvasCopy, 0, 0, canvasCopy.width, canvasCopy.height, 0, 0, canvasCopy2.width, canvasCopy2.height);
var rounds = 2;
var roundRatio = ratio * rounds;
for (var i = 1; i <= rounds; i++) {
console.log("Step: "+i);
// tmp
canvasCopy.width = imgWidth * roundRatio / i;
canvasCopy.height = imgHeight * roundRatio / i;
copyContext.drawImage(canvasCopy2, 0, 0, canvasCopy2.width, canvasCopy2.height, 0, 0, canvasCopy.width, canvasCopy.height);
// copy back
canvasCopy2.width = imgWidth * roundRatio / i;
canvasCopy2.height = imgHeight * roundRatio / i;
copyContext2.drawImage(canvasCopy, 0, 0, canvasCopy.width, canvasCopy.height, 0, 0, canvasCopy2.width, canvasCopy2.height);
} // end for
// copy back to canvas
canvas.width = imgWidth * roundRatio / rounds;
canvas.height = imgHeight * roundRatio / rounds;
canvasContext.drawImage(canvasCopy2, 0, 0, canvasCopy2.width, canvasCopy2.height, 0, 0, canvas.width, canvas.height);
}
Here is the result if I use a 2 step down sizing:
Here is the result if I use a 3 step down sizing:
Here is the result if I use a 4 step down sizing:
Here is the result if I use a 20 step down sizing:
Note: It turns out that from 1 step to 2 steps there is a large improvement in image quality but the more steps you add to the process the more fuzzy the image becomes.
Is there a way to solve the problem that the image gets more fuzzy the more steps you add?
Edit 2013-10-04: I tried the algorithm of GameAlchemist. Here is the result compared to Photoshop.
PhotoShop Image:
GameAlchemist's Algorithm:
Since your problem is to downscale your image, there is no point in talking about interpolation, which is about creating pixels. The issue here is downsampling.
To downsample an image, we need to turn each square of p * p pixels in the original image into a single pixel in the destination image.
For performance reasons, browsers do a very simple downsampling: to build the smaller image, they will just pick ONE pixel in the source and use its value for the destination, which 'forgets' some details and adds noise.
Yet there's an exception to that: since 2X image downsampling is very simple to compute (average 4 pixels to make one) and is used for retina/HiDPI displays, this case is handled properly; the browser does make use of 4 pixels to make one.
BUT... if you apply a 2X downsampling several times, you'll face the issue that the successive rounding errors add too much noise.
What's worse, you won't always resize by a power of two, and resizing to the nearest power plus one last resize is very noisy.
What you seek is a pixel-perfect downsampling, that is: a re-sampling of the image that takes all input pixels into account, whatever the scale.
To do that, we must compute, for each input pixel, its contribution to one, two, or four destination pixels, depending on whether the scaled projection of the input pixel is fully inside a destination pixel, overlaps an X border, a Y border, or both.
(A scheme would be nice here, but I don't have one.)
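For instance, at scale = 0.4, source pixel 2 projects onto the target interval [0.8, 1.2): 0.2 of its projected width falls into target pixel 0 and 0.2 into target pixel 1, so its color is added to both with those weights (and the same split happens independently on the Y axis). This is only an illustration of the weighting; the bookkeeping is in the code below.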
Here's an example of canvas scale vs. my pixel-perfect scale on a 1/3 scale of a wombat.
Notice that the picture might get scaled in your browser, and is .jpeg-ized by S.O.
Yet we see that there's much less noise, especially in the grass behind the wombat and in the branches on its right. The noise in the fur makes it more contrasted, but it looks like it has white hairs, unlike the source picture.
The right image is less catchy but definitely nicer.
Here's the code to do the pixel perfect downscaling :
fiddle result :
http://jsfiddle.net/gamealchemist/r6aVp/embedded/result/
fiddle itself : http://jsfiddle.net/gamealchemist/r6aVp/
// scales the image by (float) scale < 1
// returns a canvas containing the scaled image.
function downScaleImage(img, scale) {
var imgCV = document.createElement('canvas');
imgCV.width = img.width;
imgCV.height = img.height;
var imgCtx = imgCV.getContext('2d');
imgCtx.drawImage(img, 0, 0);
return downScaleCanvas(imgCV, scale);
}
// scales the canvas by (float) scale < 1
// returns a new canvas containing the scaled image.
function downScaleCanvas(cv, scale) {
if (!(scale < 1) || !(scale > 0)) throw ('scale must be a positive number <1 ');
var sqScale = scale * scale; // square scale = area of source pixel within target
var sw = cv.width; // source image width
var sh = cv.height; // source image height
var tw = Math.floor(sw * scale); // target image width
var th = Math.floor(sh * scale); // target image height
var sx = 0, sy = 0, sIndex = 0; // source x,y, index within source array
var tx = 0, ty = 0, yIndex = 0, tIndex = 0; // target x,y, x,y index within target array
var tX = 0, tY = 0; // rounded tx, ty
var w = 0, nw = 0, wx = 0, nwx = 0, wy = 0, nwy = 0; // weight / next weight x / y
// weight is weight of current source point within target.
// next weight is weight of current source point within next target's point.
var crossX = false; // does scaled px cross its current px right border ?
var crossY = false; // does scaled px cross its current px bottom border ?
var sBuffer = cv.getContext('2d').
getImageData(0, 0, sw, sh).data; // source buffer 8 bit rgba
var tBuffer = new Float32Array(3 * tw * th); // target buffer Float32 rgb
var sR = 0, sG = 0, sB = 0; // source's current point r,g,b
/* untested !
var sA = 0; //source alpha */
for (sy = 0; sy < sh; sy++) {
ty = sy * scale; // y src position within target
tY = 0 | ty; // rounded : target pixel's y
yIndex = 3 * tY * tw; // line index within target array
crossY = (tY != (0 | ty + scale));
if (crossY) { // if pixel is crossing botton target pixel
wy = (tY + 1 - ty); // weight of point within target pixel
nwy = (ty + scale - tY - 1); // ... within y+1 target pixel
}
for (sx = 0; sx < sw; sx++, sIndex += 4) {
tx = sx * scale; // x src position within target
tX = 0 | tx; // rounded : target pixel's x
tIndex = yIndex + tX * 3; // target pixel index within target array
crossX = (tX != (0 | tx + scale));
if (crossX) { // if pixel is crossing target pixel's right
wx = (tX + 1 - tx); // weight of point within target pixel
nwx = (tx + scale - tX - 1); // ... within x+1 target pixel
}
sR = sBuffer[sIndex ]; // retrieving r,g,b for curr src px.
sG = sBuffer[sIndex + 1];
sB = sBuffer[sIndex + 2];
/* !! untested : handling alpha !!
sA = sBuffer[sIndex + 3];
if (!sA) continue;
if (sA != 0xFF) {
sR = (sR * sA) >> 8; // or use /256 instead ??
sG = (sG * sA) >> 8;
sB = (sB * sA) >> 8;
}
*/
if (!crossX && !crossY) { // pixel does not cross
// just add components weighted by squared scale.
tBuffer[tIndex ] += sR * sqScale;
tBuffer[tIndex + 1] += sG * sqScale;
tBuffer[tIndex + 2] += sB * sqScale;
} else if (crossX && !crossY) { // cross on X only
w = wx * scale;
// add weighted component for current px
tBuffer[tIndex ] += sR * w;
tBuffer[tIndex + 1] += sG * w;
tBuffer[tIndex + 2] += sB * w;
// add weighted component for next (tX+1) px
nw = nwx * scale
tBuffer[tIndex + 3] += sR * nw;
tBuffer[tIndex + 4] += sG * nw;
tBuffer[tIndex + 5] += sB * nw;
} else if (crossY && !crossX) { // cross on Y only
w = wy * scale;
// add weighted component for current px
tBuffer[tIndex ] += sR * w;
tBuffer[tIndex + 1] += sG * w;
tBuffer[tIndex + 2] += sB * w;
// add weighted component for next (tY+1) px
nw = nwy * scale
tBuffer[tIndex + 3 * tw ] += sR * nw;
tBuffer[tIndex + 3 * tw + 1] += sG * nw;
tBuffer[tIndex + 3 * tw + 2] += sB * nw;
} else { // crosses both x and y : four target points involved
// add weighted component for current px
w = wx * wy;
tBuffer[tIndex ] += sR * w;
tBuffer[tIndex + 1] += sG * w;
tBuffer[tIndex + 2] += sB * w;
// for tX + 1; tY px
nw = nwx * wy;
tBuffer[tIndex + 3] += sR * nw;
tBuffer[tIndex + 4] += sG * nw;
tBuffer[tIndex + 5] += sB * nw;
// for tX ; tY + 1 px
nw = wx * nwy;
tBuffer[tIndex + 3 * tw ] += sR * nw;
tBuffer[tIndex + 3 * tw + 1] += sG * nw;
tBuffer[tIndex + 3 * tw + 2] += sB * nw;
// for tX + 1 ; tY +1 px
nw = nwx * nwy;
tBuffer[tIndex + 3 * tw + 3] += sR * nw;
tBuffer[tIndex + 3 * tw + 4] += sG * nw;
tBuffer[tIndex + 3 * tw + 5] += sB * nw;
}
} // end for sx
} // end for sy
// create result canvas
var resCV = document.createElement('canvas');
resCV.width = tw;
resCV.height = th;
var resCtx = resCV.getContext('2d');
var imgRes = resCtx.getImageData(0, 0, tw, th);
var tByteBuffer = imgRes.data;
// convert float32 array into a UInt8Clamped Array
var pxIndex = 0; //
for (sIndex = 0, tIndex = 0; pxIndex < tw * th; sIndex += 3, tIndex += 4, pxIndex++) {
tByteBuffer[tIndex] = Math.ceil(tBuffer[sIndex]);
tByteBuffer[tIndex + 1] = Math.ceil(tBuffer[sIndex + 1]);
tByteBuffer[tIndex + 2] = Math.ceil(tBuffer[sIndex + 2]);
tByteBuffer[tIndex + 3] = 255;
}
// writing result to canvas.
resCtx.putImageData(imgRes, 0, 0);
return resCV;
}
It is quite memory greedy, since a float buffer is required to store the intermediate values of the destination image (-> if we count the result canvas, we use 6 times the source image's memory in this algorithm).
It is also quite expensive, since each source pixel is used whatever the destination size, and we have to pay for getImageData / putImageData, which are also quite slow.
But there's no way to be faster than processing each source value in this case, and the situation is not that bad: for my 740 * 556 image of a wombat, processing takes between 30 and 40 ms.
Fast canvas resample with good quality: http://jsfiddle.net/9g9Nv/442/
Update: version 2.0 (faster, web workers + transferable objects) - https://github.com/viliusle/Hermite-resize
/**
* Hermite resize - fast image resize/resample using Hermite filter. 1 cpu version!
*
* @param {HtmlElement} canvas
* @param {int} width
* @param {int} height
* @param {boolean} resize_canvas if true, canvas will be resized. Optional.
*/
function resample_single(canvas, width, height, resize_canvas) {
var width_source = canvas.width;
var height_source = canvas.height;
width = Math.round(width);
height = Math.round(height);
var ratio_w = width_source / width;
var ratio_h = height_source / height;
var ratio_w_half = Math.ceil(ratio_w / 2);
var ratio_h_half = Math.ceil(ratio_h / 2);
var ctx = canvas.getContext("2d");
var img = ctx.getImageData(0, 0, width_source, height_source);
var img2 = ctx.createImageData(width, height);
var data = img.data;
var data2 = img2.data;
for (var j = 0; j < height; j++) {
for (var i = 0; i < width; i++) {
var x2 = (i + j * width) * 4;
var weight = 0;
var weights = 0;
var weights_alpha = 0;
var gx_r = 0;
var gx_g = 0;
var gx_b = 0;
var gx_a = 0;
var center_y = (j + 0.5) * ratio_h;
var yy_start = Math.floor(j * ratio_h);
var yy_stop = Math.ceil((j + 1) * ratio_h);
for (var yy = yy_start; yy < yy_stop; yy++) {
var dy = Math.abs(center_y - (yy + 0.5)) / ratio_h_half;
var center_x = (i + 0.5) * ratio_w;
var w0 = dy * dy; //pre-calc part of w
var xx_start = Math.floor(i * ratio_w);
var xx_stop = Math.ceil((i + 1) * ratio_w);
for (var xx = xx_start; xx < xx_stop; xx++) {
var dx = Math.abs(center_x - (xx + 0.5)) / ratio_w_half;
var w = Math.sqrt(w0 + dx * dx);
if (w >= 1) {
//pixel too far
continue;
}
//hermite filter
weight = 2 * w * w * w - 3 * w * w + 1;
var pos_x = 4 * (xx + yy * width_source);
//alpha
gx_a += weight * data[pos_x + 3];
weights_alpha += weight;
//colors
if (data[pos_x + 3] < 255)
weight = weight * data[pos_x + 3] / 250;
gx_r += weight * data[pos_x];
gx_g += weight * data[pos_x + 1];
gx_b += weight * data[pos_x + 2];
weights += weight;
}
}
data2[x2] = gx_r / weights;
data2[x2 + 1] = gx_g / weights;
data2[x2 + 2] = gx_b / weights;
data2[x2 + 3] = gx_a / weights_alpha;
}
}
//clear and resize canvas
if (resize_canvas === true) {
canvas.width = width;
canvas.height = height;
} else {
ctx.clearRect(0, 0, width_source, height_source);
}
//draw
ctx.putImageData(img2, 0, 0);
}
Suggestion 1 - extend the process pipe-line
You can use step-down as I describe in the links you refer to, but you appear to use them in the wrong way.
Step-down is not needed to scale images at ratios above 1:2 (typically, but not limited to). It is where you need to do a drastic down-scaling that you need to split it up into two (and rarely more) steps, depending on the content of the image (in particular where high frequencies such as thin lines occur).
Every time you down-sample an image you will lose details and information. You cannot expect the resulting image to be as clear as the original.
If you then scale down the image in many steps you will lose a lot of information in total and the result will be poor, as you already noticed.
Try with just one extra step, or at most two.
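A minimal sketch of a single intermediate step (targetWidth/targetHeight are placeholder names; img is assumed to be loaded):
// one intermediate canvas at half the source size, then the final size
var half = document.createElement('canvas');
half.width = Math.floor(img.width / 2);
half.height = Math.floor(img.height / 2);
half.getContext('2d').drawImage(img, 0, 0, half.width, half.height);
var out = document.createElement('canvas');
out.width = targetWidth;   // assumed final thumbnail size
out.height = targetHeight;
out.getContext('2d').drawImage(half, 0, 0, half.width, half.height, 0, 0, out.width, out.height);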
Convolutions
In the case of Photoshop, notice that it applies a convolution after the image has been re-sampled, such as sharpen. It's not just bi-cubic interpolation that takes place, so in order to fully emulate Photoshop we need to also add the steps Photoshop does (with the default setup).
For this example I will use my original answer that you refer to in your post, but I have added a sharpen convolution to it to improve quality as a post process (see demo at bottom).
Here is the code for adding a sharpen filter (it's based on a generic convolution filter - I put the weight matrix for sharpen inside it, as well as a mix factor to adjust how pronounced the effect is):
Usage:
sharpen(context, width, height, mixFactor);
The mixFactor is a value between [0.0, 1.0] and allows you to tone down the sharpen effect - rule of thumb: the smaller the size, the less of the effect is needed.
Function (based on this snippet):
function sharpen(ctx, w, h, mix) {
var weights = [0, -1, 0, -1, 5, -1, 0, -1, 0],
katet = Math.round(Math.sqrt(weights.length)),
half = (katet * 0.5) |0,
dstData = ctx.createImageData(w, h),
dstBuff = dstData.data,
srcBuff = ctx.getImageData(0, 0, w, h).data,
y = h;
while(y--) {
var x = w;
while(x--) {
var sy = y,
sx = x,
dstOff = (y * w + x) * 4,
r = 0, g = 0, b = 0, a = 0;
for (var cy = 0; cy < katet; cy++) {
for (var cx = 0; cx < katet; cx++) {
var scy = sy + cy - half;
var scx = sx + cx - half;
if (scy >= 0 && scy < h && scx >= 0 && scx < w) {
var srcOff = (scy * w + scx) * 4;
var wt = weights[cy * katet + cx];
r += srcBuff[srcOff] * wt;
g += srcBuff[srcOff + 1] * wt;
b += srcBuff[srcOff + 2] * wt;
a += srcBuff[srcOff + 3] * wt;
}
}
}
dstBuff[dstOff] = r * mix + srcBuff[dstOff] * (1 - mix);
dstBuff[dstOff + 1] = g * mix + srcBuff[dstOff + 1] * (1 - mix);
dstBuff[dstOff + 2] = b * mix + srcBuff[dstOff + 2] * (1 - mix)
dstBuff[dstOff + 3] = srcBuff[dstOff + 3];
}
}
ctx.putImageData(dstData, 0, 0);
}
The result of using this combination will be:
ONLINE DEMO HERE
Depending on how much of the sharpening you want to add to the blend you can get result from default "blurry" to very sharp:
Suggestion 2 - low level algorithm implementation
If you want to get the best result quality-wise, you'll need to go low-level and consider implementing, for example, this brand-new algorithm to do this.
See Interpolation-Dependent Image Downsampling (2011) from IEEE.
Here is a link to the paper in full (PDF).
There are no implementations of this algorithm in JavaScript that I know of at this time, so you're in for a handful if you want to throw yourself at this task.
The essence is (excerpts from the paper):
Abstract
An interpolation oriented adaptive down-sampling algorithm is proposed
for low bit-rate image coding in this paper. Given an image, the
proposed algorithm is able to obtain a low resolution image, from
which a high quality image with the same resolution as the input
image can be interpolated. Different from the traditional
down-sampling algorithms, which are independent from the
interpolation process, the proposed down-sampling algorithm hinges the
down-sampling to the interpolation process. Consequently, the
proposed down-sampling algorithm is able to maintain the original
information of the input image to the largest extent. The down-sampled
image is then fed into JPEG. A total variation (TV) based post
processing is then applied to the decompressed low resolution image.
Ultimately, the processed image is interpolated to maintain the
original resolution of the input image. Experimental results verify
that utilizing the downsampled image by the proposed algorithm, an
interpolated image with much higher quality can be achieved. Besides,
the proposed algorithm is able to achieve superior performance than
JPEG for low bit rate image coding.
(see provided link for all details, formulas etc.)
If you wish to use canvas only, the best result will come from multiple down-steps. But that's not good enough yet. For better quality you need a pure JS implementation. We just released pica - a high speed downscaler with variable quality/speed. In short, it resizes 1280*1024px in ~0.1 s, and a 5000*3000px image in 1 s, with the highest quality (Lanczos filter with 3 lobes). Pica has a demo, where you can play with your images, quality levels, and even try it on mobile devices.
Pica does not have an unsharp mask yet, but that will be added very soon. That's much easier than implementing a high speed convolution filter for resize.
Why use the canvas to resize images? Modern browsers all use bicubic interpolation — the same process used by Photoshop (if you're doing it right) — and they do it faster than the canvas process. Just specify the image size you want (use only one dimension, height or width, to resize proportionally).
This is supported by most browsers, including later versions of IE. Earlier versions may require browser-specific CSS.
A simple function (using jQuery) to resize an image would be like this:
function resizeImage(img, percentage) {
var coeff = percentage/100,
width = $(img).width(),
height = $(img).height();
return {"width": width*coeff, "height": height*coeff}
}
Then just use the returned value to resize the image in one or both dimensions.
Obviously there are different refinements you could make, but this gets the job done.
Paste the following code into the console of this page and watch what happens to the gravatars:
function resizeImage(img, percentage) {
var coeff = percentage/100,
width = $(img).width(),
height = $(img).height();
return {"width": width*coeff, "height": height*coeff}
}
$('.user-gravatar32 img').each(function(){
var newDimensions = resizeImage( this, 150);
this.style.width = newDimensions.width + "px";
this.style.height = newDimensions.height + "px";
});
Not the right answer for people who really need to resize the image itself, but just to shrink the file size.
I had a problem with "directly from the camera" pictures that my customers often uploaded as "uncompressed" JPEGs.
Not so well known is that canvas supports (in most browsers, as of 2017) changing the quality of the JPEG output:
data = canvas.toDataURL('image/jpeg', .85); // quality in [0..1], default 0.92
With this trick I could reduce 4k x 3k pics of >10 MB to 1 or 2 MB; sure, it depends on your needs.
look here
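A minimal sketch of the whole round-trip (fileInput is a hypothetical file input element; the 0.85 quality is just an example):
fileInput.onchange = function () {
  var img = new Image();
  img.onload = function () {
    var canvas = document.createElement('canvas');
    canvas.width = img.width;
    canvas.height = img.height;
    canvas.getContext('2d').drawImage(img, 0, 0);
    var data = canvas.toDataURL('image/jpeg', 0.85); // re-encoded JPEG as a data URL
    console.log('approx. size in bytes:', Math.round(data.length * 3 / 4));
  };
  img.src = URL.createObjectURL(fileInput.files[0]);
};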
I found a solution that doesn't need to directly access the pixel data and loop through it to perform the downsampling. Depending on the size of the image, that can be very resource intensive, and it is better to use the browser's internal algorithms.
The drawImage() function uses a linear-interpolation, nearest-neighbor resampling method. That works well when you are not resizing down by more than half the original size.
If you loop to only resize a maximum of one half at a time, the results are quite good, and much faster than accessing pixel data.
This function downsamples by half at a time until reaching the desired size:
function resize_image( src, dst, type, quality ) {
var tmp = new Image(),
canvas, context, cW, cH;
type = type || 'image/jpeg';
quality = quality || 0.92;
cW = src.naturalWidth;
cH = src.naturalHeight;
tmp.src = src.src;
tmp.onload = function() {
canvas = document.createElement( 'canvas' );
cW /= 2;
cH /= 2;
if ( cW < src.width ) cW = src.width;
if ( cH < src.height ) cH = src.height;
canvas.width = cW;
canvas.height = cH;
context = canvas.getContext( '2d' );
context.drawImage( tmp, 0, 0, cW, cH );
dst.src = canvas.toDataURL( type, quality );
if ( cW <= src.width || cH <= src.height )
return;
tmp.src = dst.src;
}
}
// The images sent as parameters can be in the DOM or be image objects
resize_image( $( '#original' )[0], $( '#smaller' )[0] );
This is the improved Hermite resize filter that utilises 1 worker so that the window doesn't freeze.
https://github.com/calvintwr/blitz-hermite-resize
const blitz = Blitz.create()
/* Promise */
blitz({
source: DOM Image/DOM Canvas/jQuery/DataURL/File,
width: 400,
height: 600
}).then(output => {
// handle output
}).catch(error => {
// handle error
})
/* Await */
let resized = await blitz({...})
/* Old school callback */
const blitz = Blitz.create('callback')
blitz({...}, function(output) {
// run your callback.
})
Here is a reusable Angular service for high quality image / canvas resizing: https://gist.github.com/fisch0920/37bac5e741eaec60e983
The service supports lanczos convolution and step-wise downscaling. The convolution approach is higher quality at the cost of being slower, whereas the step-wise downscaling approach produces reasonably antialiased results and is significantly faster.
Example usage:
angular.module('demo').controller('ExampleCtrl', function (imageService) {
// EXAMPLE USAGE
// NOTE: it's bad practice to access the DOM inside a controller,
// but this is just to show the example usage.
// resize by lanczos-sinc filter
imageService.resize($('#myimg')[0], 256, 256)
.then(function (resizedImage) {
// do something with resized image
})
// resize by stepping down image size in increments of 2x
imageService.resizeStep($('#myimg')[0], 256, 256)
.then(function (resizedImage) {
// do something with resized image
})
})
Maybe you can try this, which is what I always use in my projects. This way you get not only a high quality image, but any other element on your canvas as well.
/*
* @param canvas => canvas object
* @param rate => the pixel quality
*/
function setCanvasSize(canvas, rate) {
const scaleRate = rate;
canvas.width = window.innerWidth * scaleRate;
canvas.height = window.innerHeight * scaleRate;
canvas.style.width = window.innerWidth + 'px';
canvas.style.height = window.innerHeight + 'px';
canvas.getContext('2d').scale(scaleRate, scaleRate);
}
Instead of .85, if you pass 1.0 you will get the highest-quality JPEG output (at a larger file size):
data=canvas.toDataURL('image/jpeg', 1.0);
You get a clearer and brighter image. Please check.
I really try to avoid running through image data, especially on larger images. Thus I came up with a rather simple way to decently reduce image size without any restrictions or limitations using a few extra steps.
This routine goes down to the lowest possible half step before the desired target size. Then it scales up to twice the target size and halves once more. It sounds funny at first, but the results are astoundingly good, and it gets there swiftly.
function resizeCanvas(canvas, newWidth, newHeight) {
let ctx = canvas.getContext('2d');
let buffer = document.createElement('canvas');
buffer.width = ctx.canvas.width;
buffer.height = ctx.canvas.height;
let ctxBuf = buffer.getContext('2d');
let scaleX = newWidth / ctx.canvas.width;
let scaleY = newHeight / ctx.canvas.height;
let scaler = Math.min(scaleX, scaleY);
//see if target scale is less than half...
if (scaler < 0.5) {
//while loop in case target scale is less than quarter...
while (scaler < 0.5) {
ctxBuf.canvas.width = ctxBuf.canvas.width * 0.5;
ctxBuf.canvas.height = ctxBuf.canvas.height * 0.5;
ctxBuf.scale(0.5, 0.5);
ctxBuf.drawImage(canvas, 0, 0);
ctxBuf.setTransform(1, 0, 0, 1, 0, 0);
ctx.canvas.width = ctxBuf.canvas.width;
ctx.canvas.height = ctxBuf.canvas.height;
ctx.drawImage(buffer, 0, 0);
scaleX = newWidth / ctxBuf.canvas.width;
scaleY = newHeight / ctxBuf.canvas.height;
scaler = Math.min(scaleX, scaleY);
}
//only if the scaler is now larger than half, double target scale trick...
if (scaler > 0.5) {
scaleX *= 2.0;
scaleY *= 2.0;
ctxBuf.canvas.width = ctxBuf.canvas.width * scaleX;
ctxBuf.canvas.height = ctxBuf.canvas.height * scaleY;
ctxBuf.scale(scaleX, scaleY);
ctxBuf.drawImage(canvas, 0, 0);
ctxBuf.setTransform(1, 0, 0, 1, 0, 0);
scaleX = 0.5;
scaleY = 0.5;
}
} else
ctxBuf.drawImage(canvas, 0, 0);
//wrapping things up...
ctx.canvas.width = newWidth;
ctx.canvas.height = newHeight;
ctx.scale(scaleX, scaleY);
ctx.drawImage(buffer, 0, 0);
ctx.setTransform(1, 0, 0, 1, 0, 0);
}
context.scale(xScale, yScale)
<canvas id="c"></canvas>
<hr/>
<img id="i" />
<script>
var i = document.getElementById('i');
i.onload = function(){
var width = this.naturalWidth,
height = this.naturalHeight,
canvas = document.getElementById('c'),
ctx = canvas.getContext('2d');
canvas.width = Math.floor(width / 2);
canvas.height = Math.floor(height / 2);
ctx.scale(0.5, 0.5);
ctx.drawImage(this, 0, 0);
ctx.rect(0,0,500,500);
ctx.stroke();
// restore original 1x1 scale
ctx.scale(2, 2);
ctx.rect(0,0,500,500);
ctx.stroke();
};
i.src = 'https://static.md/b70a511140758c63f07b618da5137b5d.png';
</script>
DEMO: Resizing images with JS and HTML canvas - demo fiddle.
You will find 3 different methods of doing this resize, which will help you understand how the code works and why.
https://jsfiddle.net/1b68eLdr/93089/
The full code of both demos, and the TypeScript method that you may want to use in your code, can be found in the GitHub project.
https://github.com/eyalc4/ts-image-resizer
This is the final code:
export class ImageTools {
base64ResizedImage: string = null;
constructor() {
}
ResizeImage(base64image: string, width: number = 1080, height: number = 1080) {
let img = new Image();
img.src = base64image;
img.onload = () => {
// Check if the image require resize at all
if(img.height <= height && img.width <= width) {
this.base64ResizedImage = base64image;
// TODO: Call method to do something with the resize image
}
else {
// Make sure the width and height preserve the original aspect ratio and adjust if needed
if(img.height > img.width) {
width = Math.floor(height * (img.width / img.height));
}
else {
height = Math.floor(width * (img.height / img.width));
}
let resizingCanvas: HTMLCanvasElement = document.createElement('canvas');
let resizingCanvasContext = resizingCanvas.getContext("2d");
// Start with original image size
resizingCanvas.width = img.width;
resizingCanvas.height = img.height;
// Draw the original image on the (temp) resizing canvas
resizingCanvasContext.drawImage(img, 0, 0, resizingCanvas.width, resizingCanvas.height);
let curImageDimensions = {
width: Math.floor(img.width),
height: Math.floor(img.height)
};
let halfImageDimensions = {
width: null,
height: null
};
// Quickly reduce the size by 50% each time in few iterations until the size is less then
// 2x time the target size - the motivation for it, is to reduce the aliasing that would have been
// created with direct reduction of very big image to small image
while (curImageDimensions.width * 0.5 > width) {
// Reduce the resizing canvas by half and refresh the image
halfImageDimensions.width = Math.floor(curImageDimensions.width * 0.5);
halfImageDimensions.height = Math.floor(curImageDimensions.height * 0.5);
resizingCanvasContext.drawImage(resizingCanvas, 0, 0, curImageDimensions.width, curImageDimensions.height,
0, 0, halfImageDimensions.width, halfImageDimensions.height);
curImageDimensions.width = halfImageDimensions.width;
curImageDimensions.height = halfImageDimensions.height;
}
// Now do the final resize of the resizingCanvas to meet the dimension requirements,
// drawing directly to the output canvas, which will output the final image
let outputCanvas: HTMLCanvasElement = document.createElement('canvas');
let outputCanvasContext = outputCanvas.getContext("2d");
outputCanvas.width = width;
outputCanvas.height = height;
outputCanvasContext.drawImage(resizingCanvas, 0, 0, curImageDimensions.width, curImageDimensions.height,
0, 0, width, height);
// output the canvas pixels as an image. params: format, quality
this.base64ResizedImage = outputCanvas.toDataURL('image/jpeg', 0.85);
// TODO: Call method to do something with the resize image
}
};
}
}
I'm really struggling with a couple problems in the HTML5 canvas.
I've posted the project to GitHub pages (https://swedy13.github.io/) and added an image (the circles are in motion) so you can see the issue. Basically, if you scroll down you'll find several green circles bouncing around on the page. I'd like to replace those with my client logos.
I'm calling requestAnimation from three files based on different actions, all of which can be found in https://github.com/swedy13/swedy13.github.io/tree/master/assets/js
Filenames:
- filters.js (calls requestAnimation when you use the filters)
- main.js (on load and resize)
- portfolio.js (this is where the canvas code is)
Update: I've added the "portfolio.js" code below so the answer can be self-contained.
function runAnimation(width, height, type){
var canvas = document.getElementsByTagName('canvas')[0];
var c = canvas.getContext('2d');
// ---- DIMENSIONS ---- //
// Container
var x = width;
var y = height - 65;
canvas.width = x;
canvas.height = y;
var container = {x: 0 ,y: 0 ,width: x, height: y};
// Portrait Variables
var cPos = 200;
var cMargin = 70;
var cSpeed = 3;
var r = x*.075;
if (y > x && x >= 500) {
cPos = x * (x / y) - 150;
cMargin = 150;
}
// Landscape Variables
if (x > y) {
cPos = y * (y / x) - 50;
cMargin = 150;
cSpeed = 3;
r = x*.05;
}
// ---- CIRCLES ---- //
// Circles
var circles = [];
var img = new Image();
// Gets active post ids and count
var activeName = [];
var activeLogo = [];
var activePosts = $('.active').map(function() {
activeName.push($(this).text().replace(/\s+/g, '-').toLowerCase());
// Returns the image source
/*activeLogo.push($(this).find('img').prop('src'));*/
// Returns an image node
var elem = document.getElementsByClassName($(this).text().replace(/\s+/g, '-').toLowerCase())
activeLogo.push(elem[0].childNodes[0]);
});
// Populates circle data
for (var i = 0; i < $('.active').length; i++) {
circles.push({
id:activeName[i],
r:r,
color: 100,
/*image: activeLogo[i],*/
x:Math.random() * cPos + cMargin,
y:Math.random() * cPos + cMargin,
vx:Math.random() * cSpeed + .25,
vy:Math.random() * cSpeed + .25
});
}
// ---- DRAW ---- //
requestAnimationFrame(draw);
function draw(){
c.fillStyle = 'white';
c.fillRect(container.x, container.y, container.width, container.height);
for (var i = 0; i < circles.length; i++){
/*var img = new Image();
var path = circles[i].image;*/
/*var size = circles[i].r * 2;*/
/*img.src = circles[4].image;*/
var img = activeLogo[i];
img.onload = function (circles) {
/*c.drawImage(img, 0, 0, size, size);*/
var pattern = c.createPattern(this, "repeat");
c.fillStyle = pattern;
c.fill();
};
c.fillStyle = 'hsl(' + circles[i].color + ', 100%, 50%)';
c.beginPath();
c.arc(circles[i].x, circles[i].y, circles[i].r, 0, 2*Math.PI, false);
c.fill();
// If the circle size/position is greater than the canvas width, bounce x
if ((circles[i].x + circles[i].vx + circles[i].r > container.width) || (circles[i].x - circles[i].r + circles[i].vx < container.x)) {
circles[i].vx = -circles[i].vx;
}
// If the circle size/position is greater than the canvas width, bounce y
if ((circles[i].y + circles[i].vy + circles[i].r > container.height) || (circles[i].y - circles[i].r + circles[i].vy < container.y)){
circles[i].vy = -circles[i].vy;
}
// Generates circle motion by adding position and velocity each frame
circles[i].x += circles[i].vx;
circles[i].y += circles[i].vy;
}
requestAnimationFrame(draw);
}
}
The way it works right now is:
1. I have my portfolio content set to "display: none" (eventually it will be a pop-up when they click on one of the circles).
2. The canvas is getting the portfolio objects from the DOM, including the image that I can't get to work.
3. If I use the "onload()" function, I can get the images to show up and repeat in the background. But it's just a static background - the circles are moving above it and revealing the background. That isn't what I want.
So basically, I'm trying to figure out how to attach the background image to the circle (based on the circle ID).
----------------- UPDATE -----------------
I can now clip the image to a circle and get the circle to move in the background. But it isn't visible on the page (I can tell it's moving by console logging its position). The only time I see anything is when the circle lines up with the image's position; then it shows.
function runAnimation(width, height, type){
var canvas = document.getElementsByTagName('canvas')[0];
var c = canvas.getContext("2d");
canvas.width = width;
canvas.height = height;
// Collects portfolio information from the DOM
var activeName = [];
var activeLogo = [];
$('.active').map(function() {
var text = $(this).text().replace(/\s+/g, '-').toLowerCase();
var elem = document.getElementsByClassName(text);
activeName.push(text);
activeLogo.push(elem[0].childNodes[0]);
});
var img = new Image();
img.onload = start;
var circles = [];
var cPos = 200;
var cMargin = 70;
var cSpeed = 3;
for (var i = 0; i < 1; i++) {
circles.push({
id: activeName[i],
img: activeLogo[i],
size: 50,
xPos: Math.random() * cPos + cMargin,
yPos: Math.random() * cPos + cMargin,
xVel: Math.random() * cSpeed + .25,
yVel: Math.random() * cSpeed + .25,
});
img.src = circles[i].img;
}
requestAnimationFrame(start);
function start(){
for (var i = 0; i < circles.length; i++) {
var circle = createImageInCircle(circles[i].img, circles[i].size, circles[i].xPos, circles[i].yPos);
c.drawImage(circle, circles[i].size, circles[i].size);
animateCircle(circles[i]);
}
requestAnimationFrame(start);
}
function createImageInCircle(img, radius, x, y){
var canvas2 = document.createElement('canvas');
var c2 = canvas2.getContext('2d');
canvas2.width = canvas2.height = radius*2;
c2.fillStyle = 'white';
c2.beginPath();
c2.arc(x, y, radius, 0, Math.PI*2);
c2.fill();
c2.globalCompositeOperation = 'source-atop';
c2.drawImage(img, 0, 0, 100, 100);
return(canvas2);
}
function animateCircle(circle) {
// If the circle size/position is greater than the canvas width, bounce x
if ((circle.xPos + circle.xVel + circle.size > canvas.width) || (circle.xPos - circle.size + circle.xVel < 0)) {
console.log('Bounce X');
circle.xVel = -circle.xVel;
}
// If the circle size/position is greater than the canvas width, bounce y
if ((circle.yPos + circle.yVel + circle.size > canvas.height) || (circle.yPos + circle.yVel - circle.size < 0)) {
console.log('Bounce Y');
circle.yVel = -circle.yVel;
}
// Generates circle motion by adding position and velocity each frame
circle.xPos += circle.xVel;
circle.yPos += circle.yVel;
}
}
I'm not sure if I'm animating the correct thing. I've tried animating canvas2, but that didn't make sense to me.
PS - Sorry for the GitHub formatting, not sure why it looks like that.
PPS - Apologies for any junk code I didn't clean up. I've tried a lot of stuff and probably lost track of some of the changes.
PPPS - And forgive me for not making the answer self-contained. I thought linking to GitHub would be more useful, but I've updated the question to contain all the necessary info. Thanks for the feedback.
To get you started...
Here's how to clip an image into a circle using compositing.
The example code creates a single canvas logo-ball that you can reuse for each of your bouncing balls.
var logoball1=createImageInCircle(logoImg1,50);
var logoball2=createImageInCircle(logoImg2,50);
Then you can draw each logo-ball onto your main canvas like this:
ctx.drawImage(logoball1,35,40);
ctx.drawImage(logoball2,100,75);
There are many examples here on Stack Overflow of how to animate the balls around the canvas, so I leave that part to you (a minimal sketch is included after the snippet below).
var canvas=document.getElementById("canvas");
var ctx=canvas.getContext("2d");
var cw=canvas.width;
var ch=canvas.height;
var img=new Image();
img.onload=start;
img.src="https://dl.dropboxusercontent.com/u/139992952/m%26m600x455.jpg";
function start(){
var copy=createImageInCircle(img,50);
ctx.drawImage(copy,20,75);
ctx.drawImage(copy,150,120);
ctx.drawImage(copy,280,75);
}
function createImageInCircle(img,radius){
var c=document.createElement('canvas');
var cctx=c.getContext('2d');
c.width=c.height=radius*2;
cctx.beginPath();
cctx.arc(radius,radius,radius,0,Math.PI*2);
cctx.fill();
cctx.globalCompositeOperation='source-atop';
cctx.drawImage(img,radius-img.width/2,radius-img.height/2);
return(c);
}
body{ background-color:white; }
#canvas{border:1px solid red; }
<canvas id="canvas" width=512 height=512></canvas>
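To connect this to the original question, here is a minimal, hedged sketch of the animation part. It reuses canvas, ctx, cw, ch, img and createImageInCircle from the snippet above; the speeds and start position are placeholders, not part of the original answer.
// Sketch: bounce one pre-rendered logo ball around the main canvas.
// Call this after the image has loaded, e.g. at the end of start().
function animateLogoBall() {
  var ball = { x: 60, y: 60, vx: 2, vy: 1.5, size: 100, img: createImageInCircle(img, 50) };
  (function tick() {
    ctx.clearRect(0, 0, cw, ch);
    // reverse the velocity when the ball touches an edge
    if (ball.x < 0 || ball.x + ball.size > cw) { ball.vx = -ball.vx; }
    if (ball.y < 0 || ball.y + ball.size > ch) { ball.vy = -ball.vy; }
    ball.x += ball.vx;
    ball.y += ball.vy;
    ctx.drawImage(ball.img, ball.x, ball.y);
    requestAnimationFrame(tick);
  })();
}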
Hi, I want to make a blur effect particle like this:
Can I use shadowBlur and shadowOffsetX/shadowOffsetY to do this? The actual shine should glow and fade a little bit repeatedly, so if I have to write some kind of animation, how can I achieve this?
I have tried this code (jsfiddle example) but it doesn't look like the effect. So I wonder how to blur and glow the particle at the same time?
const canvas = document.getElementById('canvas');
const ctx = canvas.getContext('2d');
canvas.width = window.innerWidth;
canvas.height = window.innerHeight;
const ra = window.requestAnimationFrame
|| window.webkitRequestAnimationFrame
|| window.mozRequestAnimationFrame
|| window.oRequestAnimationFrame
|| window.msRequestAnimationFrame
|| function(callback) {
window.setTimeout(callback, 1000 / 60);
};
class Particle {
constructor(options) {
this.ctx = options.context;
this.x = options.x;
this.y = options.y;
this.radius = options.radius;
this.lightSize = this.radius;
this.color = options.color;
this.lightDirection = true;
}
glow() {
const lightSpeed = 0.5;
this.lightSize += this.lightDirection ? lightSpeed : -lightSpeed;
if (this.lightSize > this.radius || this.lightSize < this.radius) {
this.lightDirection = !this.lightDirection;
}
}
render() {
this.ctx.clearRect(0, 0, canvas.width, canvas.height);
this.glow();
this.ctx.globalAlpha = 0.5;
this.ctx.fillStyle = this.color;
this.ctx.beginPath();
this.ctx.arc(this.x, this.y, this.lightSize,
0, Math.PI * 2
);
this.ctx.fill();
this.ctx.globalAlpha = 0.62;
this.ctx.beginPath();
this.ctx.arc(this.x, this.y, this.radius * 0.7, 0, Math.PI * 2);
this.ctx.shadowColor = this.color;
this.ctx.shadowBlur = 6;
this.ctx.shadowOffsetX = 0;
this.ctx.shadowOffsetY = 0;
this.ctx.fill();
}
}
var particle = new Particle({
context: ctx,
x: 60,
y: 80,
radius: 12,
color: '#4d88ff'
});
function run() {
particle.render();
ra(run);
}
run();
<canvas id='canvas'></canvas>
There are several ways to do this. For a particle system my choice is to pre-render the blur using a blur filter. A common filter is the convolution filter. It uses a small array to determine how much neighbouring pixels contribute to each pixel of the image. You are best off looking up convolution functions to understand it.
Wiki Convolution and Wiki Gaussian blur for more info.
I am not much of a fan of the standard Gaussian blur or of the commonly used convolution filter, so in the demo snippet below you can find my version, which I think creates a much better blur. The convolution blur filter is procedurally created and lives in the imageTools object.
To use it, create a filter by passing an object with the properties size (the blur amount in pixels) and power (the strength). Lower powers give less spread on the blur.
// image must be loaded or created
var blurFilter = imageTools.createBlurConvolutionArray({size:17,power:1}); // size must be greater than 2 and must be odd eg 3,5,7,9...
// apply the convolution filter on the image. The returned image may be a new
//image if the input image does not have a ctx property pointing to a 2d canvas context
image = imageTools.applyConvolutionFilter(image,blurFilter);
In the demo I create an image, draw a circle on it, copy it, and pad it so that there is room for the blur. Then I create a blur filter and apply it to the image.
When I render the particles I first draw all the unblurred images, then draw the blurred copies with ctx.globalCompositeOperation = "screen" so that they have a shine. To vary the amount of shine I use ctx.globalAlpha to vary the intensity of the rendered blurred image. To improve the FX I draw the blurred image twice, once with an oscillating scale and once at a fixed scale and alpha.
The demo is simple: imageTools can be found at the top, then there is some code to set up the canvas and handle the resize event, then the code that creates the images and applies the filters, and finally the render loop starts, adds some particles and renders everything.
Look in the function drawParticles for how I draw everything.
imageTools has all the image functions you will need. imageTools.applyConvolutionFilter will apply any filter (sharpen, outline, and many more); you just need to create the appropriate filter. The apply step uses a photon-count colour model, so it gives a very high quality result, especially for blur-type effects (though for sharpening you may want to go in and change the squaring of the RGB values; I personally like it, others do not).
The blur filter is not fast, so if you apply it to larger images it would be best to break the work up so you do not block page execution.
A cheap way to get a blur is to copy the image to blur to a smaller version of itself, eg 1/4 then render it scaled back to normal size, the canvas will apply bilinear filtering on the image give a blur effect. Not the best quality but for most situations it is indistinguishable from the more sophisticated blur that I have presented.
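As a rough illustration of that cheap trick (the 1/4 factor is just an example, not part of the demo below), a sketch could look like this:
// Cheap blur sketch: shrink the image to 1/4 size, then draw it back at
// full size and let the browser's bilinear filtering smear the pixels.
function cheapBlur(img) {
  var small = document.createElement('canvas');
  small.width = Math.max(1, Math.floor(img.width / 4));
  small.height = Math.max(1, Math.floor(img.height / 4));
  small.getContext('2d').drawImage(img, 0, 0, small.width, small.height);
  var out = document.createElement('canvas');
  out.width = img.width;
  out.height = img.height;
  out.getContext('2d').drawImage(small, 0, 0, out.width, out.height);
  return out; // blurred copy, same size as the input
}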
UPDATE
Changed the code so that the particles have a bit of a 3D FX, to show that the blur can work at larger scales too. The blue particles are a 32 by 32 image, and the blur is 9 pixels with the blurred image being 50 by 50 pixels.
var imageTools = (function () {
var tools = {
canvas : function (width, height) { // create a blank image (canvas)
var c = document.createElement("canvas");
c.width = width;
c.height = height;
return c;
},
createImage : function (width, height) {
var image = this.canvas(width, height);
image.ctx = image.getContext("2d");
return image;
},
image2Canvas : function (img) {
var image = this.canvas(img.width, img.height);
image.ctx = image.getContext("2d");
image.ctx.drawImage(img, 0, 0);
return image;
},
padImage : function(img,amount){
var image = this.canvas(img.width + amount * 2, img.height + amount * 2);
image.ctx = image.getContext("2d");
image.ctx.drawImage(img, amount, amount);
return image;
},
getImageData : function (image) {
return (image.ctx || (this.image2Canvas(image).ctx)).getImageData(0, 0, image.width, image.height);
},
putImageData : function (image, imgData){
(image.ctx || (this.image2Canvas(image).ctx)).putImageData(imgData,0, 0);
return image;
},
createBlurConvolutionArray : function(options){
var i, j, d; // misc vars
var filterArray = []; // the array to create
var size = options.size === undefined ? 3: options.size; // array size
var center = Math.floor(size / 2); // center of array
// the power ? needs descriptive UI options
var power = options.power === undefined ? 1: options.power;
// dist to corner
var maxDist = Math.sqrt(center * center + center * center);
var dist = 0; // distance sum
var sum = 0; // weight sum
var centerWeight; // center calculated weight
var totalDistance; // calculated total distance from center
// first pass get the total distance
for(i = 0; i < size; i++){
for(j = 0; j < size; j++){
d = (maxDist-Math.sqrt((center-i)*(center-i)+(center-j)*(center-j)));
d = Math.pow(d,power)
dist += d;
}
}
totalDistance = dist; // total distance to all points;
// second pass get the total weight of all but center
for(i = 0; i < size; i++){
for(j = 0; j < size; j++){
d = (maxDist-Math.sqrt((center-i)*(center-i)+(center-j)*(center-j)));
d = Math.pow(d,power)
d = d/totalDistance;
sum += d;
}
}
var scale = 1/sum;
sum = 0; // used to check
for(i = 0; i < size; i++){
for(j = 0; j < size; j++){
d = (maxDist-Math.sqrt((center-i)*(center-i)+(center-j)*(center-j)));
d = Math.pow(d,power)
d = d/totalDistance;
filterArray.push(d*scale);
}
}
return filterArray;
},
applyConvolutionFilter : function(image,filter){
var imageData = this.getImageData(image);
var imageDataResult = this.getImageData(image);
var w = imageData.width;
var h = imageData.height;
var data = imageData.data;
var data1 = imageDataResult.data;
var side = Math.round(Math.sqrt(filter.length));
var halfSide = Math.floor(side/2);
var r,g,b,a,c;
for(var y = 0; y < h; y++){
for(var x = 0; x < w; x++){
var ind = y*4*w+x*4;
r = 0;
g = 0;
b = 0;
a = 0;
for (var cy=0; cy<side; cy++) {
for (var cx=0; cx<side; cx++) {
var scy = y + cy - halfSide;
var scx = x + cx - halfSide;
if (scy >= 0 && scy < h && scx >= 0 && scx < w) {
var srcOff = (scy*w+scx)*4;
var wt = filter[cy*side+cx];
r += data[srcOff+0] * data[srcOff+0] * wt;
g += data[srcOff+1] * data[srcOff+1] * wt;
b += data[srcOff+2] * data[srcOff+2] * wt;
a += data[srcOff+3] * data[srcOff+3] * wt;
}
}
}
data1[ind+0] = Math.sqrt(Math.max(0,r));
data1[ind+1] = Math.sqrt(Math.max(0,g));
data1[ind+2] = Math.sqrt(Math.max(0,b));
data1[ind+3] = Math.sqrt(Math.max(0,a));
}
}
return this.putImageData(image,imageDataResult);
}
};
return tools;
})();
/** SimpleFullCanvasMouse.js begin **/
const CANVAS_ELEMENT_ID = "canv";
const U = undefined;
var w, h, cw, ch; // short cut vars
var canvas, ctx;
var globalTime = 0;
var createCanvas, resizeCanvas, setGlobals;
var L = typeof log === "function" ? log : function(d){ console.log(d); }
createCanvas = function () {
var c,cs;
cs = (c = document.createElement("canvas")).style;
c.id = CANVAS_ELEMENT_ID;
cs.position = "absolute";
cs.top = cs.left = "0px";
cs.zIndex = 1000;
document.body.appendChild(c);
return c;
}
resizeCanvas = function () {
if (canvas === U) { canvas = createCanvas(); }
canvas.width = window.innerWidth;
canvas.height = window.innerHeight;
ctx = canvas.getContext("2d");
if (typeof setGlobals === "function") { setGlobals(); }
}
setGlobals = function(){
cw = (w = canvas.width) / 2; ch = (h = canvas.height) / 2;
if(particles && particles.length > 0){
particles.length = 0;
}
}
resizeCanvas(); // create and size canvas
window.addEventListener("resize",resizeCanvas); // add resize event
const IMAGE_SIZE = 32;
const IMAGE_SIZE_HALF = 16;
const GRAV = 2001;
const NUM_PARTICLES = 90;
var background = imageTools.createImage(8,8);
var grad = ctx.createLinearGradient(0,0,0,8);
grad.addColorStop(0,"#000");
grad.addColorStop(1,"#048");
background.ctx.fillStyle = grad;
background.ctx.fillRect(0,0,8,8);
var circle = imageTools.createImage(IMAGE_SIZE,IMAGE_SIZE);
circle.ctx.fillStyle = "#5BF";
circle.ctx.arc(IMAGE_SIZE_HALF, IMAGE_SIZE_HALF, IMAGE_SIZE_HALF -2,0, Math.PI * 2);
circle.ctx.fill();
var blurFilter = imageTools.createBlurConvolutionArray({size:9,power:1}); // size must be greater than 2 and must be odd eg 3,5,7,9...
var blurCircle = imageTools.padImage(circle,9);
blurCircle = imageTools.applyConvolutionFilter(blurCircle,blurFilter)
var sun = imageTools.createImage(64,64);
grad = ctx.createRadialGradient(32,32,0,32,32,32);
grad.addColorStop(0,"#FF0");
grad.addColorStop(1,"#A40");
sun.ctx.fillStyle = grad;
sun.ctx.arc(32,32,32 -2,0, Math.PI * 2);
sun.ctx.fill();
var sunBlur = imageTools.padImage(sun,17);
blurFilter = imageTools.createBlurConvolutionArray({size:17,power:1}); // size must be greater than 2 and must be odd eg 3,5,7,9...
sunBlur = imageTools.applyConvolutionFilter(sunBlur,blurFilter);
var particles = [];
var createParticle = function(x,y,dx,dy){
var dir = Math.atan2(y-ch,x-cw);
var dist = Math.sqrt(Math.pow(y-ch,2)+Math.pow(x-cw,2));
var v = Math.sqrt(GRAV / dist); // get apporox orbital speed
return {
x : x,
y : y,
dx : dx + Math.cos(dir + Math.PI/2) * v, // set orbit speed at tangent
dy : dy + Math.sin(dir + Math.PI/2) * v,
s : (Math.random() + Math.random() + Math.random())/4 + 0.5, // scale
v : (Math.random() + Math.random() + Math.random()) / 3 + 2, // glow vary rate
};
}
var depthSort = function(a,b){
return b.y - a.y;
}
var updateParticles = function(){
var i,p,f,dist,dir;
for(i = 0; i < particles.length; i ++){
p = particles[i];
dist = Math.sqrt(Math.pow(cw-p.x,2)+Math.pow(ch-p.y,2));
dir = Math.atan2(ch-p.y,cw-p.x);
f = GRAV * 1 / (dist * dist);
p.dx += Math.cos(dir) * f;
p.dy += Math.sin(dir) * f;
p.x += p.dx;
p.y += p.dy;
p.rx = ((p.x - cw ) / (p.y + h)) * h + cw;
p.ry = ((p.y - ch ) / (p.y + h)) * h * -0.051+ ch;
//p.ry = ((h-p.y) - ch) * 0.1 + ch;
p.rs = (p.s / (p.y + h)) * h
}
particles.sort(depthSort)
}
var drawParticles = function(){
var i,j,p,f,dist,dir;
// draw behind the sun
for(i = 0; i < particles.length; i ++){
p = particles[i];
if(p.y - ch < 0){
break;
}
ctx.setTransform(p.rs,0,0,p.rs,p.rx,p.ry);
ctx.drawImage(circle,-IMAGE_SIZE_HALF,-IMAGE_SIZE_HALF);
}
// draw glow for behind the sun
ctx.globalCompositeOperation = "screen";
var iw = -blurCircle.width/2;
for(j = 0; j < i; j ++){
p = particles[j];
ctx.globalAlpha = ((Math.sin(globalTime / (50 * p.v)) + 1) / 2) * 0.6 + 0.4;
var scale = (1-(Math.sin(globalTime / (50 * p.v)) + 1) / 2) * 0.6 + 0.6;
ctx.setTransform(p.rs * 1.5 * scale,0,0,p.rs * 1.5* scale,p.rx,p.ry);
ctx.drawImage(blurCircle,iw,iw);
// second pass to intensify the glow
ctx.globalAlpha = 0.7;
ctx.setTransform(p.rs * 1.1,0,0,p.rs * 1.1,p.rx,p.ry);
ctx.drawImage(blurCircle,iw,iw);
}
// draw the sun
ctx.globalCompositeOperation = "source-over";
ctx.globalAlpha = 1;
ctx.setTransform(1,0,0,1,cw,ch);
ctx.drawImage(sun,-sun.width/2,-sun.height/2);
ctx.globalAlpha = 1;
ctx.globalCompositeOperation = "screen";
ctx.setTransform(1,0,0,1,cw,ch);
ctx.drawImage(sunBlur,-sunBlur.width/2,-sunBlur.height/2);
var scale = Math.sin(globalTime / 100) *0.5 + 1;
ctx.globalAlpha = (Math.cos(globalTime / 100) + 1) * 0.2 + 0.4;;
ctx.setTransform(1 + scale,0,0,1 + scale,cw,ch);
ctx.drawImage(sunBlur,-sunBlur.width/2,-sunBlur.height/2);
ctx.globalAlpha = 1;
ctx.globalCompositeOperation = "source-over";
// draw in front the sun
for(j = i; j < particles.length; j ++){
p = particles[j];
if(p.y > -h){ // don't draw past the near view plane
ctx.setTransform(p.rs,0,0,p.rs,p.rx,p.ry);
ctx.drawImage(circle,-IMAGE_SIZE_HALF,-IMAGE_SIZE_HALF);
}
}
ctx.globalCompositeOperation = "screen";
var iw = -blurCircle.width/2;
for(j = i; j < particles.length; j ++){
p = particles[j];
if(p.y > -h){ // don't draw past the near view plane
ctx.globalAlpha = ((Math.sin(globalTime / (50 * p.v)) + 1) / 2) * 0.6 + 0.4;
var scale = (1-(Math.sin(globalTime / (50 * p.v)) + 1) / 2) * 0.6 + 0.6;
ctx.setTransform(p.rs * 1.5 * scale,0,0,p.rs * 1.5* scale,p.rx,p.ry);
ctx.drawImage(blurCircle,iw,iw);
// second pass to intensify the glow
ctx.globalAlpha = 0.7;
ctx.setTransform(p.rs * 1.1,0,0,p.rs * 1.1,p.rx,p.ry);
ctx.drawImage(blurCircle,iw,iw);
}
}
ctx.globalCompositeOperation = "source-over";
}
var addParticles = function(count){
var ww = (h-10)* 2;
var cx = cw - ww/2;
var cy = ch - ww/2;
for(var i = 0; i < count; i ++){
particles.push(createParticle(cx + Math.random() * ww,cy + Math.random() * ww, Math.random() - 0.5, Math.random() - 0.5));
}
}
function display(){ // put code in here
if(particles.length === 0){
addParticles(NUM_PARTICLES);
}
ctx.setTransform(1,0,0,1,0,0); // reset transform
ctx.globalAlpha = 1; // reset alpha
ctx.drawImage(background,0,0,w,h)
updateParticles();
drawParticles();
ctx.globalAlpha = 1;
ctx.globalCompositeOperation = "source-over";
}
function update(timer){ // Main update loop
globalTime = timer;
display(); // call demo code
requestAnimationFrame(update);
}
requestAnimationFrame(update);
/** SimpleFullCanvasMouse.js end **/
I have a webpage in which users upload an image of some hand written work. Sometimes it's scanned pencil which can be very difficult to read.
Is it possible to have a slider/button that I could use to darken or maybe even sharpen a particular image? I would need a slider/button per image, as the page contains several user-uploaded images.
Thanks.
Yes, there are two ways: one is CSS filters (see posit labs' answer), the other is with canvas. Here is a nice tutorial for that, and here is my demo.
For the demo, you would have to use an image from your own domain (otherwise the canvas becomes tainted and you can't access the pixels); that's why you see the data URI src in the image, it is the only way to make the image same-origin from within the fiddle.
HTML
<img id="myImage" src="mydomain/img.png">
<button class="filter-btn" data-filter="darken" data-img="#myImage">Darken</button>
<button class="filter-btn" data-filter="sharpen" data-img="#myImage">Sharpen</button>
If you copy and paste the JavaScript, the only thing you have to do is use this markup for it to work; the image can be configured however you want, the buttons are the important part.
Each button has a filter-btn class, to indicate that it's intended to apply a filter, then, you specify the filter via the data-filter attribute (in this case it can be sharpen or darken), and finally you link the button to the image via the data-img attribute, where you can specify any css selector to get to the image.
JavaScript
Remember, you don't have to touch any of these if you follow the HTML markup, but if you have any questions about the code, shoot!
ImageFilter = {}
ImageFilter.init = function () {
var buttons = document.querySelectorAll(".filter-btn");
for (var i = 0; i < buttons.length; i++) {
var btn = buttons[i];
var filter = btn.dataset.filter;
var img = btn.dataset.img;
document.querySelector(img).crossOrigin = "Anonymous"; // img holds a CSS selector string
(function (filter, img) {
btn.addEventListener("click", function () {
ImageFilter.doFilter(filter, img);
});
})(filter, img);
}
}
window.addEventListener("load", ImageFilter.init);
ImageFilter.getImage = function (selector) {
return document.querySelector(selector);
}
ImageFilter.createData = function (canvas, w, h) {
var context = canvas.getContext("2d");
return context.createImageData(w, h);
}
ImageFilter.doFilter = function (type, image) {
var image = ImageFilter.getImage(image);
switch (type) {
case "darken":
var adjustment = -5;
var canvas = ImageFilter.newCanvas(image);
var data = ImageFilter.getData(canvas);
var actualData = data.data;
for (var i = 0; i < actualData.length; i += 4) { // step over RGBA, leave alpha alone
actualData[i] += adjustment;     // red
actualData[i + 1] += adjustment; // green
actualData[i + 2] += adjustment; // blue
}
ImageFilter.putData(data, canvas);
var newImg = image.cloneNode(true);
newImg.src = ImageFilter.getSource(canvas);
newImg.id = image.id;
replaceNode(image, newImg);
break;
case "sharpen":
var weights = [0, -1, 0, -1, 5, -1,
0, -1, 0];
var canvas = ImageFilter.newCanvas(image);
var data = ImageFilter.getData(canvas);
var side = Math.round(Math.sqrt(weights.length));
var halfSide = Math.floor(side / 2);
var src = data.data;
var sw = data.width;
var sh = data.height;
var w = sw;
var h = sh;
var output = ImageFilter.createData(canvas, w, h);
var dst = output.data;
var alphaFac = 1;
for (var y = 0; y < h; y++) {
for (var x = 0; x < w; x++) {
var sy = y;
var sx = x;
var dstOff = (y * w + x) * 4;
var r = 0,
g = 0,
b = 0,
a = 0;
for (var cy = 0; cy < side; cy++) {
for (var cx = 0; cx < side; cx++) {
var scy = sy + cy - halfSide;
var scx = sx + cx - halfSide;
if (scy >= 0 && scy < sh && scx >= 0 && scx < sw) {
var srcOff = (scy * sw + scx) * 4;
var wt = weights[cy * side + cx];
r += src[srcOff] * wt;
g += src[srcOff + 1] * wt;
b += src[srcOff + 2] * wt;
a += src[srcOff + 3] * wt;
}
}
}
dst[dstOff] = r;
dst[dstOff + 1] = g;
dst[dstOff + 2] = b;
dst[dstOff + 3] = a + alphaFac * (255 - a);
}
}
ImageFilter.putData(output, canvas);
var newImg = image.cloneNode(true);
newImg.src = ImageFilter.getSource(canvas);
replaceNode(image, newImg);
break;
}
}
ImageFilter.newCanvas = function (image) {
var canvas = document.createElement("canvas");
canvas.width = image.width;
canvas.height = image.height;
var context = canvas.getContext("2d");
context.drawImage(image, 0, 0, image.width, image.height);
return canvas;
}
ImageFilter.getData = function (canvas) {
var context = canvas.getContext("2d");
return context.getImageData(0, 0, canvas.width, canvas.height);
}
ImageFilter.putData = function (data, canvas) {
var context = canvas.getContext("2d");
context.putImageData(data, 0, 0);
}
ImageFilter.getSource = function (canvas) {
return canvas.toDataURL();
}
function replaceNode(node1, node2) {
var parent = node1.parentNode;
var next = node1.nextSibling;
if (next) parent.insertBefore(node2, next);
else parent.appendChild(node2);
parent.removeChild(node1);
}
That's it, see the demo, hope it helps!
Updates
Firefox fix: creating a new image and replacing the old one each time seems to fix the Firefox bug where it doesn't update the image's src. (29/01/15 2:07 a.m.)
Short answer: yes.
The easiest way to do this would be with CSS Filters, but they aren't supported on old browsers (support table). The example below applies a 200% contrast filter.
filter: contrast(2);
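Since you want a control per image, a minimal sketch of wiring a range slider to each image's CSS filter could look like this (the class name and data-img attribute are placeholders, not part of the answer above):
// Sketch: each <input type="range" class="contrast-slider" data-img="#scan1">
// controls the contrast of the image selected by its data-img attribute.
document.querySelectorAll('.contrast-slider').forEach(function (slider) {
  slider.addEventListener('input', function () {
    var img = document.querySelector(slider.dataset.img);
    img.style.filter = 'contrast(' + slider.value + ')'; // brightness() etc. also work
  });
});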
Another option would be to use HTML Canvas to draw the images and manually manipulate the pixels. It's not very fast, and it's much more complicated than CSS Filters. I won't go into depth, but here is an article about filtering images with canvas.
In my opinion, the users should be responsible for uploading quality images. It seems silly to correct their mistake by adding extra controls to your site.
I use HTML5 canvas elements to resize images in my browser. It turns out that the quality is very low. I found this: Disable Interpolation when Scaling a <canvas>, but it does not help to increase the quality.
Below is my css and js code as well as the image scalled with Photoshop and scaled in the canvas API.
What do I have to do to get optimal quality when scaling an image in the browser?
Note: I want to scale down a large image to a small one, modify color in a canvas and send the result from the canvas to the server.
CSS:
canvas, img {
image-rendering: optimizeQuality;
image-rendering: -moz-crisp-edges;
image-rendering: -webkit-optimize-contrast;
image-rendering: optimize-contrast;
-ms-interpolation-mode: nearest-neighbor;
}
JS:
var $img = $('<img>');
var $originalCanvas = $('<canvas>');
$img.load(function() {
var originalContext = $originalCanvas[0].getContext('2d');
originalContext.imageSmoothingEnabled = false;
originalContext.webkitImageSmoothingEnabled = false;
originalContext.mozImageSmoothingEnabled = false;
originalContext.drawImage(this, 0, 0, 379, 500);
});
The image resized with photoshop:
The image resized on canvas:
Edit:
I tried to do the downscaling in more than one step, as proposed in:
Resizing an image in an HTML5 canvas and
Html5 canvas drawImage: how to apply antialiasing
This is the function I have used:
function resizeCanvasImage(img, canvas, maxWidth, maxHeight) {
var imgWidth = img.width,
imgHeight = img.height;
var ratio = 1, ratio1 = 1, ratio2 = 1;
ratio1 = maxWidth / imgWidth;
ratio2 = maxHeight / imgHeight;
// Use the smallest ratio so that the image best fits into the maxWidth x maxHeight box.
if (ratio1 < ratio2) {
ratio = ratio1;
}
else {
ratio = ratio2;
}
var canvasContext = canvas.getContext("2d");
var canvasCopy = document.createElement("canvas");
var copyContext = canvasCopy.getContext("2d");
var canvasCopy2 = document.createElement("canvas");
var copyContext2 = canvasCopy2.getContext("2d");
canvasCopy.width = imgWidth;
canvasCopy.height = imgHeight;
copyContext.drawImage(img, 0, 0);
// init
canvasCopy2.width = imgWidth;
canvasCopy2.height = imgHeight;
copyContext2.drawImage(canvasCopy, 0, 0, canvasCopy.width, canvasCopy.height, 0, 0, canvasCopy2.width, canvasCopy2.height);
var rounds = 2;
var roundRatio = ratio * rounds;
for (var i = 1; i <= rounds; i++) {
console.log("Step: "+i);
// tmp
canvasCopy.width = imgWidth * roundRatio / i;
canvasCopy.height = imgHeight * roundRatio / i;
copyContext.drawImage(canvasCopy2, 0, 0, canvasCopy2.width, canvasCopy2.height, 0, 0, canvasCopy.width, canvasCopy.height);
// copy back
canvasCopy2.width = imgWidth * roundRatio / i;
canvasCopy2.height = imgHeight * roundRatio / i;
copyContext2.drawImage(canvasCopy, 0, 0, canvasCopy.width, canvasCopy.height, 0, 0, canvasCopy2.width, canvasCopy2.height);
} // end for
// copy back to canvas
canvas.width = imgWidth * roundRatio / rounds;
canvas.height = imgHeight * roundRatio / rounds;
canvasContext.drawImage(canvasCopy2, 0, 0, canvasCopy2.width, canvasCopy2.height, 0, 0, canvas.width, canvas.height);
}
Here is the result if I use a 2 step down sizing:
Here is the result if I use a 3 step down sizing:
Here is the result if I use a 4 step down sizing:
Here is the result if I use a 20 step down sizing:
Note: It turns out that from 1 step to 2 steps there is a large improvement in image quality but the more steps you add to the process the more fuzzy the image becomes.
Is there a way to solve the problem that the image gets more fuzzy the more steps you add?
Edit 2013-10-04: I tried the algorithm of GameAlchemist. Here is the result compared to Photoshop.
PhotoShop Image:
GameAlchemist's Algorithm:
Since your problem is to downscale your image, there is no point in talking about interpolation, which is about creating pixels. The issue here is downsampling.
To downsample an image, we need to turn each square of p * p pixels in the original image into a single pixel in the destination image.
For performance reasons browsers do a very simple downsampling: to build the smaller image, they just pick ONE pixel in the source and use its value for the destination, which 'forgets' some details and adds noise.
Yet there's an exception to that: since 2X image downsampling is very simple to compute (average 4 pixels to make one) and is used for retina/HiDPI displays, that case is handled properly (the browser really does use 4 pixels to make one).
BUT... if you use a 2X downsampling several times, you'll face the issue that the successive rounding errors add too much noise.
What's worse, you won't always resize by a power of two, and resizing to the nearest power plus one last resize is very noisy.
What you seek is a pixel-perfect downsampling, that is: a re-sampling of the image that takes all input pixels into account, whatever the scale.
To do that we must compute, for each input pixel, its contribution to one, two, or four destination pixels, depending on whether the scaled projection of the input pixel is entirely inside a destination pixel, overlaps an X border, a Y border, or both.
(A diagram would be nice here, but I don't have one.)
Here's an example of canvas scaling vs my pixel-perfect scaling on a 1/3 scale of a wombat.
Notice that the picture might get scaled in your browser, and is JPEG-compressed by S.O.
Yet we see that there's much less noise, especially in the grass behind the wombat and in the branches on its right. The noise in the fur makes it more contrasted, but it looks like it has white hairs, unlike the source picture.
The right image is less catchy but definitely nicer.
Here's the code to do the pixel perfect downscaling :
fiddle result :
http://jsfiddle.net/gamealchemist/r6aVp/embedded/result/
fiddle itself : http://jsfiddle.net/gamealchemist/r6aVp/
// scales the image by (float) scale < 1
// returns a canvas containing the scaled image.
function downScaleImage(img, scale) {
var imgCV = document.createElement('canvas');
imgCV.width = img.width;
imgCV.height = img.height;
var imgCtx = imgCV.getContext('2d');
imgCtx.drawImage(img, 0, 0);
return downScaleCanvas(imgCV, scale);
}
// scales the canvas by (float) scale < 1
// returns a new canvas containing the scaled image.
function downScaleCanvas(cv, scale) {
if (!(scale < 1) || !(scale > 0)) throw ('scale must be a positive number <1 ');
var sqScale = scale * scale; // square scale = area of source pixel within target
var sw = cv.width; // source image width
var sh = cv.height; // source image height
var tw = Math.floor(sw * scale); // target image width
var th = Math.floor(sh * scale); // target image height
var sx = 0, sy = 0, sIndex = 0; // source x,y, index within source array
var tx = 0, ty = 0, yIndex = 0, tIndex = 0; // target x,y, x,y index within target array
var tX = 0, tY = 0; // rounded tx, ty
var w = 0, nw = 0, wx = 0, nwx = 0, wy = 0, nwy = 0; // weight / next weight x / y
// weight is weight of current source point within target.
// next weight is weight of current source point within next target's point.
var crossX = false; // does scaled px cross its current px right border ?
var crossY = false; // does scaled px cross its current px bottom border ?
var sBuffer = cv.getContext('2d').
getImageData(0, 0, sw, sh).data; // source buffer 8 bit rgba
var tBuffer = new Float32Array(3 * tw * th); // target buffer Float32 rgb
var sR = 0, sG = 0, sB = 0; // source's current point r,g,b
/* untested !
var sA = 0; //source alpha */
for (sy = 0; sy < sh; sy++) {
ty = sy * scale; // y src position within target
tY = 0 | ty; // rounded : target pixel's y
yIndex = 3 * tY * tw; // line index within target array
crossY = (tY != (0 | ty + scale));
if (crossY) { // if pixel is crossing botton target pixel
wy = (tY + 1 - ty); // weight of point within target pixel
nwy = (ty + scale - tY - 1); // ... within y+1 target pixel
}
for (sx = 0; sx < sw; sx++, sIndex += 4) {
tx = sx * scale; // x src position within target
tX = 0 | tx; // rounded : target pixel's x
tIndex = yIndex + tX * 3; // target pixel index within target array
crossX = (tX != (0 | tx + scale));
if (crossX) { // if pixel is crossing target pixel's right
wx = (tX + 1 - tx); // weight of point within target pixel
nwx = (tx + scale - tX - 1); // ... within x+1 target pixel
}
sR = sBuffer[sIndex ]; // retrieving r,g,b for curr src px.
sG = sBuffer[sIndex + 1];
sB = sBuffer[sIndex + 2];
/* !! untested : handling alpha !!
sA = sBuffer[sIndex + 3];
if (!sA) continue;
if (sA != 0xFF) {
sR = (sR * sA) >> 8; // or use /256 instead ??
sG = (sG * sA) >> 8;
sB = (sB * sA) >> 8;
}
*/
if (!crossX && !crossY) { // pixel does not cross
// just add components weighted by squared scale.
tBuffer[tIndex ] += sR * sqScale;
tBuffer[tIndex + 1] += sG * sqScale;
tBuffer[tIndex + 2] += sB * sqScale;
} else if (crossX && !crossY) { // cross on X only
w = wx * scale;
// add weighted component for current px
tBuffer[tIndex ] += sR * w;
tBuffer[tIndex + 1] += sG * w;
tBuffer[tIndex + 2] += sB * w;
// add weighted component for next (tX+1) px
nw = nwx * scale
tBuffer[tIndex + 3] += sR * nw;
tBuffer[tIndex + 4] += sG * nw;
tBuffer[tIndex + 5] += sB * nw;
} else if (crossY && !crossX) { // cross on Y only
w = wy * scale;
// add weighted component for current px
tBuffer[tIndex ] += sR * w;
tBuffer[tIndex + 1] += sG * w;
tBuffer[tIndex + 2] += sB * w;
// add weighted component for next (tY+1) px
nw = nwy * scale
tBuffer[tIndex + 3 * tw ] += sR * nw;
tBuffer[tIndex + 3 * tw + 1] += sG * nw;
tBuffer[tIndex + 3 * tw + 2] += sB * nw;
} else { // crosses both x and y : four target points involved
// add weighted component for current px
w = wx * wy;
tBuffer[tIndex ] += sR * w;
tBuffer[tIndex + 1] += sG * w;
tBuffer[tIndex + 2] += sB * w;
// for tX + 1; tY px
nw = nwx * wy;
tBuffer[tIndex + 3] += sR * nw;
tBuffer[tIndex + 4] += sG * nw;
tBuffer[tIndex + 5] += sB * nw;
// for tX ; tY + 1 px
nw = wx * nwy;
tBuffer[tIndex + 3 * tw ] += sR * nw;
tBuffer[tIndex + 3 * tw + 1] += sG * nw;
tBuffer[tIndex + 3 * tw + 2] += sB * nw;
// for tX + 1 ; tY +1 px
nw = nwx * nwy;
tBuffer[tIndex + 3 * tw + 3] += sR * nw;
tBuffer[tIndex + 3 * tw + 4] += sG * nw;
tBuffer[tIndex + 3 * tw + 5] += sB * nw;
}
} // end for sx
} // end for sy
// create result canvas
var resCV = document.createElement('canvas');
resCV.width = tw;
resCV.height = th;
var resCtx = resCV.getContext('2d');
var imgRes = resCtx.getImageData(0, 0, tw, th);
var tByteBuffer = imgRes.data;
// convert float32 array into a UInt8Clamped Array
var pxIndex = 0; //
for (sIndex = 0, tIndex = 0; pxIndex < tw * th; sIndex += 3, tIndex += 4, pxIndex++) {
tByteBuffer[tIndex] = Math.ceil(tBuffer[sIndex]);
tByteBuffer[tIndex + 1] = Math.ceil(tBuffer[sIndex + 1]);
tByteBuffer[tIndex + 2] = Math.ceil(tBuffer[sIndex + 2]);
tByteBuffer[tIndex + 3] = 255;
}
// writing result to canvas.
resCtx.putImageData(imgRes, 0, 0);
return resCV;
}
It is quite memory greedy, since a float buffer is required to store the intermediate values of the destination image (so, counting the result canvas, we use 6 times the source image's memory in this algorithm).
It is also quite expensive, since each source pixel is used whatever the destination size, and we have to pay for getImageData / putImageData, which are also quite slow.
But there's no way to be faster than processing each source value in this case, and the situation is not that bad: for my 740 * 556 image of a wombat, processing takes between 30 and 40 ms.
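For reference, a minimal usage sketch of the function above; the image path is a placeholder, and 1/3 matches the wombat example.
// Usage sketch: downscale a loaded image to 1/3 and show the result.
var img = new Image();
img.onload = function () {
  var scaled = downScaleImage(img, 1 / 3); // returns a canvas
  document.body.appendChild(scaled);
};
img.src = 'wombat.jpg'; // placeholder path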
Fast canvas resample with good quality: http://jsfiddle.net/9g9Nv/442/
Update: version 2.0 (faster, web workers + transferable objects) - https://github.com/viliusle/Hermite-resize
/**
* Hermite resize - fast image resize/resample using Hermite filter. 1 cpu version!
*
* @param {HTMLCanvasElement} canvas
* @param {int} width
* @param {int} height
* @param {boolean} resize_canvas if true, canvas will be resized. Optional.
*/
function resample_single(canvas, width, height, resize_canvas) {
var width_source = canvas.width;
var height_source = canvas.height;
width = Math.round(width);
height = Math.round(height);
var ratio_w = width_source / width;
var ratio_h = height_source / height;
var ratio_w_half = Math.ceil(ratio_w / 2);
var ratio_h_half = Math.ceil(ratio_h / 2);
var ctx = canvas.getContext("2d");
var img = ctx.getImageData(0, 0, width_source, height_source);
var img2 = ctx.createImageData(width, height);
var data = img.data;
var data2 = img2.data;
for (var j = 0; j < height; j++) {
for (var i = 0; i < width; i++) {
var x2 = (i + j * width) * 4;
var weight = 0;
var weights = 0;
var weights_alpha = 0;
var gx_r = 0;
var gx_g = 0;
var gx_b = 0;
var gx_a = 0;
var center_y = (j + 0.5) * ratio_h;
var yy_start = Math.floor(j * ratio_h);
var yy_stop = Math.ceil((j + 1) * ratio_h);
for (var yy = yy_start; yy < yy_stop; yy++) {
var dy = Math.abs(center_y - (yy + 0.5)) / ratio_h_half;
var center_x = (i + 0.5) * ratio_w;
var w0 = dy * dy; //pre-calc part of w
var xx_start = Math.floor(i * ratio_w);
var xx_stop = Math.ceil((i + 1) * ratio_w);
for (var xx = xx_start; xx < xx_stop; xx++) {
var dx = Math.abs(center_x - (xx + 0.5)) / ratio_w_half;
var w = Math.sqrt(w0 + dx * dx);
if (w >= 1) {
//pixel too far
continue;
}
//hermite filter
weight = 2 * w * w * w - 3 * w * w + 1;
var pos_x = 4 * (xx + yy * width_source);
//alpha
gx_a += weight * data[pos_x + 3];
weights_alpha += weight;
//colors
if (data[pos_x + 3] < 255)
weight = weight * data[pos_x + 3] / 250;
gx_r += weight * data[pos_x];
gx_g += weight * data[pos_x + 1];
gx_b += weight * data[pos_x + 2];
weights += weight;
}
}
data2[x2] = gx_r / weights;
data2[x2 + 1] = gx_g / weights;
data2[x2 + 2] = gx_b / weights;
data2[x2 + 3] = gx_a / weights_alpha;
}
}
//clear and resize canvas
if (resize_canvas === true) {
canvas.width = width;
canvas.height = height;
} else {
ctx.clearRect(0, 0, width_source, height_source);
}
//draw
ctx.putImageData(img2, 0, 0);
}
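A minimal usage sketch, assuming img is an already-loaded Image (that assumption is mine, not part of the answer): the canvas must contain the full-size picture before the call, since the function reads it back with getImageData.
// Usage sketch: draw the source image onto a canvas, then resample in place.
var canvas = document.createElement('canvas');
canvas.width = img.naturalWidth;
canvas.height = img.naturalHeight;
canvas.getContext('2d').drawImage(img, 0, 0);
resample_single(canvas, 300, 200, true); // true: shrink the canvas to 300x200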
Suggestion 1 - extend the process pipe-line
You can use step-down as I describe in the links you refer to, but you appear to be using it in the wrong way.
Step-down is not needed to scale images at ratios above 1:2 (typically, but not limited to that). It is where you need to do a drastic down-scaling that you need to split it up into two (and rarely more) steps, depending on the content of the image (in particular where high frequencies such as thin lines occur).
Every time you down-sample an image you lose details and information. You cannot expect the resulting image to be as clear as the original.
If you then scale down the image in many steps you will lose a lot of information in total, and the result will be poor, as you already noticed.
Try with just one extra step, or at most two.
Convolutions
In the case of Photoshop, notice that it applies a convolution after the image has been re-sampled, such as sharpen. It's not just bi-cubic interpolation that takes place, so in order to fully emulate Photoshop we also need to add the steps Photoshop does (with the default setup).
For this example I will use my original answer that you refer to in your post, but I have added a sharpen convolution to it to improve quality as a post-process (see the demo at the bottom).
Here is the code for adding a sharpen filter (it's based on a generic convolution filter; I put the weight matrix for sharpen inside it, as well as a mix factor to adjust how pronounced the effect is):
Usage:
sharpen(context, width, height, mixFactor);
The mixFactor is a value in [0.0, 1.0] and allows you to downplay the sharpen effect; rule of thumb: the smaller the size, the less of the effect is needed.
Function (based on this snippet):
function sharpen(ctx, w, h, mix) {
var weights = [0, -1, 0, -1, 5, -1, 0, -1, 0],
katet = Math.round(Math.sqrt(weights.length)),
half = (katet * 0.5) |0,
dstData = ctx.createImageData(w, h),
dstBuff = dstData.data,
srcBuff = ctx.getImageData(0, 0, w, h).data,
y = h, x;
while(y--) {
x = w;
while(x--) {
var sy = y,
sx = x,
dstOff = (y * w + x) * 4,
r = 0, g = 0, b = 0, a = 0;
for (var cy = 0; cy < katet; cy++) {
for (var cx = 0; cx < katet; cx++) {
var scy = sy + cy - half;
var scx = sx + cx - half;
if (scy >= 0 && scy < h && scx >= 0 && scx < w) {
var srcOff = (scy * w + scx) * 4;
var wt = weights[cy * katet + cx];
r += srcBuff[srcOff] * wt;
g += srcBuff[srcOff + 1] * wt;
b += srcBuff[srcOff + 2] * wt;
a += srcBuff[srcOff + 3] * wt;
}
}
}
dstBuff[dstOff] = r * mix + srcBuff[dstOff] * (1 - mix);
dstBuff[dstOff + 1] = g * mix + srcBuff[dstOff + 1] * (1 - mix);
dstBuff[dstOff + 2] = b * mix + srcBuff[dstOff + 2] * (1 - mix)
dstBuff[dstOff + 3] = srcBuff[dstOff + 3];
}
}
ctx.putImageData(dstData, 0, 0);
}
The result of using this combination will be:
ONLINE DEMO HERE
Depending on how much of the sharpening you want to add to the blend, you can get results ranging from the default "blurry" to very sharp:
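Putting the two pieces together, here is a hedged sketch of the whole pipeline described above (one intermediate half-size step, the final scale, then the sharpen pass). It is not the author's demo code; the 0.3 mix factor and the single intermediate step are just example choices.
// Sketch: two-step downscale followed by the sharpen() post-process above.
function downScaleAndSharpen(img, targetW, targetH, mix) {
  // intermediate canvas at half the source size
  var tmp = document.createElement('canvas');
  tmp.width = Math.round(img.width / 2);
  tmp.height = Math.round(img.height / 2);
  tmp.getContext('2d').drawImage(img, 0, 0, tmp.width, tmp.height);
  // final canvas at the requested size
  var out = document.createElement('canvas');
  out.width = targetW;
  out.height = targetH;
  var octx = out.getContext('2d');
  octx.drawImage(tmp, 0, 0, tmp.width, tmp.height, 0, 0, targetW, targetH);
  sharpen(octx, targetW, targetH, mix || 0.3); // convolution post-process
  return out;
}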
Suggestion 2 - low level algorithm implementation
If you want to get the best result quality-wise, you'll need to go low-level and consider implementing, for example, this brand new algorithm to do this.
See Interpolation-Dependent Image Downsampling (2011) from IEEE.
Here is a link to the paper in full (PDF).
There are no implementations of this algorithm in JavaScript at this time, AFAIK, so you're in for a handful if you want to throw yourself at this task.
The essence is (excerpts from the paper):
Abstract
An interpolation oriented adaptive down-sampling algorithm is proposed
for low bit-rate image coding in this paper. Given an image, the
proposed algorithm is able to obtain a low resolution image, from
which a high quality image with the same resolution as the input
image can be interpolated. Different from the traditional
down-sampling algorithms, which are independent from the
interpolation process, the proposed down-sampling algorithm hinges the
down-sampling to the interpolation process. Consequently, the
proposed down-sampling algorithm is able to maintain the original
information of the input image to the largest extent. The down-sampled
image is then fed into JPEG. A total variation (TV) based post
processing is then applied to the decompressed low resolution image.
Ultimately, the processed image is interpolated to maintain the
original resolution of the input image. Experimental results verify
that utilizing the downsampled image by the proposed algorithm, an
interpolated image with much higher quality can be achieved. Besides,
the proposed algorithm is able to achieve superior performance than
JPEG for low bit rate image coding.
(see provided link for all details, formulas etc.)
If you wish to use canvas only, the best result will come from multiple down-steps. But that's not good enough yet. For better quality you need a pure JS implementation. We just released pica - a high-speed downscaler with variable quality/speed. In short, it resizes a 1280*1024px image in ~0.1s, and a 5000*3000px image in 1s, with the highest quality (Lanczos filter with 3 lobes). Pica has a demo, where you can play with your images and quality levels, and even try it on mobile devices.
Pica does not have an unsharp mask yet, but that will be added very soon. That's much easier than implementing a high-speed convolution filter for resizing.
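The API has changed between releases, so check the project README, but with a recent pica version a usage sketch looks roughly like this (the source and destination names are placeholders):
// Rough sketch for a recent pica release; verify the exact API in the README.
const p = pica();
p.resize(sourceImageOrCanvas, destinationCanvas)
  .then(function (result) { return p.toBlob(result, 'image/jpeg', 0.9); })
  .then(function (blob) {
    // upload or preview the resized blob here
  });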
Why use the canvas to resize images? Modern browsers all use bicubic interpolation — the same process used by Photoshop (if you're doing it right) — and they do it faster than the canvas process. Just specify the image size you want (use only one dimension, height or width, to resize proportionally).
This is supported by most browsers, including later versions of IE. Earlier versions may require browser-specific CSS.
A simple function (using jQuery) to resize an image would be like this:
function resizeImage(img, percentage) {
var coeff = percentage/100,
width = $(img).width(),
height = $(img).height();
return {"width": width*coeff, "height": height*coeff}
}
Then just use the returned value to resize the image in one or both dimensions.
Obviously there are different refinements you could make, but this gets the job done.
Paste the following code into the console of this page and watch what happens to the gravatars:
function resizeImage(img, percentage) {
var coeff = percentage/100,
width = $(img).width(),
height = $(img).height();
return {"width": width*coeff, "height": height*coeff}
}
$('.user-gravatar32 img').each(function(){
var newDimensions = resizeImage( this, 150);
this.style.width = newDimensions.width + "px";
this.style.height = newDimensions.height + "px";
});
This is not the right answer for people who really need to resize the image itself, but just to shrink the file size.
I had a problem with "directly from the camera" pictures that my customers often uploaded as "uncompressed" JPEGs.
Not so well known is that canvas supports (in most browsers as of 2017) changing the quality of the JPEG:
data = canvas.toDataURL('image/jpeg', .85); // quality in [0..1], default 0.92
With this trick I could reduce 4k x 3k pics of >10Mb to 1 or 2Mb; of course, it depends on your needs.
look here
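A minimal sketch of that flow (the #photo input id is a placeholder): draw the uploaded photo onto a canvas at its natural size and re-export it as a lower-quality JPEG, without changing its dimensions.
// Sketch: recompress an uploaded JPEG without resizing it.
document.querySelector('#photo').addEventListener('change', function (e) {
  var img = new Image();
  img.onload = function () {
    var canvas = document.createElement('canvas');
    canvas.width = img.naturalWidth;
    canvas.height = img.naturalHeight;
    canvas.getContext('2d').drawImage(img, 0, 0);
    var data = canvas.toDataURL('image/jpeg', 0.85); // much smaller file
    URL.revokeObjectURL(img.src);
    // send `data` to the server
  };
  img.src = URL.createObjectURL(e.target.files[0]);
});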
I found a solution that doesn't need to access the pixel data directly and loop through it to perform the downsampling. Depending on the size of the image this can be very resource-intensive, and it is better to use the browser's internal algorithms.
The drawImage() function uses the browser's built-in linear interpolation, which only samples a couple of neighbouring source pixels. That works well when you are not resizing down to more than half the original size.
If you loop so that you only resize by at most one half at a time, the results are quite good, and much faster than accessing the pixel data.
This function downsamples by half at a time until it reaches the desired size:
function resize_image( src, dst, type, quality ) {
var tmp = new Image(),
canvas, context, cW, cH;
type = type || 'image/jpeg';
quality = quality || 0.92;
cW = src.naturalWidth;
cH = src.naturalHeight;
tmp.src = src.src;
tmp.onload = function() {
canvas = document.createElement( 'canvas' );
cW /= 2;
cH /= 2;
if ( cW < src.width ) cW = src.width;
if ( cH < src.height ) cH = src.height;
canvas.width = cW;
canvas.height = cH;
context = canvas.getContext( '2d' );
context.drawImage( tmp, 0, 0, cW, cH );
dst.src = canvas.toDataURL( type, quality );
if ( cW <= src.width || cH <= src.height )
return;
tmp.src = dst.src;
}
}
// The images sent as parameters can be in the DOM or be image objects
resize_image( $( '#original' )[0], $( '#smaller' )[0] );
This is the improved Hermite resize filter that utilises 1 worker so that the window doesn't freeze.
https://github.com/calvintwr/blitz-hermite-resize
const blitz = Blitz.create()
/* Promise */
blitz({
source: DOM Image/DOM Canvas/jQuery/DataURL/File,
width: 400,
height: 600
}).then(output => {
// handle output
}).catch(error => {
// handle error
})
/* Await */
let resized = await blitz({...})
/* Old school callback */
const blitz = Blitz.create('callback')
blitz({...}, function(output) {
// run your callback.
})
Here is a reusable Angular service for high quality image / canvas resizing: https://gist.github.com/fisch0920/37bac5e741eaec60e983
The service supports lanczos convolution and step-wise downscaling. The convolution approach is higher quality at the cost of being slower, whereas the step-wise downscaling approach produces reasonably antialiased results and is significantly faster.
Example usage:
angular.module('demo').controller('ExampleCtrl', function (imageService) {
// EXAMPLE USAGE
// NOTE: it's bad practice to access the DOM inside a controller,
// but this is just to show the example usage.
// resize by lanczos-sinc filter
imageService.resize($('#myimg')[0], 256, 256)
.then(function (resizedImage) {
// do something with resized image
})
// resize by stepping down image size in increments of 2x
imageService.resizeStep($('#myimg')[0], 256, 256)
.then(function (resizedImage) {
// do something with resized image
})
})
You might try this approach, which is what I always use in my projects. This way you not only get a high-quality image, but any other element on your canvas as well.
/*
* @param canvas => canvas object
* @param rate => the pixel quality
*/
function setCanvasSize(canvas, rate) {
const scaleRate = rate;
canvas.width = window.innerWidth * scaleRate;
canvas.height = window.innerHeight * scaleRate;
canvas.style.width = window.innerWidth + 'px';
canvas.style.height = window.innerHeight + 'px';
canvas.getContext('2d').scale(scaleRate, scaleRate);
}
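Typical usage, assuming you pass the device pixel ratio as the rate (the canvas id and the fallback of 2 are placeholders); note that the function sizes the canvas to the whole window.
// Usage sketch: render at the device's pixel density, display at CSS size.
var canvas = document.getElementById('myCanvas'); // placeholder id
setCanvasSize(canvas, window.devicePixelRatio || 2);
// ...then draw as usual; coordinates stay in CSS pixels thanks to ctx.scale()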
If we pass 1.0 instead of .85, you will get the maximum-quality output:
data=canvas.toDataURL('image/jpeg', 1.0);
You get a clearer and brighter image. Please check.
I really try to avoid running through image data, especially on larger images. Thus I came up with a rather simple way to decently reduce image size without any restrictions or limitations using a few extra steps.
This routine halves the image repeatedly until it is within one half-step of the desired target size, then scales it up to twice the target size and halves it once more. Sounds funny at first, but the results are astoundingly good, and it gets there swiftly.
function resizeCanvas(canvas, newWidth, newHeight) {
let ctx = canvas.getContext('2d');
let buffer = document.createElement('canvas');
buffer.width = ctx.canvas.width;
buffer.height = ctx.canvas.height;
let ctxBuf = buffer.getContext('2d');
let scaleX = newWidth / ctx.canvas.width;
let scaleY = newHeight / ctx.canvas.height;
let scaler = Math.min(scaleX, scaleY);
//see if target scale is less than half...
if (scaler < 0.5) {
//while loop in case target scale is less than quarter...
while (scaler < 0.5) {
ctxBuf.canvas.width = ctxBuf.canvas.width * 0.5;
ctxBuf.canvas.height = ctxBuf.canvas.height * 0.5;
ctxBuf.scale(0.5, 0.5);
ctxBuf.drawImage(canvas, 0, 0);
ctxBuf.setTransform(1, 0, 0, 1, 0, 0);
ctx.canvas.width = ctxBuf.canvas.width;
ctx.canvas.height = ctxBuf.canvas.height;
ctx.drawImage(buffer, 0, 0);
scaleX = newWidth / ctxBuf.canvas.width;
scaleY = newHeight / ctxBuf.canvas.height;
scaler = Math.min(scaleX, scaleY);
}
//only if the scaler is now larger than half, double target scale trick...
if (scaler > 0.5) {
scaleX *= 2.0;
scaleY *= 2.0;
ctxBuf.canvas.width = ctxBuf.canvas.width * scaleX;
ctxBuf.canvas.height = ctxBuf.canvas.height * scaleY;
ctxBuf.scale(scaleX, scaleY);
ctxBuf.drawImage(canvas, 0, 0);
ctxBuf.setTransform(1, 0, 0, 1, 0, 0);
scaleX = 0.5;
scaleY = 0.5;
}
} else
ctxBuf.drawImage(canvas, 0, 0);
//wrapping things up...
ctx.canvas.width = newWidth;
ctx.canvas.height = newHeight;
ctx.scale(scaleX, scaleY);
ctx.drawImage(buffer, 0, 0);
ctx.setTransform(1, 0, 0, 1, 0, 0);
}
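A minimal usage sketch, assuming img is an already-loaded Image and 'c' is a placeholder canvas id: the canvas must already hold the full-size picture, since the routine reads from it and shrinks it in place.
// Usage sketch: draw the source image first, then shrink the canvas in place.
var cnv = document.getElementById('c'); // placeholder id
cnv.width = img.naturalWidth;
cnv.height = img.naturalHeight;
cnv.getContext('2d').drawImage(img, 0, 0);
resizeCanvas(cnv, 320, 240);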