I am trying to use FabricJS to render images and do some processing.
I can do this with plain canvas rendering, but I am failing when it comes to FabricJS.
This is the canvas code without FabricJS; it clips the image and positions the clipped part on the canvas:
context.drawImage(img, sx, sy, swidth, sheight, x, y, width, height);
How do I achieve the same using FabricJS?
// draw the processed pixel data into the hidden canvas, then wrap that canvas in a fabric.Image
hiddenContext.putImageData(imageDataArr, 0, 0);
var hc = document.getElementById(hid);

// assigning last working canvas
LWC = CWC;
CWC = fabricFH;
CWC.backgroundColor = "white";

CWC.add(new fabric.Image(hc, {
    alignX: "mid",
    alignY: "mid",
    selectable: false,
    hasBorders: false,
    hasControls: false,
    hasRotatingPoint: false,
    lockUniScaling: true,
    centeredScaling: true,
    // scale the full-size hidden canvas down to fit the visible fabric canvas
    scaleX: $("#" + activeCanvas).find("canvas").get(0).width / hc.width,
    scaleY: $("#" + activeCanvas).find("canvas").get(0).height / hc.height
}));
The code above lets me maintain the aspect ratio and render onto the canvas, but I am losing image quality, because my canvas is 1000x800 and the image is 2130x1800.
Can you help me render the image at the exact quality of the original?
Try this:
var canvas = new fabric.Canvas('canvas');

var img = new Image();
img.onload = function () {
    var imgInstance = new fabric.Image(img, {
        left: 10,
        top: 10,
        angle: 0,
        width: 300,
        height: 240,
        // clipTo coordinates are relative to the object's center, hence the negative values
        clipTo: function (ctx) {
            ctx.rect(-100, -100, 200, 200);
        }
    });
    canvas.add(imgInstance);
    canvas.renderAll();
};
img.src = 'http://i2.ooshirts.com/images/lab_shirts/Cobalt-1-F.jpg';
If we want to map it to:
context.drawImage(img, sx, sy, swidth, sheight, x, y, width, height);
where
img = the image to draw,
sx = optional; the x coordinate where to start clipping,
sy = optional; the y coordinate where to start clipping,
swidth = optional; the width of the clipped region,
sheight = optional; the height of the clipped region,
x = the x coordinate where to place the image on the canvas,
y = the y coordinate where to place the image on the canvas,
width = optional; the width to draw the image at,
height = optional; the height to draw the image at,
then the correspondence is:
fabric img <=> img
clipTo -> ctx.rect <=> sx, sy, swidth, sheight
left <=> x
top <=> y
width <=> width
height <=> height
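To make the mapping concrete, here is a rough sketch with made-up numbers (crop a 200x150 region starting at (50, 40) in the source and place it at (10, 10)); note that clipTo's rect is expressed relative to the object's center rather than in source-image coordinates:
// plain canvas version
context.drawImage(img, 50, 40, 200, 150, 10, 10, 200, 150);

// rough FabricJS counterpart following the mapping above
var fImg = new fabric.Image(img, {
    left: 10,      // <=> x
    top: 10,       // <=> y
    width: 200,    // <=> width
    height: 150,   // <=> height
    clipTo: function (ctx) {
        // <=> sx, sy, swidth, sheight -- but relative to the object's center
        ctx.rect(-100, -75, 200, 150);
    }
});
canvas.add(fImg);
canvas.renderAll();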
Hope this is clear now :)
I'm trying to capture an image in the browser using html2canvas. Capturing an image of the whole browser window works, but I need to specify the x, y start and end coordinates of the region I want to capture. In the docs I saw that html2canvas accepts x, y coordinates:
x: Default: element x-offset. Description: crop canvas x-coordinate
y: Default: element y-offset. Description: crop canvas y-coordinate
Passing my x, y coordinates to those options just captures the whole window.
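For reference, this is roughly the kind of call I mean (a sketch using the x/y/width/height options from the docs; x1, y1, x2, y2 are my selection coordinates):
// sketch: pass the selection rectangle through the documented crop options
html2canvas(document.body, {
    x: Math.min(x1, x2),
    y: Math.min(y1, y2),
    width: Math.abs(x2 - x1),
    height: Math.abs(y2 - y1)
}).then(function (canvas) {
    document.body.appendChild(canvas);
});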
So instead, I tried capturing the whole window and then cropping an area from it using drawImage() (found in another Stack Overflow post, not sure which):
function snapImage(x1, y1, x2, y2, e) {
    html2canvas(document.body).then(function (canvas) {
        // calc the size -- but no larger than the html2canvas size!
        var width = Math.min(canvas.width, Math.abs(x2 - x1));
        var height = Math.min(canvas.height, Math.abs(y2 - y1));
        // create a new avatarCanvas with the specified size
        var avatarCanvas = document.createElement('canvas');
        avatarCanvas.width = width;
        avatarCanvas.height = height;
        avatarCanvas.id = 'avatarCanvas';
        // put avatarCanvas into document body
        document.body.appendChild(avatarCanvas);
        // use the clipping version of drawImage to draw
        // a clipped portion of html2canvas's canvas onto avatarCanvas
        var avatarCtx = avatarCanvas.getContext('2d');
        avatarCtx.drawImage(canvas, x1, y1, width, height, 0, 0, width, height);
    });
}
This draws a shifted image with a wrong offset. For example, given the following website:
(image taken from the example at https://github.com/niklasvh/html2canvas/tree/master/examples)
I mark the "pluot?" area to snap it:
(see the dotted rectangle)
The dotted rectangle is drawn with JS from the mouse coordinates of two events, onmousedown and onmouseup. Because the rectangle is drawn correctly, I assume my coordinates are correct. But when I pass these coordinates to the snapImage() function above, I get the following captured image:
It looks like there's an offset. Maybe the start coordinates drawImage() operates on differ from my canvas start coordinates?
EDIT:
It turns out my code works at 100% zoom, but not when I zoom in or out.
I guess this is because you get x and y from the event with clientX and clientY. Use pageX and pageY instead. Have a look at this jsFiddle:
let startX, startY;

document.getElementsByTagName('body')[0].addEventListener('mousedown', function (event) {
    console.log("ok");
    startX = Math.floor(event.pageX);
    startY = Math.floor(event.pageY);
});

document.getElementsByTagName('body')[0].addEventListener('mouseup', function (event) {
    snapImage(
        Math.min(event.pageX, startX), Math.min(event.pageY, startY),
        Math.max(event.pageX, startX), Math.max(event.pageY, startY)
    );
});

function snapImage(x1, y1, x2, y2, e) {
    console.log(x1, x2, y1, y2);
    html2canvas(document.body).then(function (canvas) {
        // calc the size -- but no larger than the html2canvas size!
        var width = Math.min(canvas.width, Math.abs(x2 - x1));
        var height = Math.min(canvas.height, Math.abs(y2 - y1));
        // create a new avatarCanvas with the specified size
        var avatarCanvas = document.createElement('canvas');
        avatarCanvas.width = width;
        avatarCanvas.height = height;
        avatarCanvas.id = 'avatarCanvas';
        // put avatarCanvas into document body
        document.body.appendChild(avatarCanvas);
        // use the clipping version of drawImage to draw
        // a clipped portion of html2canvas's canvas onto avatarCanvas
        var avatarCtx = avatarCanvas.getContext('2d');
        avatarCtx.drawImage(canvas, x1, y1, width, height, 0, 0, width, height);
    });
}
It turns out there's a built-in Chrome extension API, captureVisibleTab, that captures an image of the active tab, so I ended up using that instead of html2canvas. I got help from the Copyfish Chrome extension; GitHub code here: Copyfish.
Here's my code:
Listener:
// listener in background.js which invokes the screen capture;
// sendResponse is supplied by the chrome.runtime.onMessage listener this code runs inside
chrome.tabs.captureVisibleTab(function (dataURL) {
    sendResponse({
        dataURL: dataURL
    });
});
Receiver:
// receiver in content.js which gets the captured image and crops it accordingly
// (passed as the callback of the message that triggers the capture)
function (response) {
    var img = new Image();
    img.onload = function () {
        // the captured image is in device pixels while the selection is in CSS pixels,
        // so scale the selection coordinates up to the image's resolution
        var dpf = window.innerWidth / img.width;
        var scaleFactor = 1 / dpf,
            sx = Math.min(x1, x2) * scaleFactor,
            sy = Math.min(y1, y2) * scaleFactor,
            width = Math.abs(x2 - x1),
            height = Math.abs(y2 - y1),
            scaledWidth = width * scaleFactor,
            scaledHeight = height * scaleFactor;
        // create a new avatarCanvas with the specified size
        var avatarCanvas = document.createElement('canvas');
        avatarCanvas.width = width;
        avatarCanvas.height = height;
        avatarCanvas.id = 'avatarCanvas';
        // put avatarCanvas into document body
        document.body.appendChild(avatarCanvas);
        // use the clipping version of drawImage to draw the selected region
        var avatarCtx = avatarCanvas.getContext('2d');
        avatarCtx.drawImage(img, sx, sy, scaledWidth, scaledHeight, 0, 0, width, height);
    };
    img.src = response.dataURL;
}
The x, y coordinates are taken from e.clientX and e.clientY, respectively.
This method is zoom- and resolution-proof.
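For completeness, a sketch of how the two snippets might be wired together (the message name, the helper name, and the overall shape are assumptions for illustration, not part of the extension's actual code):
// background.js -- sketch of the message listener that invokes the capture
chrome.runtime.onMessage.addListener(function (request, sender, sendResponse) {
    if (request.action === "capture") {          // "capture" is an assumed action name
        chrome.tabs.captureVisibleTab(function (dataURL) {
            sendResponse({ dataURL: dataURL });
        });
        return true; // keep the message channel open for the async response
    }
});

// content.js -- sketch of triggering the capture and handing the result to the receiver above
chrome.runtime.sendMessage({ action: "capture" }, function (response) {
    // response.dataURL holds the screenshot of the visible tab
    cropCapturedImage(response); // hypothetical name for the receiver function shown above
});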
So I have a function that dynamically produces a cropped section of a world map. The map has various points plotted onto it by longitude and latitude, depending on the data passed into the function from elsewhere in the script. (Don't worry about how these values are calculated; just accept that they are calculated where I have put [number] in my code.) I've worked out how to crop my map dynamically, but I'm noticing a lot of transparent whitespace to the right of the image after the crop, when the image is appended to a div on the page. How do I remove this whitespace?
Please note that each crop will be of a different size. Setting the overflow:hidden property on the containing div and limiting the containing div to a precise pixel width will not achieve what I want to achieve.
Thank you
-- Gaweyne
createZoomedMapImage: function (imageURL) {
    var imageObj = new Image();
    imageObj.onload = _.bind(function () {
        var canvas = document.createElement("canvas"),
            context = canvas.getContext("2d"),
            $canvas = $(canvas),
            w = imageObj.width,
            h = imageObj.height;
        canvas.width = w;
        canvas.height = h;
        var startingX = [number],
            startingY = [number],
            deltaWidth = [number],
            deltaHeight = [number];
        // crop the region from the source image and draw it at twice its size
        context.drawImage(imageObj, startingX, startingY, deltaWidth, deltaHeight, 0, 0, (deltaWidth * 2), (deltaHeight * 2));
        var zoomedImage = canvas.toDataURL('image/png');
    }, this);
    imageObj.src = imageURL;
}
jsfiddle.net/Gaweyne/r0t3hoo6
The image tag looks like what is displayed in the result: I have an image of, say, 300 x 600 px, but the actual graphic only takes up 300 x 300 pixels. I don't want the graphic to take up the full width of the image; I want the image to be 300 x 300 pixels. I don't want to set this explicitly with CSS, because the cropped maps will differ in size depending on the data.
Try sizing the canvas to match the drawImage destination instead of the full source image, so the exported PNG contains only the cropped region:
// size the backing canvas to the destination rectangle,
// so there is no transparent whitespace around the crop
canvas.width = deltaWidth * 2;
canvas.height = deltaHeight * 2;
context.drawImage(imageObj, startingX, startingY, deltaWidth, deltaHeight, 0, 0, (deltaWidth * 2), (deltaHeight * 2));
var zoomedImage = canvas.toDataURL('image/png');
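To display the result at its natural size without any CSS, the data URL can then be dropped into an img element, for example (a sketch; 'mapContainer' is a hypothetical target element):
// sketch: an <img> created from the data URL renders at the canvas's own
// pixel size, so no explicit CSS sizing is needed
var zoomedImg = document.createElement('img');
zoomedImg.src = zoomedImage; // the data URL from canvas.toDataURL above
document.getElementById('mapContainer').appendChild(zoomedImg); // hypothetical container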
I don't understand how Swiffy successfully anti-aliases clipping masks when it exports a Flash animation that contains a mask.
Here is my example:
Full canvas, with mask. Super dirty in Chrome: http://goo.gl/n8yB5h
And a Swiffy export where a picture moves inside a mask. Super clean in Chrome:
http://www.creaktif.com/lab/test4.html
I've tried a lot of things, including drawing a 200% canvas and then scaling it down, and adding more points when I draw my mask, but there is no way to get a clean mask in my canvas.
How does Swiffy do it?
It's driving me crazy.
Thanks. :)
Instead of using clip(), you can draw the clipping path into an off-screen canvas and use that as a "matte", or rather, an alpha mask.
You can then use the mask with various composite modes to get anti-aliased edges: draw the background you want to clip, set a composite mode (operator), and draw in the mask. Depending on the operator, you can cut out the center or the surroundings (destination-in is the typical equivalent of using clip()).
Note: the demo below is only useful in Chrome/Opera, as Firefox/IE already apply anti-aliasing to the clip mask. Here's a snapshot showing the difference:
var ctxC = document.getElementById("clip").getContext("2d");
var ctxM = document.getElementById("mask").getContext("2d");
var w = ctxC.canvas.width, h = ctxC.canvas.height;
var img = new Image();
img.onload = demo;
img.src = "http://i.imgur.com/s9ksOb1.jpg";

function demo() {
    // define a clip path
    ctxC.save();
    createPath(ctxC);
    ctxC.clip();
    ctxC.drawImage(this, 0, 0, w, h);
    ctxC.restore();

    // create a "matte" / alpha mask
    var matte = document.createElement("canvas"),
        ctx = matte.getContext("2d");
    matte.width = w;
    matte.height = h;

    // fill the path instead:
    createPath(ctx);
    ctx.fill(); // color doesn't matter, alpha does

    // now use composition to "clip"
    ctxM.drawImage(this, 0, 0, w, h);                 // draw image
    ctxM.globalCompositeOperation = "destination-in"; // will keep bg where mask covers
    ctxM.drawImage(matte, 0, 0);
    ctxM.globalCompositeOperation = "source-over";    // default mode

    // zoom some details:
    zoom(ctxC);
    zoom(ctxM);
}

function createPath(ctx) {
    var r = 88;
    ctx.beginPath();
    ctx.moveTo(100 + r, 90);
    ctx.arc(100, 90, r, 0, 6.28);
}

function zoom(ctx) {
    ctx.imageSmoothingEnabled =
    ctx.webkitImageSmoothingEnabled =
    ctx.mozImageSmoothingEnabled = false;
    ctx.drawImage(ctx.canvas, 10, 10, 100, 100, 70, 0, 400, 400);
}
html, body {margin:4px 0 0 4px;overflow:hidden}
canvas{background:#000;border:1px solid #000;margin:0 1px 0 0}
<canvas id="clip" height=180></canvas>
<canvas id="mask" height=180></canvas>
In my application the user loads a photo from the library, the app displays it, and on a tap it gets the tapped pixel's color.
I am using a variation of the snippet from the first answer to "Get pixel color from canvas, on mouseover" to get the pixel's color.
This does work if I use non-retina sizes:
var canvas = document.getElementById("myCanvas");
var ctx = canvas.getContext("2d");
canvas.width = 300;
canvas.height = 480;
canvas.style.width = "300px";
canvas.style.height = "480px";
ctx.drawImage(img, 0, 0);
camera API:
navigator.camera.getPicture(funcSuccess, funcError, { quality: 100, targetWidth: 300,
targetHeight: 480, destinationType: destinationType.FILE_URI, sourceType: source });
but as soon as I double the size for retina it always returns a wrong color (as if it expects the image to be 600x960 instead of 300x480 displayed in a 600x960 canvas... I don't know):
var canvas = document.getElementById("myCanvas");
var ctx = canvas.getContext("2d");
canvas.width = 600;
canvas.height = 960;
canvas.style.width = "300px";
canvas.style.height = "480px";
ctx.drawImage(img, 0, 0);
camera API:
navigator.camera.getPicture(funcSuccess, funcError, { quality: 100, targetWidth: 600,
targetHeight: 960, destinationType: destinationType.FILE_URI, sourceType: source });
I appreciate every response. Thank you.
These:
canvas.width = 300;
canvas.height = 480;
set the actual size of the bitmap the canvas uses. Everything you do to the canvas is relative to these.
And these:
canvas.style.width = "300px";
canvas.style.height = "480px";
set the display size of the canvas element in the browser (not the bitmap). If this size differs from the bitmap size, the bitmap is scaled for display purposes only; internally it stays the same size. Think of the canvas as equal to an image, the difference being that a canvas is editable and an image is not.
This means every operation you do on the canvas element has to be scaled to fit the bitmap size when the two are not the same, including mouse/touch positions, because the x and y positions you get from a mouse event are relative to the element.
I.e. clicking at point [600, 600] on the element would also be [600, 600] on the bitmap, but since the bitmap is only half the size, that point falls outside the bitmap.
Likewise, clicking at point [300, 300] on the canvas element would also be [300, 300] on the bitmap, but on the bitmap that is at the right edge, so when the pixel there is changed, it is scaled back to display size and ends up at the edge too, not where the point was clicked initially.
Try scaling your click position to get the correct pixel, like this:
Put these in global scope (and update them on canvas resize if needed):
var xScale = canvas.width / canvas.offsetWidth;
var yScale = canvas.height / canvas.offsetHeight;
And in your mouse event handler:
function handleMouse(e) {
    var r = canvas.getBoundingClientRect(),
        x = (e.clientX - r.left) * xScale,
        y = (e.clientY - r.top) * yScale;
    // ... rest of code goes here ...
}
(it's about the same code for touch, just use the touch point collection instead).
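For example, a minimal sketch of the touch version, assuming the same xScale/yScale globals:
function handleTouch(e) {
    var r = canvas.getBoundingClientRect(),
        t = e.touches[0], // first entry in the touch point collection (use e.changedTouches on touchend)
        x = (t.clientX - r.left) * xScale,
        y = (t.clientY - r.top) * yScale;
    // ... rest of code goes here ...
}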
I am creating an image slider with HTML5 and jQuery. What I want to do is add 3 images on top of each other in one canvas, then get the pixel data of the first image and remove some of its pixels to show the second image through the first. I'm using the jCanvas plug-in to do this in jQuery. What I've got so far is:
$(document).ready(function () {
    function invert() {
        $("canvas").setPixels({
            x: 150, y: 150,
            width: 100, height: 75,
            // loop through each pixel
            each: function (px) {
                px.r = 255 - px.r;
                px.g = 255 - px.g;
                px.b = 255 - px.b;
                px.a = 255 - px.a;
            }
        });
    }

    $("canvas")
        .addLayer({
            method: "drawImage",
            source: "images/01.jpg",
            x: 100, y: 100,
            width: 480, height: 440
        }).addLayer({
            method: "drawImage",
            source: "images/02.jpg",
            x: 100, y: 100,
            width: 380, height: 340
        }).addLayer({
            method: "drawImage",
            source: "images/01.jpg",
            x: 100, y: 100,
            width: 280, height: 240,
            load: invert
        })
        // Draw each layer on the canvas
        .drawLayers();
});
What it does now is make a hole in all the images, meaning it erases all the pixels of that portion of every image and shows the canvas background. Is it possible to get only the pixels of a particular image or layer and invert them? Is there any jQuery plug-in available, or another way to do this? Any help on this will be very useful to me. Thanks in advance.
Keep in mind that drawing on a canvas is like painting on paper: it doesn't remember what you drew before, only what is on the canvas right now. So if you draw one image and then draw over it with another, the old picture is lost forever.
What you should do is keep all three images in three different buffers (simply load the three different images into three different image objects).
Then draw the topmost image into the context.
When you wish to dissolve the first image into the second, instead of deleting pixels from the top image (which would only show the background), use the same coordinates you would use to remove pixels from the first image to read the pixel data from the second image, and copy those values to the canvas at the same coordinates. For example:
If your algorithm leads you to first remove the pixel at x = 100, y = 175, use those coordinates to get the data from the second image's buffer and copy it to the same coordinates in the canvas's image data.
Here's some code:
var width = 300;
var height = 300;

var img1 = new Image();
img1.src = "image1.png";
var img2 = new Image();
img2.src = "image2.png";

function go()
{
    // Wait for the images to load
    if ( !img1.complete || !img2.complete )
    {
        setTimeout( go, 100 );
        return;
    }

    // Create a temporary canvas to draw the other images in the background
    var tc = document.createElement( "canvas" );
    tc.width = width;
    tc.height = height;
    var c2 = tc.getContext( "2d" );

    // Draw the first image in the real canvas (change the ID to your canvas ID)
    var c = document.getElementById( "myCanvas" ).getContext( "2d" );
    c.drawImage( img1, 0, 0 );
    var data1 = c.getImageData( 0, 0, width, height ); // Get the data for the first image

    // Draw the second image in the temporary canvas (which is hidden) and get its data
    c2.drawImage( img2, 0, 0 );
    var data2 = c2.getImageData( 0, 0, width, height );

    // Copy the data from the hidden image to the visible one
    // This is where your magic comes into play, the following
    // is just a very very simple example
    var pix1 = data1.data;
    var pix2 = data2.data;
    for ( var x = 0; x < width; x++ )
    {
        for ( var y = 0; y < height; y++ )
        {
            var pos = ( ( y * width ) + x ) * 4;
            pix1[ pos ] = pix2[ pos++ ];
            pix1[ pos ] = pix2[ pos++ ];
            pix1[ pos ] = pix2[ pos++ ];
            pix1[ pos ] = pix2[ pos ];
        }
    }

    // Redraw the visible canvas with the new data
    c.putImageData( data1, 0, 0 );
}

window.onload = go;
The canvas element does not provide the ability to use layers like that. You may need to check out add-ons like canvas collage or CanvasStack.
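For reference, a rough sketch of emulating layers by hand, not tied to any particular plug-in (each "layer" is just an offscreen canvas that is composited on every redraw; 'myCanvas' is a hypothetical ID):
// minimal hand-rolled "layers": each layer is its own offscreen canvas,
// and the visible canvas is just the composite of all of them
var view = document.getElementById('myCanvas'); // hypothetical visible canvas
var viewCtx = view.getContext('2d');
var layers = [];

function addLayer() {
    var layer = document.createElement('canvas');
    layer.width = view.width;
    layer.height = view.height;
    layers.push(layer);
    return layer.getContext('2d'); // draw each image into its own layer context
}

function redraw() {
    viewCtx.clearRect(0, 0, view.width, view.height);
    for (var i = 0; i < layers.length; i++) {
        viewCtx.drawImage(layers[i], 0, 0); // composite bottom-to-top
    }
}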