Help! I am working on an application which downloads a report from an API which gives me HTML as part of the JSON return. Because it returns JSON I cannot simply open this in a new window.
In this script I am injecting this code:
// Inject a screen-capture widget just before </body> of the HTML report that
// the API returns inside its JSON payload, then write out the combined page.
let tmp = result.data;
let spot = tmp.indexOf('</body>');
let insert = `<div style="position:fixed; top:200px;left:20px; width:200px;font-size:15pt">
<div class="camera">
<!-- FIX: 'display:hidden' is not valid CSS (the video stayed visible); 'display:none' hides it -->
<video id="video" style='display:none'>Video stream not available.</video>
<button onClick='takepicture()'>Take photo</button>
</div>
<canvas id="canvas" style='display:none;'></canvas>
<!--<button onClick='genPDF()'>PDF</button>-->
Press CTRL+S to save page as HTML
<img id="testDownloadImage" src='' width=600 height=400 />
</div>
<script type='text/javascript'>
// FIX: the property is innerHTML (capital HTML); 'innerHtml' is always undefined.
alert(document.body.innerHTML)
// Capture a photo by fetching the current contents of the video
// and drawing it into a canvas, then converting that to an image
// data URL that is shown in the #testDownloadImage element.
async function takepicture() {
let video = document.getElementById('video');
let canvas = document.getElementById('canvas');
let displayMediaOptions={video: true,displaySurface:'browser', logicalSurface:true, cursor:'never', audio: false};
try{
video.srcObject = await navigator.mediaDevices.getDisplayMedia(displayMediaOptions);
// FIX: the video must actually be playing before drawImage() can read a frame.
await video.play();
var context = canvas.getContext('2d');
// FIX: declare locals ('width=' / 'height=' created implicit globals), and
// window.scrollHeight does not exist -- use the document's scroll height.
let width = window.innerWidth;
let height = document.documentElement.scrollHeight;
if (width && height) {
canvas.width = width;
canvas.height = height;
context.drawImage(video, 0, 0, width, height);
// FIX: the MIME type is 'image/jpeg'; 'image/jpg' is unrecognized and
// silently falls back to PNG.
var data = canvas.toDataURL('image/jpeg');
document.getElementById('testDownloadImage').src=data;
}
}
catch(err) {
console.log("An error occurred: " + err);
}
finally{
// FIX: getDisplayMedia() may have thrown (e.g. user denied the prompt)
// before srcObject was assigned -- guard before stopping tracks.
if (video.srcObject) {
let tracks = video.srcObject.getTracks();
tracks.forEach(track => track.stop());
video.srcObject = null;
}
}
}
<\/script>
`
// Splice the widget in front of the closing </body> tag and emit the page.
// (The closing script tag above is written '<\\/script>' so this snippet stays
// safe even if it is itself embedded in an inline <script> block.)
let newStr = [tmp.slice(0, spot), insert, tmp.slice(spot)].join('');
document.write(newStr);
to allow me to add a print screen option so that the report can be imaged. I have tried other utilities to save the HTML as a PDF or even saving the page but because the report uses a lot of XSLT and scripting to display the results these methods fail, leaving me with the print screen as the only option.
WebRTC handles this aptly, with one minor difficulty: because I am using document.write I am unable to find the document.body, even though the code I am passed is a properly formed page (with html, head, body tags, etc.).
How can I use the inserted script to display the page and capture it as an image?
Thanks
This is not a genuine answer sadly but I resolved the problem by moving the entire write to an iFrame with the script on the outside. I then got the scrollHeight of the iFrame document and resized the frame to match that, effectively making it appear as if the document occupied the entire page. This way I can place my script outside the iFrame and still use it to capture the entire page, which is what I was shooting for.
Thank you for any comments. As of now this is no longer needed.
Related
I have a working Flask/Python app that uses a Mask RCNN and some OpenCV python functions. I got this working on localhost fine but learnt that to use a user's webcam I need to use Javascript. I found some working .js code that retrieves the webcam and has a button to take a screen shot.
What is the easiest way to pass this image to Python for processing. I know tensorflow.js and opencv.js exist but as I don't know js I assume it'll be much easier to just pass it back to Python than learning another language to the same level as my Python knowledge.
I have read that saving to the web server with .js isn't possible without another intermediary language. I could separate the steps and use the .js to save the file to the user's computer and then ask them to upload it into the Python script but this adds another step for the user I would like to avoid.
JS code:
// IIFE: open the user's webcam with getUserMedia, preview it in <video id="video">,
// and copy a frame into <canvas id="canvas"> / <img id="photo"> when the
// "Take photo" button is pressed. All state lives in this closure.
(function() {
// For javascript we must define if it is a constant or variable before giving it a name - akin to in SWIFT.
// The width and height of the captured photo. We will set the
// width to the value defined here, but the height will be
// calculated based on the aspect ratio of the input stream.
var width = 320; // We will scale the photo width to this
var height = 0; // This will be computed based on the input stream
// |streaming| indicates whether or not we're currently streaming
// video from the camera. Obviously, we start at false.
var streaming = false;
// The various HTML elements we need to configure or control. These
// will be set by the startup() function.
var video = null;
var canvas = null;
var photo = null;
var startbutton = null;
// Wire up the DOM elements, request the camera, and install event handlers.
// Runs once on window 'load' (registered at the bottom of this IIFE).
function startup() {
// These elementId's are passed to the javascript file from the html file. Therefore, we must ensure they match.
video = document.getElementById('video');
canvas = document.getElementById('canvas');
photo = document.getElementById('photo');
startbutton = document.getElementById('startbutton');
navigator.mediaDevices.getUserMedia({video: true, audio: false})
.then(function(stream) {
video.srcObject = stream;
video.play();
})
.catch(function(err) {
console.log("An error occurred: " + err);
});
// 'canplay' fires once enough data has arrived to know the stream's real
// dimensions; only then can height be derived from the aspect ratio.
video.addEventListener('canplay', function(ev){
if (!streaming) {
height = video.videoHeight / (video.videoWidth/width);
// Firefox currently has a bug where the height can't be read from
// the video, so we will make assumptions if this happens.
if (isNaN(height)) {
height = width / (4/3);
}
video.setAttribute('width', width);
video.setAttribute('height', height);
canvas.setAttribute('width', width);
canvas.setAttribute('height', height);
streaming = true;
}
}, false);
// When we click the button, we run the takepicture() function
startbutton.addEventListener('click', function(ev){
takepicture();
ev.preventDefault();
}, false);
// clearphoto() is a function as defined below that makes the photo grey until we populate it with a screenshot.
clearphoto();
}
// Fill the photo with an indication that none has been
// captured.
function clearphoto() {
var context = canvas.getContext('2d');
context.fillStyle = "#AAA";
context.fillRect(0, 0, canvas.width, canvas.height);
var data = canvas.toDataURL('image/png');
photo.setAttribute('src', data);
}
// Capture a photo by fetching the current contents of the video
// and drawing it into a canvas, then converting that to a PNG
// format data URL. By drawing it on an offscreen canvas and then
// drawing that to the screen, we can change its size and/or apply
// other changes before drawing it.
function takepicture() {
var context = canvas.getContext('2d');
// NOTE(review): 'download' is looked up here but never used in this function.
const download = document.getElementById('download');
if (width && height) {
canvas.width = width;
canvas.height = height;
context.drawImage(video, 0, 0, width, height); // Draw the video onto the canvas.
var data = canvas.toDataURL('image/png');
photo.setAttribute('src', data); // elements - in this case photo - have attributes, an attribute of this photos element is the src.
// Here we are changing the source to be data - which encapsulates the canvas we have just drawn and edited.
} else {
// No valid dimensions yet (stream not ready) -- show the grey placeholder.
clearphoto();
}
}
// Set up our event listener to run the startup process
// once loading is complete.
window.addEventListener('load', startup, false);
})();
HTML code:
<!doctype html>
<html>
<head>
<title>WebRTC: Still photo capture demo</title>
<meta charset='utf-8'>
<!-- Webcam.js expects exactly these element ids: video, canvas, photo, startbutton. -->
<script src="Webcam.js">
</script>
</head>
<body>
<div class="contentarea">
<div class="camera">
<!-- live webcam preview; fallback text shows if the stream cannot start -->
<video id="video">Video stream not available.</video>
<button id="startbutton">Take photo</button>
</div>
<!-- drawing surface Webcam.js copies the captured frame into -->
<canvas id="canvas">
</canvas>
<div class="output">
<!-- the captured frame is assigned here as a PNG data URL -->
<img id="photo" alt="The screen capture will appear in this box.">
</div>
</div>
</body>
</html>
Edit:
After furas' comments I tried to implement the scripts, but the effects to my screenshot are being displayed in the server panel and not on top of the screenshot itself. When I include my RCNN processing (commented out) it performs it on the next incoming frame of the webcam, and not just once on the screenshot as I'd like.
Flask:
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
warnings.filterwarnings("ignore", category=UserWarning)
from flask import Flask, render_template, request, make_response
import cv2 as cv
import numpy as np
from test_routes import create_model
from mrcnn import visualize
import time

app = Flask(__name__)


# FIX: route decorators must start with '@' -- '#app.route(...)' is a comment,
# so Flask never registered these routes and every request would 404.
@app.route('/')
def index():
    """Serve the webcam page, telling the template what size to render at."""
    return render_template('furas.html', width=320, height=240)


def send_file_data(data, mimetype='image/jpeg', filename='output.jpg'):
    """Wrap raw image bytes in a Flask response with download headers.

    data: bytes of an already-encoded image; mimetype/filename populate the
    Content-Type and Content-Disposition headers respectively.
    """
    # https://stackoverflow.com/questions/11017466/flask-to-return-image-stored-in-database/11017839
    response = make_response(data)
    response.headers.set('Content-Type', mimetype)
    response.headers.set('Content-Disposition', 'attachment', filename=filename)
    return response


@app.route('/upload', methods=['GET', 'POST'])
def upload():
    """Receive one JPEG frame (form field 'snap'), process it, send it back.

    The Mask R-CNN steps are commented out; currently the frame is only
    converted to grayscale before being returned.
    """
    if request.method == 'POST':
        fs = request.files.get('snap')
        if fs:
            # model = create_model()
            # Decode the uploaded JPEG bytes into an OpenCV image array.
            img = cv.imdecode(np.frombuffer(fs.read(), np.uint8), cv.IMREAD_UNCHANGED)
            # Detect objects
            # r = model.detect([img], verbose=0)[0]
            # masked_image = visualize.display_instances(img, r['rois'], r['masks'], r['class_ids'], ["BG", "culture"], r['scores'], show_bbox=True)
            # masked_image = cv.putText(img=np.float32(masked_image), text=f'{r["masks"].shape[2]} colonies', org=(50, 50),
            #                           fontFace=cv.FONT_HERSHEY_DUPLEX, fontScale=2, lineType=cv.LINE_AA, color=(256, 0, 0), thickness=2)
            img = cv.cvtColor(img, cv.COLOR_RGB2GRAY)
            ret, buf = cv.imencode('.jpg', img)
            return send_file_data(buf.tobytes())
    return 'Hello World!'


if __name__ == '__main__':
    app.run(host='0.0.0.0', port=5000, debug=False, ssl_context='adhoc')
New HTML Code:
<!DOCTYPE html>
<!-- Jinja2 template: {{ width }}/{{ height }} are filled in by Flask's
     render_template('furas.html', width=..., height=...). -->
<html lang="en">
<table>
<tr>
<td>VIDEO</td>
<td>CANVAS</td>
<td>SERVER</td>
<td>SCREENSHOT</td>
</tr>
<tr>
<td>- assign camera (media device)<br>- play stream</td>
<td>- draw video on context 2d,<br>- convert to jpg file (Blob)<br>- upload to server as POST</td>
<td>- get jpg file (Blob) from server<br>- convert to BASE64 url<br>- assign to img</td>
<td>- get canvas as BASE64 url<br>- assign to img</td>
</tr>
<tr>
<td><video id="video" width="{{ width }}" height="{{ height }}" autoplay style="background-color: grey"></video></td>
<td><canvas id="canvas" width="{{ width }}" height="{{ height }}" style="background-color: grey"></canvas></td>
<td><img id="image" src="" width="{{ width }}" height="{{ height }}" style="background-color: grey" /></td>
<td><img id="image64" src="" width="{{ width }}" height="{{ height }}" style="background-color: grey" /></td>
</tr>
<tr>
<td></td>
<td></td>
<td></td>
<td><button id="send">Take Photo</button></td>
</tr>
</table>
<script>
// Elements for taking the snapshot
var video = document.getElementById('video');
// Element to display snapshot
// you need canvas to get image - canvas can be hidden using `createElement("canvas")`
// var canvas = document.createElement("canvas");
var canvas = document.getElementById('canvas');
var context = canvas.getContext('2d');
var image = document.getElementById('image');
var image64 = document.getElementById('image64');
console.log("before get camera");
console.log(navigator.mediaDevices && navigator.mediaDevices.getUserMedia);
// Get access to the camera!
if(navigator.mediaDevices && navigator.mediaDevices.getUserMedia) {
// Not adding `{ audio: true }` since we only want video now
navigator.mediaDevices.getUserMedia({ video: true }).then(function(stream) {
//video.src = window.URL.createObjectURL(stream);
video.srcObject = stream;
video.play();
//console.log('setInterval')
// NOTE(review): this uploads a frame every 100 ms, so the server processes
// the live stream continuously -- NOT only when "Take Photo" is clicked.
// That is why server-side effects appear on subsequent frames.
window.setInterval(function() {
context.drawImage(video, 0, 0, {{ width }}, {{ height }}); // better use size because camera may gives data in different size then <video> is displaying
canvas.toBlob(upload, "image/jpeg");
}, 100);
});
}
// Trigger photo take on button click. We know which button because inside the HTML there is a button called 'send' also.
document.getElementById("send").addEventListener("click", function() {
// copy frame from video to canvas as context 2d
context.drawImage(video, 0, 0, {{ width }}, {{ height }}); // better use size because camera may gives data in different size then <video> is displaying
// convert to BASE64 url and assign to <img> to display it
image64.src = canvas.toDataURL();
});
// POST one JPEG Blob to the Flask /upload route and show the processed
// image the server sends back in <img id="image">.
function upload(file) {
// Create a form using FormData. This holds key value pairs.
var formdata = new FormData();
// Add a key value pair ("snap", file) to the FormData object. file is the original file passed sent to the upload function.
formdata.append("snap", file);
// create AJAX connection
// The fetch() method is used to request from the server and load the information in the webpages.
// The request can be of any APIs that returns the data of the format JSON or XML. This method returns a promise.
// The fetch() method returns a Promise and has the syntax of if true, do these sequential functions, else in the case of an error do Y.
fetch("{{ url_for('upload') }}", {
method: 'POST',
body: formdata,
}).then(function(response) {
return response.blob();
}).then(function(blob) {
image.src = URL.createObjectURL(blob);
}).catch(function(err) {
console.log('Fetch problem: ' + err.message);
});
}
</script>
</html>
Thanks.
<head>
<!-- FIX: jsDelivr npm packages are scoped with '@', not '#'. With '#' these
     URLs 404, so tf/facemesh were never defined and nothing ran. -->
<script src="https://cdn.jsdelivr.net/npm/@tensorflow/tfjs-core"></script>
<script src="https://cdn.jsdelivr.net/npm/@tensorflow/tfjs-converter"></script>
<script src="https://cdn.jsdelivr.net/npm/@tensorflow-models/facemesh"></script>
<script src="https://cdn.jsdelivr.net/npm/@tensorflow/tfjs-backend-webgl"></script>
<script>
/* running facemesh code: detect landmarks in the video and plot them */
async function get_facemesh()
{
// load HTML canvas
var canvas = document.getElementById("facemesh");
var draw = canvas.getContext("2d");
// get video stream
const stream = document.getElementById("movie");
// load facemesh model
// FIX: load() takes a config object; 'maxFaces=1' assigned an implicit
// global and passed the bare number 1 instead.
const model = await facemesh.load({ maxFaces: 1 });
// process input stream frame by frame
while(1)
{
// fill canvas with black background
draw.fillStyle = "black";
draw.fillRect(0, 0, canvas.width, canvas.height);
// detect faces
const faces = await model.estimateFaces(stream);
if(faces.length != 0)
{
// loop through faces array to capture multiple faces
var mesh = faces[0].scaledMesh;
console.log(mesh);
/* highlight facial landmark points on canvas board */
draw.fillStyle = "#00FF00";
// FIX: declare the loop index; bare 'i=0' created an implicit global.
for(let i=0; i< mesh.length; i++)
{
var [x, y, z] = mesh[i];
draw.fillRect(Math.round(x), Math.round(y), 2, 2);
}
}
else
{
console.log(`no faces detected..`);
}
// loop to process the next frame (yields to the browser between frames)
await tf.nextFrame();
}
}
</script>
</head>
<body>
<video width=640 height=480 autoplay id="movie"> </video>
<canvas width=640 height=480 id="facemesh"> </canvas>
<br>
<script>
// capture live video stream from web camera
video = document.getElementById("movie");
if(navigator.mediaDevices.getUserMedia)
{
navigator.mediaDevices.getUserMedia({video: true})
.then(function (stream) {video.srcObject = stream; });
}
// run face-mesh model once the video is ready for processing
// (safe to call before the declaration below: function declarations are hoisted)
main();
function main()
{
// check if the video is loaded and ready for processing
if(video.readyState == 4)
{
console.log("video is ready for processing..");
get_facemesh();
}
else
{
// not ready yet -- poll again at roughly 30 fps
console.log("nope, not loaded yet..");
setTimeout(main, 1000/30);
}
}
</script>
</body>
</html>
Here is a sample code. This code logs to the console all the facial landmarks, but what I am trying to do is get it to log the landmark only on the nose. How do I do this?
Here is the link to its github: https://github.com/tensorflow/tfjs-models/tree/master/facemesh. And here is the link to the facial landmarks: https://raw.githubusercontent.com/tensorflow/tfjs-models/master/facemesh/mesh_map.jpg. Here is what I tried: I tried setting a variable called nose equal to getUVCoords(19).
var nose = getUVCoords(19) Then I replaced console.log(mesh) with console.log(nose).
It did not work. It prevented the whole thing from even running tensorflow. I would appreciate any help I get on this.
So it seems you were overwriting your canvas after you drew the points. Here is code that works for me, overwriting your get_facemesh function:
// Run the facemesh model against the <video id="movie"> stream forever,
// painting each detected landmark as a 2x2 green dot on <canvas id="facemesh">.
// The canvas is cleared only when a face is found, so the last drawing
// persists while no face is visible.
async function get_facemesh()
{
    const board = document.getElementById("facemesh");
    const pen = board.getContext("2d");
    const source = document.getElementById("movie");
    // load the facemesh model once, before entering the frame loop
    const model = await facemesh.load(maxFaces=1);
    for (;;)
    {
        const faces = await model.estimateFaces(source);
        if (faces.length > 0)
        {
            // Reset frame: wipe to black before plotting the new landmarks.
            pen.fillStyle = "black";
            pen.fillRect(0, 0, board.width, board.height);
            // only the first detected face is drawn
            const mesh = faces[0].scaledMesh;
            pen.fillStyle = "#00FF00";
            for (const point of mesh) {
                pen.fillRect(point[0], point[1], 2, 2);
            }
        }
        else
        {
            console.log(`no faces detected..`);
        }
        // yield to the browser, then process the next frame
        await tf.nextFrame();
    }
}
Please note you should almost never use while(1); instead, look into requestAnimationFrame(), which is the proper way to do animation loops. while(1) will almost certainly give you performance issues, as it is an infinite loop that never yields to let the browser do other work — so just don't do that. I will leave it as an exercise for you to clean up your code.
I'm trying to get a base64 version of a canvas in HTML5.
Currently, the base64 image that I get from the canvas is blank.
I have found similar questions for example this one:
HTML Canvas image to Base64 problem
However, the issue that I have is that because I am adding 2 images in the canvas, I cannot use the example provided in those answers as most of them are using single image.
I understand that I have to wait until the canvas image is loaded properly before trying to get the base64 image. But I don't know how in my case.
This is my code:
// NOTE(review): both drawImage calls run inside asynchronous onload callbacks,
// but toDataURL() below runs immediately -- before either image has loaded --
// which is why the exported data URL is a blank 1000x1000 canvas.
var canvas = document.getElementById('myCanvas');
var context = canvas.getContext('2d');
context.canvas.width = 1000;
context.canvas.height = 1000;
var imageObj1 = new Image();
imageObj1.src = "http://www.offthegridnews.com/wp-content/uploads/2017/01/selfie-psuDOTedu.jpg";
imageObj1.onload = function() {
// lower image: drawn only once the file has arrived
context.drawImage(imageObj1, 0, 180, canvas.width, canvas.height);
};
var imageObj2 = new Image();
imageObj2.src = "http://www.publicdomainpictures.net/pictures/150000/velka/banner-header-tapete-145002399028x.jpg"
imageObj2.onload = function() {
// top 180px banner strip
context.drawImage(imageObj2, 0, 0, canvas.width, 180);
};
// get png data url
// NOTE(review): also, these images are cross-origin; once drawn they taint
// the canvas and toDataURL() will throw a SecurityError.
//var pngUrl = canvas.toDataURL();
var pngUrl = canvas.toDataURL('image/png');
// get jpeg data url
var jpegUrl = canvas.toDataURL('image/jpeg');
$('#base').val(pngUrl);
<!-- scrollable wrapper; clicking the canvas calls takephoto() -->
<div class="contents" style="overflow-x:hidden; overflow-y:scroll;">
<div style="width:100%; height:90%;">
<canvas id="myCanvas" class="snap" style="width:100%; height:100%;" onclick="takephoto()"></canvas>
</div>
</div>
<p>
This is the base64 image
</p>
<!-- the PNG data URL produced by the script is written into this textarea -->
<textarea id="base">
</textarea>
and this is a working FIDDLE:
https://jsfiddle.net/3p3e6Ldu/1/
Can someone please advice on this issue?
Thanks in advance.
EDIT:
As suggested in the comments below, I tried to use a counter, and when the counter reaches a specific number, I convert the canvas to base64.
Like so:https://jsfiddle.net/3p3e6Ldu/4/
In both your examples (the one from the question and the one from the comments), the order of commands does not really respect the async nature of the task at hand.
In your later example, you should put the if( count == 2 ) block inside the onload callbacks to make it work.
However, even then you will run into the next problem: You are loading the images from different domains. You can still draw them (either into the canvas or using an <img> tag), but you are not able to access their contents directly. Not even with the detour of using the <canvas> element.
I changed to code so it would work, if the images are hosted on the same domain. I also used a function to load the image and promises to handle the callbacks. The direct way of using callbacks and a counting variable, seem error-prone to me. If you check out the respective fiddle, you will notice the SecurityError shown. This is the result of the aforementioned problem with the Same-Origin-Policy I mentioned.
A previous question of mine in a similar direction was about how to detect, if I can still read the contents of a <canvas> after adding some images.
const canvas = document.getElementById('myCanvas');
const context = canvas.getContext('2d');
context.canvas.width = 1000;
context.canvas.height = 1000;

// Load one image and resolve with the element once it is ready to draw.
function loadImage(url) {
  return new Promise((fulfill, reject) => {
    let imageObj = new Image();
    imageObj.onload = () => fulfill(imageObj);
    // FIX: without an error handler the promise never settles on a failed
    // load, so the .catch() at the bottom could never report anything.
    imageObj.onerror = () => reject(new Error('could not load ' + url));
    imageObj.src = url;
  });
}

// get images -- Promise.all waits until BOTH are loaded (or one fails)
Promise.all([
  loadImage("http://www.offthegridnews.com/wp-content/uploads/2017/01/selfie-psuDOTedu.jpg"),
  loadImage("http://www.publicdomainpictures.net/pictures/150000/velka/banner-header-tapete-145002399028x.jpg"),
])
  .then((images) => {
    // draw images to canvas -- both are guaranteed loaded at this point
    context.drawImage(images[0], 0, 180, canvas.width, canvas.height);
    context.drawImage(images[1], 0, 0, canvas.width, 180);
    // export to png/jpg (throws SecurityError if the images are cross-origin)
    const pngUrl = canvas.toDataURL('image/png');
    const jpegUrl = canvas.toDataURL('image/jpeg');
    // show in textarea
    $('#base').val(pngUrl);
  })
  .catch((e) => alert(e));
Okay, I know that there are loads of subjects that look identical to this one on SO, but none of them have fixed my issue...
I'm trying to grab an image from a file input and throw it onto a canvas so that I can later turn it into a base-64 image... But I've hit a snag in the process that I was not expecting, in drawing the image to the canvas...
Taking the following HTML:
<input type="file" id="fileIn" onchange="preview()"><br>
<img id="filePreview"><br>
<canvas id="fileCanvas"></canvas>
And the following script:
var dataurl = '';

// Show the chosen file in <img id="filePreview">, then (once it has loaded)
// mirror it onto the canvas and capture it as a PNG data URL in `dataurl`.
function preview(){
document.getElementById('filePreview').src = URL.createObjectURL(document.getElementById('fileIn').files[0]);
document.getElementById('filePreview').onload = showInCanvas;
cnv = document.getElementById('fileCanvas');
ctx = cnv.getContext('2d');
}
function showInCanvas(){
cnv.style.width = document.getElementById('filePreview').naturalWidth + 'px';
cnv.width = document.getElementById('filePreview').naturalWidth;
cnv.style.height = document.getElementById('filePreview').naturalHeight + 'px';
// FIX: canvas.height is numeric -- assigning "400px" fails to parse and the
// bitmap height becomes 0, so the canvas was blank and toDataURL() gave
// "data:,". Only the CSS style above takes a 'px' suffix.
cnv.height = document.getElementById('filePreview').naturalHeight;
ctx.clearRect(0, 0, cnv.width, cnv.height);
ctx.drawImage(document.getElementById('filePreview'), 0, 0);
dataurl = cnv.toDataURL('image/png');
}
The problem I'm having is that the image simply refuses to draw onto the canvas. Even going into the console and drawing the image to the canvas manually, after the script has run, it still refuses to draw. Because of this, the image data simply runs through as data:,
It's an https site, if that affects anything.
For clarification, here's my question:
Why is the canvas refusing to render this image? How can I fix this issue?
If the intent is to convert the image to Data-URI ("base64"), the FileReader can be used - no need for canvas (which can also alter the image/final compression):
// Convert the selected file straight to a Data-URI with FileReader --
// no canvas round-trip (and therefore no recompression) involved.
fileIn.onchange = function(e) {
  var reader = new FileReader();
  // bind so that `this` inside done() is the FileReader instance
  reader.onload = done.bind(reader);
  reader.readAsDataURL(e.target.files[0]);
}

// Runs when reading finishes: preview the Data-URI and show its prefix.
function done() {
  var dataURI = filePreview.src = this.result;
  alert(dataURI.substr(0, 35) + "...")
}
<!-- file picker; the chosen image is previewed in #filePreview.
     The canvas is unused by the FileReader approach above. -->
<input type="file" id="fileIn"><br>
<img id="filePreview"><br>
<canvas id="fileCanvas"></canvas>
It would be incredibly useful to be able to temporarily convert a regular element into a canvas. For example, say I have a styled div that I want to flip. I want to dynamically create a canvas, "render" the HTMLElement into the canvas, hide the original element and animate the canvas.
Can it be done?
There is a library that try to do what you say.
See these examples and get the code:
http://hertzen.com/experiments/jsfeedback/
http://html2canvas.hertzen.com/
It reads the DOM from the HTML and renders it to a canvas; it fails on some pages, but in general it works.
Take a look at this tutorial on MDN: https://developer.mozilla.org/en/HTML/Canvas/Drawing_DOM_objects_into_a_canvas (archived)
Its key trick was:
// Render an HTML fragment into the canvas by wrapping it in an SVG
// <foreignObject>, loading that SVG as an image, and drawing the image.
var canvas = document.getElementById('canvas');
var ctx = canvas.getContext('2d');

// The SVG markup, assembled piecewise (joined with no separator, so the
// resulting string is identical to plain concatenation).
var data = [
  '<svg xmlns="http://www.w3.org/2000/svg" width="200" height="200">',
  '<foreignObject width="100%" height="100%">',
  '<div xmlns="http://www.w3.org/1999/xhtml" style="font-size:40px">',
  '<em>I</em> like ',
  '<span style="color:white; text-shadow:0 0 2px blue;">',
  'cheese</span>',
  '</div>',
  '</foreignObject>',
  '</svg>'
].join('');

var urlAPI = window.URL || window.webkitURL || window;
var picture = new Image();
var svgBlob = new Blob([data], {type: 'image/svg+xml;charset=utf-8'});
var objectURL = urlAPI.createObjectURL(svgBlob);

// Draw once the SVG image has decoded, then release the object URL.
picture.onload = function () {
  ctx.drawImage(picture, 0, 0);
  urlAPI.revokeObjectURL(objectURL);
}
picture.src = objectURL;
That is, it used a temporary SVG image to include the HTML content as a "foreign element", then renders said SVG image into a canvas element. There are significant restrictions on what you can include in an SVG image in this way, however. (See the "Security" section for details — basically it's a lot more limited than an iframe or AJAX due to privacy and cross-domain concerns.)
Sorry, the browser won't render HTML into a canvas.
It would be a potential security risk if you could, as HTML can include content (in particular images and iframes) from third-party sites. If canvas could turn HTML content into an image and then you read the image data, you could potentially extract privileged content from other sites.
To get a canvas from HTML, you'd have to basically write your own HTML renderer from scratch using drawImage and fillText, which is a potentially huge task. There's one such attempt here but it's a bit dodgy and a long way from complete. (It even attempts to parse the HTML/CSS from scratch, which I think is crazy! It'd be easier to start from a real DOM node with styles applied, and read the styling using getComputedStyle and relative positions of parts of it using offsetTop et al.)
You can use dom-to-image library (I'm the maintainer).
Here's how you could approach your problem:
// Rasterize #my-node with dom-to-image, then swap the node for a canvas
// that shows the rendering mirrored horizontally (a "flip").
var container = document.getElementById('my-node-parent');
var target = document.getElementById('my-node');

var canvas = document.createElement('canvas');
canvas.width = target.scrollWidth;
canvas.height = target.scrollHeight;

domtoimage.toPng(target).then(function (pngDataUrl) {
  var picture = new Image();
  picture.onload = function () {
    var ctx = canvas.getContext('2d');
    // Mirror around the vertical axis: shift right one width, then negate x.
    ctx.translate(canvas.width, 0);
    ctx.scale(-1, 1);
    ctx.drawImage(picture, 0, 0);
    // Replace the original element with the flipped rendering.
    container.removeChild(target);
    container.appendChild(canvas);
  };
  picture.src = pngDataUrl;
});
And here is jsfiddle
Building on top of the Mozdev post that natevw references I've started a small project to render HTML to canvas in Firefox, Chrome & Safari. So for example you can simply do:
// Render the given HTML fragment (plus a same-origin image) into `canvas`.
// NOTE(review): class="color: green" looks like it was meant to be
// style="color: green" -- as written the span merely has an odd class name.
rasterizeHTML.drawHTML('<span class="color: green">This is HTML</span>'
+ '<img src="local_img.png"/>', canvas);
Source code and a more extensive example is here.
No such thing, sorry.
Though the spec states:
A future version of the 2D context API may provide a way to render fragments of documents, rendered using CSS, straight to the canvas.
Which may be as close as you'll get.
A lot of people want a ctx.drawArbitraryHTML/Element kind of deal but there's nothing built in like that.
The only exception is Mozilla's exclusive drawWindow, which draws a snapshot of the contents of a DOM window into the canvas. This feature is only available for code running with Chrome ("local only") privileges. It is not allowed in normal HTML pages. So you can use it for writing FireFox extensions like this one does but that's it.
You could spare yourself the transformations, you could use CSS3 Transitions to flip <div>'s and <ol>'s and any HTML tag you want. Here are some demos with source code explain to see and learn: http://www.webdesignerwall.com/trends/47-amazing-css3-animation-demos/
the next code can be used in 2 modes, mode 1 save the html code to a image, mode 2 save the html code to a canvas.
this code work with the library: https://github.com/tsayen/dom-to-image
*the "id_div" is the id of the element html that you want to transform.
**the "canvas_out" is the id of the div that will contain the canvas
so try this code.
:
// Capture the element #id_div with dom-to-image.
// mode 1: download the rendering as "salida.png"
// mode 2: draw the rendering into a canvas appended to #canvas_out
function Guardardiv(id_div){
    var mode = 2; // default 1 (save to image), mode 2 = save to canvas
    console.log("Process start");
    var node = document.getElementById(id_div);
    // get the div that will contain the canvas (mode 2 only)
    var canvas_out = document.getElementById('canvas_out');
    var canvas = document.createElement('canvas');
    canvas.width = node.scrollWidth;
    canvas.height = node.scrollHeight;
    domtoimage.toPng(node).then(function (pngDataUrl) {
        if (mode == 1){ // save to image
            downloadURI(pngDataUrl, "salida.png");
            console.log("Process finish");
        } else if (mode == 2){ // save to canvas
            var img = new Image();
            img.onload = function () {
                var context = canvas.getContext('2d');
                context.drawImage(img, 0, 0);
                // FIX: log completion only after the PNG is actually drawn;
                // the original logged "finish" before the async work ran.
                console.log("Process finish");
            };
            img.src = pngDataUrl;
            // FIX: append the canvas the PNG is drawn into. The original
            // appended the intermediate <img>, so the canvas it created was
            // never attached and "save to canvas" never happened.
            canvas_out.appendChild(canvas);
        }
    });
}
so, if you want to save to image just add this function:
// Trigger a browser download of `uri` saved under the filename `name`
// by synthesizing and clicking a temporary <a download> element.
function downloadURI(uri, name) {
    var anchor = document.createElement("a");
    anchor.href = uri;
    anchor.download = name;
    document.body.appendChild(anchor);
    anchor.click();
}
Example of use:
<html>
<head>
<!-- FIX: was '</script src="/dom-to-image.js"></script>' -- a closing tag
     cannot carry attributes, so the library never loaded. -->
<script src="/dom-to-image.js"></script>
</head>
<body>
<div id="container">
All content that want to transform
</div>
<!-- FIX: the button was opened twice ('<button>...<button>') and never closed -->
<button onclick="Guardardiv('container');">Convert</button>
<!-- if use mode 2 -->
<div id="canvas_out"></div>
</body>
</html>
Comment if that work.
Comenten si les sirvio :)
The easiest solution to animate the DOM elements is using CSS transitions/animations but I think you already know that and you try to use canvas to do stuff CSS doesn't let you to do. What about CSS custom filters? you can transform your elements in any imaginable way if you know how to write shaders. Some other link and don't forget to check the CSS filter lab.
Note: As you can probably imagine browser support is bad.
// Render #divname with html2canvas and open the result as a PNG data URL
// in a new window/tab.
function convert() {
    dom = document.getElementById('divname');
    var script,
        $this = this,
        options = this.options,
        runH2c = function(){
            try {
                var canvas = window.html2canvas([ document.getElementById('divname') ], {
                    onrendered: function( canvas ) {
                        window.open(canvas.toDataURL());
                    }
                });
            } catch( e ) {
                $this.h2cDone = true;
                log("Error in html2canvas: " + e.message);
            }
        };
    if ( window.html2canvas === undefined && script === undefined ) {
        // TODO(review): html2canvas is not loaded here -- this branch was left
        // empty in the original, so nothing happens; load the library and retry.
    } else {
        // html2canvas already loaded, just run it then
        // (FIX: removed the stray '.' after '{' that made this a syntax error)
        runH2c();
    }
}