Canvas shown in IE but not in Chrome and Firefox - JavaScript

I want to use canvas to convert an image to grayscale. There are a number of examples, but my code has a problem in the latest Chrome and Firefox. Surprisingly, it works fine in IE9. Is the problem in my code?
Here is my code:
function draw() {
    canvas = document.getElementById('canvas');
    ctx = canvas.getContext('2d');
    image = new Image();
    image.src = 'ichiro.jpg';
    ctx.drawImage(image, 0, 0);
    imgd = ctx.getImageData(0, 0, 480, 400);
    for (i = 0; i < imgd.data.length; i += 4) {
        grays = imgd.data[i]*.3 + imgd.data[i+1]*.6 + imgd.data[i+2]*.1;
        imgd.data[i]   = grays; // red
        imgd.data[i+1] = grays; // green
        imgd.data[i+2] = grays; // blue
    }
    ctx.putImageData(imgd, 0, 0);
    imggray = new Image();
    imggray.src = canvas.toDataURL();
    imggray.onload = function() {
        ctx.drawImage(imggray, 0, 0);
    }
}
I am new to HTML5 and JavaScript, so any help will be appreciated.

EDIT:
Sorry, I misread your question. It is almost certainly because of a security error: you are not allowed to use getImageData if you have drawn an image onto the canvas that comes from a different "origin" (a different domain, or your local file system). In Chrome you can work around it locally by launching the browser with:
C:\Users\root\AppData\Local\Google\Chrome\Application\chrome.exe --allow-file-access-from-files
There is something called the origin-clean flag, and it is cleared once you drawImage from a different origin. Files loaded from the local file system all count as a different origin, for (good) security reasons.
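If you want to confirm that this is what is happening, a quick illustrative check (not part of the original answer) is to wrap the getImageData call in a try-catch and log the error; Chrome and Firefox throw a SecurityError DOMException once the origin-clean flag is gone:
try {
    imgd = ctx.getImageData(0, 0, 480, 400);
} catch (e) {
    // on a tainted canvas this logs "SecurityError" in Chrome and Firefox
    console.error(e.name, e.message);
}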
Original answer:
You need to wait for the image to load:
working example: http://jsfiddle.net/SYLW2/1107/
...
// this now happens only after the image is loaded:
image.onload = function() {
    ctx.drawImage(image, 0, 0);
    imgd = ctx.getImageData(0, 0, 480, 400);
    for (i = 0; i < imgd.data.length; i += 4) {
        grays = imgd.data[i]*.3 + imgd.data[i+1]*.6 + imgd.data[i+2]*.1;
        imgd.data[i]   = grays; // red
        imgd.data[i+1] = grays; // green
        imgd.data[i+2] = grays; // blue
    }
    ctx.putImageData(imgd, 0, 0);
    imggray = new Image();
    imggray.src = canvas.toDataURL();
    imggray.onload = function() {
        ctx.drawImage(imggray, 0, 0);
    }
}
image.src = 'http://placekitten.com/400/400';

Related

Take full screenshot in Chrome

I developed a little Chrome extension which takes a screenshot of the current page. The problem is that when part of the page is covered by, e.g., the inspector, or is not in view, the screenshot gets cropped.
Is there a way to capture the full page?
Here is my code (stripped down):
function createScreenshot() {
    chrome.tabs.query({active: true, lastFocusedWindow: true}, tabs => {
        chrome.tabs.captureVisibleTab({ format: "png" }, function (screenshotUrl) {
            cropImage(screenshotUrl, "Screenshot");
        });
    });
}

function cropImage(url, fileName) {
    var img = new Image();
    img.onload = function () {
        let canvas = document.createElement("canvas");
        let ctx = canvas.getContext("2d");
        canvas.width = img.width;
        canvas.height = img.height;
        ctx.drawImage(img, 0, 0, canvas.width, canvas.height);
        chrome.storage.sync.get(null, function(data) {
            downloadImage(canvas, fileName + ".png");
        });
    };
    img.src = url;
}
For example, this is the page I want to shoot at 4K resolution:
source
As you can see, it doesn't fit the actual screen resolution, and the result contains only the active area (even smaller):
result
Is there a way to get the "not visible" part?
Regards,
Martin
There is a minor correction needed in your code: you have to pass the canvas width and height to drawImage:
ctx.drawImage(img, 0, 0, canvas.width, canvas.height);
Then you need to get the base64 string:
let image = canvas.toDataURL("image/png");
Finally, you will get the result you want. I hope this solution helps you.
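The original post never shows the downloadImage helper it calls, so here is a minimal hypothetical version of it that follows the advice above (it assumes the extension has the "downloads" permission; the function name and signature simply mirror the call in the question):
function downloadImage(canvas, fileName) {
    // get the base64 string of the cropped screenshot
    let dataUrl = canvas.toDataURL("image/png");
    // hand it to the browser's download manager
    chrome.downloads.download({ url: dataUrl, filename: fileName });
}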

Rendering only partial image using node-webGL, but complete rendering happens in browser

Currently I am using a WebGL-based implementation at the client end, in the browser. It works perfectly. I am also using the same code at the server end; this is not browser based, but pure JavaScript code using the node-webgl library. Slight modifications were made, for example replacing the browser's new Image() with the format node-webgl supports.
I am writing the resulting rendered image to an image file, but the file shows only partial rendering, at the same canvas size. Through discussion with @gman I learned that browser WebGL and node-webgl are not 100% identical implementations, so I am also trying to find alternative functions to compensate for the differences.
Here is the code;
Entry point:
var nodejs=true,
WebGL = require('./node_modules/node-webgl/index'),
document = WebGL.document();
requestAnimationFrame = document.requestAnimationFrame;
alert=console.error;
start1 = new Date().getTime();
//Read and eval library
fs=require('fs');
eval(fs.readFileSync(__dirname+ '/sylvester.js','utf8'));
eval(fs.readFileSync(__dirname+ '/glUtils.js','utf8'));
eval(fs.readFileSync(__dirname+ '/glAux.js','utf8'));
eval(fs.readFileSync(__dirname+ '/volumercserver.js','utf8'));
end1 = new Date().getTime();
console.log(end1-start1);
volumerc_main('/raycast-color.frag','./aorta-high1.jpg','./tfold.png');
Excerpt of volumercserver.js (not the complete code):
var Image = require("node-webgl").Image;
var XMLHttpRequest = require("xmlhttprequest").XMLHttpRequest;
var Canvas = require('canvas')
, canvas123 = new Canvas(512, 512)
, ctx123 = canvas123.getContext('2d');
var drawVolume;
var time, end, start;
var count=0;
/*
main function
*/
function volumerc_main(rayfrag, imgData, imgTF)
{
    start = new Date().getTime();
    var canvas = document.createElement("canvas");
    canvas.id = 'canvas_win';
    canvas.width = 512;
    canvas.height = 512;
    var gl;
    try {
        gl = canvas.getContext("webgl", {premultipliedAlpha: false}) || canvas.getContext("experimental-webgl", {premultipliedAlpha: false});
    } catch (e) {
    }
    if (!gl) {
        alert("Could not initialise WebGL, sorry :-(");
    }
    gl.shaderProgram_BackCoord = initShaders(gl, '/simple.vert', '/simple.frag');
    gl.shaderProgram_RayCast = initShaders(gl, '/raycast.vert', rayfrag, imgData, imgTF);
    gl.fboBackCoord = initFBO(gl, canvas.width, canvas.height);
    initTexture(gl, imgData, imgTF);
    //console.log(gl);
    var cube = cubeBuffer(gl);
    drawVolume = function()
    {
        gl.clearColor(0.0, 0.0, 0.0, 0.0);
        gl.enable(gl.DEPTH_TEST);
        gl.bindFramebuffer(gl.FRAMEBUFFER, gl.fboBackCoord);
        gl.shaderProgram = gl.shaderProgram_BackCoord;
        gl.useProgram(gl.shaderProgram);
        gl.clearDepth(-50.0);
        gl.depthFunc(gl.GEQUAL);
        drawCube(gl, cube);
        gl.bindFramebuffer(gl.FRAMEBUFFER, null);
        gl.shaderProgram = gl.shaderProgram_RayCast;
        gl.useProgram(gl.shaderProgram);
        gl.clearDepth(50.0);
        gl.depthFunc(gl.LEQUAL);
        gl.activeTexture(gl.TEXTURE0);
        gl.bindTexture(gl.TEXTURE_2D, gl.fboBackCoord.tex);
        gl.activeTexture(gl.TEXTURE1);
        gl.bindTexture(gl.TEXTURE_2D, gl.vol_tex);
        gl.activeTexture(gl.TEXTURE2);
        gl.bindTexture(gl.TEXTURE_2D, gl.tf_tex);
        gl.uniform1i(gl.getUniformLocation(gl.shaderProgram, "uBackCoord"), 0);
        gl.uniform1i(gl.getUniformLocation(gl.shaderProgram, "uVolData"), 1);
        gl.uniform1i(gl.getUniformLocation(gl.shaderProgram, "uTransferFunction"), 2);
        //Set Texture
        drawCube(gl, cube);
        var pixels = new Uint8Array(canvas.width * canvas.height * 4);
        gl.readPixels(0, 0, canvas.width, canvas.height, gl.RGBA, gl.UNSIGNED_BYTE, pixels);
        var imageData = ctx123.createImageData(canvas.width, canvas.height);
        for (var i = pixels.length - 1; i >= 0; i--) {
            imageData.data[i] = pixels[i];
        }
        ctx123.putImageData(imageData, 0, 0);
        console.log(canvas123);
        var fs = require('fs')
          , out = fs.createWriteStream(__dirname + '/text.png')
          , stream = canvas123.pngStream();
        stream.on('data', function(chunk){
            out.write(chunk);
        });
        stream.on('end', function(){
            // console.log('saved png');
        });
        end = new Date().getTime();
    };
    setTimeout(drawVolume, 15);
}
The left, partial image was rendered with node-webgl and the right one in the browser. I do not understand why the rendering is partial in node-webgl. I do not know whether it is a limitation of node-webgl or an error in my code; how can I debug this? (I have not included the complete code of volumercserver.js since it is big.)
Is the partial rendering because the origin is taken to be the lower-left corner in node-webgl (OpenGL), while it is the top-left corner in browser WebGL? How do I handle this? If needed, I can share the complete code here.
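If the bottom-left vs. top-left origin really is the cause, one way to test that hypothesis (a sketch only, not a confirmed fix) is to flip the rows while copying the gl.readPixels output into the ImageData, instead of copying it byte for byte:
// flip vertically: gl.readPixels fills the buffer starting at the bottom-left corner,
// while canvas ImageData starts at the top-left
var w = canvas.width, h = canvas.height;
var flipped = ctx123.createImageData(w, h);
for (var y = 0; y < h; y++) {
    var srcRow = (h - 1 - y) * w * 4;
    var dstRow = y * w * 4;
    for (var x = 0; x < w * 4; x++) {
        flipped.data[dstRow + x] = pixels[srcRow + x];
    }
}
ctx123.putImageData(flipped, 0, 0);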

Background Image Canvas [duplicate]

I am drawing on the canvas each time a user presses a button; however, sometimes the image is not drawn on the canvas. I think this is because the image isn't loaded before the context.drawImage call runs, since some of the smaller files do sometimes get drawn. I've used the console and checked the resources, and this is the only cause I can think of.
How do I avoid this problem?
This is my JavaScript code.
var canvas = document.getElementById("myCanvas");
var context = canvas.getContext("2d");
var questionbg = new Image();
var answerbg = new Image();

//this code is inside a function that is called each time a user presses a button
if(questiontype == "text"){
    questionbg.src = "./resources/textquestionbg.png";
    context.drawImage(questionbg, 0, 0);
}
//if image question
else if(questiontype == "image"){
    questionbg.src = "./resources/imageaudiovideoquestionbg.png";
    context.drawImage(questionbg, 0, 0);
}
//if audio question
else if(questiontype == "audio"){
    questionbg.src = "./resources/imageaudiovideoquestionbg.png";
    context.drawImage(questionbg, 0, 0);
}
//else it is a video question
else{
    questionbg.src = "./resources/imageaudiovideoquestionbg.png";
    context.drawImage(questionbg, 0, 0);
}
You should check whether the image has already loaded; if not, listen for its load event.
questionbg.src = "./resources/imageaudiovideoquestionbg.png";
if (questionbg.complete) {
    context.drawImage(questionbg, 0, 0);
} else {
    questionbg.onload = function () {
        context.drawImage(questionbg, 0, 0);
    };
}
MDN (Mozilla Doc, great source btw) suggests:
function draw() {
    var ctx = document.getElementById('canvas').getContext('2d');
    var img = new Image();
    img.onload = function(){
        ctx.drawImage(img, 0, 0);
        ctx.beginPath();
        ctx.moveTo(30, 96);
        ctx.lineTo(70, 66);
        ctx.lineTo(103, 76);
        ctx.lineTo(170, 15);
        ctx.stroke();
    };
    img.src = '/files/4531/backdrop.png';
}
Obviously, you do not want to apply the stroke or fill; however, the idea is the same.
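Another option (a sketch of a different approach, not taken from the answers above; the loadImage helper and the backgrounds object are made up for illustration) is to preload every background once at startup, so each button press can call drawImage synchronously without racing the load:
function loadImage(src) {
    return new Promise(function (resolve, reject) {
        var img = new Image();
        img.onload = function () { resolve(img); };
        img.onerror = reject;
        img.src = src;
    });
}

var backgrounds = {};
Promise.all([
    loadImage("./resources/textquestionbg.png"),
    loadImage("./resources/imageaudiovideoquestionbg.png")
]).then(function (imgs) {
    backgrounds.text = imgs[0];
    backgrounds.media = imgs[1]; // shared by image, audio and video questions
    // safe to enable the button here; context.drawImage(backgrounds.text, 0, 0) etc.
});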

HTML5 Canvas Make Black Transparent

I have a large number of images with a black background; here is one as an example:
Is it possible through JavaScript to ignore the black (#000000) when drawing the image on a canvas, so that it appears like this?
Basically, I am trying to turn the black pixels into transparent (alpha) pixels.
So you'll need to run through all the pixels and change the alpha value of all the black pixels.
https://jsfiddle.net/0kuph15a/2/
This code creates a buffer (an offscreen canvas) and draws the original image onto it. Once that's done, it takes all the pixels of this buffer canvas and iterates over them, checking their values. I add up the red, green, and blue values and check whether the sum is less than 10 (just in case some pixels aren't pure black, 0) but would still appear black to the human eye. If the sum is less than 10, it simply sets that pixel's alpha value to 0.
var canvas = document.getElementById('main');
var ctx = document.getElementById('main').getContext('2d');
var tile = new Image();
tile.src = document.getElementById('image').src;
tile.onload = function() {
    draw(tile);
}

function draw(img) {
    var buffer = document.createElement('canvas');
    var bufferctx = buffer.getContext('2d');
    // size the buffer to the image so getImageData covers the whole picture
    buffer.width = img.width;
    buffer.height = img.height;
    bufferctx.drawImage(img, 0, 0);
    var imageData = bufferctx.getImageData(0, 0, buffer.width, buffer.height);
    var data = imageData.data;
    var removeBlack = function() {
        for (var i = 0; i < data.length; i += 4) {
            if (data[i] + data[i + 1] + data[i + 2] < 10) {
                data[i + 3] = 0; // alpha
            }
        }
        ctx.putImageData(imageData, 0, 0);
    };
    removeBlack();
}
You can easily change this line if(data[i]+ data[i + 1] + data[i + 2] < 10){ to if(data[i]+ data[i+1] + data[i+2]==0){ if you know for a fact only #000 blacks are used.
You can accomplish that using blend modes.
Change the context globalCompositeOperation to screen, and you can get that result. Here's an example:
var canvas = document.getElementById("canvas");
var context = canvas.getContext("2d");
var image = new Image();
image.src = "https://images.blogthings.com/thecolorfulpatterntest/pattern-4.png";
image.onload = function() {
    context.drawImage(image, 0, 0, canvas.width, canvas.height);
    var blackImage = new Image();
    blackImage.src = "http://www.printmag.com/wp-content/uploads/Sillitoe-black-white.gif";
    blackImage.onload = function(){
        context.globalCompositeOperation = "screen";
        context.drawImage(blackImage, 0, 0, canvas.width, canvas.height);
    }
};
<canvas id="canvas" width="300" height="250"></canvas>
<hr/>
<h1>Images used:</h1>
<img src="https://images.blogthings.com/thecolorfulpatterntest/pattern-4.png"/>
<img src="http://www.printmag.com/wp-content/uploads/Sillitoe-black-white.gif"/>
How about saving the picture as an .svg file? From there you can change all the colors and other settings.
Felipe's answer addressed my issue. Alpha pixel manipulation (e.g., setting every 4th value to 0) does not preserve alpha transparency when multiple images are added to the same context at the same time.
For example:
this.ctx1.putImageData(output, 0, 0); // without setting the context's globalCompositeOperation, this image is written over by the next putImageData operation
this.ctx1.putImageData(output, 20, 0);
Go here to review the blending options. https://developer.mozilla.org/en-US/docs/Web/API/CanvasRenderingContext2D/globalCompositeOperation
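To make that concrete, here is a small illustrative sketch (the ctx1, outputA and outputB names are placeholders modelled on the snippet above, not code from the original post): putImageData always overwrites the destination rectangle, so to blend two layers you draw the second one with drawImage while a composite/blend mode is active:
// put the second layer on its own offscreen canvas first
var layer = document.createElement('canvas');
layer.width = ctx1.canvas.width;
layer.height = ctx1.canvas.height;
layer.getContext('2d').putImageData(outputB, 0, 0);

ctx1.putImageData(outputA, 0, 0);          // base layer, written directly
ctx1.globalCompositeOperation = 'screen';  // or any mode from the MDN page above
ctx1.drawImage(layer, 20, 0);              // blended, unlike a second putImageData
ctx1.globalCompositeOperation = 'source-over'; // restore the default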

How do I set crossOrigin attribute when using canvas.toDataURL?

So I'm trying to create a print map function for an OpenLayers 3 application I'm building. I'm aware of their example but whenever I attempt to use it I run into the dreaded tainted canvas issue. I've read the whole internet and come across folks saying first to set CORS correctly (done and done) but also to do:
var img = new Image();
img.setAttribute('crossOrigin', 'anonymous');
img.src = url;
The above is described here.
My question is: I've never really used toDataURL() before, and I'm not sure how to make sure the image being created has the crossOrigin attribute correctly set before it slams into the:
Error: Failed to execute 'toDataURL' on 'HTMLCanvasElement': Tainted canvases may not be exported.
Any thoughts?
I have seen this; my question is how to incorporate that into a function that works. Something like:
var printMap = function(){
    var img = new Image();
    img.setAttribute('crossOrigin', 'anonymous');
    img.src = url;
    img.onload = function() {
        var canvas = document.getElementsByTagName('canvas');
        var dataURL = canvas.toDataURL("image/png");
        console.log(dataURL);
    };
};
If the crossOrigin property/attribute is supported by the browser (it now is in Firefox, Chrome, the latest Safari, and Edge) but the server doesn't answer with the proper headers (Access-Control-Allow-Origin: *), then the img's onerror event fires.
So we can simply handle this event and remove the attribute if we want to draw the image anyway.
For browsers that don't support this attribute, the only way to test whether the canvas is tainted is to call toDataURL inside a try-catch block.
Here is an example:
var urls = [
    "http://upload.wikimedia.org/wikipedia/commons/4/47/PNG_transparency_demonstration_1.png",
    "http://lorempixel.com/200/200"
];
var tainted = false;
var img = new Image();
img.crossOrigin = 'anonymous';
var canvas = document.createElement('canvas');
var ctx = canvas.getContext('2d');
document.body.appendChild(canvas);

var load_handler = function() {
    canvas.width = 200;
    canvas.height = 200;
    ctx.fillStyle = 'white';
    ctx.font = '15px sans-serif';
    ctx.drawImage(this, 0, 0, 200, 200 * (this.height / this.width));
    // for browsers supporting the crossOrigin attribute
    if (tainted) {
        ctx.strokeText('canvas tainted', 20, 100);
        ctx.fillText('canvas tainted', 20, 100);
    } else {
        // for others
        try {
            canvas.toDataURL();
        } catch (e) {
            tainted = true;
            ctx.strokeText('canvas tainted after try catch', 20, 100);
            ctx.fillText('canvas tainted after try catch', 20, 100);
        }
    }
};
var error_handler = function() {
    // remove this onerror listener to avoid an infinite loop
    this.onerror = function() {
        return false;
    };
    // the canvas would certainly have been tainted
    tainted = true;
    // we need to removeAttribute() since Chrome doesn't like the property=undefined way...
    this.removeAttribute('crossorigin');
    this.src = this.src;
};
img.onload = load_handler;
img.onerror = error_handler;
img.src = urls[0];

btn.onclick = function() {
    // reset the flag
    tainted = false;
    // we need to create a new canvas, or it will keep its tainted flag
    // try commenting out the next 3 lines and switching the src multiple times to see what I mean
    ctx = canvas.cloneNode(true).getContext('2d');
    canvas.parentNode.replaceChild(ctx.canvas, canvas);
    canvas = ctx.canvas;
    // reset the attributes and error handler
    img.crossOrigin = 'anonymous';
    img.onerror = error_handler;
    img.src = urls[+!urls.indexOf(img.src)];
};
<button id="btn"> change image src </button><br>
But since toDataURL can be a really heavy call for just a check, and code inside a try-catch block gets deoptimized, a better alternative for older browsers is to create a 1px by 1px tester canvas, draw the images on it first, and call its toDataURL inside the try-catch block:
var urls = [
    "http://upload.wikimedia.org/wikipedia/commons/4/47/PNG_transparency_demonstration_1.png",
    "http://lorempixel.com/200/200"
];
var img = new Image();
img.crossOrigin = 'anonymous';
var canvas = document.createElement('canvas');
var ctx = canvas.getContext('2d');
document.body.appendChild(canvas);

// create a canvas only for testing whether our images would taint our canvas or not
var taintTester = document.createElement('canvas').getContext('2d');
// size the tester canvas itself (not the context) to one pixel
taintTester.canvas.width = 1;
taintTester.canvas.height = 1;

var load_handler = function() {
    // our image flag
    var willTaint = false;
    // first draw on the tester
    taintTester.drawImage(this, 0, 0);
    // since it's only one pixel wide, toDataURL is way faster
    try {
        taintTester.canvas.toDataURL();
    } catch (e) {
        // update our flag
        willTaint = true;
    }
    // it will taint the canvas
    if (willTaint) {
        // reset our tester
        taintTester = taintTester.canvas.cloneNode(1).getContext('2d');
        // do something
        ctx.fillStyle = 'rgba(0,0,0,.7)';
        ctx.fillRect(0, 75, ctx.measureText('we won\'t display ' + this.src).width + 40, 60);
        ctx.fillStyle = 'white';
        ctx.font = '15px sans-serif';
        ctx.fillText('we won\'t display ' + this.src, 20, 100);
        ctx.fillText('canvas would have been tainted', 20, 120);
    } else {
        // all clear
        canvas.width = this.width;
        canvas.height = this.height;
        ctx.fillStyle = 'white';
        ctx.font = '15px sans-serif';
        ctx.drawImage(this, 0, 0);
    }
};
var error_handler = function() {
    // remove this onerror listener to avoid an infinite loop
    this.onerror = function() {
        return false;
    };
    // we need to removeAttribute() since Chrome doesn't like the property=undefined way...
    this.removeAttribute('crossorigin');
    this.src = this.src;
};
img.onload = load_handler;
img.onerror = error_handler;
img.src = urls[0];

btn.onclick = function() {
    // reset the attributes and error handler
    img.crossOrigin = 'anonymous';
    img.onerror = error_handler;
    img.src = urls[+!urls.indexOf(img.src)];
};
<button id="btn">change image src</button>
Note
Cross-origin requests are not the only way to taint a canvas.
In IE before Edge, drawing an SVG onto the canvas taints it for security reasons. In the same way, the latest Safari taints the canvas if a <foreignObject> is present in an SVG drawn onto it. Finally, any UA will taint the canvas if another tainted canvas is painted onto it.
So in those cases the only way to check whether the canvas is tainted is a try-catch, and the best place to do it is on a 1px by 1px test canvas.
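As a tiny illustration of that last point (someCrossOriginImg is a placeholder name, not from the original answer), taint propagates from canvas to canvas through drawImage:
var taintedCanvas = document.createElement('canvas');
taintedCanvas.getContext('2d').drawImage(someCrossOriginImg, 0, 0); // now tainted

var otherCanvas = document.createElement('canvas');
otherCanvas.getContext('2d').drawImage(taintedCanvas, 0, 0);        // tainted too

try {
    otherCanvas.toDataURL();
} catch (e) {
    console.log('second canvas is tainted as well:', e.name); // SecurityError
}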
So Pointy and Kaiido both had valid ways of making this work but they both missed that this was an OpenLayers issue (and in the case of Pointy, not a duplicate question).
The answer was to do this:
source = new ol.source.TileWMS({
    crossOrigin: 'anonymous'
});
Basically you had to tell the map AND the layers that you wanted crossOrigin: anonymous. Otherwise your canvas would still be tainted. The more you know!
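For reference, here is a sketch of how the pieces can fit together, based on the OpenLayers 3 map-export example (the URL, layer params, and exportMap name are placeholders, not from the original post): with crossOrigin set on the source, the map canvas stays origin-clean and toDataURL no longer throws.
var source = new ol.source.TileWMS({
    url: 'https://example.com/wms',   // placeholder
    params: { 'LAYERS': 'mylayer' },  // placeholder
    crossOrigin: 'anonymous'
});

var map = new ol.Map({
    target: 'map',
    layers: [new ol.layer.Tile({ source: source })],
    view: new ol.View({ center: [0, 0], zoom: 2 })
});

function exportMap() {
    map.once('postcompose', function(event) {
        var dataURL = event.context.canvas.toDataURL('image/png');
        console.log(dataURL);
    });
    map.renderSync();
}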
