I'm currently working on a web app for photo editing using FabricJS, and one of the features I need to implement is something like clipping masks from Photoshop.
For example, I have these assets: a frame, a mask and an image. I need to insert the image inside the frame and clip it with the mask. The trickiest part is in the requirements:
The user should be able to modify the image inside the frame, e.g. move, rotate, skew... The frame itself can also be moved inside the canvas.
The number of layers is not limited, so the user can add objects under or above the masked image.
Masks, frames and images are not predefined; the user should be able to upload and use new assets.
My current solution is this:
Load assets
Set globalCompositeOperation of image to source-out
Set clipTo function for image.
Add assets on canvas as a group
In this solution the clipTo function keeps the image inside the rectangular area of the frame, and with the help of globalCompositeOperation I'm clipping the image to the actual mask. At first sight it works fine, but if I add a new layer above this newly added group it will be cut off because of the globalCompositeOperation="source-out" rule. I've created a JSFiddle to show this.
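For reference, my current approach looks roughly like this (a simplified sketch only, using Fabric's older clipTo API; the URLs, variable names and sizes are illustrative, not my real code):
fabric.Image.fromURL('mask.png', function(mask) {
  fabric.Image.fromURL('image.jpg', function(img) {
    // keep the image inside the frame's rectangular area
    img.clipTo = function(ctx) {
      ctx.rect(-frameWidth / 2, -frameHeight / 2, frameWidth, frameHeight); // frameWidth/frameHeight are placeholders
    };
    // clip the image to the actual mask shape via compositing
    img.globalCompositeOperation = 'source-out';
    fabric.Image.fromURL('frame.png', function(frame) {
      canvas.add(new fabric.Group([frame, mask, img]));
      canvas.renderAll();
    });
  });
});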
So, what else could I try? I've seen some posts on StackOverflow advising the use of SVGs for the clipping mask, but if I understand it correctly the SVG must contain only one path. This could be a problem because of the third requirement of my app.
Any advice in the right direction will help, because right now I'm totally stuck on this problem.
You can do this by using the clipPath property of the image object you want to mask. With this you can mask any type of object. You also need to add some ctx configuration in the clipTo function of the image object.
Check this link: https://jsfiddle.net/naimsajjad/8w7hye2v/8/
(function() {
var img01URL = 'http://fabricjs.com/assets/printio.png';
var img02URL = 'http://fabricjs.com/lib/pug.jpg';
var img03URL = 'http://fabricjs.com/assets/ladybug.png';
var canvas = new fabric.Canvas('c');
canvas.backgroundColor = "red";
canvas.setHeight(500);
canvas.setWidth(500);
canvas.setZoom(1)
var circle = new fabric.Circle({radius: 40, top: 50, left: 50, fixed: true, fill: '', stroke: '1' });
canvas.add(circle);
canvas.renderAll();
fabric.Image.fromURL(img01URL, function(oImg) {
  oImg.scale(.25);
  oImg.left = 10;
  oImg.top = 10;
  oImg.clipPath = circle;
  oImg.clipTo = function(ctx) {
    clipObject(this, ctx);
  };
  canvas.add(oImg);
  canvas.renderAll();
});
var bili = new fabric.Path('M85.6,606.2c-13.2,54.5-3.9,95.7,23.3,130.7c27.2,35-3.1,55.2-25.7,66.1C60.7,814,52.2,821,50.6,836.5c-1.6,15.6,19.5,76.3,29.6,86.4c10.1,10.1,32.7,31.9,47.5,54.5c14.8,22.6,34.2,7.8,34.2,7.8c14,10.9,28,0,28,0c24.9,11.7,39.7-4.7,39.7-4.7c12.4-14.8-14-30.3-14-30.3c-16.3-28.8-28.8-5.4-33.5-11.7s-8.6-7-33.5-35.8c-24.9-28.8,39.7-19.5,62.2-24.9c22.6-5.4,65.4-34.2,65.4-34.2c0,34.2,11.7,28.8,28.8,46.7c17.1,17.9,24.9,29.6,47.5,38.9c22.6,9.3,33.5,7.8,53.7,21c20.2,13.2,62.2,10.9,62.2,10.9c18.7,6.2,36.6,0,36.6,0c45.1,0,26.5-15.6,10.1-36.6c-16.3-21-49-3.1-63.8-13.2c-14.8-10.1-51.4-25.7-70-36.6c-18.7-10.9,0-30.3,0-48.2c0-17.9,14-31.9,14-31.9h72.4c0,0,56-3.9,70.8,26.5c14.8,30.3,37.3,36.6,38.1,52.9c0.8,16.3-13.2,17.9-13.2,17.9c-31.1-8.6-31.9,41.2-31.9,41.2c38.1,50.6,112-21,112-21c85.6-7.8,79.4-133.8,79.4-133.8c17.1-12.4,44.4-45.1,62.2-74.7c17.9-29.6,68.5-52.1,113.6-30.3c45.1,21.8,52.9-14.8,52.9-14.8c15.6,2.3,20.2-17.9,20.2-17.9c20.2-22.6-15.6-28-16.3-84c-0.8-56-47.5-66.1-45.1-82.5c2.3-16.3,49.8-68.5,38.1-63.8c-10.2,4.1-53,25.3-63.7,30.7c-0.4-1.4-1.1-3.4-2.5-6.6c-6.2-14-74.7,30.3-74.7,30.3s-108.5,64.2-129.6,68.9c-21,4.7-18.7-9.3-44.3-7c-25.7,2.3-38.5,4.7-154.1-44.4c-115.6-49-326,29.8-326,29.8s-168.1-267.9-28-383.4C265.8,13,78.4-83.3,32.9,168.8C-12.6,420.9,98.9,551.7,85.6,606.2z',{top: 0, left: 180, fixed: true, fill: 'white', stroke: '', scaleX: 0.2, scaleY: 0.2 });
canvas.add(bili);
canvas.renderAll();
fabric.Image.fromURL(img02URL, function(oImg) {
  oImg.scale(0.5);
  oImg.left = 180;
  oImg.top = 0;
  oImg.clipPath = bili;
  oImg.clipTo = function(ctx) {
    clipObject(this, ctx);
  };
  canvas.add(oImg);
  canvas.renderAll();
});
function clipObject(thisObj, ctx) {
  if (thisObj.clipPath) {
    ctx.save();
    if (thisObj.clipPath.fixed) {
      var retina = thisObj.canvas.getRetinaScaling();
      ctx.setTransform(retina, 0, 0, retina, 0, 0);
      // to handle zoom
      ctx.transform.apply(ctx, thisObj.canvas.viewportTransform);
      thisObj.clipPath.transform(ctx);
    }
    thisObj.clipPath._render(ctx);
    ctx.restore();
    ctx.clip();
    var x = -thisObj.width / 2, y = -thisObj.height / 2, elementToDraw;
    if (thisObj.isMoving === false && thisObj.resizeFilter && thisObj._needsResize()) {
      thisObj._lastScaleX = thisObj.scaleX;
      thisObj._lastScaleY = thisObj.scaleY;
      thisObj.applyResizeFilters();
    }
    elementToDraw = thisObj._element;
    elementToDraw && ctx.drawImage(elementToDraw,
      0, 0, thisObj.width, thisObj.height,
      x, y, thisObj.width, thisObj.height);
    thisObj._stroke(ctx);
    thisObj._renderStroke(ctx);
  }
}
})();
<script src="https://cdnjs.cloudflare.com/ajax/libs/fabric.js/3.6.3/fabric.min.js"></script>
<canvas id="c" width="400" height="400"></canvas>
Not sure what you want.
If you want the last image loaded (named img2), the one you send to the back, to not affect the layers above, do the following.
You have mask, frame, img, and img2.
Put them in the following order and with the following comp settings:
img2, source-over
img, source-over
mask, destination-out
frame, source-over
If you want something else you will have to explain it in more detail.
Personally, when I provide masking to clients I give them full access to all the composite methods and allow them to work out what they need to do to achieve a desired effect. Providing a UI that allows changing the comp setting and the layer order makes it a lot easier to sort out the sometimes confusing canvas composite rules.
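In Fabric that ordering can be expressed by setting globalCompositeOperation on each object and adding them in draw order; a minimal sketch, assuming mask, frame, img and img2 are already-loaded fabric objects and that the list above is the draw order (first item drawn first):
img2.globalCompositeOperation = 'source-over';
img.globalCompositeOperation = 'source-over';
mask.globalCompositeOperation = 'destination-out';
frame.globalCompositeOperation = 'source-over';

// objects added first are drawn first (bottom of the stack)
canvas.add(img2, img, mask, frame);
canvas.renderAll();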
I'd suggest looking at this solution.
Multiple clipping areas on Fabric.js canvas
You end up with a shape layer that is used to define the mask shape. That shape then gets applied as a clipTo to your image.
The one limitation I can think of, though, that you might run into is when you start to rotate various shapes. I know I have it working great with a rectangle and a circle; however, I ran into some issues with polygons, from what I recall... This was all set up under an older version of FabricJS, however, so there may have been some improvements there that I'm not experienced with.
The other issue I ran into was drop shadows didn't render correctly when passed to a NodeJS server running FabricJS.
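For reference, the linked approach boils down to something like this under the older (pre-2.x) clipTo API; the shape, URL and coordinates here are illustrative only:
// a shape object used purely to define the mask
var clipShape = new fabric.Rect({ left: -50, top: -50, width: 100, height: 100 });

fabric.Image.fromURL('http://fabricjs.com/assets/pug.jpg', function(img) {
  img.clipTo = function(ctx) {
    // render the shape into the image's clipping context; in older Fabric
    // versions this context is centred on the image, so coordinates are
    // relative to the image's centre
    clipShape.render(ctx);
  };
  canvas.add(img);
  canvas.renderAll();
});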
I have been practicing using sprites for a game I am going to make and have watched and read a few tutorials. I thought I was close to getting my sprite to appear so I could finally start my game, but while practicing I can't get it to work. I have done 2 separate tutorials where I can get the sprite and the background to appear by themselves, but I cannot get them to work together. I have been using EaselJS too; some of the sprite animation code has been copied from tutorials.
<!DOCTYPE HTML>
<html>
<head>
<meta charset="UTF-8">
<title>sprite prac</title>
<!-- EaselJS library -->
<script src="lib/easel.js"></script>
<script>
// Initialize on start up so game runs smoothly
function init() {
  canvas = document.getElementById("canvas");
  stage = new Stage(canvas);
  bg = new Image();
  bg.src = "img/grassbg.jpg";
  bg.onload = setBG;
  stage.addChild(background);
  imgMonsterARun = new Image();
  imgMonsterARun.onload = handleImageLoad;
  imgMonsterARun.onerror = handleImageError;
  imgMonsterARun.src = "img/MonsterARun.png";
  stage.update();
}
function handleImageLoad(e) {
  startGame();
}
// Simple function for setting up the background
function setBG(event) {
  var bgrnd = new Bitmap(bg);
  stage.addChild(bgrnd);
  stage.update();
}
function startGame() {
  // create a new stage and point it at our canvas:
  stage = new createjs.Stage(canvas);
  // grab canvas width and height for later calculations:
  screen_width = canvas.width;
  screen_height = canvas.height;
  // create spritesheet and assign the associated data.
  var spriteSheet = new createjs.SpriteSheet({
    // image to use
    images: [imgMonsterARun],
    // width, height & registration point of each sprite
    frames: {width: 64, height: 64, regX: 32, regY: 32},
    animations: {
      walk: [0, 9, "walk"]
    }
  });
  // create a BitmapAnimation instance to display and play back the sprite sheet:
  bmpAnimation = new createjs.BitmapAnimation(spriteSheet);
  // start playing the first sequence:
  bmpAnimation.gotoAndPlay("walk"); //animate
  // set up a shadow. Note that shadows are ridiculously expensive. You could display hundreds
  // of animated rats if you disabled the shadow.
  bmpAnimation.shadow = new createjs.Shadow("#454", 0, 5, 4);
  bmpAnimation.name = "monster1";
  bmpAnimation.direction = 90;
  bmpAnimation.vX = 4;
  bmpAnimation.x = 16;
  bmpAnimation.y = 32;
  // have each monster start at a specific frame
  bmpAnimation.currentFrame = 0;
  stage.addChild(bmpAnimation);
  // we want to do some work before we update the canvas,
  // otherwise we could use Ticker.addListener(stage);
  createjs.Ticker.addListener(window);
  createjs.Ticker.useRAF = true;
  createjs.Ticker.setFPS(60);
}
//called if there is an error loading the image (usually due to a 404)
function handleImageError(e) {
  console.log("Error Loading Image : " + e.target.src);
}
function tick() {
  // Hit testing the screen width, otherwise our sprite would disappear
  if (bmpAnimation.x >= screen_width - 16) {
    // We've reached the right side of our screen
    // We need to walk left now to go back to our initial position
    bmpAnimation.direction = -90;
  }
  if (bmpAnimation.x < 16) {
    // We've reached the left side of our screen
    // We need to walk right now
    bmpAnimation.direction = 90;
  }
  // Moving the sprite based on the direction & the speed
  if (bmpAnimation.direction == 90) {
    bmpAnimation.x += bmpAnimation.vX;
  }
  else {
    bmpAnimation.x -= bmpAnimation.vX;
  }
  // update the stage:
  stage.update();
}
</script>
</head>
<body onload="init();">
<canvas id="canvas" width="500" height="500" style="border: thin black solid;" ></canvas>
</body>
</html>
There are a few places where you are using some really old APIs, which may or may not be supported depending on your version of EaselJS. Where did you get the easel.js script you reference?
Assuming you have a version of EaselJS that matches the APIs you are using, there are a few issues:
You add background to the stage. There is no background variable, so you are probably getting an error when you add it. You already add bgrnd in the setBG method, which should be fine. If you get an error here, then this could be your main issue.
You don't need to update the stage any time you add something, just when you want the stage to "refresh". In your code, you update after setting the background, and again immediately at the end of your init(). These will fire one after the other.
Are you getting errors in your console? That would be a good place to start debugging. I would also recommend posting code if you can to show an actual demo if you continue to have issues, which will help identify what is happening.
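Putting the first two points together, a corrected init() might look roughly like this (a sketch only, keeping the question's globals and the older API):
function init() {
  canvas = document.getElementById("canvas");
  stage = new Stage(canvas);

  bg = new Image();
  bg.onload = setBG;            // setBG() adds the bitmap and updates the stage once loaded
  bg.src = "img/grassbg.jpg";   // note: no stage.addChild(background) -- that variable doesn't exist

  imgMonsterARun = new Image();
  imgMonsterARun.onload = handleImageLoad;
  imgMonsterARun.onerror = handleImageError;
  imgMonsterARun.src = "img/MonsterARun.png";
  // no stage.update() here; setBG() and startGame() update once their assets are ready
}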
If you have a newer version of EaselJS:
BitmapAnimation is now Sprite, and doesn't support direction. To flip Sprites, use scaleX=-1
Ticker no longer uses addListener. Instead it uses the EventDispatcher. createjs.Ticker.addEventListener("tick", tickFunction);
You can get new versions of the CreateJS libraries at http://code.createjs.com, and you can get updated examples and code on the website and GitHub.
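For example, against a newer EaselJS the relevant pieces might look roughly like this (a sketch only; exact property names depend on the version you end up using):
// BitmapAnimation -> Sprite
var sprite = new createjs.Sprite(spriteSheet, "walk");
sprite.scaleX = -1; // flip horizontally instead of using a "direction" property

// Ticker now dispatches events instead of using addListener
createjs.Ticker.setFPS(60);
createjs.Ticker.addEventListener("tick", tick);

function tick(event) {
  // move the sprite here, then redraw
  stage.update(event);
}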
I have a page which allows you to browse in an image, then draw on it and save both the original and the annotated version. I am leveraging megapix-image.js and exif.js to help in rendering images from multiple mobile devices properly. It works great, except in certain orientations. For example, a vertical photo taken on an iPhone4s is considered orientation 6 by exif and gets flipped accordingly by megapix-image so it's rendered nicely on the canvas. For some reason, when I draw on it afterward, it seems like the drawing is reversed. Mouse and touch both behave the same way. The coordinates look right to me (meaning they match a working horizontal pic and a non-working vertical pic), as does the canvas height and width when megapix-image.js flips it. This leads me to believe it has something to do with the context, but honestly, I am not really sure. I have a JS fiddle of the part of my work that shows the behavior. Just browse in a vertically taken pic from a mobile device or take a pic in vertical format on a mobile device and use it. I think all will show this same behavior.
The final rendering is done like this:
function RenderImage(file2) {
  if (typeof file2[0].files[0] != 'undefined') {
    EXIF.getData(file2[0].files[0], function () {
      orientation = EXIF.getTag(this, "Orientation");
      var file = file2[0].files[0];
      var mpImg = new MegaPixImage(file);
      var resCanvas1 = document.getElementById('annoCanvas');
      mpImg.render(resCanvas1, {
        maxWidth: 700,
        maxHeight: 700,
        orientation: orientation
      });
    });
  }
}
But the full jsfiddle is here:
http://jsfiddle.net/awebster28/Tq3qU/6/
Does anyone have any clues for me?
If you look at the lib you are using, there is a transformCoordinate function that is used to set the right transform before drawing.
And they don't save/restore the canvas context (boooo!!!), so it remains with this transform afterwards.
The solution for you is to do what the lib should do: save the context before the render and restore it after:
function RenderImage(file2) {
  // ... same code ...
  var mpImg = new MegaPixImage(file);
  var eData = EXIF.pretty(this);
  // Render resized image into canvas element.
  var resCanvas1 = document.getElementById('annoCanvas');
  var ctx = resCanvas1.getContext('2d');
  ctx.save();
  // setting the orientation flips it
  mpImg.render(resCanvas1, {
    maxWidth: 700,
    maxHeight: 700,
    orientation: orientation
  });
  ctx.restore();
  //...
}
I ended up fixing this by adding another canvas to my HTML (named "annoCanvas2"). Then I updated megapix-image.js to include this function, which draws the contents of the rendered source canvas onto its twin:
function drawTwin(sourceCanvas) {
  var id = sourceCanvas.id + "2";
  var destCanvas = document.getElementById(id);
  if (destCanvas !== null) {
    var twinCtx = destCanvas.getContext("2d");
    destCanvas.width = sourceCanvas.width;
    destCanvas.height = sourceCanvas.height;
    twinCtx.drawImage(sourceCanvas, 0, 0, sourceCanvas.width, sourceCanvas.height);
  }
}
Then, just after the first canvas is rotated, flipped and rendered, I rendered the resulting canvas to my "twin". That left me with a nice canvas holding my updated image that I could then draw on and also save!
var tagName = target.tagName.toLowerCase();
if (tagName === 'img') {
  target.src = renderImageToDataURL(this.srcImage, opt, doSquash);
} else if (tagName === 'canvas') {
  renderImageToCanvas(this.srcImage, target, opt, doSquash);
  //------I added this-----------
  drawTwin(target);
}
I was glad to have it fixed so I met my deadline, but I am still not sure why I had to do this. If anyone out there can explain it, I'd love to know why.
I'm trying to build a transform manager for KineticJS that would build a bounding box and allow users to scale, move, and rotate an image on their canvas. I'm getting tripped up with the logic for the anchor points.
http://jsfiddle.net/mharrisn/whK2M/
I just want to allow a user to scale their image proportionally from any corner, and also rotate as they hold-drag an anchor point.
Can anyone help point me in the right direction?
Thank you!
Here is a proof of concept of a rotational control I've made:
http://codepen.io/ArtemGr/pen/ociAD
While the control is dragged around, the dragBoundFunc is used to rotate the content alongside it:
controlGroup.setDragBoundFunc (function (pos) {
  var groupPos = group.getPosition()
  var rotation = degrees (angle (groupPos.x, groupPos.y, pos.x, pos.y))
  status.setText ('x: ' + pos.x + '; y: ' + pos.y + '; rotation: ' + rotation); layer.draw()
  group.setRotationDeg (rotation); layer.draw()
  return pos
})
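The snippet assumes two small helpers that aren't shown here; a possible implementation (an assumption on my part, not taken from the codepen):
// angle of the point (px, py) around the centre (cx, cy), in radians
function angle (cx, cy, px, py) {
  return Math.atan2 (py - cy, px - cx)
}

// radians -> degrees
function degrees (radians) {
  return radians * 180 / Math.PI
}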
I am doing the same thing, and I've posted a question which is almost the same, but I found a link where the resize and move tools are already developed, so I have used the same. It does not contain a rotate tool, however, but it can be a good start for you too; it is very simple and logical. Here is the link: http://www.html5canvastutorials.com/labs/html5-canvas-drag-and-drop-resize-and-invert-images/
I will come back with the rotation tool as well if I manage to get it working perfectly.
I hope I am not too late in posting this code snippet. I had the same problem as you guys when dealing with this kind of task. It's been 3 days of trying so many workarounds to mimic the fabricjs framework's capability for dealing with images and objects. I could have used Fabricjs, but it seems that Kineticjs is faster and more consistent for dealing with the HTML5 canvas.
Luckily, we already have an existing plugin/tool that we can easily use together with kineticjs, and this is the jQuery Transform Tool. SUPER THANKS TO THE AUTHOR OF THIS! Just search for it on Google and download it.
I hope the code below that I created will help the many developers out there who are pulling their hair out over this kind of assignment.
$(function() {
  //Declare components STAGE, LAYER and TEXT
  var _stage = null;
  var _layer = null;
  var simpleText = null;
  _stage = new Kinetic.Stage({
    container: 'canvas',
    width: 640,
    height: 480
  });
  _layer = new Kinetic.Layer();
  simpleText = new Kinetic.Text({
    x: 60,
    y: 55,
    text: 'Simple Text',
    fontSize: 30,
    fontFamily: 'Calbiri',
    draggable: false,
    name: 'objectInCanvas',
    id: 'objectCanvas',
    fill: 'green'
  });
  //ADD LAYER AND TEXT ON STAGE
  _layer.add(simpleText);
  _stage.add(_layer);
  _stage.draw();
  //Add onclick event listener to the Stage to remove and add transform tool to the object
  _stage.on('click', function(evt) {
    //Remove all objects' transform tool inside the stage
    removeTransformToolSelection();
    // get the shape that was clicked on
    ishape = evt.targetNode;
    //Add and show again the transform tool to the selected object and update the stage layer
    $(ishape).transformTool('show');
    ishape.getParent().moveToTop();
    _layer.draw();
  });
  function removeTransformToolSelection() {
    //Search all objects inside the stage or layer who has the name of "objectInCanvas" using jQuery iterator and hide the transform tool.
    $.each(_stage.find('.objectInCanvas'), function(i, child) {
      $(child).transformTool('hide');
    });
  }
  //Event listener/Callback when selecting image using file upload element
  function handleFileSelect(evt) {
    //Remove all objects' transform tool inside the stage
    removeTransformToolSelection();
    //Create image object for selected file
    var imageObj = new Image();
    imageObj.onload = function() {
      var myImage = new Kinetic.Image({
        x: 0,
        y: 0,
        image: imageObj,
        name: 'objectInCanvas',
        draggable: false,
        id: 'id_'
      });
      //Add to layer and add transform tool
      _layer.add(myImage);
      $(myImage).transformTool();
      _layer.draw();
    };
    //Adding source to Image object.
    var f = document.getElementById('files').files[0];
    var name = f.name;
    var url = window.URL;
    var src = url.createObjectURL(f);
    imageObj.src = src;
  }
  //Attach event listener to FILE element
  document.getElementById('files').addEventListener('change', handleFileSelect, false);
});
I am trying to generate a group of thumbnails in the browser out of an HTML5 video using canvas with this code:
var fps = video_model.getFps(); //frames per second, comes from another script
var start = shot.getStart(); //start time of capture, comes from another script
var end = shot.getEnd(); //end time of capture, comes from another script
for (var i = start; i <= end; i += 50) { //capture every 50 frames
  video.get(0).currentTime = i / fps;
  var capture = $(document.createElement("canvas"))
    .attr({
      id: video.get(0).currentTime + "sec",
      width: video.get(0).videoWidth,
      height: video.get(0).videoHeight
    });
  var ctx = capture.get(0).getContext("2d");
  ctx.drawImage(video.get(0), 0, 0, video.get(0).videoWidth, video.get(0).videoHeight);
  $("body").append(capture, " ");
}
The number of captures is correct, but the problem is that in Chrome all the canvases appear black, and in Firefox they always show the same image.
Maybe the problem is that the loop is too fast to let the canvases be painted, but I read that .drawImage() is asynchronous, therefore, in theory, it should let the canvases be painted before jumping to the next line.
Any ideas on how to solve this issue?
Thanks.
After hours of fighting with this I finally came up with a solution based on the "seeked" event. For this to work, the video must be completely loaded:
The code goes like this:
var fps = video_model.getFps(); //screenshot data, comes from another script
var start = shot.getStart();
var end = shot.getEnd();
video.get(0).currentTime = start/fps; //make the video jump to the start
video.on("seeked", function(){ //when the time is seeked, capture screenshot
setTimeout( //the trick is in giving the canvas a little time to be created and painted, 500ms should be enough
function(){
if( video.get(0).currentTime <= end/fps ){
var capture = $(document.createElement("canvas")) //create canvas element on the fly
.attr({
id: video.get(0).currentTime + "sec",
width: video.get(0).videoWidth,
height: video.get(0).videoHeight
})
.appendTo("body");
var ctx = capture.get(0).getContext("2d"); //paint canvas
ctx.drawImage(video.get(0), 0, 0, video.get(0).videoWidth, video.get(0).videoHeight);
if(video.get(0).currentTime + 50/fps > end/fps){
video.off("seeked"); //if last screenshot was captured, unbind
}else{
video.get(0).currentTime += 50/fps; //capture every 50 frames
}
}
}
, 500); //timeout of 500ms
});
This has worked for me in Chrome and Firefox; I've read that the seeked event can be buggy in some versions of particular browsers.
Hope this can be useful to anybody. If anyone comes up with a cleaner, better solution, it would be nice to see it.