2D zoom to point in webgl - javascript

I'm trying to create a 2D graph visualization using WebGL (regl, to be more specific). With my current implementation I can already see the force layout being applied to each node, which is good. The problem comes when I try to zoom with respect to the current mouse position. According to my research, to achieve this behavior, it is necessary to apply matrix transformations in the following order:
translate(nodePosition, mousePosition)
scale(scaleFactor)
translate(nodePosition, -mousePosition)
So, every time the wheel event is fired, the mouse position is recalculated and the transform matrix is updated with the new mouse position information.
The current behavior is weird and I can't seem to understand what is wrong. Here is a live example.
Apparently, if I zoom in and out with the mouse fixed at the initial position, everything works just fine. However, if I move the mouse and try to focus on another node, then it fails.
The function for retrieving the mouse position is:
const getMousePosition = (event) => {
  // Locate the pointer relative to the canvas that fired the event.
  const canvas = event.currentTarget
  const bounds = canvas.getBoundingClientRect()
  const pos = vec2.fromValues(
    event.clientX - bounds.left,
    event.clientY - bounds.top
  )
  // Project the pixel coordinates into WebGL clip space
  // (x right, y up, both in [-1, 1]).
  const toClip = mat3.create()
  mat3.projection(toClip, canvas.clientWidth, canvas.clientHeight)
  vec2.transformMat3(pos, pos, toClip)
  return pos
}
The wheel event listener callback:
// Current zoom multiplier and the zoom pivot (in clip space).
// Both are read by updateTransform().
var zoomFactor = 1.0
var mouse = vec2.fromValues(0.0, 0.0)

options.canvas.addEventListener("wheel", (event) => {
  event.preventDefault()
  // Re-read the pivot on every tick so zooming follows the cursor.
  mouse = getMousePosition(event)
  // Wheel up (negative deltaY) zooms in, wheel down zooms out.
  var step = event.deltaY < 0 ? 1 : -1
  zoomFactor = 1 + step * 0.1
  updateTransform()
})
And the function that updates the transform:
var transform = mat3.create()

// Rebuild the zoom transform so scaling pivots on the mouse position.
//
// Bug fix: `mouse` is in clip space, but `transform` is an accumulating
// model -> clip matrix. Translating by the raw clip-space mouse position
// only works while `transform` is still the identity (which is exactly
// the behavior observed: correct on the first zoom, wrong afterwards).
// The pivot must first be pulled back into model space with the inverse
// of the current transform before the translate/scale/translate dance.
function updateTransform() {
  var inverse = mat3.create()
  mat3.invert(inverse, transform)
  // Mouse position expressed in the model space of the current transform.
  var pivot = vec2.create()
  vec2.transformMat3(pivot, mouse, inverse)
  // translate(pivot) * scale * translate(-pivot), applied in model space.
  mat3.translate(transform, transform, pivot)
  mat3.scale(transform, transform, [zoomFactor, zoomFactor])
  mat3.translate(transform, transform, [-pivot[0], -pivot[1]])
}
This transform matrix is made available as an uniform in the vertex shader:
precision highp float;

attribute vec2 position;   // node position in stage (pixel) space
uniform mat3 transform;    // zoom/pan matrix applied in clip space
uniform float stageWidth;
uniform float stageHeight;

// Map a stage-space position into WebGL clip space:
// [-1, 1] on both axes, with y flipped so stage-y grows downward.
vec2 normalizeCoords(vec2 position) {
  float x = (position[0] + (stageWidth / 2.0));
  float y = (position[1] + (stageHeight / 2.0));
  return vec2(
    2.0 * ((x / stageWidth) - 0.5),
    -(2.0 * ((y / stageHeight) - 0.5))
  );
}

void main() {
  gl_PointSize = 7.0;
  vec3 clipPos = transform * vec3(normalizeCoords(position), 1);
  gl_Position = vec4(clipPos.xy, 0, 1);
}
where, position is the attribute holding the node position.
What I've tried, so far:
I already tried changing the order of the transformations. The result is even weirder.
When I apply either translation or scaling independently, everything looks ok.
This is my first interaction with something that is not the usual SVG/canvas stuff. The solution is probably obvious, but I really don't know where to look anymore. What am I doing wrong?
Update 06/11/2018
I followed #Johan's suggestions and implemented it on the live demo. Although the explanation was rather convincing, the result is not quite what I was expecting. The idea of inverting the transform to get the mouse position in the model space makes sense to me, but my intuition (which is probably wrong) says that applying the transform directly on the screen space should also work. Why can't I project both the nodes and the mouse in the screen space and apply the transform directly there?
Update 07/11/2018
After struggling a little, I decided to take a different approach and adapt the solution from this answer for my use case. Although things are working as expected for the zoom (with the addition of panning as well), I still believe there are solutions that do not depend on d3-zoom at all. Maybe isolating the view matrix and controlling it independently to achieve the expected behavior, as suggested in the comments. To see my current solution, check my answer below.

Alright, after failing with the original approach, I managed to make this solution work for my use case.
The updateTransform function is now:
var transform = mat3.create();

// Rebuild the full model-to-clip-space matrix from the d3-zoom state
// (x, y = pan in pixels; scale = zoom factor). Order of operations:
// pixel projection -> pan -> zoom -> recentre -> stage scale -> y flip.
function updateTransform(x, y, scale) {
  const w = options.canvas.width;
  const h = options.canvas.height;
  mat3.projection(transform, w, h);
  mat3.translate(transform, transform, [x, y]);
  mat3.scale(transform, transform, [scale, scale]);
  mat3.translate(transform, transform, [w / 2, h / 2]);
  mat3.scale(transform, transform, [w / 2, h / 2]);
  mat3.scale(transform, transform, [1, -1]);
}
And is called by d3-zoom:
import { zoom as d3Zoom } from "d3-zoom";
import { select } from "d3-selection";
// d3-zoom interprets wheel/drag gestures and emits a {x, y, k} transform.
var zoom = d3Zoom();
// NOTE(review): re-requiring d3-selection on each call fetches the *current*
// event (a static `import { event }` would capture a stale binding under
// some bundlers) — confirm this is needed for your build setup. Also note
// `d3Event` is assigned without var/let, creating an implicit global.
d3Event = () => require("d3-selection").event;
select(options.canvas)
.call(zoom.on("zoom", () => {
var t = d3Event().transform
updateTransform(t.x, t.y, t.k)
}));
Here is the live demonstration with this solution.

Your transformation maps the model to the target view-port. If you want to correct for a translation due to scaling (say delta), which is a distance in target coordinates, you need to transform this delta in model coordinates. That is, determine the inverse of your transformation and calculate with that the correction in model coordinates
Simple example preparing the transformation for scaling around a center in view-port coordinates is given below:
// Apply a gl-matrix mat3 (column-major, 9 elements) to a 2D point,
// including the translation stored in the third column (a[6], a[7]).
function map (a, p) {
  const x = a[0] * p[0] + a[3] * p[1] + a[6];
  const y = a[1] * p[0] + a[4] * p[1] + a[7];
  return [x, y];
}
// Scale `transform` about a fixed point given in view-port coordinates,
// so whatever sits under `viewCenter1` stays put on screen.
function scale(transform, scale, viewCenter1) {
  var inverted = mat3.create();
  mat3.invert(inverted, transform);
  // Where the view-space pivot currently sits in model space.
  var modelCenter1 = map(inverted, viewCenter1);
  mat3.scale(transform, transform, [scale, scale]);
  // After scaling, the same model point lands somewhere else on screen.
  var viewCenter2 = map(transform, modelCenter1);
  var viewShift = [
    viewCenter1[0] - viewCenter2[0],
    viewCenter1[1] - viewCenter2[1]
  ];
  // Convert the screen-space shift into model space. map() also applies
  // the translation column, so subtract the image of the origin to keep
  // only the linear part. (The original subtracted the two result arrays
  // with `-`, which evaluates to NaN in JavaScript.)
  mat3.invert(inverted, transform);
  var shifted = map(inverted, viewShift);
  var origin = map(inverted, [0, 0]);
  var modelShift = [shifted[0] - origin[0], shifted[1] - origin[1]];
  // mat3.translate takes (out, a, v); the original passed only two
  // arguments. Post-multiplying by +modelShift moves the pivot back under
  // viewCenter1 (the original negated the shift, which doubles the drift).
  mat3.translate(transform, transform, modelShift);
}
// pass the transformation to webgl environment

Related

Why does fabric resize my object when I move it?

I am implementing snap to canvas edges on the object's scaling event.
In order to snap it to the left edge of the canvas, I have to set the left position to be equal to the canvas' left. I would also have to add to the width the amount I moved the object by.
The odd thing is, just from setting the new left position, sometimes my object shrinks/grows.
Is it because fabric is doing some kind of scaling under the hood? If I add the offset to my width to keep the object at the correct size, the odd behavior becomes more apparent.
// Max distance (px) from the canvas edge at which a shape should snap.
// NOTE(review): assigned without var/let — implicit global.
threshold = 5;
_fabric.on('object:scaling', handleScaled);
// Snap the right edge of the shape being scaled to the canvas edge.
function handleScaled(event) {
const shape = event.target && event.target.shape;
// Right edge of the shape in canvas coordinates.
const shapeSize = shape.left + shape.width;
const snapeAmnt = Math.abs(screenSize - shapeSize);
// NOTE(review): snapping usually fires when the gap is *within* the
// threshold; this fires when it *exceeds* it — confirm the comparison
// isn't inverted. Also, during 'object:scaling' fabric is applying
// scaleX/scaleY to the object, so mutating `width` here fights the
// scale factor already on the object — the likely cause of the
// shrink/grow behavior described above.
if (Math.abs(screenSize - shapeSize) > threshold) {
shape.width += snapeAmnt;
}
}

fabric.js - Rotate "Child" Object based on Parent w/o Grouping

Using fabric.js (1.7.11) , I am trying to have one object follow the rotation of another without grouping them, and while maintaining the relative position of the child object to the parent. So I believe I need to (1) set the rotation center of the "child" object to that of the "parent", then (2) as the parent object is rotating, apply that rotation delta to the child ( which may already start in a rotated state ). The end result is such that the child appears "stuck" to the parent ( like a sticky-note on a game-board, and you then rotate the game-board )
Here is the pseudo-code
canvas.on('object:rotating', canvasEvent_ObjectRotating);
// Pseudo-code: keep r2 "stuck" to r1 while r1 rotates.
function canvasEvent_ObjectRotating(e)
{
// 1. set r2's center of rotation = r1's center of rotation
// 2. r2.angle += ( delta of rotation of r1's angle since last event )
}
Here is a fiddle:
https://jsfiddle.net/emaybert/bkb5awj6/
Any help with 1 or 2 would be appreciated. Either how to get the angle delta in the object:rotating callback, and/or how to pivot a object around an arbitrary point. I've seen the reference to fabric.util.rotatePoint and an example of how to rotate a Line using it, but not an object, and couldn't make that mental transformation.
Is this something that you're trying to do?
https://jsfiddle.net/logie17/ofr8e6qd/1/
// Rotate r2 about r1's origin by the angle r1 moved since the last event,
// so r2 stays "stuck" to r1 without grouping them.
// `previousAngle` persists between events (declared outside this handler).
function canvasEvent_ObjectRotating(e)
{
  // Fixes over the original: origX/origY were implicit globals used only
  // here (now folded into a local), and an unused `center` local is gone.
  const anchor = new fabric.Point(r2.getCenterPoint().x, r2.getCenterPoint().y);
  if (!previousAngle) {
    previousAngle = r1.getAngle();
  }
  // Delta rotation of r1 since the previous event, in radians.
  const angle = fabric.util.degreesToRadians(r1.getAngle() - previousAngle);
  previousAngle = r1.getAngle();
  // Pivot r2's center around r1's top-left by the delta angle.
  const origin = new fabric.Point(r1.left, r1.top);
  const newCoords = fabric.util.rotatePoint(anchor, origin, angle);
  r2.set({ left: newCoords.x, top: newCoords.y }).setCoords();
}

anchoring a geometry/mesh to its corner for scaling three.js

I'm create a rectangle in three.js based on 2 coordinates. The first coordinate is the cell of the first user click, the second coordinate is where the user drags the cursor.
The rectangle that is being created is the correct size, however it 'grows' from its center, whereas I want it to 'grow' from the corner of the first user click. I've tried a few potential solutions to change the origin of the geometry but I haven't found a fix yet.
The demo can be see here - with the code below.
var startPoint = startPlace;
var endPoint = endPlace;
// The two remaining corners of the axis-aligned rectangle spanned by
// the drag (on the y = 0 plane).
var zIntersect = new THREE.Vector3(startPoint.x, 0, endPoint.z);
var xIntersect = new THREE.Vector3(endPoint.x, 0, startPoint.z);
var differenceZ = Math.abs(startPlace.z - zIntersect.z);
var differenceX = Math.abs(startPlace.x - xIntersect.x);
// Bug fix: the original called floorGeometryNew.rotateX() one line
// *before* the geometry was created — the hoisted var is undefined at
// that point, so the call throws. The rotate below is the real one.
var floorGeometryNew = new THREE.PlaneGeometry(differenceX, differenceZ);
floorGeometryNew.rotateX(-Math.PI / 2); // lie flat on the XZ plane
var x = startPoint.x;
var y = startPoint.y;
var z = startPoint.z;
var voxel = new THREE.Mesh(floorGeometryNew, tempMaterial);
voxel.position.set(x, y, z);
Center of your rectangle is in the middle between startPoint and endPoint and it's the average of them:
voxel.position.addVectors(startPoint, endPoint).divideScalar(2);
Approach 1. Without creating of a new geometry every time when you change size of a rectangle. The idea is:
Create a mesh of a plane once on start with a double-sided material
Set the first vertex of the plane's geometry with the current point of intersection
Track the point of intersection and apply its value to the last vertex of the plane's geometry and change the second and the third vertices accordingly to positions of the first and the last vertices
For example, we created a plane mesh newRect on mouseDown event and set its geometry's first vertex to the point of intersection which was on that moment:
newRectGeom.vertices[0].set(onPlanePoint.x, onPlanePoint.y + .5, onPlanePoint.z);
and then on mouseMove we get the point of intersection and apply its coordinate to the fourth (last) vertex, also we change values of vertices 1 and 2:
newRect.geometry.vertices[1].set(onPlanePoint.x, newRect.geometry.vertices[0].y, newRect.geometry.vertices[0].z);
newRect.geometry.vertices[2].set(newRect.geometry.vertices[0].x, newRect.geometry.vertices[0].y, onPlanePoint.z);
newRect.geometry.vertices[3].set(onPlanePoint.x, onPlanePoint.y + .5, onPlanePoint.z);
It's simpler than it sounds :)
jsfiddle example. Build mode off - OrbitControls are enabled; Build mode on - controls are disabled, you can draw rectangles.
Approach 2. Instead of controlling vertices we can control position and scaling of rectangle.
On mousedown event we'll set the startPoint with the point of intersection
startPoint.copy(onPlanePoint);
and then we'll find position and scaling of our rectangle:
newRect.position.addVectors(startPoint, onPlanePoint).divideScalar(2);
newRect.position.y = 0.5; // to avoid z-fight
newRect.scale.set(Math.abs(onPlanePoint.x - startPoint.x), 1, Math.abs(onPlanePoint.z - startPoint.z))
jsfiddle example. Visually and functionally it's the same as Approach 1. From my point of view, Approach 2 is simpler.
When you call
voxel.position.set(x, y, z);
then the center of your mesh is set to this position. So you have to add half of the length and half of the width of your rectangle to this position. You can get these values with a bounding box.
var bbox = new THREE.Box3();
bbox.setFromObject( voxel );
var val = bbox.max.x - bbox.min.x;

Matter.js calculating force needed

I'm trying to apply a force to an object, to get it to move at the angle that my mouse position makes relative to the object.
I have the angle
targetAngle = Matter.Vector.angle(myBody.pos, mouse.position);
Now I need to apply a force, to get the body to move along that angle.
What do I put in the values below for the applyForce method?
// applyForce(body, position, force)
Body.applyForce(myBody, {
x : ??, y : ??
},{
x:??, y: ?? // how do I derive this force??
});
What do I put in the x and y values here to get the body to move along the angle between the mouse and the body.
To apply a force to move your object in that direction you need to take the sine and cosine of the angle in radians. You'll want to just pass the object's position as the first vector to not apply torque (rotation).
var targetAngle = Matter.Vector.angle(myBody.pos, mouse.position);
var force = 10;
// Decompose the force along the angle. Bug fix: bare cos/sin are not
// globals in JavaScript — they must be called via Math.cos / Math.sin,
// otherwise this throws a ReferenceError. Passing the body's own
// position as the application point avoids introducing torque.
Body.applyForce(myBody, myBody.position, {
  x: Math.cos(targetAngle) * force,
  y: Math.sin(targetAngle) * force
});
Also if you need it, the docs on applyForce() are here.
(I understand this question is old, I'm more or less doing this for anyone who stumbles across it)
You can rely on the Matter.Vector module and use it to substract, normalize and multiply positions vectors:
var force = 10;
// Unit vector pointing from the body toward the mouse.
var toMouse = Matter.Vector.sub(mouse.position, myBody.position);
var direction = Matter.Vector.normalise(toMouse);
// Scale the direction up to the desired magnitude and apply it at the
// body's own position so no torque is introduced.
var forceVector = Matter.Vector.mult(direction, force);
Body.applyForce(myBody, myBody.position, forceVector);

How do I fix this image (pixel by pixel) distortion issue?

I am attempting to distort an image displayed within a canvas, so it looks like a "planet". However, I am struggling to find a way to deal with a distortion issue. The only solution coming to mind is to find a way to reduce the radiusDistance variable, the bigger it is. That said, I am unsure how to achieve this. Any suggestions?
Below is the math and objects I am currently using to achieve this:
// Polar-coordinate copies of every pixel, filled by pass 1 below.
// Fix: was assigned without var/let (implicit global).
var polarArray = [];

// A pixel expressed in polar coordinates, analogous to the pixel object.
var polar = function (a, r, c) {
  this.a = a;     // angle
  this.r = r;     // radius (distance from the center)
  this.color = c; // color object containing r, g, b, a components
};

// Pass 1: convert every pixel to polar coordinates and blank the image.
loopAllPixels(function (loop) {
  var pixel = loop.pixel;
  /* each pixel object is structured like this:
  pixel {
    x,
    y,
    color { r, g, b, a }
  }
  */
  // x drives the angle and y drives the radius, wrapping the image
  // into a disc ("planet") around the canvas center.
  var angle = pixel.x / 360 * Math.PI;
  var radiusDistance = pixel.y / Math.PI;
  polarArray.push(new polar(angle, radiusDistance, pixel.color));
  pixel.color = new colorRGBA(255, 255, 255, 255); // white background
});

// Pass 2: plot each polar pixel back into cartesian space around the
// canvas center. Bails out as soon as setpixel reports a failure.
// NOTE(review): forward-mapping source pixels like this leaves gaps
// ("ripping") where adjacent source pixels land more than one pixel
// apart; an inverse mapping (iterate destination pixels, sample the
// source) would avoid that.
for (var i = 0; i < polarArray.length; i++) {
  var p = polarArray[i];
  var x = (p.r * Math.cos(p.a)) + (canvas.width / 2);
  var y = (p.r * Math.sin(p.a)) + (canvas.height / 2);
  if (setpixel(x, y, p.color) == true) {
    continue;
  }
  break;
}
updatePixelsToContext(); // draw to canvas
And here is the effect it currently produces (note that I flip the image horizontally before applying it to the canvas, and in this example, I set the background with a magenta kind of colour, for better clarity of the issue):
Note:
I am intending for the warping effect, just not the "ripping" of the pixels, caused by not obtaining all the necessary pixel data required.
Also bear in mind that speed and efficiency aren't my priority here as of yet.

Categories