Three.js Raycast not producing intersections - javascript

I am trying to produce a list of intersections of an object from a raycast. I keep getting an empty array back from intersectObjects() despite the ray intersecting the object. Any help would be much appreciated. Please see the fiddle and the below code https://jsfiddle.net/4dcfsvL6/
// Question code: cast a ray from rayStart toward rayEnd and collect hits on `obj`.
// NOTE(review): xMin/xRange/yMin/yRange/zMin/zRange and `obj` are defined
// elsewhere in the fiddle; the points are re-centered into the scene's local space.
var rayStart = {
x: 1180.0475900351605,
y: 900.491932895052,
z: 50.01035975094526,
};
var rayEnd = {
x: 1162.0475900351605,
y: 930.491932895052,
z: 15.01035975094526,
};
// Ray origin, shifted into the re-centered scene space.
const rayStartV = new THREE.Vector3(
rayStart.x - xMin - (xRange / 2),
rayStart.y - yMin - (yRange / 2),
rayStart.z - zMin - (zRange / 2),
);
// Ray end point, shifted the same way (only used implicitly via directionV).
const rayEndV = new THREE.Vector3(
rayEnd.x - xMin - (xRange / 2),
rayEnd.y - yMin - (yRange / 2),
rayEnd.z - zMin - (zRange / 2),
);
// Direction from start to end. NOTE(review): this vector is NOT normalized,
// but THREE.Raycaster expects a unit-length direction — this is the cause of
// the empty result (see the accepted answer below).
const directionV = new THREE.Vector3(
rayEnd.x - rayStart.x,
rayEnd.y - rayStart.y,
rayEnd.z - rayStart.z,
);
// Make sure world matrices are current before intersecting.
scene.updateMatrixWorld();
var raycaster = new THREE.Raycaster(rayStartV, directionV);
// Visualize the ray so the miss can be inspected in the scene.
scene.add(new THREE.ArrowHelper(raycaster.ray.direction, raycaster.ray.origin, 300, 0xff0000) );
// `true` = recurse into obj's descendants when testing for intersections.
var intersects = raycaster.intersectObjects( [obj], true );
console.log(intersects)

You missed an important part in Raycaster's documentation:
direction — The direction vector that gives direction to the ray. Should be normalized.
You have to normalize the direction vector:
var raycaster = new THREE.Raycaster(rayStartV, directionV);
var raycaster = new THREE.Raycaster(rayStartV, directionV.normalize());

Related

Tiling Texture via Custom UVGenerator for ExtrudeGeometry in Three.js

I'm trying to create a textured surface along a path in three.js. I want the texture to tile/repeat along the direction of the path, like this example created in blender:
One way to accomplish this is to create each of the faces "by hand" and to apply a material/texture to each. That works out fine for simple paths (e.g. straight lines) but gets complicated for more elaborate paths.
One of the tools three.js provides is ExtrudeGeometry. Applying a texture to a mesh created this way looks like this with the default UV mapping:
So clearly I need to write a custom UVGenerator function to pass to ExtrudeGeometry. Unfortunately, this doesn't appear to be something for which there is documentation, and previous questions that show up in search results are either out of date (the most complete answers involve a different API for the UVGenerator functions: example) or have no answers (an example).
Here's a jsFiddle example that illustrates the undesired/default behavior. The code is reproduced below. The uvGenerator() function in it is functionally identical to the default three.js uvGenerator, THREE.WorldUVGenerator. It's in the example just to make it easier to fiddle with.
// Demo for "Tiling Texture via Custom UVGenerator for ExtrudeGeometry":
// loads a small tile texture, extrudes a thin ribbon along a curve, and
// applies a hand-written UVGenerator that is functionally identical to
// THREE.WorldUVGenerator (inlined here to make it easy to experiment with).
window.onload = function() {
  var camera, dataURI, renderer, scene, surface;
  var texture, uvGenerator;
  // requestAnimationFrame handle. Previously assigned without declaration
  // inside animate(), which created an implicit global.
  var animID;
  var width = 800, height = 600;

  // 32x32 PNG tile, inlined so the fiddle needs no external assets.
  dataURI = 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAACAAAAAgCAIAAAD8GO2jAAAACXBIWXMAAC4jAAAuIwF4pT92AAAAi0lEQVRIx83WQQ6AIAwEwMXwb/p0b4SI1CJsS08e2k0nKDGVUsCsDEBESOkikusTIx3ABXJdJEEN9BLsRbRRjoJdiEeIr2Ad0Y+7C1YQr4MRgn+I0UiQYBahNMcJ7Ai9LVRgQXw2RAv0HS2HdIBgtKnxHTtD0O9r/86PEbRbT11TdEHecukrldi/7zd8KDG/sdhdzgAAAABJRU5ErkJggg==';

  // Camera starts at the origin; update() points it at the surface each frame.
  function initCamera() {
    camera = new THREE.PerspectiveCamera(60, width / height, 1, 1000);
    camera.position.set(0, 0, 0);
    scene.add(camera);
  }

  // Three point lights roughly surrounding the scene.
  function initLights() {
    var lights = [
      new THREE.PointLight(0xffffff, 1, 0),
      new THREE.PointLight(0xffffff, 1, 0),
      new THREE.PointLight(0xffffff, 1, 0),
    ];
    lights[0].position.set(0, 200, 0);
    lights[1].position.set(100, 200, 100);
    lights[2].position.set(-100, -200, -100);
    scene.add(lights[0]);
    scene.add(lights[1]);
    scene.add(lights[2]);
  }

  function initRenderer() {
    var canvas = document.getElementById('canvas');
    renderer = new THREE.WebGLRenderer();
    renderer.setSize(width, height);
    canvas.appendChild(renderer.domElement);
  }

  function initScene() {
    scene = new THREE.Scene();
  }

  function initSurface() {
    surface = extrudeSurface();
    surface.position.set(50, -50, 50);
    scene.add(surface);
  }

  // Called once the texture has loaded (see loadTexture()).
  function generalInit() {
    initScene();
    initCamera();
    initLights();
    initSurface();
    initRenderer();
    animate();
  }

  // Custom UV generator passed to ExtrudeGeometry via extrudeCfg.UVGenerator.
  uvGenerator = {
    // UVs for the front/back caps: project vertex x/y straight into UV space.
    generateTopUV: function(geometry, vertices, idxA, idxB, idxC) {
      var ax = vertices[idxA * 3];
      var ay = vertices[(idxA * 3) + 1];
      var bx = vertices[idxB * 3];
      var by = vertices[(idxB * 3) + 1];
      var cx = vertices[idxC * 3];
      var cy = vertices[(idxC * 3) + 1];
      return [
        new THREE.Vector2(ax, ay),
        new THREE.Vector2(bx, by),
        new THREE.Vector2(cx, cy),
      ];
    },
    // UVs for the extruded side walls (quads, given as four vertex indices).
    // Picks the x/z or y/z projection depending on whether the edge A-B is
    // (nearly) horizontal in y — same heuristic as THREE.WorldUVGenerator.
    generateSideWallUV: function(geometry, vertices, idxA, idxB, idxC, idxD) {
      var ax, ay, az, bx, by, bz, cx, cy, cz;
      var dx, dy, dz, bb, bbx, bby, bbz;
      // NOTE(review): the bounding box computed here is never used below;
      // kept because the question text discusses it.
      geometry.computeBoundingBox();
      bb = geometry.boundingBox;
      bbx = bb.max.x - bb.min.x;
      bby = bb.max.y - bb.min.y;
      bbz = bb.max.z - bb.min.z;
      ax = vertices[idxA * 3];
      ay = vertices[(idxA * 3) + 1];
      az = vertices[(idxA * 3) + 2];
      bx = vertices[idxB * 3];
      by = vertices[(idxB * 3) + 1];
      bz = vertices[(idxB * 3) + 2];
      cx = vertices[idxC * 3];
      cy = vertices[(idxC * 3) + 1];
      cz = vertices[(idxC * 3) + 2];
      dx = vertices[idxD * 3];
      dy = vertices[(idxD * 3) + 1];
      dz = vertices[(idxD * 3) + 2];
      if (Math.abs(ay - by) < 0.01) {
        return [
          new THREE.Vector2(ax, 1 - az),
          new THREE.Vector2(bx, 1 - bz),
          new THREE.Vector2(cx, 1 - cz),
          new THREE.Vector2(dx, 1 - dz),
        ];
      } else {
        return [
          new THREE.Vector2(ay, 1 - az),
          new THREE.Vector2(by, 1 - bz),
          new THREE.Vector2(cy, 1 - cz),
          new THREE.Vector2(dy, 1 - dz),
        ];
      }
    },
  };

  // Builds the textured mesh: a thin rectangle swept along a 2-point curve.
  function extrudeSurface() {
    var extrudeCfg, geometry, material, mesh, size, shape, curve;
    size = 20;
    curve = new THREE.CatmullRomCurve3([
      new THREE.Vector3(-50, 0, -25),
      new THREE.Vector3(50, 0, 75),
    ]);
    extrudeCfg = {
      steps: 200,
      bevelEnabled: false,
      extrudePath: curve,
      UVGenerator: uvGenerator,
      //UVGenerator: THREE.WorldUVGenerator,
    };
    // Thin vertical rectangle cross-section that gets swept along the curve.
    shape = new THREE.Shape();
    shape.moveTo(0, 0);
    shape.lineTo(0, size);
    shape.lineTo(0.1, size);
    shape.lineTo(0.1, 0);
    shape.lineTo(0, 0);
    geometry = new THREE.ExtrudeGeometry(shape, extrudeCfg);
    geometry.computeBoundingBox();
    geometry.computeVertexNormals(true);
    material = new THREE.MeshBasicMaterial({ map: texture });
    mesh = new THREE.Mesh(geometry, material);
    // BUG FIX: a stray `new THREE.Vector3(0, 0, 0),` statement sat here; the
    // trailing comma immediately before `return` is a SyntaxError.
    return mesh;
  }

  function animate() {
    if (!scene)
      return;
    animID = requestAnimationFrame(animate);
    render();
    update();
  }

  function render() {
    if (!scene || !camera || !renderer)
      return;
    renderer.render(scene, camera);
  }

  // Spin the surface and keep the camera aimed at it.
  function update() {
    if (!scene || !camera || !surface)
      return;
    camera.lookAt(surface.position);
    //surface.rotation.x += 0.01;
    surface.rotation.y += 0.01;
  }

  // Loads the tile texture, then kicks off the rest of the initialization
  // from the load callback so the material never sees an undefined map.
  function loadTexture() {
    var loader = new THREE.TextureLoader();
    loader.load(dataURI,
      function(t) {
        t.wrapS = THREE.RepeatWrapping;
        t.wrapT = THREE.RepeatWrapping;
        t.magFilter = THREE.NearestFilter;
        t.minFilter = THREE.NearestFilter;
        t.repeat.set(1 / 20, 1 / 20);
        texture = t;
        generalInit();
      }
    );
  }

  loadTexture();
};
I've tried converting the vertex coords into a UV-ish 0-1 range by dividing by the size of the geometry's bounding box, but this does not produce the desired result. E.g. something of the form (here only shown for one of the return values):
generateSideWallUV: function(geometry, vertices, idxA, idxB, idxC, idxD) {
var ax = vertices[idxA * 3];
geometry.computeBoundingBox();
var bb = geometry.boundingBox;
var bbx = bb.max.x - bb.min.x;
var bbz = bb.max.z - bb.min.z;
...
return([
new THREE.Vector2(ax / bbx, 1 - (az / bbz),
...
]);
}
}
This approach not working makes sense, because what we care about is the position of the vertex as a fraction of the length of the extruded path, not as a fraction of the bounding box. But I don't know and have not been able to look up how to get that information out of the geometry, vertices, or anything else documented in THREE.
Any help/pointers would be appreciated.
Correction: Dividing the vertex coords by the size of the bounding box also doesn't work in this case because when ExtrudeGeometry calls generateSideWallUV() the bounding box is always
{"min":{"x":null,"y":null,"z":null},"max":{"x":null,"y":null,"z":null}}
...which means anything/(max.x - min.x) will always evaluate as infinite.
So now I'm even more confused about what we can hope to accomplish in a custom UV generator.
If I'm missing something obvious (for example, if I shouldn't be using ExtrudeGeometry for this sort of thing at all) I'd love to be educated.
Answering my own question:
Here's a link to a jsFiddle of the solution.
Here's the interesting parts. First, instead of using THREE.RepeatWrapping we use ClampToEdgeWrapping and get rid of the repeat setting:
t.wrapS = THREE.ClampToEdgeWrapping;
t.wrapT = THREE.ClampToEdgeWrapping;
//t.repeat.set(1 / 20, 1/20);
Then when we create the config object to pass to ExtrudeGeometry we set the steps to be exactly the number of faces we want. This is kinda a kludge because it seems like we shouldn't have to make a decision about the geometry of the object just to get our UVs right--there are plenty of cases where we might have bends/twists were we probably want more vertices to avoid folding/tearing/just looking wonky. But eh, I'm willing to worry about that later. Anyway, our updated extrudeCfg (in extrudeSurface() in the example) becomes:
extrudeCfg = {
steps: 20,
bevelEnabled: false,
extrudePath: curve,
UVGenerator: uvGenerator,
};
And then finally we re-write our UVGenerator. And since we're now clamping the texture and using a smaller number of faces, we can do something painfully simple like:
generateSideWallUV: function(geometry, vertices, idxA, idxB, idxC, idxD) {
return([
new THREE.Vector2(0, 0),
new THREE.Vector2(1, 0),
new THREE.Vector2(1, 1),
new THREE.Vector2(0, 1),
]);
}
...which is to say we just stretch a copy of the texture across each face (with the only complication being because the sides of the extruded geometry are a rectangle consisting of two triangles instead of a single quad).
Et voilà:

How to compute the size of the rectangle that is visible to the camera at a given coordinate? [duplicate]

This question already has answers here:
Three.js - Width of view
(2 answers)
Closed 5 years ago.
I made a small three.js app that moves a bunch of circles from the bottom of the canvas to the top:
// Question demo: circles rise from the bottom of the frustum and are removed
// at the top. visibleBox() is hard-coded; the question asks how to compute it.
let renderer, scene, light, circles, camera;
initialize();
animate();
function initialize() {
renderer = new THREE.WebGLRenderer({ alpha: true, antialias: true });
renderer.setSize(window.innerWidth, window.innerHeight);
document.body.appendChild(renderer.domElement);
scene = new THREE.Scene();
light = new THREE.AmbientLight();
scene.add(light);
circles = new THREE.Group();
scene.add(circles);
camera = new THREE.PerspectiveCamera(45, renderer.domElement.clientWidth / renderer.domElement.clientHeight, 1);
camera.position.z = circles.position.z + 500;
}
function animate() {
// Update each circle.
// Copy children first: circles.remove() mutates the list during iteration.
Array.from(circles.children).forEach(circle => {
if (circle.position.y < visibleBox(circle.position.z).max.y) {
circle.position.y += 4;
} else {
circles.remove(circle);
}
});
// Create a new circle.
let circle = new THREE.Mesh();
circle.geometry = new THREE.CircleGeometry(30, 30);
circle.material = new THREE.MeshToonMaterial({ color: randomColor(), transparent: true, opacity: 0.5 });
// Random depth between the far plane and 1/10th of it, in front of the camera.
circle.position.z = _.random(camera.position.z - camera.far, camera.position.z - (camera.far / 10));
circle.position.x = _.random(visibleBox(circle.position.z).min.x, visibleBox(circle.position.z).max.x);
circle.position.y = visibleBox(circle.position.z).min.y;
circles.add(circle);
// Render the scene.
renderer.render(scene, camera);
requestAnimationFrame(animate);
}
// Placeholder: should return the rectangle visible to the camera at depth z,
// but currently returns a fixed 2000x2000 box.
function visibleBox(z) {
return new THREE.Box2(
new THREE.Vector2(-1000, -1000),
new THREE.Vector2(1000, 1000)
);
}
// Random 6-hex-digit CSS color string, e.g. "#a3f091".
function randomColor() {
return `#${ _.sampleSize("abcdef0123456789", 6).join("")}`;
}
body {
width: 100%;
height: 100%;
overflow: hidden;
}
<script src="https://cdnjs.cloudflare.com/ajax/libs/three.js/87/three.js">
</script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/lodash.js/4.17.4/lodash.min.js">
</script>
I use the function visibleBox(z) to determine where to create and destroy each circle. I've hard-coded a return value for this function, but instead I would like it to compute the size of the rectangle that is visible to the camera at a given depth, z.
In other words, I want each circle to be created exactly at the bottom of the camera frustum (the bottom edge of the red rectangle in the image above), and destroyed exactly when it reaches the top of the frustum (the top edge of the red rectangle).
So, how do I compute this rectangle?
Change the function like this:
// Answer: half-height of the frustum at distance z is tan(fov/2) * z, and the
// half-width is that times the aspect ratio.
// NOTE(review): z is expected to be negative (in front of the camera in view
// space), so h/w come out negative and min/max are swapped accordingly — verify
// against the caller, which passes circle.position.z directly.
function visibleBox(z) {
var t = Math.tan( THREE.Math.degToRad( camera.fov ) / 2 )
var h = t * 2 * z;
var w = h * camera.aspect;
return new THREE.Box2(new THREE.Vector2(-w, h), new THREE.Vector2(w, -h));
}
And set up the circle position like this:
circle.position.z = _.random(-camera.near, -camera.far);
var visBox = visibleBox(circle.position.z)
circle.position.x = _.random(visBox.min.x, visBox.max.x);
circle.position.y = visBox.min.y;
Code demonstration:
// Answer demo: same scene as the question, but visibleBox() now derives the
// visible rectangle from the camera's fov/aspect at the given depth.
let renderer, scene, light, circles, camera;
initialize();
animate();
function initialize() {
renderer = new THREE.WebGLRenderer({ alpha: true, antialias: true });
renderer.setSize(window.innerWidth, window.innerHeight);
document.body.appendChild(renderer.domElement);
scene = new THREE.Scene();
light = new THREE.AmbientLight();
scene.add(light);
circles = new THREE.Group();
scene.add(circles);
camera = new THREE.PerspectiveCamera(45, renderer.domElement.clientWidth / renderer.domElement.clientHeight, 1);
camera.position.z = circles.position.z + 500;
}
function animate() {
// Update each circle.
// Copy children first: circles.remove() mutates the list during iteration.
Array.from(circles.children).forEach(circle => {
if (circle.position.y < visibleBox(circle.position.z).max.y) {
circle.position.y += 4;
} else {
circles.remove(circle);
}
});
// Create a new circle.
let circle = new THREE.Mesh();
circle.geometry = new THREE.CircleGeometry(30, 30);
circle.material = new THREE.MeshToonMaterial({ color: randomColor(), transparent: true, opacity: 0.5 });
// Random depth in view space (negative z is in front of the camera).
circle.position.z = _.random(-(camera.near+(camera.far-camera.near)/5), -camera.far);
var visBox = visibleBox(circle.position.z)
circle.position.x = _.random(visBox.min.x, visBox.max.x);
circle.position.y = visBox.min.y;
circles.add(circle);
// Render the scene.
renderer.render(scene, camera);
requestAnimationFrame(animate);
}
// Visible rectangle at depth z: half-height = tan(fov/2) * z, half-width =
// half-height * aspect. z is negative, hence the swapped-looking min/max.
function visibleBox(z) {
var t = Math.tan( THREE.Math.degToRad( camera.fov ) / 2 )
var h = t * 2 * z;
var w = h * camera.aspect;
return new THREE.Box2(new THREE.Vector2(-w, h), new THREE.Vector2(w, -h));
}
// Random 6-hex-digit CSS color string.
function randomColor() {
return `#${ _.sampleSize("abcdef0123456789", 6).join("")}`;
}
<script src="https://cdnjs.cloudflare.com/ajax/libs/three.js/87/three.js">
</script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/lodash.js/4.17.4/lodash.min.js">
</script>
Explanation
The projection matrix describes the mapping from 3D points of a scene, to 2D points of the viewport. It transforms from eye space to the clip space, and the coordinates in the clip space are transformed to the normalized device coordinates (NDC) by dividing with the w component of the clip coordinates. The NDC are in range (-1,-1,-1) to (1,1,1).
In the perspective projection the relation between the depth value and the z distance to the camera is not linear.
A perspective projection matrix looks like this:
r = right, l = left, b = bottom, t = top, n = near, f = far
2*n/(r-l) 0 0 0
0 2*n/(t-b) 0 0
(r+l)/(r-l) (t+b)/(t-b) -(f+n)/(f-n) -1
0 0 -2*f*n/(f-n) 0
From this follows the relation between the z coordinate in view space and the normalized device coordinates z component and the depth.:
z_ndc = ( -z_eye * (f+n)/(f-n) - 2*f*n/(f-n) ) / -z_eye
depth = (z_ndc + 1.0) / 2.0
The reverse operation looks like this:
n = near, f = far
z_ndc = 2.0 * depth - 1.0;
z_eye = 2.0 * n * f / (f + n - z_ndc * (f - n));
If the perspective projection matrix is known this can be done as follows:
A = prj_mat[2][2]
B = prj_mat[3][2]
z_eye = B / (A + z_ndc)
See How to render depth linearly in modern OpenGL with gl_FragCoord.z in fragment shader?
The relation between the projected area in view space and the Z coordinate of the view space is linear. It depends on the field of view angle and the aspect ratio.
The normalized device size can be transformed to a size in view space like this:
aspect = w / h
tanFov = tan( fov_y * 0.5 );
size_x = ndx_size_x * (tanFov * aspect) * z_eye;
size_y = ndx_size_y * tanFov * z_eye;
if the perspective projection matrix is known and the projection is symmetrically (the line of sight is in the center of the viewport and the field of view is not displaced), this can be done as follows:
size_x = ndx_size_x / (prj_mat[0][0] * z_eye);
size_y = ndx_size_y / (prj_mat[1][1] * z_eye);
See Field of view + Aspect Ratio + View Matrix from Projection Matrix (HMD OST Calibration)
Note each position in normalized device coordinates can be transformed to view space coordinates by the inverse projection matrix:
mat4 inversePrjMat = inverse( prjMat );
vec4 viewPosH = inversePrjMat * vec4( ndc_x, ndc_y, 2.0 * depth - 1.0, 1.0 );
vec3 viewPos = viewPosH.xyz / viewPosH.w;
See How to recover view space position given view space depth value and ndc xy
This means the unprojected rectangle with a specific depth, can be calculated like this:
vec4 viewLowerLeftH = inversePrjMat * vec4( -1.0, -1.0, 2.0 * depth - 1.0, 1.0 );
vec4 viewUpperRightH = inversePrjMat * vec4( 1.0, 1.0, 2.0 * depth - 1.0, 1.0 );
vec3 viewLowerLeft = viewLowerLeftH.xyz / viewLowerLeftH.w;
vec3 viewUpperRight = viewUpperRightH.xyz / viewUpperRightH.w;

Three.js shape from random points

I have a N number of random points (in this case 20), with a X,Y and Z constrains.
How can I create ANY (preferably closed) shape (using Three.js library) , given and starting only from N random points.
There are probably many variants, please share yours.
// Question setup: scatter 20 random points in a 1..80 cube as red THREE.Points.
// NOTE(review): `Program` and `scene` come from the asker's environment, not
// shown here — this snippet cannot run standalone.
var program = new Program(reset,step)
program.add('g',false)
function reset() {
scene.clear()
scene.add(new THREE.GridHelper(100,1))
}
function step() {
}
program.startup()
var numpoints = 20;
var dots = []; //If you want to use for other task
for (var i = 0 ; i < numpoints ; i++){
var x = Math.random() * (80 - 1) + 1 //Math.random() * (max - min) + min
var y = Math.random() * (80 - 1) + 1
var z = Math.random() * (80 - 1) + 1
// One single-vertex geometry per dot; collected in `dots` for later use.
var dotGeometry = new THREE.Geometry();
dots.push(dotGeometry);
dotGeometry.vertices.push(new THREE.Vector3( x, y, z));
var dotMaterial = new THREE.PointsMaterial( { size: 3, sizeAttenuation: false, color: 0xFF0000 } );
var dot = new THREE.Points( dotGeometry, dotMaterial );
scene.add(dot);
}
Triangulation, Voronoi, I don't care, just show me ANY ideas you have, will help me learn a lot!
You can create a polyhedron which is the convex hull of a set of 3D points by using a pattern like so:
var points = [
new THREE.Vector3( 100, 0, 0 ),
new THREE.Vector3( 0, 100, 0 ),
...
new THREE.Vector3( 0, 0, 100 )
];
var geometry = new THREE.ConvexGeometry( points );
var material = new THREE.MeshPhongMaterial( {
color: 0xff0000,
shading: THREE.FlatShading
} );
mesh = new THREE.Mesh( geometry, material );
scene.add( mesh );
You must include the following in your project
<script src="/examples/js/geometries/ConvexGeometry.js"></script>
three.js r.78

3d coordinates to 2d screen position

Trying to create interactive GUI for my app using threejs.
I've found this tutorial:
http://zachberry.com/blog/tracking-3d-objects-in-2d-with-three-js/
which explains exactly what I need, but uses some old release.
// Question version: project a 3D object's position to 2D pixel coordinates.
// Logs the result instead of returning it.
function getCoordinates(element, camera) {
var p, v, percX, percY, left, top;
// NOTE(review): leftover from the old API the tutorial used — `projector`
// is never read below; Vector3.project(camera) replaced it.
var projector = new THREE.Projector();
// this will give us position relative to the world
p = element.position.clone();
console.log('project p', p);
// projectVector will translate position to 2d
v = p.project(camera);
console.log('project v', v);
// translate our vector so that percX=0 represents
// the left edge, percX=1 is the right edge,
// percY=0 is the top edge, and percY=1 is the bottom edge.
percX = (v.x + 1) / 2;
percY = (-v.y + 1) / 2;
// scale these values to our viewport size
left = percX * window.innerWidth;
top = percY * window.innerHeight;
console.log('2d coords left', left);
console.log('2d coords top', top);
}
I had to change the projector to vector.project and matrixWorld.getPosition().clone() to position.clone().
passing position (0,0,0) results with v = {x: NaN, y: NaN, z: -Infinity}, which is not what expected
camera which im passing is camera = new THREE.PerspectiveCamera( 75, window.innerWidth / window.innerHeight, 1, 10000 )
// Answer version: converts the element's origin to world space, projects it to
// normalized device coordinates, then maps NDC [-1,1] to canvas pixels.
// NOTE(review): relies on a `renderer` global from the surrounding fiddle.
function getCoordinates( element, camera ) {
var screenVector = new THREE.Vector3();
// (0,0,0) in element-local space -> world space.
element.localToWorld( screenVector );
// World space -> NDC.
screenVector.project( camera );
var posx = Math.round(( screenVector.x + 1 ) * renderer.domElement.offsetWidth / 2 );
var posy = Math.round(( 1 - screenVector.y ) * renderer.domElement.offsetHeight / 2 );
console.log( posx, posy );
}
http://jsfiddle.net/L0rdzbej/122/

How to fix texture on rounded corner plane in three.js

I made a rounded corner plane by merging circle and plane geometries.
The rendered version with a flat color works well, but the textured gets chopped up.
http://jsfiddle.net/28usyw12/
I suspect I have to somehow add some hinting or define how the texture should be rendered, but I don't really know how.
# Question demo (CoffeeScript): builds a rounded-corner plane by merging four
# quarter-circle geometries with two overlapping plane geometries.
# NOTE(review): the original indentation was lost when this page was scraped —
# CoffeeScript is indentation-sensitive, so this listing will not run as-is.
scene = new THREE.Scene();
camera = new THREE.PerspectiveCamera( 90, 1, 0.1, 1000 );
WIDTH = HEIGHT = 500
renderer = new THREE.WebGLRenderer({ antialias: true });
renderer.setClearColor( 0xffffff );
renderer.setSize(WIDTH, HEIGHT);
light = new THREE.PointLight(0xffffff);
light.position.set(0,0,100);
scene.add(light);
# 628 × 697
camera.position.z = 5;
document.body.appendChild(renderer.domElement);
meshA = new THREE.Mesh()
# One quarter circle (CircleGeometry with a thetaStart offset) per corner,
# translated into place, plus two rectangles forming the cross shape between.
makeRoundedCornerPlane = (offset=2, radius=2, smooth=16) ->
geometry = new THREE.Geometry()
offset = (offset - radius) / 2
radius = radius / 4
smooth = 16
cornerA = new THREE.CircleGeometry(radius, smooth, (Math.PI * 2 / 4) * 1, Math.PI * 2 / 4);
matrixA = new THREE.Matrix4();
matrixA.makeTranslation(0-offset, 0+offset, 0)
geometry.merge(cornerA, matrixA)
cornerB = new THREE.CircleGeometry(radius, smooth, (Math.PI * 2 / 4) * 0, Math.PI * 2 / 4);
matrixB = new THREE.Matrix4();
matrixB.makeTranslation(0+offset, 0+offset, 0)
geometry.merge(cornerB, matrixB)
cornerC = new THREE.CircleGeometry(radius, smooth, (Math.PI * 2 / 4) * 3, Math.PI * 2 / 4);
matrixC = new THREE.Matrix4();
matrixC.makeTranslation(0+offset, 0-offset, 0)
geometry.merge(cornerC, matrixC)
cornerD = new THREE.CircleGeometry(radius, smooth, (Math.PI * 2 / 4) * 2, Math.PI * 2 / 4);
matrixD = new THREE.Matrix4();
matrixD.makeTranslation(0-offset, 0-offset, 0)
geometry.merge(cornerD, matrixD)
planeA = new THREE.PlaneGeometry((offset+radius) * 2, offset * 2)
geometry.merge(planeA)
planeB = new THREE.PlaneGeometry(offset * 2, (offset+radius) * 2)
geometry.merge(planeB)
return geometry
# meshA: flat red version (renders fine).
meshA.geometry = makeRoundedCornerPlane(2, 0.5)
meshA.material = new THREE.MeshBasicMaterial
side:THREE.DoubleSide
color: new THREE.Color("rgb(255,0,0)")
#wireframe: true
# meshB: textured version — this is the one whose texture gets chopped up.
meshB = new THREE.Mesh()
meshB.geometry = makeRoundedCornerPlane(2, 0.5)
meshB.material = new THREE.MeshBasicMaterial
side:THREE.DoubleSide
color: new THREE.Color("rgb(255,0,0)")
#wireframe: true
texture = new THREE.ImageUtils.loadTexture("/img/initializing.png");
texture.wrapS = texture.wrapT = THREE.RepeatWrapping;
meshB.material.map = texture
meshB.material.color = new THREE.Color(0xffffff)
meshB.position.x = -1
meshB.position.y = -1
scene.add(meshA)
scene.add(meshB)
# Slowly rotate both meshes so the artifact is visible from all angles.
update = ->
# meshA.scale.x += 0.001
# meshA.scale.y += 0.001
meshA.rotation.z += 0.002
meshA.rotation.y += 0.002
meshB.rotation.z += 0.002
meshB.rotation.y += 0.002
render = ->
renderer.render(scene, camera)
tick = ->
window.requestAnimationFrame(tick)
update()
render()
tick()
You'll need to fix the UV coordinates of each face in the mesh. UV Coordinates are what tells GL how to map the image onto the face. The top left corner should have UV Coordinate (0,0) and the bottom right should have (1,1).
One solution is to just iterate through each face and give it the normalized UV coordinates based on its position in space, which could work for a simple example like yours. You basically calculate the bounding box of your shape to normalize the vertex coordinates, then create 3 UV Coordinates for each face (since each face is a triangle).
Try this: http://jsfiddle.net/MacroMeez/e84y9bbq/1/
# Answer (CoffeeScript): rebuilds the geometry's per-face UVs by normalizing
# each vertex's x/y position against the geometry's bounding box, so the
# texture stretches once across the whole shape.
# NOTE(review): indentation was flattened by the scrape; CoffeeScript needs the
# function/loop bodies indented for this to compile.
remapUVs = (geo) ->
geo.computeBoundingBox()
min = geo.boundingBox.min
max = geo.boundingBox.max
# offset shifts vertices so the box's min corner maps to UV (0,0);
# size scales so the max corner maps to UV (1,1).
offset = new THREE.Vector2(0 - min.x, 0 - min.y)
size = new THREE.Vector2(max.x - min.x, max.y - min.y)
# Remove the old UVs that were incorrect
geo.faceVertexUvs[0] = []
for face, i in geo.faces
v1 = geo.vertices[face.a]
v2 = geo.vertices[face.b]
v3 = geo.vertices[face.c]
# Push on a new UV based on its position inside the shape
geo.faceVertexUvs[0].push [
new THREE.Vector2((v1.x + offset.x)/size.x, (v1.y + offset.y)/size.y),
new THREE.Vector2((v2.x + offset.x)/size.x, (v2.y + offset.y)/size.y),
new THREE.Vector2((v3.x + offset.x)/size.x, (v3.y + offset.y)/size.y)
]
# Tell three.js to re-upload the UV buffer on the next render.
geo.uvsNeedUpdate = true
based off the code found THREE.js generate UV coordinate
If you're familiar at all with blender or other 3d software, you can also create and map the mesh there then import it and not deal with this.

Categories