I'm trying to create a textured surface along a path in three.js. I want the texture to tile/repeat along the direction of the path, like this example created in blender:
One way to accomplish this is to create each of the faces "by hand" and to apply a material/texture to each. That works out fine for simple paths (e.g. straight lines) but gets complicated for more elaborate paths.
One of the tools three.js provides is ExtrudeGeometry. Applying a texture to a mesh created this way looks like this with the default UV mapping:
So clearly I need to write a custom UVGenerator function to pass to ExtrudeGeometry. Unfortunately, this doesn't appear to be something for which there is documentation, and previous questions that show up in search results are either out of date (the most complete answers involve a different API for the UVGenerator functions: example) or have no answers (an example).
Here's a jsFiddle example that illustrates the undesired/default behavior. The code is reproduced below. The uvGenerator() function in it is functionally identical to the default three.js uvGenerator, THREE.WorldUVGenerator. It's in the example just to make it easier to fiddle with.
window.onload = function() {
  var camera, dataURI, renderer, scene, surface;
  var texture, uvGenerator;
  // Declared here so animate() no longer creates an implicit global.
  var animID;
  var width = 800, height = 600;

  // Small PNG inlined as a data URI so the demo needs no external assets.
  dataURI = 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAACAAAAAgCAIAAAD8GO2jAAAACXBIWXMAAC4jAAAuIwF4pT92AAAAi0lEQVRIx83WQQ6AIAwEwMXwb/p0b4SI1CJsS08e2k0nKDGVUsCsDEBESOkikusTIx3ABXJdJEEN9BLsRbRRjoJdiEeIr2Ad0Y+7C1YQr4MRgn+I0UiQYBahNMcJ7Ai9LVRgQXw2RAv0HS2HdIBgtKnxHTtD0O9r/86PEbRbT11TdEHecukrldi/7zd8KDG/sdhdzgAAAABJRU5ErkJggg==';

  // Camera at the origin; update() re-aims it at the surface each frame.
  function initCamera() {
    camera = new THREE.PerspectiveCamera(60, width / height, 1, 1000);
    camera.position.set(0, 0, 0);
    scene.add(camera);
  }

  // Three point lights around the scene (only relevant if a lit material
  // is used; the surface below uses MeshBasicMaterial).
  function initLights() {
    var lights;
    lights = [
      new THREE.PointLight(0xffffff, 1, 0),
      new THREE.PointLight(0xffffff, 1, 0),
      new THREE.PointLight(0xffffff, 1, 0),
    ];
    lights[0].position.set(0, 200, 0);
    lights[1].position.set(100, 200, 100);
    lights[2].position.set(-100, -200, -100);
    scene.add(lights[0]);
    scene.add(lights[1]);
    scene.add(lights[2]);
  }

  // Creates the WebGL renderer and attaches it inside #canvas.
  function initRenderer() {
    var canvas;
    canvas = document.getElementById('canvas');
    renderer = new THREE.WebGLRenderer();
    renderer.setSize(width, height);
    canvas.appendChild(renderer.domElement);
  }

  function initScene() {
    scene = new THREE.Scene();
  }

  // Builds the extruded surface and offsets it so the camera orbit-view
  // (update() rotates it) is easier to see.
  function initSurface() {
    surface = extrudeSurface();
    surface.position.set(50, -50, 50);
    scene.add(surface);
  }

  // Init order matters: the scene must exist before anything is added to it,
  // and the renderer must exist before animate() starts the render loop.
  function generalInit() {
    initScene();
    initCamera();
    initLights();
    initSurface();
    initRenderer();
    animate();
  }

  // Custom UV generator for ExtrudeGeometry. Functionally identical to the
  // default THREE.WorldUVGenerator; reproduced here to make it easy to
  // experiment with.
  uvGenerator = {
    // Caps: project the triangle's x/y coordinates directly into UV space.
    generateTopUV: function(geometry, vertices, idxA, idxB, idxC) {
      var ax, ay, bx, by, cx, cy;
      // `vertices` is a flat [x0, y0, z0, x1, ...] array, hence idx * 3.
      ax = vertices[idxA * 3];
      ay = vertices[(idxA * 3) + 1];
      bx = vertices[idxB * 3];
      by = vertices[(idxB * 3) + 1];
      cx = vertices[idxC * 3];
      cy = vertices[(idxC * 3) + 1];
      return([
        new THREE.Vector2(ax, ay),
        new THREE.Vector2(bx, by),
        new THREE.Vector2(cx, cy),
      ]);
    },
    // Side walls: pick the projection plane depending on whether the quad's
    // edge A-B runs (roughly) parallel to the x axis.
    generateSideWallUV: function(geometry, vertices,
                                 idxA, idxB, idxC, idxD) {
      var ax, ay, az, bx, by, bz, cx, cy, cz;
      var dx, dy, dz, bb, bbx, bby, bbz;
      // NOTE(review): the bounding box computed here is unused below, and per
      // the correction later in this post it is all-null at this point anyway.
      geometry.computeBoundingBox();
      bb = geometry.boundingBox;
      bbx = bb.max.x - bb.min.x;
      bby = bb.max.y - bb.min.y;
      bbz = bb.max.z - bb.min.z;
      ax = vertices[idxA * 3];
      ay = vertices[(idxA * 3) + 1];
      az = vertices[(idxA * 3) + 2];
      bx = vertices[idxB * 3];
      by = vertices[(idxB * 3) + 1];
      bz = vertices[(idxB * 3) + 2];
      cx = vertices[idxC * 3];
      cy = vertices[(idxC * 3) + 1];
      cz = vertices[(idxC * 3) + 2];
      dx = vertices[idxD * 3];
      dy = vertices[(idxD * 3) + 1];
      dz = vertices[(idxD * 3) + 2];
      if(Math.abs(ay - by) < 0.01) {
        return([
          new THREE.Vector2(ax, 1 - az),
          new THREE.Vector2(bx, 1 - bz),
          new THREE.Vector2(cx, 1 - cz),
          new THREE.Vector2(dx, 1 - dz),
        ]);
      } else {
        return([
          new THREE.Vector2(ay, 1 - az),
          new THREE.Vector2(by, 1 - bz),
          new THREE.Vector2(cy, 1 - cz),
          new THREE.Vector2(dy, 1 - dz),
        ]);
      }
    },
  }

  // Extrudes a thin rectangular cross-section along a 2-point Catmull-Rom
  // curve and wraps it in a textured mesh.
  function extrudeSurface() {
    var extrudeCfg, geometry, material, mesh, size, shape, curve;
    size = 20;
    curve = new THREE.CatmullRomCurve3([
      new THREE.Vector3(-50, 0, -25),
      new THREE.Vector3(50, 0, 75),
    ]);
    extrudeCfg = {
      steps: 200,
      bevelEnabled: false,
      extrudePath: curve,
      UVGenerator: uvGenerator,
      //UVGenerator: THREE.WorldUVGenerator,
    };
    // A 0.1-wide, `size`-tall rectangle: effectively a ribbon cross-section.
    shape = new THREE.Shape();
    shape.moveTo(0, 0);
    shape.lineTo(0, size);
    shape.lineTo(0.1, size);
    shape.lineTo(0.1, 0);
    shape.lineTo(0, 0);
    geometry = new THREE.ExtrudeGeometry(shape, extrudeCfg);
    geometry.computeBoundingBox();
    geometry.computeVertexNormals(true);
    material = new THREE.MeshBasicMaterial({ map: texture });
    mesh = new THREE.Mesh(geometry, material);
    // Removed a stray `new THREE.Vector3(0, 0, 0),` line that was here in the
    // original: a dangling comma expression followed by `return` is a
    // SyntaxError, so the script would not even parse.
    return(mesh);
  }

  function animate() {
    if(!scene)
      return;
    animID = requestAnimationFrame(animate);
    render();
    update();
  }

  function render() {
    if(!scene || !camera || !renderer)
      return;
    renderer.render(scene, camera);
  }

  // Keeps the camera aimed at the surface while slowly spinning it.
  function update() {
    if(!scene || !camera || !surface)
      return;
    camera.lookAt(surface.position);
    //surface.rotation.x += 0.01;
    surface.rotation.y += 0.01;
  }

  // Loads the texture first, then kicks off the whole demo from the
  // load callback so `texture` is guaranteed to be set before use.
  function loadTexture() {
    var loader;
    loader = new THREE.TextureLoader();
    loader.load(dataURI,
      function(t) {
        t.wrapS = THREE.RepeatWrapping;
        t.wrapT = THREE.RepeatWrapping;
        t.magFilter = THREE.NearestFilter;
        t.minFilter = THREE.NearestFilter;
        t.repeat.set(1/20, 1/20);
        texture = t;
        generalInit();
      }
    );
  }

  loadTexture();
};
I've tried converting the vertex coords into a UV-ish 0-1 range by dividing by the size of the geometry's bounding box, but this does not produce the desired result. E.g. something of the form (here only shown for one of the return values):
generateSideWallUV: function(geometry, vertices, idxA, idxB, idxC, idxD) {
var ax = vertices[idxA * 3];
geometry.computeBoundingBox();
var bb = geometry.boundingBox;
var bbx = bb.max.x - bb.min.x;
var bbz = bb.max.z - bb.min.z;
...
return([
new THREE.Vector2(ax / bbx, 1 - (az / bbz),
...
]);
}
}
This approach not working makes sense, because what we care about is the position of the vertex as a fraction of the length of the extruded path, not as a fraction of the bounding box. But I don't know and have not been able to look up how to get that information out of the geometry, vertices, or anything else documented in THREE.
Any help/pointers would be appreciated.
Correction: Dividing the vertex coords by the size of the bounding box also doesn't work in this case because when ExtrudeGeometry calls generateSideWallUV() the bounding box is always
{"min":{"x":null,"y":null,"z":null},"max":{"x":null,"y":null,"z":null}}
...which means anything/(max.x - min.x) will always evaluate as infinite.
So now I'm even more confused about what we can hope to accomplish in a custom UV generator.
If I'm missing something obvious (for example, if I shouldn't be using ExtrudeGeometry for this sort of thing at all) I'd love to be educated.
Answering my own question:
Here's a link to a jsFiddle of the solution.
Here's the interesting parts. First, instead of using THREE.RepeatWrapping we use ClampToEdgeWrapping and get rid of the repeat setting:
// Clamp instead of repeat: each side-wall face will get exactly one full
// copy of the texture, so the per-texture repeat factor is no longer needed.
t.wrapS = THREE.ClampToEdgeWrapping;
t.wrapT = THREE.ClampToEdgeWrapping;
//t.repeat.set(1 / 20, 1/20);
Then when we create the config object to pass to ExtrudeGeometry we set the steps to be exactly the number of faces we want. This is kind of a kludge, because it seems like we shouldn't have to make a decision about the geometry of the object just to get our UVs right--there are plenty of cases where we might have bends/twists where we probably want more vertices to avoid folding/tearing/just looking wonky. But eh, I'm willing to worry about that later. Anyway, our updated extrudeCfg (in extrudeSurface() in the example) becomes:
// Fixed typo from the original post: `extrudgeCfg` -> `extrudeCfg`, matching
// the variable name used everywhere else in the example.
extrudeCfg = {
  // One step per texture copy: with ClampToEdgeWrapping each side-wall quad
  // gets exactly one full copy of the texture.
  steps: 20,
  bevelEnabled: false,
  extrudePath: curve,
  UVGenerator: uvGenerator,
};
And then finally we re-write our UVGenerator. And since we're now clamping the texture and using a smaller number of faces, we can do something painfully simple like:
// With the texture clamped and steps == number of quads along the path, each
// side-wall quad simply gets the full 0..1 texture stretched across it.
// (Counter-clockwise winding: A=bottom-left, B=bottom-right, C=top-right,
// D=top-left in UV space.)
generateSideWallUV: function(geometry, vertices, idxA, idxB, idxC, idxD) {
return([
new THREE.Vector2(0, 0),
new THREE.Vector2(1, 0),
new THREE.Vector2(1, 1),
new THREE.Vector2(0, 1),
]);
}
...which is to say we just stretch a copy of the texture across each face (with the only complication being because the sides of the extruded geometry are a rectangle consisting of two triangles instead of a single quad).
Et voilĂ :
I'm not exactly sure how I can do this. I read a lot about raycasting, and that seems to be good for finding points that intersect with something, but in this case I just want it to interpolate the 2D mouse coordinates to the 3D point exactly where the mouse clicks, regardless of scale, rotation, whether or not there's an object there, etc.
One method I've thought of but not approached would be making an invisible plane that is parallel to the camera, always oriented upright and always intersecting the y axis. Then use a raycaster to hit the plane, draw as needed, then delete the plane. Seems like a silly way to do this though.
At the moment I have a method that works pretty well but it has some issues when the line gets further away from the origin, or the camera gets zoomed
In this photo I drew two lines from two different perspectives. The vertical line is what it looks like when the camera is level with the x and z axes and I draw a straight line down the y axis, while the horizontal line is what happens when I scribble with the camera facing down.
https://i.imgur.com/f8qw5xV.png
As you can see, it seems to use the distance to the camera to make this calculation, so the further the distance from the camera, the more distortion there is in the calculation. How can I get rid of this distortion?
source: https://github.com/AskAlice/mandala-3d-threejs
live demo: https://askalice.me/mandala/
Here is the relevant code:
js/content.js#112
// Maps a mouse event to a 3D point on the ray from the camera through the
// cursor, placed at the same distance from the camera as the world origin.
//
// NOTE(review): because the distance along the ray is fixed, the returned
// points lie on a *sphere* centered on the camera rather than on a plane --
// which is exactly the distortion described above. Intersecting a
// camera-facing THREE.Plane with a Raycaster ray (as in the answer below)
// avoids it.
function get3dPointZAxis(event)
{
    // `var` added: the original assigned an implicit global `camPos`.
    var camPos = camera.position;
    // Cursor in normalized device coordinates, unprojected at the far plane
    // to get a world-space point along the view ray.
    var mv = new THREE.Vector3((event.clientX / window.innerWidth) * 2 - 1, -(event.clientY/window.innerHeight) * 2 + 1, 1).unproject(camera);
    var m2 = new THREE.Vector3(0, 0, 0); // world origin
    var pos = camPos.clone();
    // Step from the camera along the cursor direction by |origin - camera|.
    pos.add(mv.sub(camPos).normalize().multiplyScalar(m2.distanceTo(camPos)));
    return pos;
}
I used information from two stackoverflow posts to come up with this, and it has the issues I've described.
Firstly, this post shows how to draw and convert it to the z axis. It is flat. But I had a lot of trouble trying to make that work in three dimensions.
How to draw a line segment at run time using three.js
Then I used information in the below post to at least get it parallel to the camera on the x-z axis, like so: https://i.imgur.com/E9AQNpH.png
Moving objects parallel to projection plane in three.js
That option with THREE.Plane() and THREE.Raycaster().ray.intersectPlane():
// Shared scratch objects, reused on every call so getPoint() allocates
// nothing per mouse event; the picked position is written into `point`.
var raycaster = new THREE.Raycaster();
var mouse = new THREE.Vector2();
var plane = new THREE.Plane();
var planeNormal = new THREE.Vector3();
var point = new THREE.Vector3();
// Projects the mouse event onto a plane through scene.position that faces
// the camera, storing the 3D intersection in `point`.
function getPoint(event){
// Convert client pixels to normalized device coordinates in [-1, 1].
mouse.x = ( event.clientX / window.innerWidth ) * 2 - 1;
mouse.y = - ( event.clientY / window.innerHeight ) * 2 + 1;
// NOTE(review): the plane normal is camera.position relative to the origin,
// so this assumes the camera orbits (looks at) scene.position -- confirm if
// the camera target ever moves.
planeNormal.copy(camera.position).normalize();
plane.setFromNormalAndCoplanarPoint(planeNormal, scene.position);
raycaster.setFromCamera(mouse, camera);
raycaster.ray.intersectPlane(plane, point);
}
Run the code snippet, click the "draw" checkbox to set it as checked, move your mouse randomly (without mouse down), click the checkbox again, rotate the scene with mousedown. All points are on the same plane.
// Demo: standard three.js boilerplate -- scene, perspective camera 10 units
// back, full-window renderer, and orbit controls for rotating the view.
var scene = new THREE.Scene();
var camera = new THREE.PerspectiveCamera(60, window.innerWidth / window.innerHeight, 1, 1000);
camera.position.set(0, 0, 10);
var renderer = new THREE.WebGLRenderer();
renderer.setSize(window.innerWidth, window.innerHeight);
document.body.appendChild(renderer.domElement);
var controls = new THREE.OrbitControls(camera, renderer.domElement);
// Reusable picking objects; getPoint() writes the result into `point`.
var raycaster = new THREE.Raycaster();
var mouse = new THREE.Vector2();
var plane = new THREE.Plane();
var planeNormal = new THREE.Vector3();
var point = new THREE.Vector3();
document.addEventListener("mousedown", onMouseDown, false);
document.addEventListener("mousemove", onMouseMove, false);
// Intersects the mouse ray with a camera-facing plane through scene.position.
function getPoint(event) {
mouse.x = (event.clientX / window.innerWidth) * 2 - 1;
mouse.y = -(event.clientY / window.innerHeight) * 2 + 1;
planeNormal.copy(camera.position).normalize();
plane.setFromNormalAndCoplanarPoint(planeNormal, scene.position);
raycaster.setFromCamera(mouse, camera);
raycaster.ray.intersectPlane(plane, point);
}
// Drops a small yellow wireframe sphere marker at the last picked point.
function setPoint() {
var sphere = new THREE.Mesh(new THREE.SphereBufferGeometry(.125, 4, 2), new THREE.MeshBasicMaterial({
color: "yellow",
wireframe: true
}));
sphere.position.copy(point);
scene.add(sphere);
}
// `draw` is the checkbox element (resolved by id); markers are only placed
// while it is checked.
function onMouseDown(event) {
getPoint(event);
if (draw.checked) setPoint();
}
function onMouseMove(event) {
getPoint(event);
if (draw.checked) setPoint();
}
render();
function render() {
requestAnimationFrame(render);
renderer.render(scene, camera);
}
/* Full-window canvas: hide scrollbars and drop the default page margin. */
body {
overflow: hidden;
margin: 0;
}
<!-- three.js core and OrbitControls from the official site. -->
<script src="https://threejs.org/build/three.min.js"></script>
<script src="https://threejs.org/examples/js/controls/OrbitControls.js"></script>
<!-- Overlay checkbox that toggles draw mode in the script above. -->
<div style="position:absolute;">
<input id="draw" type="checkbox">
<label for="draw" style="color: white;">draw</label>
</div>
I'm trying to implement an object, flying around a position (POI - Point Of Interest) and facing it. when you press WASD, you can change the POI's rotation. A and D -> change y-axis, W and S for the x-axis.
As you can see in the demo (http://jsbin.com/yodusufupi) the y-axis rotation is done based on local rotation of the helper object, but the x-axis is calculated in global space. Setting the rotation is done via: helper.rotation.set(rotX, rotY, 0);.
What am I doing wrong? I want both rotations to be done in local space.
Thx!
PS: minimal working example (the rotation around Y seems correct while x-axis is calculated globally)
// Minimal repro: `helper` (the POI axes) sits at the origin and `cam` (a
// smaller axes object standing in for the flying object) is a child pushed
// 4 units out along helper's local +Z, so rotating `helper` swings `cam`
// around the POI.
var scene = new THREE.Scene();
var DEG_2_RAD = Math.PI / 180;
var rotX = -45 * DEG_2_RAD;
var rotY = 45 * DEG_2_RAD;
var helper = new THREE.AxisHelper(2);
var cam = new THREE.AxisHelper(1);
helper.add(cam);
scene.add(helper);
cam.translateZ(4);
var camera = new THREE.PerspectiveCamera( 75, window.innerWidth / window.innerHeight, 1, 1000 );
camera.position.z = 10;
camera.position.y = 10;
camera.lookAt(new THREE.Vector3(0, 0, 0));
var renderer = new THREE.WebGLRenderer();
renderer.setSize(window.innerWidth, window.innerHeight);
document.body.appendChild( renderer.domElement );
// Euler angles are applied in a fixed axis order, which is why the two
// rotations here do not both behave like "local" rotations -- see the
// rotateX()/rotateY() answer below.
helper.rotation.set(rotX, rotY, 0);
renderer.render( scene, camera );
You need to understand how Euler angles work in three.js. For that, see this answer.
For your use case you can use these methods:
object.rotateX( angle ); // angle is in radians
object.rotateY( angle );
three.js r.77
I have a working threejs example in which particles are rendered via canvas on to a spherical object using the program function, like this:
// Canvas-renderer particle material: `program` is called with a 2D context
// per particle; this one fills a unit circle (PI2 is defined elsewhere).
var material = new THREE.ParticleCanvasMaterial( {
color: 0xffffff,
program: function ( context ) {
context.beginPath();
context.arc( 0, 0, 1, 0, PI2, true );
context.closePath();
context.fill();
}
} );
// 1000 particles on a spherical shell of radius 600-610: random direction,
// normalized, then scaled outward. initParticle() is defined elsewhere.
for ( var i = 0; i < 1000; i ++ ) {
particle = new THREE.Particle( material );
particle.position.x = Math.random() * 2 - 1;
particle.position.y = Math.random() * 2 - 1;
particle.position.z = Math.random() * 2 - 1;
particle.position.normalize();
particle.position.multiplyScalar( Math.random() * 10 + 600 );
initParticle( particle, i * 10 );
scene.add( particle );
}
However, I'd like to switch to the webGL renderer in order for things to run a little faster, but it doesn't have a program option. It seems like maybe I need to use map, but I'm not sure how. Anybody have any ideas on how to adjust this code to accomplish the same thing with the webGL renderer.
Here's an example that shows how to procedurally create textures for your particles using the WebGLRenderer: http://jsfiddle.net/y18ag3dq/
However, if you wish to use your own texture, simply load whatever texture you want into the map field:
// NOTE(review): ImageUtils.loadTexture was deprecated in later three.js
// releases in favor of THREE.TextureLoader -- confirm the target version.
var texture = THREE.ImageUtils.loadTexture('/images/my_texture.png');
// Do NOT set this flag. Explanation provided by WestLangley in the comments
// texture.needsUpdate=true;
var material = new THREE.ParticleBasicMaterial({
// ...
map: texture,
// add all relevant fields as described in the link above
});
// geometry should contain your particle vertices
var particle = new THREE.ParticleSystem(geometry, material);
scene.add(particle);
I'm trying to create event handler on the particles, with alert message on a sphere, aiming always on the camera.
Something similar to this demo ( and making it to work on IE 9+ )
here is my code..
http://jsfiddle.net/praveenv29/cVnKV/11/
var renderer, projector;
var mouseX, mouseY, stats, container;
var objects = [];
var INTERSECTED;
// NOTE(review): `renderer` was declared twice in the original; kept once.
// `controls` and `materialr` are declared so init()/generateGeometry() no
// longer create implicit globals.
var camera, scene, material, mesh, cont;
var controls, materialr;
var w1 = 960;
var h1 = 700;
var halfWidth = w1 / 2;
var halfHeigth = h1 / 2;

// Builds the DOM container, camera, controls, renderer, and a point light,
// then delegates scene population to generateGeometry().
function init() {
    cont = document.createElement('div');
    cont.id = "cont";
    document.body.appendChild(cont);

    camera = new THREE.PerspectiveCamera(75, w1 / h1, 1, 10000);
    camera.position.set(90, 90, -200);

    scene = new THREE.Scene();
    scene.add(camera);

    // The original constructed an OrbitControls instance and immediately
    // overwrote it with TrackballControls; only the one actually used and
    // configured below is kept.
    controls = new THREE.TrackballControls(camera, cont);
    controls.rotateSpeed = 0.8;
    controls.zoomSpeed = 1.2;
    controls.panSpeed = 2.5;
    controls.noZoom = true;
    controls.noPan = true;
    controls.staticMoving = false;
    controls.target.set(0, 0, 0);
    controls.keys = [95, 90, 84];

    renderer = new THREE.CanvasRenderer();
    // Black wireframe fallback material (unused by the sphere after the
    // texture fix below, kept for compatibility with other callers).
    material = new THREE.MeshBasicMaterial({
        color: 0x000000,
        wireframe: true
    });
    renderer.setSize(w1, h1);
    cont.appendChild(renderer.domElement);

    generateGeometry();

    var light = new THREE.PointLight(0xffffff);
    light.position.set(10, 0, 0);
    scene.add(light);
}

function animate() {
    requestAnimationFrame(animate);
    render();
}

function render() {
    controls.update();
    renderer.render(scene, camera);
}

// Scatters 20 textured "pin" planes on a radius-125 shell around a textured
// earth sphere at the origin.
function generateGeometry() {
    var axis = new THREE.AxisHelper();
    scene.add(axis);

    for (var i = 0; i < 20; i++) {
        // `new` removed: ImageUtils.loadTexture is called as a plain function.
        var gloom = THREE.ImageUtils.loadTexture('map_pin.png');
        materialr = new THREE.MeshBasicMaterial({
            map: gloom,
            overdraw: true,
            side: THREE.DoubleSide
        });
        var geometry = new THREE.PlaneGeometry(15, 15, 2, 2);
        var cube = new THREE.Mesh(geometry, materialr);
        // Random unit direction pushed out to radius 125.
        cube.position.x = Math.random() * 2 - 1;
        cube.position.y = Math.random() * 2 - 1;
        cube.position.z = Math.random() * 2 - 1;
        cube.position.normalize();
        cube.position.multiplyScalar(125);
        cube.rotation.x = cube.position.x / Math.PI; //57.38
        // NOTE(review): 360 / Math.PI * 2 is ~229 radians -- this looks like
        // a degrees/radians mix-up; kept as-is pending clarified intent.
        cube.rotation.y = 360 / Math.PI * 2;
        objects.push(cube);
        scene.add(cube);
    }

    // earth
    var texture = THREE.ImageUtils.loadTexture('world.jpg');
    var materials = new THREE.MeshBasicMaterial({
        map: texture,
        overdraw: true
    });
    var cone = new THREE.SphereGeometry(120, 35, 35);
    // Bug fix: the sphere previously used the black wireframe `material`,
    // so the textured `materials` created just above was never applied.
    var coneMesh = new THREE.Mesh(cone, materials);
    coneMesh.position.y = 0;
    coneMesh.rotation.set(0, 0, 0);
    scene.add(coneMesh);
}

init();
animate();
It is pretty unclear what you are looking for; your demo link seems unrelated...
Are you trying to make cubes appear camera normal (always facing the camera)? If so, you'll need logic to re-orient them to re-face the camera anytime the user moves the camera view, as I see you are also setting up the TrackballControls, which actually move the camera, not the scene. This means a user can change the camera view of your scene, and items you want facing the camera need to be re-orientated. That re-orientation logic needs to be placed inside your render() function.
BTW, to get an object to always face the camera:
Define it such that when not rotated, it is facing the direction you
want;
Place the object into your scene via any method you want,
including whatever hierarchical rotations or translations you want to use to get them positioned where you want; (Note, they may not be facing
where you want at this point, but that is okay at this step);
Request from three.js that it calculate the local to world space
transformations for your scene. After that, each object's world
transformation matrix contains the concatenated rotations, and
translations that transform each object from local space to world
space.
Go into each object's local-to-world transform matrix and
replace the rotation 3x3 matrix components with the identity
transformation { [1 0 0] [0 1 0] [0 0 1] }. This effectively wipes
out the rotations in world space, making all the objects you do this
to always face the camera.