Three.js, WebGL: display sprite on top of panorama - javascript

I'm building a simple panorama webapp, where a user will be able to turn in circles and click on sprites in the loaded panorama. I have this working in CSS3D using three.js; now I need to get it to work in WebGL. I've loaded the panorama, and a sprite must be getting added to the scene since there are no errors, but I can't see the sprite.
How do I make it visible?
Here's the relevant code (omitting all the standard event functions and the rendering loop):
function init() {
    var container, mesh;
    container = document.getElementById('container');
    camera = new THREE.PerspectiveCamera(75, window.innerWidth / window.innerHeight, 1, 1100);
    camera.target = new THREE.Vector3(0, 0, 0);
    scene = new THREE.Scene();
    var sides = [
        { url: '/assets/posx.jpg' },
        { url: '/assets/negx.jpg' },
        { url: '/assets/posy.jpg' },
        { url: '/assets/negy.jpg' },
        { url: '/assets/posz.jpg' },
        { url: '/assets/negz.jpg' }
    ];
    var k = 8;
    for (var i = 0; i < sides.length; i++) {
        var side = sides[i];
        var geometry = new THREE.SphereGeometry(5, k, k);
        k += 8;
        geometry.applyMatrix(new THREE.Matrix4().makeScale(-1, 1, 1));
        var material = new THREE.MeshBasicMaterial({
            map: THREE.ImageUtils.loadTexture(side.url)
        });
        mesh = new THREE.Mesh(geometry, material);
        scene.add(mesh);
    }
    var map = THREE.ImageUtils.loadTexture("/assets/soap.png");
    material = new THREE.SpriteMaterial({ map: map, color: 0xffffff, fog: false });
    var sprite = new THREE.Sprite(material);
    sprite.position.x = 128;
    sprite.position.y = 128;
    sprite.position.z = 128;
    scene.add(sprite);
    renderer = new THREE.WebGLRenderer();
    renderer.setPixelRatio(window.devicePixelRatio);
    renderer.setSize(window.innerWidth, window.innerHeight);
    document.body.appendChild(renderer.domElement);
    document.addEventListener('mousedown', onDocumentMouseDown, false);
    document.addEventListener('mousewheel', onDocumentMouseWheel, false);
    document.addEventListener('touchstart', onDocumentTouchStart, false);
    document.addEventListener('touchmove', onDocumentTouchMove, false);
    window.addEventListener('resize', onWindowResize, false);
}
Any help appreciated!

If I'm not mistaken, your sphere has a radius of 5 while your sprite is positioned way beyond the sphere at (128, 128, 128). You need to either increase the sphere radius or move the sprite to a position that lies within the radius.
var geometry = new THREE.SphereGeometry(256, k, k);
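Alternatively, keep the radius-5 sphere and move the sprite inside it. A minimal sketch, reusing the SpriteMaterial from the question (the exact position and scale values are assumptions; anything well inside the radius works):

// Place the sprite inside the radius-5 sphere instead of at (128, 128, 128)
var sprite = new THREE.Sprite(material);
sprite.position.set(2, 1, -2);  // length ≈ 3, safely inside the sphere
sprite.scale.set(0.5, 0.5, 1);  // sprite scale is in world units, so shrink it for the small scene
scene.add(sprite);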

Related

How does three.js render video according to spherical UV?

I have a DASH streaming video. According to its title, it is a 3×3 grid. Now I can splice the complete video together with three.js:
// 3*3 PlaneGeometry
var geometry = new THREE.PlaneGeometry(400, 200, 3, 3);
const video1 = document.getElementById("videos1");
...................
...................
const texture1 = new THREE.VideoTexture(video3);
texture1.magFilter = THREE.NearestFilter;
texture1.minFilter = THREE.NearestFilter;
...................
...................
var geometryfaces = geometry.faces;
for (let i = 0; i < geometryfaces.length; i++) {
    const faces = geometryfaces[i];
    materials[i] = new THREE.MeshBasicMaterial({
        map: textures[i],
    });
}
var uv = [
    new THREE.Vector2(0, 0),
    new THREE.Vector2(0, 1),
    new THREE.Vector2(1, 1),
    new THREE.Vector2(1, 0),
];
// Set the texture coordinates
for (var m = 0; m < geometryfaces.length; m += 2) {
    geometry.faces[m].materialIndex = faceId;
    console.log(geometry.faces);
    geometry.faces[m + 1].materialIndex = faceId;
    geometry.faceVertexUvs[0][m] = [uv[2], uv[3], uv[1]];
    geometry.faceVertexUvs[0][m + 1] = [uv[3], uv[0], uv[1]];
    faceId++;
}
var bufferGeometry = new THREE.BufferGeometry().fromGeometry(geometry);
var material = new THREE.MeshFaceMaterial(materials);
var mesh = new THREE.Mesh(bufferGeometry, material); // mesh model object
This way I can get a full flat video, but the video is panoramic and I need to render it onto a sphere, and I don't know much about the UVs of a sphere.
I need help, thank you!
Three.js SphereGeometry automatically creates the UV mapping for you. Since the image you're using is equirectangular, you can just map the image onto the inside of the sphere for the effect you want:
var camera, scene, renderer, vp, sphere;
vp = new THREE.Vector2(window.innerWidth, window.innerHeight);

// Init WebGL stuff
function init() {
    // WebGL Renderer
    renderer = new THREE.WebGLRenderer({ antialias: true });
    renderer.setSize(vp.x, vp.y);
    renderer.domElement.classList.add("canvasWebGL");
    // append to DOM
    document.body.appendChild(renderer.domElement);
    // scene
    scene = new THREE.Scene();
    scene.background = new THREE.Color(0xe1e1e1);
    // camera
    camera = new THREE.PerspectiveCamera(55, vp.x / vp.y, 0.05, 100);
    // Sphere radius is large enough to surround the camera
    let sphereGeom = new THREE.SphereGeometry(20, 128, 128);
    // Just use your equirect image as the texture
    // And render the INSIDE of the sphere, not the outside
    const tex = new THREE.TextureLoader().load("https://i.imgur.com/1VECsLy.jpg");
    let sphereMat = new THREE.MeshBasicMaterial({
        map: tex,
        side: THREE.BackSide
    });
    sphere = new THREE.Mesh(sphereGeom, sphereMat);
    scene.add(sphere);
}

function animate(s) {
    sphere.rotation.set(Math.cos(s / 3000), s / 3000, 0);
    renderer.render(scene, camera);
    requestAnimationFrame(animate);
}

init();
animate(0);
<script src="https://cdn.jsdelivr.net/npm/three@0.123/build/three.js"></script>
Read more about sphere geometry here: https://threejs.org/docs/?q=material#api/en/geometries/SphereGeometry
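Since your source is a video rather than a still image, the same mapping should work with a THREE.VideoTexture swapped in for the image texture. A minimal sketch, assuming a <video> element that carries the full equirectangular frame (the videos1 id comes from your snippet):

// Assumes <video id="videos1"> contains the full equirectangular video frame
const video = document.getElementById("videos1");
const videoTex = new THREE.VideoTexture(video);
videoTex.minFilter = THREE.LinearFilter; // avoid mipmap trouble with non-power-of-two video

const sphereMat = new THREE.MeshBasicMaterial({
    map: videoTex,
    side: THREE.BackSide // still rendering the inside of the sphere
});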

three.js raycast on skinned mesh

I'm trying to raycast a skinned mesh (a known issue) after some skeleton changes (there is no animation on it, so performance isn't a priority).
The tricky part, as I imagine it, is:
Load the skinned mesh and add it to the scene
Change the positions of specific bones of the loaded mesh
Copy the geometry of the transformed mesh (maybe from the buffer?)
Create a new mesh (a kind of imitation "ghost" mesh) from the copied geometry
Raycast against the ghost mesh, giving it a material with opacity 0.0
This list should work, but I'm stuck for the third day on point 3, because I can't get the transformed vertices after skinning.
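(As an aside: the computation point 3 calls for is the standard linear-blend-skinning sum that the GPU shader applies. A minimal CPU-side sketch under the r98 Geometry conventions used below; skinnedVertex is an illustrative helper, not part of the original code, and skeleton.boneMatrices is only populated once the skeleton has updated, e.g. after a first render:)

// Compute the skinned position of vertex `index`, mirroring skinning_vertex.glsl:
// result = bindMatrixInverse * sum_i( weight_i * boneMatrix_i * bindMatrix * position )
function skinnedVertex(mesh, index) {
    var indices = mesh.geometry.skinIndices[index];  // THREE.Vector4 of bone indices
    var weights = mesh.geometry.skinWeights[index];  // THREE.Vector4 of bone weights
    var position = mesh.geometry.vertices[index].clone().applyMatrix4(mesh.bindMatrix);
    var result = new THREE.Vector3();
    var temp = new THREE.Vector3();
    var boneMatrix = new THREE.Matrix4();
    ['x', 'y', 'z', 'w'].forEach(function(axis) {
        var weight = weights[axis];
        if (weight === 0) return;
        boneMatrix.fromArray(mesh.skeleton.boneMatrices, indices[axis] * 16);
        temp.copy(position).applyMatrix4(boneMatrix).multiplyScalar(weight);
        result.add(temp);
    });
    return result.applyMatrix4(mesh.bindMatrixInverse);
}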
var scene, camera, renderer, mesh, ghostMesh, particles;
var raycaster = new THREE.Raycaster();
var raycasterMeshHelper;

initScene();
render();

function initScene() {
    scene = new THREE.Scene();
    camera = new THREE.PerspectiveCamera(75, window.innerWidth / window.innerHeight, 0.1, 200);
    camera.position.set(20, 7, 3);
    renderer = new THREE.WebGLRenderer({
        antialias: true
    });
    renderer.setPixelRatio(window.devicePixelRatio);
    renderer.setSize(window.innerWidth, window.innerHeight);
    document.body.appendChild(renderer.domElement);
    window.addEventListener('resize', onWindowResize, false);
    var orbit = new THREE.OrbitControls(camera, renderer.domElement);

    // lights stuff
    var ambientLight = new THREE.AmbientLight(0xffffff, 0.3);
    scene.add(ambientLight);
    var lights = [];
    lights[0] = new THREE.PointLight(0xffffff, 1, 0);
    lights[1] = new THREE.PointLight(0xffffff, 1, 0);
    lights[2] = new THREE.PointLight(0xffffff, 1, 0);
    lights[0].position.set(0, 200, 0);
    lights[1].position.set(100, 200, 100);
    lights[2].position.set(-100, -200, -100);
    scene.add(lights[0]);
    scene.add(lights[1]);
    scene.add(lights[2]);

    // raycaster helper mesh
    var raycasterMaterial = new THREE.MeshBasicMaterial({
        color: 0xdddddd,
        opacity: 0.7,
        transparent: true
    });
    var geometrySphere = new THREE.SphereGeometry(0.5, 16, 16);
    raycasterMeshHelper = new THREE.Mesh(geometrySphere, raycasterMaterial);
    raycasterMeshHelper.visible = false;
    scene.add(raycasterMeshHelper);
    renderer.domElement.addEventListener('mousemove', onMouseMove, false);

    // model loading
    var loader = new THREE.JSONLoader();
    loader.load("https://raw.githubusercontent.com/visus100/skinnedTests/master/js_fiddle/skinned_mesh.json", function(geometry) {
        var meshMaterial = new THREE.MeshStandardMaterial({
            color: 0x00df15,
            skinning: true
        });
        mesh = new THREE.SkinnedMesh(geometry, meshMaterial);
        scene.add(mesh);
        var skeleton = new THREE.SkeletonHelper(mesh);
        scene.add(skeleton);

        // some experimental skeletal changes
        mesh.skeleton.bones[1].rotation.z += 0.10;
        mesh.skeleton.bones[2].rotation.x += -0.65;
        mesh.skeleton.bones[3].rotation.y += -0.45;
        mesh.skeleton.bones[3].position.x += 0.11;

        // update matrices
        mesh.updateMatrix();
        mesh.geometry.applyMatrix(mesh.matrix);
        mesh.updateMatrixWorld(true);

        // create ghost mesh geometry
        createGhostMesh();

        // create point cloud helper from buffer geometry
        var bufferGeometry = new THREE.BufferGeometry().fromGeometry(mesh.geometry);
        var particlesMaterial = new THREE.PointsMaterial({
            color: 0xff00ea,
            size: 0.07,
            sizeAttenuation: false
        });
        particles = new THREE.Points(bufferGeometry, particlesMaterial);
        particles.sortParticles = true;
        scene.add(particles);
    });
}

function createGhostMesh() {
    var geometryForGhostMesh = new THREE.Geometry();
    // push vertices and other stuff to geometry
    for (var i = 0; i < mesh.geometry.vertices.length; i++) {
        var temp = new THREE.Vector3(mesh.geometry.vertices[i].x, mesh.geometry.vertices[i].y, mesh.geometry.vertices[i].z);
        geometryForGhostMesh.vertices.push(temp);
        //////
        // here should be the code that calculates the translated vertices of the
        // skinned mesh and adds them to geometryForGhostMesh
        //////
        geometryForGhostMesh.skinIndices.push(mesh.geometry.skinIndices[i]);
        geometryForGhostMesh.skinWeights.push(mesh.geometry.skinWeights[i]);
    }
    for (var i = 0; i < mesh.geometry.faces.length; i++) {
        geometryForGhostMesh.faces.push(mesh.geometry.faces[i]);
    }
    // create material and add to scene
    var ghostMaterial = new THREE.MeshBasicMaterial({
        color: 0xff0000,
        opacity: 0.1,
        transparent: true,
        skinning: true
    });
    ghostMesh = new THREE.Mesh(geometryForGhostMesh, ghostMaterial);
    scene.add(ghostMesh);
}

function onWindowResize() {
    camera.aspect = window.innerWidth / window.innerHeight;
    camera.updateProjectionMatrix();
    renderer.setSize(window.innerWidth, window.innerHeight);
}

function render() {
    requestAnimationFrame(render);
    renderer.render(scene, camera);
}

function onMouseMove(event) {
    // raycaster for ghostMesh
    if (ghostMesh) {
        var rect = renderer.domElement.getBoundingClientRect();
        var mouseX = ((event.clientX - rect.left) / rect.width) * 2 - 1;
        var mouseY = -((event.clientY - rect.top) / rect.height) * 2 + 1;
        raycaster.setFromCamera(new THREE.Vector2(mouseX, mouseY), camera);
        var intersects = raycaster.intersectObject(ghostMesh);
        if (intersects.length > 0) {
            raycasterMeshHelper.visible = true;
            raycasterMeshHelper.position.set(0, 0, 0);
            raycasterMeshHelper.lookAt(intersects[0].face.normal);
            raycasterMeshHelper.position.copy(intersects[0].point);
        } else {
            raycasterMeshHelper.visible = false;
        }
    }
}
body {
    margin: 0px;
    background-color: #000000;
    overflow: hidden;
}
<script src="https://cdnjs.cloudflare.com/ajax/libs/three.js/98/three.min.js"></script>
<script src="https://threejs.org/examples/js/controls/OrbitControls.js"></script>
Please note that I need this in a three.js build of r98 or lower, because of the rest of my code (not included here), and it uses only bone skinning, no morph targets.
I've tried to write this clearly; if anyone wants to help, please do, because I'm not a pro.
I'm not including my approach to calculating the transformed geometry because I failed too hard at it.
I dug a lot into this problem, e.g. issue #6440, and as of today it is still not fixed.
There are existing methods to work around it, e.g. https://jsfiddle.net/fnjkeg9x/1/, but after several attempts I failed, and my conclusion is that the stormtrooper example relies on morph targets, which could be why I failed.
EDIT:
I created the next codepen based on these topics: get-the-global-position-of-a-vertex-of-a-skinned-mesh and Stormtrooper.
I decided to start with a simple box to build a bounding mesh around the skinned, transformed mesh.
The result is a fail because it gives 0 at this line:
boneMatrix.fromArray(skeleton.boneMatrices, si * 16);
Here I compare the stormtrooper with my example's console output: screenshot
Codepen with new progress: https://codepen.io/donkeyLuck0/pen/XQbBMQ
My other idea is to apply the bones of the loaded model and its rig as morph targets programmatically (but I don't even know if that is possible or how to figure it out).
Found an example of an animated model:
Sketchfab animation with points tracking
This is super late to the game, but here's an example of GPU picking that works with skinned meshes and doesn't require a separate picking scene to keep in sync with your main scene, nor does it require the user to manage custom materials:
https://github.com/bzztbomb/three_js_gpu_picking
The trick that allows for easy material overriding and scene re-use is here:
https://github.com/bzztbomb/three_js_gpu_picking/blob/master/gpupicker.js#L58
Proper support for raycasting against skinned meshes was added in https://github.com/mrdoob/three.js/pull/19178, in revision 116.
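If upgrading is an option, a minimal sketch of the modern path (assumes r116 or newer; mouseNdc and skinnedMesh are placeholders for your pointer coordinates in normalized device space and your THREE.SkinnedMesh):

// With r116+ the raycaster accounts for bone transforms, so no ghost mesh is needed
const raycaster = new THREE.Raycaster();
raycaster.setFromCamera(mouseNdc, camera);           // mouseNdc: THREE.Vector2 in [-1, 1]
const hits = raycaster.intersectObject(skinnedMesh); // works directly on the posed mesh
if (hits.length > 0) {
    console.log('hit at', hits[0].point);
}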
You can use GPU picking to "pick" a skinned object. It won't give you a position, though.
Note: GPU picking requires rendering every pickable object with a custom material. How you implement that is up to you. This article does it by making 2 scenes. That might not be as useful for skinned objects.
Unfortunately, three.js provides no way to override materials per object, AFAICT. Here's an example that replaces the materials on the pickable objects before rendering for picking and then restores them after. You would also need to hide any objects you don't want picked.
const renderer = new THREE.WebGLRenderer({
    antialias: true,
    canvas: document.querySelector('canvas'),
});
const scene = new THREE.Scene();
const camera = new THREE.PerspectiveCamera(75, 2, 0.1, 200);
camera.position.set(20, 7, 3);
const orbit = new THREE.OrbitControls(camera, renderer.domElement);

// lights stuff
const ambientLight = new THREE.AmbientLight(0xffffff, 0.3);
scene.add(ambientLight);
const lights = [];
lights[0] = new THREE.PointLight(0xffffff, 1, 0);
lights[1] = new THREE.PointLight(0xffffff, 1, 0);
lights[2] = new THREE.PointLight(0xffffff, 1, 0);
lights[0].position.set(0, 200, 0);
lights[1].position.set(100, 200, 100);
lights[2].position.set(-100, -200, -100);
scene.add(lights[0]);
scene.add(lights[1]);
scene.add(lights[2]);

// raycaster helper mesh
const raycasterMaterial = new THREE.MeshBasicMaterial({
    color: 0xdddddd,
    opacity: 0.7,
    transparent: true
});
const geometrySphere = new THREE.SphereGeometry(0.5, 16, 16);
const raycasterMeshHelper = new THREE.Mesh(geometrySphere, raycasterMaterial);
raycasterMeshHelper.visible = false;
scene.add(raycasterMeshHelper);

// model loading
const pickableObjects = [];
const loader = new THREE.JSONLoader();
loader.load("https://raw.githubusercontent.com/visus100/skinnedTests/master/js_fiddle/skinned_mesh.json", (geometry) => {
    const meshMaterial = new THREE.MeshStandardMaterial({
        color: 0x00df15,
        skinning: true
    });
    const mesh = new THREE.SkinnedMesh(geometry, meshMaterial);
    scene.add(mesh);
    const id = pickableObjects.length + 1;
    pickableObjects.push({
        mesh,
        renderingMaterial: meshMaterial,
        // the object's id is encoded into the emissive color for the picking pass
        pickingMaterial: new THREE.MeshPhongMaterial({
            skinning: true,
            emissive: new THREE.Color(id),
            color: new THREE.Color(0, 0, 0),
            specular: new THREE.Color(0, 0, 0),
            //map: texture,
            //transparent: true,
            //side: THREE.DoubleSide,
            //alphaTest: 0.5,
            blending: THREE.NoBlending,
        }),
    });

    // some experimental skeletal changes
    mesh.skeleton.bones[1].rotation.z += 0.10;
    mesh.skeleton.bones[2].rotation.x += -0.65;
    mesh.skeleton.bones[3].rotation.y += -0.45;
    mesh.skeleton.bones[3].position.x += 0.11;

    // update matrices
    mesh.updateMatrix();
    mesh.geometry.applyMatrix(mesh.matrix);
    mesh.updateMatrixWorld(true);
});

class GPUPickHelper {
    constructor() {
        // create a 1x1 pixel render target
        this.pickingTexture = new THREE.WebGLRenderTarget(1, 1);
        this.pixelBuffer = new Uint8Array(4);
    }
    pick(cssPosition, scene, camera) {
        const { pickingTexture, pixelBuffer } = this;
        // set the view offset to represent just a single pixel under the mouse
        const pixelRatio = renderer.getPixelRatio();
        camera.setViewOffset(
            renderer.context.drawingBufferWidth,   // full width
            renderer.context.drawingBufferHeight,  // full height
            cssPosition.x * pixelRatio | 0,        // rect x
            cssPosition.y * pixelRatio | 0,        // rect y
            1,                                     // rect width
            1,                                     // rect height
        );
        // render the scene
        // r102
        //renderer.setRenderTarget(pickingTexture);
        //renderer.render(scene, camera);
        //renderer.setRenderTarget(null);
        // r98
        renderer.render(scene, camera, pickingTexture);
        // clear the view offset so rendering returns to normal
        camera.clearViewOffset();
        // read the pixel
        renderer.readRenderTargetPixels(
            pickingTexture,
            0,   // x
            0,   // y
            1,   // width
            1,   // height
            pixelBuffer);
        // decode the id that was encoded into the emissive color
        const id =
            (pixelBuffer[0] << 16) |
            (pixelBuffer[1] << 8) |
            (pixelBuffer[2]);
        return id;
    }
}

function resizeRendererToDisplaySize(renderer) {
    const canvas = renderer.domElement;
    const width = canvas.clientWidth;
    const height = canvas.clientHeight;
    const needResize = canvas.width !== width || canvas.height !== height;
    if (needResize) {
        renderer.setSize(width, height, false);
    }
    return needResize;
}

const pickPosition = { x: 0, y: 0 };
const pickHelper = new GPUPickHelper();
let lastPickedId = 0;
let lastPickedObjectSavedEmissive;

function render(time) {
    time *= 0.001; // convert to seconds
    if (resizeRendererToDisplaySize(renderer)) {
        const canvas = renderer.domElement;
        camera.aspect = canvas.clientWidth / canvas.clientHeight;
        camera.updateProjectionMatrix();
    }
    // restore the emissive color of the previously picked object
    if (lastPickedId) {
        pickableObjects[lastPickedId - 1].renderingMaterial.emissive.setHex(lastPickedObjectSavedEmissive);
        lastPickedId = 0;
    }
    // swap in the picking materials, pick, then swap the rendering materials back
    for (const pickableObject of pickableObjects) {
        pickableObject.mesh.material = pickableObject.pickingMaterial;
    }
    const id = pickHelper.pick(pickPosition, scene, camera);
    for (const pickableObject of pickableObjects) {
        pickableObject.mesh.material = pickableObject.renderingMaterial;
    }
    const pickedObject = pickableObjects[id - 1];
    if (pickedObject) {
        lastPickedId = id;
        lastPickedObjectSavedEmissive = pickedObject.renderingMaterial.emissive.getHex();
        pickedObject.renderingMaterial.emissive.setHex((time * 8) % 2 > 1 ? 0xFFFF00 : 0xFF0000);
    }
    renderer.render(scene, camera);
    requestAnimationFrame(render);
}
requestAnimationFrame(render);

function setPickPosition(event) {
    pickPosition.x = event.clientX;
    pickPosition.y = event.clientY;
}

function clearPickPosition() {
    // unlike the mouse, which always has a position,
    // if the user stops touching the screen we want
    // to stop picking. For now we just pick a value
    // unlikely to pick something
    pickPosition.x = -100000;
    pickPosition.y = -100000;
}

window.addEventListener('mousemove', setPickPosition);
window.addEventListener('mouseout', clearPickPosition);
window.addEventListener('mouseleave', clearPickPosition);
window.addEventListener('touchstart', (event) => {
    // prevent the window from scrolling
    event.preventDefault();
    setPickPosition(event.touches[0]);
}, {
    passive: false
});
window.addEventListener('touchmove', (event) => {
    setPickPosition(event.touches[0]);
});
window.addEventListener('touchend', clearPickPosition);
body {
    margin: 0;
}
canvas {
    width: 100vw;
    height: 100vh;
    display: block;
}
<script src="https://threejsfundamentals.org/threejs/resources/threejs/r98/three.min.js"></script>
<script src="https://threejsfundamentals.org/threejs/resources/threejs/r98/js/controls/OrbitControls.js"></script>
<canvas></canvas>

stereo cube strip (12×1) is getting rendered as a black screen using three.js + WebVR

I am trying to render a 12×1 stereo cube image strip using three.js and want to view it in VR. Instead, a black screen is rendered every time, without any error in the console. Basically, I am extracting 12 images from the single stereo cube image file using the function getTexturesFromAtlasFile, forming 2 meshes, and adding them to the scene. I am using a cube geometry. I am new to WebVR and three.js, please help. The code snippet used is given below:
<script src="https://cdnjs.cloudflare.com/ajax/libs/three.js/93/three.js"></script>
<script src="https://threejs.org/examples/js/controls/VRControls.js"></script>
<script src="https://threejs.org/examples/js/effects/VREffect.js"></script>
<script>
//Setup three.js WebGL renderer
var renderer = new THREE.WebGLRenderer({
antialias: true
});
renderer.setPixelRatio(window.devicePixelRatio);
renderer.setSize(window.innerWidth, window.innerHeight);
// Append the canvas element created by the renderer to document body element.
document.body.appendChild(renderer.domElement);
// Create a three.js scene.
var scene = new THREE.Scene();
// Create a three.js camera.
var camera = new THREE.PerspectiveCamera(90, window.innerWidth / window.innerHeight, 1, 1000);
//camera.layers.enable( 1 );
// Apply VR headset positional data to camera.
var controls = new THREE.VRControls(camera);
// Apply VR stereo rendering to renderer.
var effect = new THREE.VREffect(renderer);
effect.setSize(window.innerWidth, window.innerHeight);
//create a sphere - we'll use the inner surface to project our Mars image on to it
var geometry = new THREE.CubeGeometry(100, 100, 100);
var textures = this.getTexturesFromAtlasFile("img/san_miguel_vr_Samsung.png", 12);
var materialArray = [];
for (var i = 0; i < 6; i++) {
materialArray.push(new THREE.MeshBasicMaterial({
map: textures[i]
}));
}
// create the mesh based on geometry and material
//var mesh = new THREE.Mesh(geometry, materialArray);
var skybox = new THREE.Mesh(geometry, materialArray);
//skybox.layers.set( 1 );
scene.add(skybox);
var materialsR = [];
for (var i = 6; i < 12; i++) {
materialsR.push(new THREE.MeshBasicMaterial({
map: textures[i]
}));
}
var skyBoxR = new THREE.Mesh(geometry, materialsR);
//skyBoxR.layers.set( 2 );
scene.add(skyBoxR);
// Create a VR manager helper to enter and exit VR mode.
var manager = new WebVRManager(renderer, effect, {
hideButton: false
});
function animate(timestamp) {
// Update VR headset position and apply to camera.
controls.update();
// Render the scene through the manager.
manager.render(scene, camera, timestamp);
requestAnimationFrame(animate);
}
// Kick off animation loop
animate();
function getTexturesFromAtlasFile(atlasImgUrl, tilesNum) {
var textures = [];
for (var i = 0; i < tilesNum; i++) {
textures[i] = new THREE.Texture();
}
var imageObj = new Image();
imageObj.onload = function() {
var canvas, context;
var tileWidth = imageObj.height;
for (var i = 0; i < textures.length; i++) {
canvas = document.createElement('canvas');
context = canvas.getContext('2d');
canvas.height = tileWidth;
canvas.width = tileWidth;
context.drawImage(imageObj, tileWidth * i, 0, tileWidth, tileWidth, 0, 0, tileWidth, tileWidth);
textures[i].image = canvas
textures[i].needsUpdate = true;
}
};
imageObj.src = atlasImgUrl;
return textures;
}
</script>
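For the stereo half of the problem, the commented-out layers calls hint at the usual approach. This sketch mirrors the three.js WebVR panorama example; whether your VREffect/manager version renders each eye with its own layer is an assumption to verify:

// Show the left-eye cube only to layer 1 and the right-eye cube only to layer 2;
// a VR effect that assigns layer 1 to the left eye camera and layer 2 to the
// right eye camera will then render the correct box per eye.
skybox.layers.set(1);
skyBoxR.layers.set(2);
// The regular (non-VR) camera must be able to see at least one of them:
camera.layers.enable(1);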

Three.js: apply clipping to a specific area of the object

I'm using THREE.Plane to clip my STL model.
localPlane = new THREE.Plane( new THREE.Vector3( 0, -1, 0 ), 4 );
...
material = new THREE.MeshPhongMaterial( {
    color: 0xffffff,
    side: THREE.DoubleSide,
    clippingPlanes: [
        localPlane,
    ],
    clipShadows: true
} );
It's working, but the problem is that the whole top of the object is clipped by this infinitely large plane. I want to clip just a small part of it (there seems to be no way to scale a THREE.Plane).
I also tried using ThreeCSG.js, but it seems inconvenient to use with STL objects!
Yes, the removal of the intersection of clipping planes is supported in three.js. You can use a pattern like this one:
// clipping planes
var localPlanes = [
    new THREE.Plane( new THREE.Vector3( -1, 0, 0 ), 1 ),
    new THREE.Plane( new THREE.Vector3( 0, -1, 0 ), 1 )
];

// material
var material = new THREE.MeshPhongMaterial( {
    color: 0xffffff,
    side: THREE.DoubleSide,
    clippingPlanes: localPlanes,
    clipIntersection: true
} );
Also, see the three.js example.
three.js r.85
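One prerequisite that's easy to miss (it appears in the fuller example further down): material-level clipping planes only take effect once local clipping is enabled on the renderer:

// Without this flag, clippingPlanes on materials are silently ignored
renderer.localClippingEnabled = true;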
Edit: Follow WestLangley's advice. I'll leave this here as an alternate, though less efficient, means of performing the clipping.
Clipping planes are infinite. There's no getting around that. So what can you do? Multiple clipping planes in multiple render passes!
To do this, you'll need to turn off auto-clearing, and do your own manual buffer clearing.
renderer = new THREE.WebGLRenderer();
renderer.autoClear = false;
Now let's say plane1 is the clipping plane you currently have.
material = new THREE.MeshPhongMaterial( {
    ...
    clippingPlanes: [
        plane1,
    ],
    clipShadows: true
} );
var myMesh = new THREE.Mesh(geometry, material);
That clips the top half of myMesh when you call render. So you want to work with the remainder.
First, make another plane, plane2, be the inverse of plane1; plane2 will then clip the BOTTOM of myMesh. But if you render one pass using plane1 and another using plane2, you're back to a full mesh. So you'll need a third clipping plane, plane3, which clips only the desired half of myMesh. Putting plane2 and plane3 in the same render pass will result in only 1/4 of myMesh rendering. (A sketch of constructing these planes follows the next snippet.)
var pass1ClipPlanes = [
        plane1
    ],
    pass2ClipPlanes = [
        plane2, // this plane is the inverse of plane1, so it clips the opposite half
        plane3  // this clips the left/right half of the model
    ];
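For reference, a minimal sketch of building those planes from plane1 (THREE.Plane provides clone() and negate(); the axis chosen for plane3 is an assumption, pick whichever side you want to keep):

// plane2 clips exactly the half-space that plane1 keeps
var plane2 = plane1.clone().negate();
// plane3 clips one side of the model (here: everything with x > 0)
var plane3 = new THREE.Plane(new THREE.Vector3(-1, 0, 0), 0);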
Then when you go to render, clear the draw buffers first, then call two render passes, updating the clip planes between them.
// clear the draw buffers
renderer.clear();
// clip the top
myMesh.material.clippingPlanes = pass1ClipPlanes;
renderer.render(scene, camera);
// clip the bottom and one side
myMesh.material.clippingPlanes = pass2ClipPlanes;
renderer.render(scene, camera);
The first pass renders the bottom of the model, and the second pass renders half of the top.
ETA: Example
var renderer, scene, camera, controls, stats;
var cube,
    pass1ClipPlanes,
    pass2ClipPlanes;
var WIDTH = window.innerWidth,
    HEIGHT = window.innerHeight,
    FOV = 35,
    NEAR = 1,
    FAR = 1000;

function init() {
    document.body.style.backgroundColor = "slateGray";
    renderer = new THREE.WebGLRenderer({
        antialias: true,
        alpha: true
    });
    renderer.localClippingEnabled = true;
    renderer.autoClear = false;
    document.body.appendChild(renderer.domElement);
    document.body.style.overflow = "hidden";
    document.body.style.margin = "0";
    document.body.style.padding = "0";

    scene = new THREE.Scene();
    camera = new THREE.PerspectiveCamera(FOV, WIDTH / HEIGHT, NEAR, FAR);
    camera.position.z = 50;
    scene.add(camera);

    controls = new THREE.TrackballControls(camera, renderer.domElement);
    controls.dynamicDampingFactor = 0.5;
    controls.rotateSpeed = 3;

    var light = new THREE.PointLight(0xffffff, 1, Infinity);
    camera.add(light);

    stats = new Stats();
    stats.domElement.style.position = 'absolute';
    stats.domElement.style.top = '0';
    document.body.appendChild(stats.domElement);

    resize();
    window.onresize = resize;

    // POPULATE EXAMPLE
    var plane1 = new THREE.Plane(new THREE.Vector3(0, -1, 0), 0),
        plane2 = new THREE.Plane(new THREE.Vector3(0, 1, 0), 0),
        plane3 = new THREE.Plane(new THREE.Vector3(-1, 0, 0), 0);
    pass1ClipPlanes = [plane1];
    pass2ClipPlanes = [plane2, plane3];

    var cubeGeo = new THREE.BoxBufferGeometry(10, 10, 10),
        cubeMat = new THREE.MeshPhongMaterial({
            color: "red",
            side: THREE.DoubleSide
        });
    cube = new THREE.Mesh(cubeGeo, cubeMat);
    scene.add(cube);

    animate();
}

function resize() {
    WIDTH = window.innerWidth;
    HEIGHT = window.innerHeight;
    if (renderer && camera && controls) {
        renderer.setSize(WIDTH, HEIGHT);
        camera.aspect = WIDTH / HEIGHT;
        camera.updateProjectionMatrix();
        controls.handleResize();
    }
}

function render() {
    renderer.clear();
    cube.material.clippingPlanes = pass1ClipPlanes;
    renderer.render(scene, camera);
    cube.material.clippingPlanes = pass2ClipPlanes;
    renderer.render(scene, camera);
}

function animate() {
    requestAnimationFrame(animate);
    render();
    controls.update();
    stats.update();
}

function threeReady() {
    init();
}

(function() {
    function addScript(url, callback) {
        callback = callback || function() {};
        var script = document.createElement("script");
        script.addEventListener("load", callback);
        script.setAttribute("src", url);
        document.head.appendChild(script);
    }
    addScript("https://threejs.org/build/three.js", function() {
        addScript("https://threejs.org/examples/js/controls/TrackballControls.js", function() {
            addScript("https://threejs.org/examples/js/libs/stats.min.js", function() {
                threeReady();
            });
        });
    });
})();

square textures for circular particles in three.js

I'm having trouble rendering particles properly in three.js. I'm trying to follow this tutorial to set up particles. Unfortunately, while they're placed correctly and are displayed, they are clearly rectangular, rather than circular.
Here's the code for the material
var texture = new THREE.TextureLoader().load(
    "textures/disc.png",
    function(texture) {
        var material = new THREE.PointsMaterial({
            color: 0xFFFFFF,
            size: 16,
            map: texture,
            transparent: true,
            blending: THREE.AdditiveBlending,
        });
        particles = new THREE.Points( geometry, material );
    });
and here's a pen with complete code.
I honestly have no idea why they're rendered like that, when it's working perfectly in the example in the tutorial.
The issue is that fog is being applied to your particles. Set fog to false in the PointsMaterial:
var scene,
    camera, fieldOfView, aspectRatio, nearPlane, farPlane, HEIGHT, WIDTH,
    renderer, container, particles;

function createScene() {
    HEIGHT = window.innerHeight;
    WIDTH = window.innerWidth;
    scene = new THREE.Scene();
    scene.fog = new THREE.Fog(0xf7d9aa, 100, 950);
    aspectRatio = WIDTH / HEIGHT;
    fieldOfView = 60;
    nearPlane = 1;
    farPlane = 10000;
    camera = new THREE.PerspectiveCamera(
        fieldOfView,
        aspectRatio,
        nearPlane,
        farPlane
    );
    camera.position.x = 10;
    camera.position.z = 290;
    camera.position.y = 25;
    renderer = new THREE.WebGLRenderer({
        alpha: true,
        antialias: true
    });
    renderer.setClearColor(new THREE.Color(0, 0, 0));
    renderer.setSize(WIDTH, HEIGHT);
    renderer.shadowMap.enabled = true;
    container = document.getElementById('world');
    container.appendChild(renderer.domElement);
    window.addEventListener('resize', handleWindowResize, false);
}

function handleWindowResize() {
    HEIGHT = window.innerHeight;
    WIDTH = window.innerWidth;
    renderer.setSize(WIDTH, HEIGHT);
    camera.aspect = WIDTH / HEIGHT;
    camera.updateProjectionMatrix();
}

var hemisphereLight, shadowLight;
function createLights() {
}

var sphere;
function createSphere() {
    var geometry = new THREE.SphereGeometry( 150, 32, 32 );
    var material = new THREE.MeshBasicMaterial( { color: 0xffff00 } );
    sphere = new THREE.Mesh( geometry, material );
    sphere.visible = false;
    scene.add( sphere );
}

function createParticles() {
    var loader = new THREE.TextureLoader();
    var texture = loader.load(
        "https://i.imgur.com/9fW07EI.png",
        function(texture) {
            var editGeometry = sphere.geometry;
            var geometry = new THREE.Geometry();
            for ( var i = 0; i < editGeometry.vertices.length; i++ ) {
                geometry.vertices.push(editGeometry.vertices[i]);
            }
            var material = new THREE.PointsMaterial({
                color: 0xFFFFFF,
                size: 16,
                map: texture,
                transparent: true,
                blending: THREE.AdditiveBlending,
                fog: false,
                depthTest: false,
            });
            particles = new THREE.Points( geometry, material );
            particles.sortParticles = true;
            scene.add( particles );
        });
}

function loop() {
    renderer.render(scene, camera);
    requestAnimationFrame(loop);
}

function init() {
    createScene();
    createLights();
    createSphere();
    createParticles();
    loop();
}
init();
body {
    margin: 0;
}
<script src="https://cdnjs.cloudflare.com/ajax/libs/three.js/r123/three.min.js"></script>
<div id="world">
</div>
Note that if your image is not on the same domain as your webpage then you can't use the image in WebGL unless you both request cross origin access and the server serving the image gives permission.
Also you can't use images from a 3rd party domain unless the server sends CORS headers for permission to use the image. The postimg.org server did not give permission for the image. imgur's server did.
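For completeness, a minimal sketch of requesting a cross-origin image in three.js (the server still has to answer with CORS headers; recent three.js versions already default the loader to 'anonymous'):

var loader = new THREE.TextureLoader();
loader.setCrossOrigin('anonymous'); // request the image with CORS enabled
var texture = loader.load("https://i.imgur.com/9fW07EI.png");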
You must also turn off the depthTest otherwise points drawn in front will end up blocking out points drawn in back. Since you're drawing with transparency and additive blending that's probably what you want.
It didn't work for me, maybe because I use a ShaderMaterial, but depthTest: false works:
let material = new THREE.ShaderMaterial({
    uniforms: {
        uColor: { value: new THREE.Color(0xffffff) },
        uPointTexture: { value: new THREE.TextureLoader().load(require('./res/spark1')) }
    },
    vertexShader: _me.vertex_shader,
    fragmentShader: _me.fragment_shader,
    depthTest: false,
    transparent: true,
    // fog: false
});
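_me.vertex_shader and _me.fragment_shader are not shown in the snippet above; a hypothetical pair that would draw round points with these uniforms (modeled on the classic three.js "spark" points example) might look like:

// hypothetical vertex shader: position the vertex and give the point a size
const vertex_shader = `
    void main() {
        gl_PointSize = 16.0;
        gl_Position = projectionMatrix * modelViewMatrix * vec4(position, 1.0);
    }
`;
// hypothetical fragment shader: tint the disc texture, sampled with gl_PointCoord
// so each point shows the round sprite instead of a solid square
const fragment_shader = `
    uniform vec3 uColor;
    uniform sampler2D uPointTexture;
    void main() {
        gl_FragColor = vec4(uColor, 1.0) * texture2D(uPointTexture, gl_PointCoord);
    }
`;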
