How to raycast onto a pointcloud in ThreeJs R71

I am working in autodesk forge which includes Threejs r71 and I want to use a raycaster to detect clicks on different elements within a pointcloud.
Sample code for how to do this with ThreeJs r71 would be appreciated.
Right now, I register an extension with the Forge API and run the code below within it. It creates a pointcloud and positions the points at predetermined locations (saved within the cameraInfo array).
let geometry = new THREE.Geometry();
this.cameraInfo.forEach(function(e) {
    geometry.vertices.push(e.position);
});
const material = new THREE.PointCloudMaterial({ size: 150, color: 0xff0000, sizeAttenuation: true });
this.points = new THREE.PointCloud(geometry, material);
this.scene.add(this.points);
/* Set up event listeners */
document.addEventListener('mousemove', event => {
    // console.log('mouse move!');
    let mouse = {
        x: (event.clientX / window.innerWidth) * 2 - 1,
        y: -(event.clientY / window.innerHeight) * 2 + 1
    };
    let raycaster = new THREE.Raycaster();
    raycaster.params.PointCloud.threshold = 15;
    let vector = new THREE.Vector3(mouse.x, mouse.y, 0.5).unproject(this.camera);
    raycaster.ray.set(this.camera.position, vector.sub(this.camera.position).normalize());
    this.scene.updateMatrixWorld();
    let intersects = raycaster.intersectObject(this.points);
    if (intersects.length > 0) {
        const hitIndex = intersects[0].index;
        const hitPoint = this.points.geometry.vertices[hitIndex];
        console.log(hitIndex);
        console.log(hitPoint);
    }
}, false);
The output seems to be illogical. At certain camera positions, it will constantly tell me that it is intersecting an item in the pointcloud (regardless of where the mouse is), and at other camera positions it won't detect an intersection at all.
TLDR: it doesn't actually detect an intersection between my pointcloud and the mouse.

I've simplified the code a bit, using some of the viewer APIs (using a couple of sample points in the point cloud):
const viewer = NOP_VIEWER;
const geometry = new THREE.Geometry();
for (let i = -100; i <= 100; i += 10) {
    geometry.vertices.push(new THREE.Vector3(i, i, i));
}
const material = new THREE.PointCloudMaterial({ size: 50, color: 0xff0000, sizeAttenuation: true });
const points = new THREE.PointCloud(geometry, material);
viewer.impl.scene.add(points);

const raycaster = new THREE.Raycaster();
raycaster.params.PointCloud.threshold = 50;
document.addEventListener('mousemove', function(event) {
    const ray = viewer.impl.viewportToRay(viewer.impl.clientToViewport(event.clientX, event.clientY));
    raycaster.ray.set(ray.origin, ray.direction);
    let intersects = raycaster.intersectObject(viewer.impl.scene, true);
    if (intersects.length > 0) {
        console.log(intersects[0]);
    }
});
I believe you'll need to tweak the raycaster.params.PointCloud.threshold value. The ray casting logic in three.js doesn't actually intersect the point "boxes" that you see rendered on the screen. It only computes distance between the ray and the point (in the world coordinate system), and only outputs an intersection when the distance is under the threshold value. In my example I tried setting the threshold to 50, and the intersection results were somewhat better.
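To make the distance test concrete, here is a rough sketch of the check (an illustration, not the actual three.js source, reusing the points and raycaster objects from the snippet above):
// For each vertex, measure the distance from the ray to the point in world space,
// and report a hit only when that distance is below the configured threshold.
const threshold = raycaster.params.PointCloud.threshold;
points.geometry.vertices.forEach((vertex, index) => {
    const worldPos = vertex.clone().applyMatrix4(points.matrixWorld);
    const distance = raycaster.ray.distanceToPoint(worldPos);
    if (distance < threshold) {
        console.log('would report an intersection with point', index, 'at distance', distance);
    }
});
This also explains the behavior you're seeing: the hit test is based purely on this world-space distance, not on the rendered point sprites, so what you see on screen and what the raycaster reports can disagree.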
As a side note, if you don't necessarily need point clouds inside the scene, consider overlaying HTML elements over the 3D view instead. We're using the approach in the https://forge-digital-twin.autodesk.io demo (source) to show rich annotations attached to specific positions in the 3D space. With this approach, you don't have to worry about custom intersections - the browser handles everything for you.
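A minimal sketch of that overlay approach, assuming the viewer's worldToClient helper and CAMERA_CHANGE_EVENT are available in your Forge Viewer version (the anchor position and label are hypothetical):
const label = document.createElement('div');
label.textContent = 'Camera #1';
label.style.position = 'absolute';
viewer.container.appendChild(label);

// Re-project the anchored world position to screen space whenever the camera moves.
viewer.addEventListener(Autodesk.Viewing.CAMERA_CHANGE_EVENT, function () {
    const screenPos = viewer.worldToClient(new THREE.Vector3(10.0, 20.0, 30.0)); // hypothetical anchor point
    label.style.left = screenPos.x + 'px';
    label.style.top = screenPos.y + 'px';
});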

Related

Get latitude and longitude on click on a sphere with threejs

I am here today because I would like your help.
I'm trying to create the earth globe with lat and long, but I can't get the geographic coordinates.
This is my code:
const scene = new THREE.Scene();
const camera = new THREE.PerspectiveCamera(
    75,
    window.innerWidth / window.innerHeight,
    0.1,
    1000
);
camera.position.z = 5;
scene.add(camera);

const renderer = new THREE.WebGLRenderer();
renderer.setSize(window.innerWidth, window.innerHeight);
renderer.setPixelRatio(Math.min(window.devicePixelRatio, 2));
document.body.appendChild(renderer.domElement);
renderer.render(scene, camera);

function anime() {
    renderer.render(scene, camera);
    renderer.setPixelRatio(Math.min(window.devicePixelRatio, 2));
    renderer.setSize(window.innerWidth, window.innerHeight);
    requestAnimationFrame(anime);
}
anime();

class Earth {
    constructor(radius, scene, camera, renderer) {
        this.scene = scene;
        this.radius = radius;
        this.camera = camera;
        this.renderer = renderer;
        this.raycaster = new THREE.Raycaster();
        this.mouse = new THREE.Vector2();
        this.geometry = new THREE.SphereGeometry(this.radius, 32, 32);
        this.texture = new THREE.TextureLoader().load('https://i.ibb.co/4KGwCLD/earth-atmos-2048.jpg');
        this.material = new THREE.MeshBasicMaterial({
            color: 0xffffff,
            map: this.texture,
        });
        this.mesh = new THREE.Mesh(this.geometry, this.material);
        this.obj = new THREE.Object3D();
    }

    initObject() {
        // this.scene.add(this.scene);
        this.obj.add(this.mesh);
        this.obj.name = 'earth';
        this.scene.add(this.obj);
    }

    onClick(event) {
        event.preventDefault();
        let canvas = this.renderer.domElement;
        let vector = new THREE.Vector3(
            ((event.offsetX) / canvas.width) * 2 - 1,
            -((event.offsetY) / canvas.height) * 2 + 1,
            0.5);
        vector.unproject(this.camera);
        this.raycaster.set(this.camera.position, vector.sub(this.camera.position).normalize());
        let intersects = this.raycaster.intersectObjects(this.scene.children);
        if (intersects.length > 0) {
            console.log(intersects[0].point.x);
            console.log(intersects[0].point.y);
            console.log(intersects[0].point.z);
        }
    }

    earthRotate() {
        this.obj.rotation.y += 0.01;
        requestAnimationFrame(this.earthRotate.bind(this));
    }
}

const earth = new Earth(3, scene, camera, renderer);
earth.initObject();
addEventListener("click", (event) => {
    earth.onClick(event);
});
/* earth.earthRotate(); */
body {
    margin: 0;
}
<script src="https://cdnjs.cloudflare.com/ajax/libs/three.js/r99/three.min.js" integrity="sha512-0tlhMhMGPohLm/YwaskxH7jJuUGqU/XPTl+HE0dWrhGbpEBRIZYMQdbHC0CmyNPzZKTBd8JoVZnvMcL7hzlFOg==" crossorigin="anonymous" referrerpolicy="no-referrer"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/three.js/r99/three.js" integrity="sha512-RixpleQfVXfhJUrmXlxWwitZGvMWk13+KhCsaYdeod5xryBN6gqo3RJ9xvaHn8VUeNuUnYfvzKBhLBnJnpEsgA==" crossorigin="anonymous" referrerpolicy="no-referrer"></script>
Is it possible to get geographic coordinates on a sphere with ThreeJS?
If so, do you know how to do it?
Thank you in advance for your help!
Intersections are returned in world coordinates, so if you click the same place over and over as your globe is spinning, you will get (about) the same hit position every time.
THAT SAID, these hit points are based on the geometry of the sphere, so a low-poly sphere could actually return a position that does not match a lat/lon position as if the sphere were perfect.
To get a better result, you'll need to take a couple more steps; a code sketch combining them follows the list below.
1. Create a sphere representation
Use a Sphere to create a mathematically perfect representation of your globe.
2. Use Ray to find the intersection point
The Three Raycaster is a great tool for easily setting up raycasting. But you can use Raycaster.ray (Ray) to perform manual steps, too.
For example, use Ray.intersectSphere against your perfect sphere to get the "real" intersection point.
3. Convert to local coordinates
You were getting the same(-ish) click point because the intersection is in world coordinates. To convert to local coordinates, use the very convenient Object3D.worldToLocal to transform the returned world hit point into a local hit point (the Vector3 is changed using this method, so keep this in mind!).
4. Get your lat/lon
Pick a point on your globe which will represent the intersection point of your equator and prime meridian (0, 0). For a sphere where north is +Y, your origin could be new Vector3( 0, 0, sphere.radius )
Assuming you rotate about the Y axis...
For longitude, remove the Y component of your hit vector (hit.y = 0), set the length of the vector to your sphere radius (hit.setLength( sphere.radius )), then use Vector3.angleTo to find the angle (in radians) between the hit vector and your "zero" vector, and convert to degrees. Use the X component to determine the East/West component of your value.
For latitude, create a copy of your hit vector. Remove the Y component from the copy and set its length to the sphere's radius, just like for longitude. But instead of comparing to the origin vector, use angleTo to get the angle between the copy and the hit vectors. Convert to degrees. Use the Y value to determine the North/South component of your value.
It's possible to use normalized vectors to perform these comparisons instead, but I think it's easier to visualize if you picture the vectors representing points on the surface of your sphere.
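Putting steps 1 through 4 together, a sketch might look like this (globeMesh, sphereRadius, mouse, and camera stand in for your own objects; THREE.Math.radToDeg lives on THREE.MathUtils in newer releases):
// mouse: pointer position in normalized device coordinates (-1..+1)
const sphere = new THREE.Sphere(new THREE.Vector3(0, 0, 0), sphereRadius); // 1. mathematically perfect sphere
const raycaster = new THREE.Raycaster();
raycaster.setFromCamera(mouse, camera);

const worldHit = raycaster.ray.intersectSphere(sphere, new THREE.Vector3()); // 2. manual ray/sphere test
if (worldHit) {
    const localHit = globeMesh.worldToLocal(worldHit.clone()); // 3. world -> local (clone so worldHit survives)

    // 4. lat/lon relative to a "zero" point at (0, 0, radius), with north = +Y
    const zero = new THREE.Vector3(0, 0, sphereRadius);
    const flattened = localHit.clone();
    flattened.y = 0;
    flattened.setLength(sphereRadius);

    const lon = THREE.Math.radToDeg(flattened.angleTo(zero)) * (localHit.x >= 0 ? 1 : -1); // East/West from X
    const lat = THREE.Math.radToDeg(flattened.angleTo(localHit)) * (localHit.y >= 0 ? 1 : -1); // North/South from Y
    console.log('lat:', lat, 'lon:', lon);
}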

Moving Camera from Point A to Point B on Geometry Created in the Autodesk Viewer using three.js

I am trying to move my camera view state from point A to point B.
For that I am creating a path using LineDashedMaterial in the Autodesk viewer, and I am able to create and show the line from point A to B.
Here is the code:
geometry = new THREE.Geometry();
geometry.vertices.push(getBoundingBox([2112]).center(),
    getBoundingBox([2109]).center());
material = new THREE.LineDashedMaterial({
    color: 0xFF0000,
    dashSize: 3,
    gapSize: 1,
    transparent: true,
    depthWrite: false,
    depthTest: true,
});
checkLineDistance = geometry.computeLineDistances();
geometry.lineDistancesNeedUpdate = true;
NOP_VIEWER.impl.matman().addMaterial('material', material, true);
line = new THREE.Line(geometry, material);
NOP_VIEWER.impl.sceneAfter.skipDepthTarget = true;
NOP_VIEWER.impl.sceneAfter.skipIdTarget = true;
NOP_VIEWER.impl.sceneAfter.add(line);
which results in the dashed line being shown in the viewer.
Now I am stuck with an issue: I want to navigate or move my camera along the direction of my line/path (A to B).
I am using a sample model right now; consider it to be a building with rooms A and B.
2. Is there any way I can get all the vectors the line passes through? I basically need the position, target, and upVector for my camera movement.
3. Is there any way I can get all the vectors or points from A to B using a dbId in the Forge Viewer API?
4. I tried combining Three.js with the Forge Viewer, but it seems difficult.
This is what I am actually trying to achieve, but instead of that moving geometry I need to move the viewport in the Forge Viewer.
The navigation system in Forge Viewer supports a smooth, linear interpolation to a custom camera state using the setRequestTransition method, for example like so:
let newCameraPosition = new THREE.Vector3(1.0, 2.0, 3.0);
let newCameraTarget = new THREE.Vector3(4.0, 5.0, 6.0);
let fov = viewer.navigation.getHorizontalFov();
viewer.navigation.setRequestTransition(true, newCameraPosition, newCameraTarget, fov);
The default duration of this transition is 1 second, so if you had a list of vertices of a path that you would like to follow with the camera, you could do it with something like this:
function followPath(viewer, vertices, delayMs) {
    let index = 0;
    let timer = setInterval(function () {
        if (index >= vertices.length) {
            clearInterval(timer);
            return;
        }
        let newPos = vertices[index];
        let newTarget = vertices[index + 1];
        if (!newTarget) {
            let oldPos = viewer.navigation.getPosition();
            let oldTarget = viewer.navigation.getTarget();
            newTarget = new THREE.Vector3(
                newPos.x + (oldTarget.x - oldPos.x),
                newPos.y + (oldTarget.y - oldPos.y),
                newPos.z + (oldTarget.z - oldPos.z)
            );
        }
        viewer.navigation.setRequestTransition(true, newPos, newTarget, viewer.navigation.getHorizontalFov());
        index++;
    }, delayMs);
}

followPath(viewer, [
    new THREE.Vector3(10, 20, 30),
    new THREE.Vector3(40, 50, 60),
    new THREE.Vector3(70, 80, 90),
    new THREE.Vector3(0, 10, 0)
], 2000);
If you need more flexibility (e.g., interpolating around curves, custom ease-in/ease-out, etc.), you can still use three.js or other animation libraries, and simply control the camera position and target yourself.
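For example, a minimal sketch of controlling the camera yourself with a simple eased interpolation between two positions (this assumes viewer.navigation.setPosition and setTarget are available as counterparts of the get* methods used above):
function flyTo(viewer, startPos, endPos, target, durationMs) {
    const startTime = performance.now();
    function step(now) {
        const t = Math.min((now - startTime) / durationMs, 1);
        const eased = t * t * (3 - 2 * t); // smoothstep ease-in/ease-out
        const pos = startPos.clone().lerp(endPos, eased);
        viewer.navigation.setPosition(pos);
        viewer.navigation.setTarget(target);
        if (t < 1) {
            requestAnimationFrame(step);
        }
    }
    requestAnimationFrame(step);
}

flyTo(viewer, new THREE.Vector3(10, 20, 30), new THREE.Vector3(0, 10, 0), new THREE.Vector3(0, 0, 0), 3000);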

Three JS Pivot point

What I'm trying to achieve is a rotation of the geometry around a pivot point, and then to make that the new definition of the geometry. I do not want to keep editing rotationZ; I want the current rotationZ to become the new rotationZ 0.
This way, when I create a new rotation task, it will start from the newly given pivot point and the newly given radians.
What I've tried, but then the rotation point moves:
// Add cube to do calculations
var box = new THREE.Box3().setFromObject( o );
var size = box.getSize();
var offsetZ = size.z / 2;
o.geometry.translate(0, -offsetZ, 0)
// Do rotation
o.rotateZ(CalcUtils.degreeToRad(degree));
o.geometry.translate(0, offsetZ, 0)
I also tried to add a Group and rotate that group and then remove the group. But I need to keep the rotation without all the extra objects. The code I created
var box = new THREE.Box3().setFromObject( o );
var size = box.size();
var geometry = new THREE.BoxGeometry( 20, 20, 20 );
var material = new THREE.MeshBasicMaterial( { color: 0xcc0000 } );
var cube = new THREE.Mesh( geometry, material );
cube.position.x = o.position.x;
cube.position.y = 0; // Height / 2
cube.position.z = -size.z / 2;
o.position.x = 0;
o.position.y = 0;
o.position.z = size.z / 2;
cube.add(o);
scene.add(cube);
// Do rotation
cube.rotateY(CalcUtils.degreeToRad(degree));
// Remove cube, and go back to single object
var position = o.getWorldPosition();
scene.add(o)
scene.remove(cube);
console.log(o);
o.position.x = position.x;
o.position.y = position.y;
o.position.z = position.z;
So my question: how do I save the current rotation as the new 0 rotation point and make the rotation final?
EDIT
I added an image of what I want to do. The object is green. I have the 0 point of the world (black), the 0 point of the object (red), and the rotation point (blue).
How can I rotate the object around the blue point?
I wouldn't recommend updating the vertices, because you'll run into trouble with the normals (unless you keep them up-to-date, too). Basically, it's a lot of hassle to perform an action for which the transformation matrices were intended.
You came pretty close by translating, rotating, and un-translating, so you were on the right track. There are some built-in methods which can help make this super easy.
// obj - your object (THREE.Object3D or derived)
// point - the point of rotation (THREE.Vector3)
// axis - the axis of rotation (normalized THREE.Vector3)
// theta - radian value of rotation
// pointIsWorld - boolean indicating the point is in world coordinates (default = false)
function rotateAboutPoint(obj, point, axis, theta, pointIsWorld) {
    pointIsWorld = (pointIsWorld === undefined) ? false : pointIsWorld;

    if (pointIsWorld) {
        obj.parent.localToWorld(obj.position); // compensate for world coordinate
    }

    obj.position.sub(point); // remove the offset
    obj.position.applyAxisAngle(axis, theta); // rotate the POSITION
    obj.position.add(point); // re-add the offset

    if (pointIsWorld) {
        obj.parent.worldToLocal(obj.position); // undo world coordinates compensation
    }

    obj.rotateOnAxis(axis, theta); // rotate the OBJECT
}
After this method completes, the rotation/position IS persisted. The next time you call the method, it will transform the object from its current state to wherever your inputs define next.
Also note the compensation for using world coordinates. This allows you to use a point in either world coordinates or local space by converting the object's position vector into the correct coordinate system. It's probably best to use it this way any time your point and object are in different coordinate systems, though your observations may differ.
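For example, a call with a hypothetical mesh and pivot might look like this:
// Rotate `mesh` by 45 degrees around the Y axis, about the world-space point (1, 0, 0).
const pivot = new THREE.Vector3(1, 0, 0);
const axis = new THREE.Vector3(0, 1, 0); // must be normalized
rotateAboutPoint(mesh, pivot, axis, Math.PI / 4, true);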
As a simple solution for anyone trying to quickly change the pivot point of an object, I would recommend creating a group and adding the mesh to the group, and rotating around that.
Full example
const geometry = new THREE.BoxGeometry();
const material = new THREE.MeshBasicMaterial({ color: 0xff0000 });
const cube = new THREE.Mesh(geometry, material);
scene.add(cube)
Right now, this will just rotate around its center
cube.rotation.z = Math.PI / 4
Create a new group and add the cube
const group = new THREE.Group();
group.add(cube)
scene.add(group)
At this point we are back where we started. Now move the mesh:
cube.position.set(0.5,0.5,0)
Then move the group
group.position.set(-0.5, -0.5, 0)
Now use your group to rotate the object:
group.rotation.z = Math.PI / 4

Manipulate objects in the browser with three.js using the mouse

I have this piece of code (see below) that I used to draw a cube with three.js:
// revolutions per second
var angularSpeed = 0.0;
var lastTime = 0;

function animate() {
    // update
    var time = (new Date()).getTime();
    var timeDiff = time - lastTime;
    var angleChange = angularSpeed * timeDiff * 2 * Math.PI / 1000;
    cube.rotation.y += angleChange;
    lastTime = time;

    // render
    renderer.render(scene, camera);

    // request new frame
    requestAnimationFrame(animate);
}

// renderer
var container = document.getElementById("container");
var renderer = new THREE.WebGLRenderer();
renderer.setSize(container.offsetWidth, container.offsetHeight);
container.appendChild(renderer.domElement);

// camera
var camera = new THREE.PerspectiveCamera(45, window.innerWidth / window.innerHeight, 1, 1000);
camera.position.z = 700;

// scene
var scene = new THREE.Scene();

// cube Length, Height, Width
var cube = new THREE.Mesh(new THREE.CubeGeometry(400, 200, 200), new THREE.MeshBasicMaterial({
    wireframe: true,
    color: '#ff0000'
}));
cube.rotation.x = Math.PI * 0.1;
scene.add(cube);

// start animation
animate();
Does anyone know whether is it possible to allow the user to change the size of the cube by dragging the edges using the mouse?
Check this jsfiddle. I reused the structure of draggableCubes, plus a few small changes:
To drag the vertices, I created vertexHelpers (little spheres).
To avoid the math, the trick is to use an invisible plane to drag your objects/vertices on, perpendicular to the camera. To see it in action, just set plane.visible = true.
Now, when we drag a vertexHelper, its distance to the center of the cube changes; we just have to scale the cube by the same ratio.
Within the mouseMove listener's function it becomes:
if (SELECTED) {
    var intersects = raycaster.intersectObject(plane);
    // so we get the mouse 3D coordinates in intersects[0].point
    var previousDistance = SELECTED.position.sub(cube.position).length();
    var increaseRatio = intersects[0].point.sub(cube.position).length() / previousDistance;
    cube.scale.set(
        cube.scale.x * increaseRatio,
        cube.scale.y * increaseRatio,
        cube.scale.z * increaseRatio
    );
    // then update the vertexHelpers position (copy the new vertices positions)
}
EDIT :
In your question you specifically ask about resizing a cube by dragging its edges. I did not remember that in the example and did not think of it intuitively, but it can be done the same way.
However, given that lineWidth is not implemented in ANGLE (the library used on Windows to translate WebGL calls), it is not easy to pick lines with a 1px width. I remember a three.js example I could not find again, where a geometry is associated with the line so it looks outlined. Basically you could do it by creating cylinders as custom 'edgesHelpers' (I'm precisely not talking about THREE.EdgesHelper), and they would have to be resized each time the cube is, too; a sketch of such a helper follows.
In your setup code add an eventlistener for mousemove:
// Event Handlers
renderer.domElement.addEventListener('mousemove', onDocumentMouseMove, false);
Then in the event handler code, you can check which object is selected and adjust the scale parameter.
function onDocumentMouseMove(event) {
    ...
    var selected = raycaster.intersectObjects(objects);
    if (selected.length > 0) {
        // Do things to the scale parameter(s) of the intersected object... Just for demo purposes
        selected[0].object.scale.x = selected[0].object.scale.x + selected[0].point.sub(offset).x / 1000;
        selected[0].object.scale.y = selected[0].object.scale.y + selected[0].point.sub(offset).y / 1000;
        return;
    }
    ...
}
Since I am typing pseudo code here, all too easy to make an error, so I have left a test version here for you to try: http://www.numpty.co.uk/cubedrag.html
As you can see, size of selected object changes in horrible ways with the dragging of the mouse. You have me interested, so will look into making it proportional to movement if I get more time.

Raycast doesn't hit mesh when cast from the inside

I have set up a simple scene where I have my camera inside a sphere geometry:
var mat = new THREE.MeshBasicMaterial({map: THREE.ImageUtils.loadTexture('0.jpg') , overdraw:true, color: 0xffffff, wireframe: false });
var sphereGeo = new THREE.SphereGeometry(1000,50,50);
var sphere = new THREE.Mesh(sphereGeo,mat);
sphere.scale.x = -1;
sphere.doubleSided = false;
scene.add(sphere);
I set up functionality where I can look around inside that sphere, and my point is to be able to cast a ray on mouse down, hit the sphere, and get the coordinates where that hit occurred. I'm casting a ray, but the intersects are still empty.
var vector = new THREE.Vector3();
vector.set( ( event.clientX / window.innerWidth ) * 2 - 1, - ( event.clientY / window.innerHeight ) * 2 + 1, 0.5 );
vector.unproject( camera );
raycaster.ray.set( camera.position, vector.sub( camera.position ).normalize());
var intersects = raycaster.intersectObjects(scene.children, true);
Everything works with a test cube also placed inside my sphere.
My question is, does it matter whether you hit the object from the inside or not? Because that is the only explanation that comes to my mind.
Thanks in advance.
sphere.doubleSided was changed to sphere.material.side = THREE.DoubleSide some years ago.
It does matter if you hit the object from the inside. Usually a ray will pass through an "inverted" surface due to backface culling which happens on the pipeline level.
Inverted/flipped surfaces are usually ignored in both rendering and raycasting.
In your case, however, I'd go ahead and try changing sphere.doubleSided = false; to sphere.doubleSided = true;. This should make the raycast return the intersection point with your sphere. [shouldn't work with negative scale]
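In newer three.js releases the same idea is expressed through the material's side property, for example (a sketch, using the sphere from the question):
// Render and raycast both faces of the sphere so rays from the inside can hit it.
sphere.material.side = THREE.DoubleSide;
// or, to only consider the inside faces:
sphere.material.side = THREE.BackSide;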
You can also enter the "dirty vertices" mode, and flip the normals manually:
mesh.geometry.dynamic = true;
mesh.geometry.__dirtyVertices = true;
mesh.geometry.__dirtyNormals = true;
mesh.flipSided = true;

// flip every vertex normal in mesh by multiplying normal by -1
for (var i = 0; i < mesh.geometry.faces.length; i++) {
    mesh.geometry.faces[i].normal.x = -1 * mesh.geometry.faces[i].normal.x;
    mesh.geometry.faces[i].normal.y = -1 * mesh.geometry.faces[i].normal.y;
    mesh.geometry.faces[i].normal.z = -1 * mesh.geometry.faces[i].normal.z;
}

mesh.geometry.computeVertexNormals();
mesh.geometry.computeFaceNormals();
I also suggest you set scale back to 1.0 instead of -1.0.
Let me know if it worked!
