Can I add an invisible bounding box to a three.js scene?

I am trying to detect a click on a bounding box for an object (rather than just on the object itself - more clickable area). When I load the object like this:
var loader2 = new THREE.ObjectLoader();
loader2.load( "models/Platform/Platform.json", function(object, materials){
object.rotation.x = - (Math.PI / 2);
object.rotation.y = Math.PI;
object.scale.set(.025, .025, .025);
object.position.set(0, 1, .4);
var bbox = new THREE.BoundingBoxHelper(object, 0xffffff);
bbox.update();
scene.add(object);
scene.add(bbox);
objects.push(bbox);
});
And detect the click like this:
raycaster = new THREE.Raycaster();
mouse = new THREE.Vector2();
document.addEventListener( 'mousedown', onDocumentMouseDown, false );
document.addEventListener( 'touchstart', onDocumentTouchStart, false );
window.addEventListener( 'resize', onWindowResize, false );
function onDocumentTouchStart( event ) {
event.preventDefault();
event.clientX = event.touches[0].clientX;
event.clientY = event.touches[0].clientY;
onDocumentMouseDown( event );
}
function onDocumentMouseDown( event ) {
console.log("here");
event.preventDefault();
mouse.x = ( event.clientX / renderer.domElement.clientWidth ) * 2 - 1;
mouse.y = - ( event.clientY / renderer.domElement.clientHeight ) * 2 + 1;
raycaster.setFromCamera( mouse, camera );
console.log(mouse.x);
console.log(mouse.y);
var intersects = raycaster.intersectObjects( objects, true );
if ( intersects.length > 0 ) {
console.log("click");
}
}
The bounding box shows up correctly, and I can click on it! However, the bounding box is visible on the screen:
I want the bounding box to be transparent/invisible/hidden. Is there any way I can have a bounding box attached to the object which is clickable but not visible?
I read that to make the bounding box invisible I should remove the scene.add(bbox); (not add it to the scene), but if I do that, then it is not in the scene for the ray to intersect, and thus the click is not registered.
Solutions?
Thanks so much!!!

You can try to set the material to invisible:
bbox.material.visible = false;

So, there seem to be (at least) two solutions.
As suggested by @prisoner849:
bbox.material.opacity = 0;
bbox.material.transparent = true;
As suggested by @tomacco and refined by @WestLangley:
bbox.material.visible = false;
Both of these solutions worked for me!
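For context, here is a minimal sketch of how the invisible helper slots into the loader code from the question (the scene and objects variables are taken from the question; the helper still has to be added to the scene and to the objects array so the raycaster has something to hit):
var loader2 = new THREE.ObjectLoader();
loader2.load( "models/Platform/Platform.json", function ( object ) {
    object.rotation.x = - ( Math.PI / 2 );
    object.rotation.y = Math.PI;
    object.scale.set( .025, .025, .025 );
    object.position.set( 0, 1, .4 );
    var bbox = new THREE.BoundingBoxHelper( object, 0xffffff );
    bbox.update();
    bbox.material.visible = false; // keep the helper in the scene, just don't draw it
    scene.add( object );
    scene.add( bbox );   // still required, otherwise the ray has nothing to intersect
    objects.push( bbox );
} );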

Related

How to bind onClick or onMouseEnter event to three.js?

As in the official example, we can use Raycaster to get the currently intersected objects: https://threejs.org/docs/?q=Raycaster#api/en/core/Raycaster.
The official example is:
const raycaster = new THREE.Raycaster();
const pointer = new THREE.Vector2();
function onPointerMove( event ) {
// calculate pointer position in normalized device coordinates
// (-1 to +1) for both components
pointer.x = ( event.clientX / window.innerWidth ) * 2 - 1;
pointer.y = - ( event.clientY / window.innerHeight ) * 2 + 1;
}
function render() {
// update the picking ray with the camera and pointer position
raycaster.setFromCamera( pointer, camera );
// calculate objects intersecting the picking ray
const intersects = raycaster.intersectObjects( scene.children );
for ( let i = 0; i < intersects.length; i ++ ) {
intersects[ i ].object.material.color.set( 0xff0000 );
}
renderer.render( scene, camera );
}
window.addEventListener( 'pointermove', onPointerMove );
window.requestAnimationFrame(render);
According to this example, as I understand it, I can get intersects[i].object, which is an instance of the three.js Mesh class.
I want to bind an onClick function to an Object3D like this:
function createObject(id, position) {
// ...
const mesh = new Mesh()
// ...
mesh.onClick = () => handleClickFn(id, position);
scene.add(mesh);
}
Then I can call intersects[i].object.onClick(); to trigger it.
Every example I found seems to operate on intersects[i].object directly, e.g. intersects[ i ].object.material.color.set( 0xff0000 );.
So, is there any way I can bind functions to each intersects[i].object like this?
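For what it's worth, since a three.js Object3D is an ordinary JavaScript object, you can attach a handler (either directly or via the documented userData property) and invoke it from the intersection loop. A minimal sketch, assuming the createObject/handleClickFn names from the question and the scene, camera, raycaster and pointer set up as in the official example above (geometry and material are placeholders):
function createObject( id, position ) {
    const mesh = new THREE.Mesh( geometry, material ); // geometry/material as in your app
    mesh.userData.onClick = () => handleClickFn( id, position );
    scene.add( mesh );
}

window.addEventListener( 'click', ( event ) => {
    pointer.x = ( event.clientX / window.innerWidth ) * 2 - 1;
    pointer.y = - ( event.clientY / window.innerHeight ) * 2 + 1;
    raycaster.setFromCamera( pointer, camera );
    const intersects = raycaster.intersectObjects( scene.children );
    if ( intersects.length > 0 && intersects[ 0 ].object.userData.onClick ) {
        intersects[ 0 ].object.userData.onClick(); // only the closest hit
    }
} );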

Threejs, understanding raycaster's intersected object coordinates

Hello, I have a question:
I have implemented a raycaster and I have been testing it manually, but I do not know why most of the clicks I made on the 3D model did not produce an intersection point.
First I will show the points I clicked on, then the results logged in the web console, then the code I have implemented, and finally the web structure:
I have clicked on those eight points:
And the results are:
[] (length: 0)
[] (length: 0)
[] (length: 0)
[] (length: 0)
point: Vector3 { x: -99.34871894866089, y: 67, z: 0 }
point: Vector3 { x: -126.50880038786315, y: 73.48094335146214, z: -5.684341886080802 }
[] (length: 0)
[] (length: 0)
Here is the implemented code; the important part is the onDocumentMouseDown function:
if ( ! Detector.webgl ) Detector.addGetWebGLMessage();
// global variables for this scripts
let OriginalImg,
SegmentImg;
var mouse = new THREE.Vector2();
var raycaster = new THREE.Raycaster();
var mousePressed = false;
init();
animate();
// initialize the page
function init ()
{
let filename = "models/nrrd/columna01.nrrd"; // change your nrrd file
let idDiv = 'original';
OriginalImg = new InitCanvas(idDiv, filename );
OriginalImg.init();
console.log(OriginalImg);
filename = "models/nrrd/columnasegmentado01.nrrd"; // change your nrrd file
idDiv = 'segment';
SegmentImg = new InitCanvas(idDiv, filename );
SegmentImg.init();
}
document.addEventListener( 'mousedown', onDocumentMouseDown, false );
document.addEventListener( 'mouseup', onDocumentMouseUp, false );
function onDocumentMouseDown( event ) {
mousePressed = true;
mouse.x = ( event.clientX / window.innerWidth ) * 2 - 1;
mouse.y = - ( event.clientY / window.innerHeight ) * 2 + 1;
raycaster.setFromCamera( mouse.clone(), OriginalImg.camera );
var objects = raycaster.intersectObjects(OriginalImg.scene.children);
console.log(objects);
}
function onDocumentMouseUp( event ) { mousePressed = false}
function animate() {
requestAnimationFrame( animate );
OriginalImg.animate();
SegmentImg.animate();
}
Here we see the web structure:
I suspect that because the canvas has an offset within the window, the points are not effectively taken from the areas we clicked on.
I have also read:
Debug threejs raycaster mouse coordinates
How do I get the coordinates of a mouse click on a canvas element?
https://threejs.org/docs/#api/core/Raycaster
threejs raycasting does not work
Any help would be appreciated, whether links to further reading, theoretical suggestions, or code examples.
EDIT:
1) I have opened a new thread with the same topic and more detailed images, graphs, and logs. Here is the link: ThreeJS, raycaster gets strange coordinates when we log the intersection object
2) I followed the answer provided by @TheJim01; here is the current code:
logic.js
if (!Detector.webgl) Detector.addGetWebGLMessage();
// global variables for this scripts
let OriginalImg,
SegmentImg;
var mouse = new THREE.Vector2();
var raycaster = new THREE.Raycaster();
var mousePressed = false;
var clickCount = 0;
init();
animate();
// initialize the page
function init() {
let filename = "models/nrrd/columna01.nrrd"; // change your nrrd file
let idDiv = 'original';
OriginalImg = new InitCanvas(idDiv, filename);
OriginalImg.init();
console.log(OriginalImg);
filename = "models/nrrd/columnasegmentado01.nrrd"; // change your nrrd file
idDiv = 'segment';
SegmentImg = new InitCanvas(idDiv, filename);
SegmentImg.init();
}
let originalCanvas = document.getElementById('original');
originalCanvas.addEventListener('mousedown', onDocumentMouseDown, false);
originalCanvas.addEventListener('mouseup', onDocumentMouseUp, false);
function onDocumentMouseDown(event) {
mousePressed = true;
clickCount++;
mouse.x = ( event.offsetX / window.innerWidth ) * 2 - 1;
console.log('Mouse x position is: ', mouse.x, 'the click number was: ', clickCount);
mouse.y = -( event.offsetY / window.innerHeight ) * 2 + 1;
console.log('Mouse Y position is: ', mouse.y);
raycaster.setFromCamera(mouse.clone(), OriginalImg.camera);
var objects = raycaster.intersectObjects(OriginalImg.scene.children);
console.log(objects);
}
function onDocumentMouseUp(event) {
mousePressed = false
}
function animate() {
requestAnimationFrame(animate);
OriginalImg.animate();
SegmentImg.animate();
}
InitCanvas.js
// this class handles the loading and the canvas for an NRRD file
// Using programming based on prototype: https://javascript.info/class
// This class should be improved:
// - Canvas Width and height
InitCanvas = function ( IdDiv, Filename ) {
this.IdDiv = IdDiv;
this.Filename = Filename
}
InitCanvas.prototype = {
constructor: InitCanvas,
init: function() {
this.container = document.getElementById( this.IdDiv );
// this should be changed.
debugger;
this.container.innerHeight = 600;
this.container.innerWidth = 800;
// These statements should be changed to improve the image position
this.camera = new THREE.PerspectiveCamera( 60, this.container.innerWidth / this.container.innerHeight, 0.01, 1e10 );
this.camera.position.z = 300;
let scene = new THREE.Scene();
scene.add( this.camera );
// light
let dirLight = new THREE.DirectionalLight( 0xffffff );
dirLight.position.set( 200, 200, 1000 ).normalize();
this.camera.add( dirLight );
this.camera.add( dirLight.target );
// read file
let loader = new THREE.NRRDLoader();
loader.load( this.Filename , function ( volume ) {
//z plane
let sliceZ = volume.extractSlice('z',Math.floor(volume.RASDimensions[2]/4));
debugger;
this.container.innerWidth = sliceZ.iLength;
this.container.innerHeight = sliceZ.jLength;
scene.add( sliceZ.mesh );
}.bind(this) );
this.scene = scene;
// renderer
this.renderer = new THREE.WebGLRenderer( { alpha: true } );
this.renderer.setPixelRatio( this.container.devicePixelRatio );
debugger;
this.renderer.setSize( this.container.innerWidth, this.container.innerHeight );
// add canvas in container
this.container.appendChild( this.renderer.domElement );
},
animate: function () {
this.renderer.render( this.scene, this.camera );
}
}
index.html
<!DOCTYPE html>
<html lang="en">
<head>
<title>Prototype: three.js without react.js</title>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, user-scalable=no, minimum-scale=1.0, maximum-scale=1.0">
<link rel="stylesheet" href="css/styles.css">
<!-- load the libraries and js -->
<script src="js/libs/three.js"></script>
<script src="js/Volume.js"></script>
<script src="js/VolumeSlice.js"></script>
<script src="js/loaders/NRRDLoader.js"></script>
<script src="js/Detector.js"></script>
<script src="js/libs/stats.min.js"></script>
<script src="js/libs/gunzip.min.js"></script>
<script src="js/libs/dat.gui.min.js"></script>
<script src="js/InitCanvas.js"></script>
</head>
<body>
<div id="info">
<h1>Prototype: three.js without react.js</h1>
</div>
<!-- two canvas -->
<div class="row">
<div class="column" id="original">
</div>
<div class="column" id="segment">
</div>
</div>
<script src="js/logic.js"></script>
</body>
</html>
I have seen that it indeed works differently. So far I understand that the origin of canvas coordinates would be the red one in fullscreen mode, and the green one when we open the Chrome dev tools on the right:
In addition, the zone where the raycaster intersects the three.js model is the red area in fullscreen and the green one when we open the Mozilla dev tools below:
3) Currently the canvas is created from a parent div named column, which is:
And the canvas created inside it is 800x600.
How can we make the raycaster's intersection zone match the size of the model shown in the canvas?
5) To try to solve this difficulty myself, I have studied this good SO post:
THREE.js Ray Intersect fails by adding div
However, I see that in the post I linked, @WestLangley uses clientX/Y, while here in the answers section @TheJim01 advises using offsetX/Y.
Also, as I am a beginner with three.js and have been learning JS for only some time, I have some difficulties:
How is the origin of coordinates handled in the browser?
What is the origin of coordinates in three.js?
How are both related?
Why do most articles use this expression?:
mouse.x = ( event.offsetX / window.innerWidth ) * 2 - 1;
Why do we need to divide by window.innerWidth? Why do we multiply by 2 and subtract 1?
6) I ask all of this because I would like to build a web application where we gather the point the user clicked on the left canvas, then change the color of the same part on the right canvas, and display some information about it such as its name and description.
So gathering the mouse click position with three.js is important in order to change the color of the clicked part on the right canvas.
7) In addition I have also read:
Update Three.js Raycaster After CSS Tranformation
EDIT2: 22/03/18
I have followed the answer provided by @WestLangley here: THREE.js Ray Intersect fails by adding div
And it makes the raycaster's intersection zone match the canvas image.
So it solves the question in practice.
However, I still do not understand something, for example the relation between the browser's and three.js' coordinates.
Here we see that for the browser and for the object intersected by the three.js raycaster, the x coordinate is the same, but the y coordinate is different. Why?
Also, I suspect that the browser's origin of coordinates on the canvas is at the center:
Is this correct?
I will show the pieces of code I needed to add to make the raycaster's detection area the same as the canvas image.
First I added in the CSS:
canvas {
width: 200px;
height: 200px;
margin: 100px;
padding: 0px;
position: static; /* fixed or static */
top: 100px;
left: 100px;
}
Then I have added in the logic.js
function onDocumentMouseDown(event) {
mousePressed = true;
clickCount++;
mouse.x = ( ( event.clientX - OriginalImg.renderer.domElement.offsetLeft ) / OriginalImg.renderer.domElement.clientWidth ) * 2 - 1;
mouse.y = - ( ( event.clientY - OriginalImg.renderer.domElement.offsetTop ) / OriginalImg.renderer.domElement.clientHeight ) * 2 + 1
console.log('Mouse x position is: ', mouse.x, 'the click number was: ', clickCount);
console.log('Mouse Y position is: ', mouse.y);
raycaster.setFromCamera(mouse.clone(), OriginalImg.camera);
var objects = raycaster.intersectObjects(OriginalImg.scene.children);
console.log(objects);
}
As you can see above, I subtract the renderer's offsetLeft/offsetTop from the mouse x and y, and divide by the renderer's width/height.
In addition, I have also studied how the mouse click is handled in OpenAnatomy:
function onSceneMouseMove(event) {
//check if we are not doing a drag (trackball controls)
if (event.buttons === 0) {
//compute offset due to container position
mouse.x = ( (event.clientX-containerOffset.left) / container.clientWidth ) * 2 - 1;
mouse.y = - ( (event.clientY-containerOffset.top) / container.clientHeight ) * 2 + 1;
needPickupUpdate = true;
}
else {
needPickupUpdate = false;
}
}
Link: https://github.com/mhalle/oabrowser/blob/gh-pages/src/app.js
So we see they also use the left and top offsets, divided by the width and height, but this time taken from the container rather than the renderer.
Also I have studied how they do it in AMI:
function onDoubleClick(event) {
const canvas = event.target.parentElement;
const id = event.target.id;
const mouse = {
x: ((event.clientX - canvas.offsetLeft) / canvas.clientWidth) * 2 - 1,
y: - ((event.clientY - canvas.offsetTop) / canvas.clientHeight) * 2 + 1,
};
Link: https://github.com/FNNDSC/ami/blob/dev/examples/viewers_quadview/viewers_quadview.js
So here we see that instead of the container or even the renderer they use the offset of the canvas itself.
In addition, I have studied some official three.js examples, but they seem to use a single fullscreen renderer/scene, so they do not show how to handle several canvases and raycasters on the same web page.
function onDocumentMouseMove( event ) {
event.preventDefault();
mouse.x = ( event.clientX / window.innerWidth ) * 2 - 1;
mouse.y = - ( event.clientY / window.innerHeight ) * 2 + 1;
}
Link: https://github.com/mrdoob/three.js/blob/master/examples/webgl_interactive_cubes.html
function onMouseMove( event ) {
mouse.x = ( event.clientX / renderer.domElement.clientWidth ) * 2 - 1;
mouse.y = - ( event.clientY / renderer.domElement.clientHeight ) * 2 + 1;
raycaster.setFromCamera( mouse, camera );
// See if the ray from the camera into the world hits one of our meshes
var intersects = raycaster.intersectObject( mesh );
// Toggle rotation bool for meshes that we clicked
if ( intersects.length > 0 ) {
helper.position.set( 0, 0, 0 );
helper.lookAt( intersects[ 0 ].face.normal );
helper.position.copy( intersects[ 0 ].point );
}
}
Link: https://github.com/mrdoob/three.js/blob/master/examples/webgl_geometry_terrain_raycast.html
Could you help me please?
Thank you.
Raycaster.setFromCamera(NDC, camera) takes the normalized device coordinates as its first parameter; each component is a value between -1 and 1. But you are passing the actual screen coordinates, which is why it doesn't intersect. Try this:
const screenPosition = {
x: event.clientX - canvas.offsetLeft,
y: event.clientY - canvas.offsetTop
};
const widthHalf = canvas.clientWidth * 0.5;
const heightHalf = canvas.clientHeight * 0.5;
const mouse = {
x: (screenPosition.x - widthHalf) / widthHalf ,
y: - (screenPosition.y - heightHalf) / heightHalf,
};
Also try to set recursive to true in intersectObject().
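To make the mapping concrete, a small worked example (assuming an 800x600 canvas): dividing by the width gives a 0..1 value, multiplying by 2 gives 0..2, and subtracting 1 shifts it into the -1..+1 range that setFromCamera expects; the y axis is also flipped because screen y grows downward while NDC y grows upward.
// click in the middle of an 800x600 canvas: offsetX = 400, offsetY = 300
var ndcX =   ( 400 / 800 ) * 2 - 1; //  0 (horizontal center)
var ndcY = - ( 300 / 600 ) * 2 + 1; //  0 (vertical center)
// click in the top-left corner of the canvas: offsetX = 0, offsetY = 0
var cornerX =   ( 0 / 800 ) * 2 - 1; // -1 (left edge in NDC)
var cornerY = - ( 0 / 600 ) * 2 + 1; // +1 (top edge in NDC, because of the flip)
For a canvas that does not fill the window, one robust way to compute these coordinates is to measure the canvas with getBoundingClientRect, which accounts for its offset within the page. A sketch (OriginalImg is assumed from the question; any renderer/camera pair tied to that canvas works the same way):
function toNDC( event, domElement ) {
    var rect = domElement.getBoundingClientRect();
    return new THREE.Vector2(
        ( ( event.clientX - rect.left ) / rect.width ) * 2 - 1,
        - ( ( event.clientY - rect.top ) / rect.height ) * 2 + 1
    );
}
// usage:
// var mouse = toNDC( event, OriginalImg.renderer.domElement );
// raycaster.setFromCamera( mouse, OriginalImg.camera );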

Want to have click event on the .obj file shown on the webpage

I have a .obj file shown on the web page using three.js.
My aim is that when I drag the mouse left/right, the OBJ model should rotate, which I am able to do using THREE.TrackballControls().
Next, I want to touch specific points on that OBJ model, and when the mouse is pressed on those points something should happen (like a counter increasing, which will be shown on the web page).
I have seen DOM events for three.js, but it looks like they only allow clicking on the whole object, not on specific points on the object.
How can I achieve that?
You have to create a raycaster. (r69)
var mouse_vector = new THREE.Vector3();
var mouse = { x: 0, y: 0, z: 1 };
var ray = new THREE.Raycaster();
function onMouseDown( event_info )
{
event_info.preventDefault();
mouse.x = ( event_info.clientX / window.innerWidth ) * 2 - 1;
mouse.y = - ( event_info.clientY / window.innerHeight ) * 2 + 1;
mouse_vector.set( mouse.x, mouse.y, mouse.z );
mouse_vector.unproject(camera);
var direction = mouse_vector.sub( camera.position ).normalize();
ray.set( camera.position, direction );
var intersects = ray.intersectObjects( scene.children, true );
if( intersects.length )
{
intersects.forEach(function(clicked)
{
// Your stuff like
if (clicked.object.typ == 'yourObject')
{
//Event on click..
}
});
}
}
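For more recent three.js releases, the same picking can be written more compactly with Raycaster.setFromCamera, the pattern used in the other answers on this page. A sketch, assuming camera and scene are already set up:
var raycaster = new THREE.Raycaster();
var mouse = new THREE.Vector2();
function onMouseDown( event ) {
    event.preventDefault();
    mouse.x = ( event.clientX / window.innerWidth ) * 2 - 1;
    mouse.y = - ( event.clientY / window.innerHeight ) * 2 + 1;
    raycaster.setFromCamera( mouse, camera );
    var intersects = raycaster.intersectObjects( scene.children, true ); // recursive
    if ( intersects.length > 0 ) {
        // intersects[ 0 ] is the closest hit; intersects[ 0 ].point is the exact 3D point clicked
        console.log( intersects[ 0 ].point );
    }
}
document.addEventListener( 'mousedown', onMouseDown, false );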

catch the click event on a specific mesh in the renderer

I set up a canvas renderer which contains two meshes (cubes). What I need to do is catch the click event on each cube so I can call the appropriate method for it.
So far, I can catch the click event on the whole renderer, meaning that when I click on cube1 or cube2 the click is handled the same way, because it's bound to the renderer :)
My question is: how do I bind the click event to each cube?
My relevant code is the following:
//dom
var containerPopUp=document.getElementById('popup');
//renderer
var rendererPopUp = new THREE.CanvasRenderer();
rendererPopUp.setSize(420,200);
containerPopUp.appendChild(rendererPopUp.domElement);
//Scene
var scenePopUp = new THREE.Scene();
//Camera
var cameraPopUp = new THREE.PerspectiveCamera(50,60/60,1,1000);
cameraPopUp.position.z = 220;
cameraPopUp.position.y = 20;
//
scenePopUp.add(cameraPopUp);
//Add texture for the cube
//Use image as texture
var img2D = new THREE.MeshBasicMaterial({ //CHANGED to MeshBasicMaterial
map:THREE.ImageUtils.loadTexture('img/2d.png')
});
img2D.map.needsUpdate = true; //ADDED
//Add Cube
var cubeFor2D = new THREE.Mesh(new THREE.CubeGeometry(40,80,40),img2D);
cubeFor2D.position.x =- 60;
cubeFor2D.position.y = 20;
scenePopUp.add(cubeFor2D);
//
var img3D = new THREE.MeshBasicMaterial({ //CHANGED to MeshBasicMaterial
map:THREE.ImageUtils.loadTexture('img/3d.png')
});
img3D.map.needsUpdate = true;
var cubeFor3D = new THREE.Mesh(new THREE.CubeGeometry(40,80,40),img3D);
cubeFor3D.position.x = 60;
cubeFor3D.position.y=20;
scenePopUp.add(cubeFor3D);
//
rendererPopUp.render(scenePopUp,cameraPopUp);
//
animate();
rendererPopUp.domElement.addEventListener('click', testCall, false); // Here the click event is bound to the whole renderer: whatever object in the renderer is clicked, the testCall method is called.
As you can see, cubeFor2D and cubeFor3D are contained in the renderer. I need to bind the click event to each mesh. I tried this with threex.domevent.js:
var meshes = {};
meshes['mesh1'] = cubeFor2D;
meshes['mesh1'].on('mouseover', function(event){
//response to click...
console.log('you have clicked on cube 2D');
});
But it doesn't work; in the console I got this error:
TypeError: meshes.mesh1.on is not a function
Of course, I included the API source file:
<script src="threex.domevent.js"></script>
You can generate a callback like this. First define your callback function for each object:
mesh.callback = function() { console.log( this.name ); }
Then follow the standard picking pattern:
var raycaster = new THREE.Raycaster();
var mouse = new THREE.Vector2();
function onDocumentMouseDown( event ) {
event.preventDefault();
mouse.x = ( event.clientX / renderer.domElement.clientWidth ) * 2 - 1;
mouse.y = - ( event.clientY / renderer.domElement.clientHeight ) * 2 + 1;
raycaster.setFromCamera( mouse, camera );
var intersects = raycaster.intersectObjects( objects );
if ( intersects.length > 0 ) {
intersects[0].object.callback();
}
}
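A note on wiring this up: the objects array passed to intersectObjects and the event registration are not shown above, so the following is only an assumption of how they would typically look:
var objects = [];
var mesh = new THREE.Mesh( geometry, material ); // your geometry/material
mesh.callback = function () { console.log( this.name ); };
objects.push( mesh );
scene.add( mesh );
document.addEventListener( 'mousedown', onDocumentMouseDown, false );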
EDIT: updated to three.js r.70
Create a click handler
window.addEventListener('click', onDocumentMouseDown, false);
Define the function onDocumentMouseDown; note that the difference from the answer above is the index position of the clicked object!
var raycaster = new THREE.Raycaster();
var mouse = new THREE.Vector2();
function onDocumentMouseDown( event ) {
event.preventDefault();
mouse.x = ( event.clientX / renderer.domElement.clientWidth ) * 2 - 1;
mouse.y = - ( event.clientY / renderer.domElement.clientHeight ) * 2 + 1;
raycaster.setFromCamera( mouse, camera );
console.log(scene.children);
var intersects = raycaster.intersectObjects( scene.children );
console.log(intersects[1]);
if ( intersects.length > 0 ) {
intersects[1].object.callback();
}}
Define the Mesh object
var mesh_menu_title = new THREE.Mesh(geometry_menu, materials_menu);
mesh_menu_title.name = 'select_lang';
mesh_menu_title.callback = function() { select_language();}
scene.add(mesh_menu_title);
define the callback function
function select_language(){
var selectedObject = scene.getObjectByName("select_lang");
scene.remove( selectedObject );
var selectedObject = scene.getObjectByName("start");
scene.remove( selectedObject );
var selectedObject = scene.getObjectByName("menu");
scene.remove( selectedObject );
}
So this code above will handle specific object clicked inside my canvas, then callback a function, the "mesh.callback" and it will remove some scene childs from the canvas.
It doesn't work if you use intersects[0].object.callback(); because in my scene the object stored at index 0 is the vertices.

Click and drag only grabs part of an object

I'm on the final stretch of this project I've been working on and I'm having issues with the ability to click and drag objects. I've added them all into an array called items, and it sort of works right now. Here is the link to the page in action. If you add any of the items from the menu in the upper right, it'll show up, but you can only drag it around piece by piece. From what I can tell, the issue is that it is treating each item as a series of items instead of as one item. This makes sense as each model is several models pieced together, but I'm not sure how to work around that. Any ideas?
Here are the three functions I have controlling mouse interaction:
function onMouseMove( event ){
event.preventDefault();
mouse.x = ( event.clientX / width ) * 2 - 1;
mouse.y = - ( event.clientY / height ) * 2 + 1;
var vector = new THREE.Vector3( mouse.x, mouse.y, 0 );
projector.unprojectVector( vector, camera );
var ray = new THREE.Ray( camera.position, vector.subSelf( camera.position ).normalize() );
if ( SELECTED ) {
var intersects = ray.intersectObject( plane );
SELECTED.position.copy( intersects[ 0 ].point.subSelf( offset ) );
return;
}
var intersects = ray.intersectObjects( items );
if ( intersects.length > 0 ) {
if ( INTERSECTED != intersects[ 0 ] ) {
INTERSECTED = intersects[ 0 ].object;
plane.position.copy( INTERSECTED.position );
}
container.style.cursor = 'pointer';
}
else {
INTERSECTED = null;
container.style.cursor = 'auto';
}
}
function onMouseDown( event ) {
event.preventDefault();
var vector = new THREE.Vector3( mouse.x, mouse.y, 0 );
projector.unprojectVector( vector, camera );
var ray = new THREE.Ray( camera.position, vector.subSelf( camera.position ).normalize() );
var intersects = ray.intersectObjects( items );
if ( intersects.length > 0 ) {
SELECTED = intersects[ 0 ].object;
var intersects = ray.intersectObject( plane );
offset.copy( intersects[ 0 ].point ).subSelf( plane.position );
container.style.cursor = 'move';
}
}
function onMouseUp( event ) {
event.preventDefault();
if ( INTERSECTED ) {
plane.position.copy( INTERSECTED.position );
SELECTED = null;
}
container.style.cursor = 'auto';
}
It's heavily based on this example, but without the color bits.
By changing the code in onMouseDown like so
// OLD
SELECTED = intersects[0].object;
// NEW
SELECTED = intersects[0].object.parent;
I can now move the full object. This only works if the object only has one parent though, and so some items are not able to move with this code. Anyone have a suggestion on determining if it has parent objects and moving up if it does?
If somebody is still interested in this question: the subSelf method is now called sub.
Resolved by adding the following to onMouseDown
SELECTED = intersects[0].object;
while(SELECTED.parent != scene){
SELECTED = SELECTED.parent;
}
This ensures that the object grabbed will be the highest level that isn't the scene and makes all the models drag-able.
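The same idea can be wrapped in a small helper (just a sketch that packages the loop above; scene is assumed to be the root of the hierarchy):
// Walk up the parent chain until the next step would be the scene itself,
// so that grabbing any child mesh selects the whole assembled model.
function topLevelAncestor( object, scene ) {
    while ( object.parent && object.parent !== scene ) {
        object = object.parent;
    }
    return object;
}
// usage inside onMouseDown:
// SELECTED = topLevelAncestor( intersects[ 0 ].object, scene );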
