Progressively reveal circle in loop - javascript

I currently have this: Codepen (embedded snippet below).
I would like to be able to change the color progressively after the scanline, something like this:
After searching online and trying to find something in three.js / WebGL I failed to find what I wanted, probably because I don't quite know what I should search for.
Could you help me with a solution or pointing me in the right direction?
I have considered the following possibilities:
Having a second green circle and a dynamic mask that reveals it after the scanline.
How to create a mask in three.js that can show a slice of an increasing angle θ?
CircleGeometry has parameters to create a slice with angle θ. But constantly changing the geometry of my mesh doesn't sound very smart.
Adding tiny circle slices after the scanline passes so it creates the impression of revealing a circle but it's actually just adding tiny slices.
P.S. - I am using three.js because later there will be 3d elements to this project.
// Scanline demo: an outer disc, an inner disc, a static marker line, and a
// scan line that sweeps clockwise around a pivot at the center.
const scene = new THREE.Scene();
const camera = new THREE.PerspectiveCamera(75, window.innerWidth / window.innerHeight, 0.1, 1000);
const renderer = new THREE.WebGLRenderer();
renderer.setSize(window.innerWidth, window.innerHeight);
document.body.appendChild(renderer.domElement);

const innerRadius = 1;
const outerRadius = innerRadius * 2;
const barLength = innerRadius; // fixed typo: was "barLenght"

// create scanline background (outer disc)
const outerMaterial = new THREE.MeshBasicMaterial({color: 0x34ebd2});
const outerCircle = new THREE.Mesh(new THREE.CircleGeometry(outerRadius, 60), outerMaterial);
scene.add(outerCircle);

// create inner circle
const innerMaterial = new THREE.MeshBasicMaterial({color: 0x0000ff});
const innerCircle = new THREE.Mesh(new THREE.CircleGeometry(innerRadius, 60), innerMaterial);
scene.add(innerCircle);

// create static line
const staticLine = new THREE.Mesh(new THREE.PlaneGeometry(0.05, barLength), new THREE.MeshBasicMaterial({color: 0xff0000, side: THREE.DoubleSide}));
scene.add(staticLine);

// create scan line
const scanLine = new THREE.Mesh(new THREE.PlaneGeometry(0.05, barLength), new THREE.MeshBasicMaterial({color: 0xff0000, side: THREE.DoubleSide}));
scene.add(scanLine);

// position both lines in the ring between the inner and outer radius
staticLine.position.y = innerRadius + barLength / 2;
scanLine.position.y = innerRadius + barLength / 2;

// pivot group rotates the scan line around the center
const pivot = new THREE.Group();
pivot.position.set(0.0, 0.0, 0);
pivot.add(scanLine);
scene.add(pivot);

camera.position.z = 5;

function animate() {
  requestAnimationFrame(animate);
  // update the rotation before rendering so each frame shows the latest state
  pivot.rotation.z -= 0.005;
  renderer.render(scene, camera);
}
animate();
<script src="https://cdnjs.cloudflare.com/ajax/libs/three.js/r128/three.min.js"></script>

This is a 2-in-1 example (fragment and vertex shader implementations) of progressive arcs, written from scratch. Use it as a starting point.
body{
overflow: hidden;
margin: 0;
}
<script type="module">
// Skypack pins package versions with "@", not "#": a "#..." suffix is a URL
// fragment and is never sent to the CDN, so these imports would not resolve
// the intended 0.132.2 build (the "#" is almost certainly a copy/paste artifact).
import * as THREE from "https://cdn.skypack.dev/three@0.132.2";
import {OrbitControls} from "https://cdn.skypack.dev/three@0.132.2/examples/jsm/controls/OrbitControls.js";
let scene = new THREE.Scene();
let camera = new THREE.PerspectiveCamera(60, innerWidth / innerHeight, 1, 1000);
camera.position.set(-5, 3, 8);
let renderer = new THREE.WebGLRenderer();
renderer.setSize(innerWidth, innerHeight);
document.body.appendChild(renderer.domElement);
let controls = new OrbitControls(camera, renderer.domElement);
// fragment shader option
let g = new THREE.CircleGeometry(5, 64);
let m = new THREE.MeshBasicMaterial({
color: 0x7f7f7f,
side: THREE.DoubleSide,
onBeforeCompile: shader => {
shader.uniforms.time = m.userData.uniforms.time;
shader.uniforms.currColor = m.userData.uniforms.currColor;
shader.uniforms.prevColor = m.userData.uniforms.prevColor;
shader.fragmentShader = `
uniform float time;
uniform vec3 currColor;
uniform vec3 prevColor;
${shader.fragmentShader}
`.replace(
`#include <color_fragment>`,
`#include <color_fragment>
vec2 cUv = vUv - 0.5;
float dist = length(cUv);
vec3 col = prevColor;
float ang = mod(atan(cUv.y, cUv.x) + PI * 3.5, PI2);
float aRatio = 1. - ang / PI2;
float slice = 1. - step(time, aRatio);
col = mix(prevColor, currColor, slice);
float innerCirc = 1. - step(0.25, dist);
col = mix(col, diffuseColor.rgb, innerCirc);
diffuseColor.rgb = col;
`
);
console.log(shader.fragmentShader);
}
})
m.defines = {
"USE_UV": " "
};
m.userData = {
uniforms: {
time: {
value: 0.5
},
currColor: {
value: new THREE.Color(0xff00ff)
},
prevColor: {
value: new THREE.Color(0x00ffff)
}
}
}
let o = new THREE.Mesh(g, m);
scene.add(o);
// vertex shader option
let g2 = new THREE.PlaneGeometry(1, 1, 180, 1);
let m2 = new THREE.MeshBasicMaterial({
color: 0xffff00,
wireframe: true,
onBeforeCompile: shader => {
shader.uniforms.rMin = m2.userData.uniforms.rMin;
shader.uniforms.rMax = m2.userData.uniforms.rMax;
shader.uniforms.arcRatio = m2.userData.uniforms.arcRatio;
shader.vertexShader = `
uniform float rMin;
uniform float rMax;
uniform float arcRatio;
mat2 rot(float a){return mat2(cos(a), -sin(a), sin(a), cos(a));}
${shader.vertexShader}
`.replace(
`#include <begin_vertex>`,
`#include <begin_vertex>
float rDiff = rMax - rMin;
float r = rMin + (rDiff * uv.y);
float ang = PI2 * uv.x * arcRatio;
transformed.xy = rot(ang) * vec2(0., r);
`
);
console.log(shader.vertexShader);
}
});
m2.userData = {
uniforms: {
rMin: {value: 2.5},
rMax: {value: 5},
arcRatio: {value: 0.25} // 0..1
}
}
let o2 = new THREE.Mesh(g2, m2);
o2.position.z = 2;
scene.add(o2);
let clock = new THREE.Clock();
window.addEventListener("resize", onResize);
renderer.setAnimationLoop(_ => {
let t = (clock.getElapsedTime() * 0.1) % 1;
m.userData.uniforms.time.value = t;
m2.userData.uniforms.arcRatio.value = t;
renderer.render(scene, camera);
})
// Keep the renderer's drawing buffer and the camera frustum in sync with
// the current window size.
function onResize() {
  const w = innerWidth;
  const h = innerHeight;
  renderer.setSize(w, h);
  camera.aspect = w / h;
  camera.updateProjectionMatrix();
}
</script>

Related

Trouble mapping an image to a point material buffer geometry in Three.js

I've been using Three.js to make a project that creates a point material object which can then be interacted with. I'm trying to map the pixels of an image to the vertexes of a buffer geometry so it shows the image as a set of points (point cloud like). The image in question being a map of the earth (downscaled to 106 x 53 pixels).
I'm doing this by drawing the image on a canvas, extracting the image data, setting the colour from the image data (based on pixel coordinates) and then setting the colour attribute of my geometry (in this case a sphere buffer geometry). Where am I going wrong with the mapping?
This is code for extracting colours and placing them in an array for the geometry:
// Build a per-vertex color attribute from the image's pixels.
// Read the whole image back once instead of issuing one getImageData() call
// per pixel, and hoist the 2D-context lookup out of the loops.
const colors = [];
const ctx = earthCanvas.getContext("2d");
// ImageData.data is a flat RGBA byte array, row-major (y * width + x).
const pixels = ctx.getImageData(0, 0, img.width, img.height).data;
for (let x = 0; x < img.width; x++) {
  for (let y = 0; y < img.height; y++) {
    // Same column-major traversal order as the original (x outer, y inner).
    const i = (y * img.width + x) * 4;
    // THREE.Color stores channels as 0..1 floats, so dividing by 255 is
    // equivalent to the previous rgbToHex -> "#rrggbb" -> color.set round trip.
    colors.push(pixels[i] / 255, pixels[i + 1] / 255, pixels[i + 2] / 255);
  }
}
g.setAttribute("color", new THREE.Float32BufferAttribute(colors, 3));
Which results in this happening:
Is there any way to make this look like earth as a globe? Am I just placing the coordinates of the pixels wrong?
The code for the geometry itself looks like this:
g = new THREE.SphereBufferGeometry(3, 104, 52);
count = g.attributes.position.count;
console.log(count);
g.center();
let pointShape = new THREE.TextureLoader().load("./models/particle.png");
m = new THREE.PointsMaterial({
size: pointSize,
map: pointShape,
vertexColors: true,
//blending: THREE.AdditiveBlending,
depthTest: false,
opacity: 1
});
And the HTML and JavaScript for the canvas looks like this:
// Loads an image from `source` and, once loaded, draws it scaled to fill the
// given canvas. NOTE(review): `img` is assigned without a declaration, making
// it an implicit global — presumably intentional, since other code here reads
// img.width / img.height, but worth confirming.
function drawImageSource(source, canvas) {
img = new Image();
img.addEventListener("load", function () {
// The image can be drawn from any source
canvas
.getContext("2d")
.drawImage(
img,
// source rectangle: the full image
0,
0,
img.width,
img.height,
// destination rectangle: the full canvas (scales the image to fit)
0,
0,
canvas.width,
canvas.height
);
});
// Request CORS-enabled loading so the canvas is not tainted by a
// cross-origin image (getImageData would otherwise throw).
img.crossOrigin = "Anonymous";
// Setting src kicks off the load; the "load" listener above fires when done.
img.setAttribute("src", source);
}
<!-- Fixed invalid markup: HTML attributes are space-separated (no commas),
     <canvas> is not a void element so it needs an explicit closing tag, and
     the stray </body> is replaced by the matching </div>. -->
<div id="canvasDiv">
  <canvas id="earthCanvas" width="106" height="53" hidden></canvas>
</div>
The link to the code sandbox project is here:
https://codesandbox.io/s/small-hill-mvhgc?file=/src/index.js:5956-6389
(I apologise for messy code. It's a prototype.)
I would go with a different approach: modify the existing PointsMaterial, using .onBeforeCompile(), and pass a texture in a uniform.
body{
overflow:hidden;
margin:0;
}
<script type="module">
// Skypack pins package versions with "@", not "#": a "#..." suffix is a URL
// fragment that is never sent to the server, so the 0.136.0 pin would be lost.
import * as THREE from "https://cdn.skypack.dev/three@0.136.0";
import {OrbitControls} from "https://cdn.skypack.dev/three@0.136.0/examples/jsm/controls/OrbitControls";
let scene = new THREE.Scene();
let camera = new THREE.PerspectiveCamera(60, innerWidth / innerHeight, 1, 1000);
camera.position.set(0, 0, 10);
let renderer = new THREE.WebGLRenderer();
renderer.setSize(innerWidth, innerHeight);
document.body.appendChild(renderer.domElement);
let controls = new OrbitControls(camera, renderer.domElement);
let g = new THREE.SphereGeometry(4, 360, 180);
let m = new THREE.PointsMaterial({
size: 0.05,
onBeforeCompile: shader => {
shader.uniforms.tex = {value: new THREE.TextureLoader().load("https://threejs.org/examples/textures/uv_grid_opengl.jpg")};
shader.vertexShader = `
varying vec2 vUv;
${shader.vertexShader}
`.replace(
`#include <begin_vertex>`,
`#include <begin_vertex>
vUv = uv;
`
);
//console.log(shader.vertexShader);
shader.fragmentShader = `
uniform sampler2D tex;
varying vec2 vUv;
${shader.fragmentShader}
`.replace(
`vec4 diffuseColor = vec4( diffuse, opacity );`,
`
vec3 col = texture2D(tex, vUv).rgb;
col *= diffuse;
vec4 diffuseColor = vec4( col, opacity );`
);
//console.log(shader.fragmentShader);
}
});
let p = new THREE.Points(g, m);
scene.add(p);
renderer.setAnimationLoop(() => {
renderer.render(scene, camera);
});
</script>

Three.js / WebGL How to mirror one side of a texture

I'm basically trying to achieve a kaleidoscopic effect with just one side, but I'm working with lots of Points, so I'd like that to happen in the shader. However if there's a Threejs trick that mirrors half of the texture or the Points object, that would be great. I tried to apply transformation matrices but I can't get it to work.
I found an old KaleidoShader that requires the usage of EffectComposer, but I'd like to implement it manually myself (without EffectComposer) and I'm struggling to do so. I'm using an FBO and I tried adding the code from that shader in both my simulation and render shaders but it's having no effect at all. Do I have to add yet another FBO texture or is it possibile to do those calculations in one of the existing shaders?
For visual reference https://ma-hub.imgix.net/wp-images/2019/01/23205110/premiere-pro-mirror-effect.jpg
I've spent so much time without getting to the bottom of this, hopefully someone can point me in the right direction.
Thanks
I just followed this article
Pasting in the code from that repo seems to work
body {
margin: 0;
}
#c {
width: 100vw;
height: 100vh;
display: block;
}
<canvas id="c"></canvas>
<script type="module">
import * as THREE from 'https://threejsfundamentals.org/threejs/resources/threejs/r115/build/three.module.js';
import {EffectComposer} from 'https://threejsfundamentals.org/threejs/resources/threejs/r115/examples/jsm/postprocessing/EffectComposer.js';
import {RenderPass} from 'https://threejsfundamentals.org/threejs/resources/threejs/r115/examples/jsm/postprocessing/RenderPass.js';
import {ShaderPass} from 'https://threejsfundamentals.org/threejs/resources/threejs/r115/examples/jsm/postprocessing/ShaderPass.js';
import {GUI} from 'https://threejsfundamentals.org/threejs/../3rdparty/dat.gui.module.js';
function main() {
const canvas = document.querySelector('#c');
const renderer = new THREE.WebGLRenderer({canvas});
const fov = 75;
const aspect = 2; // the canvas default
const near = 0.1;
const far = 5;
const camera = new THREE.PerspectiveCamera(fov, aspect, near, far);
camera.position.z = 2;
const scene = new THREE.Scene();
{
const color = 0xFFFFFF;
const intensity = 2;
const light = new THREE.DirectionalLight(color, intensity);
light.position.set(-1, 2, 4);
scene.add(light);
}
const boxWidth = 1;
const boxHeight = 1;
const boxDepth = 1;
const geometry = new THREE.BoxGeometry(boxWidth, boxHeight, boxDepth);
function makeInstance(geometry, color, x) {
const material = new THREE.MeshPhongMaterial({color});
const cube = new THREE.Mesh(geometry, material);
scene.add(cube);
cube.position.x = x;
return cube;
}
const cubes = [
makeInstance(geometry, 0x44aa88, 0),
makeInstance(geometry, 0x8844aa, -2),
makeInstance(geometry, 0xaa8844, 2),
];
const composer = new EffectComposer(renderer);
composer.addPass(new RenderPass(scene, camera));
// from:
// https://github.com/mistic100/three.js-examples/blob/master/LICENSE
const kaleidoscopeShader = {
uniforms: {
"tDiffuse": { value: null },
"sides": { value: 6.0 },
"angle": { value: 0.0 }
},
vertexShader: `
varying vec2 vUv;
void main() {
vUv = uv;
gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );
}
`,
fragmentShader: `
uniform sampler2D tDiffuse;
uniform float sides;
uniform float angle;
varying vec2 vUv;
void main() {
vec2 p = vUv - 0.5;
float r = length(p);
float a = atan(p.y, p.x) + angle;
float tau = 2. * 3.1416 ;
a = mod(a, tau/sides);
a = abs(a - tau/sides/2.) ;
p = r * vec2(cos(a), sin(a));
vec4 color = texture2D(tDiffuse, p + 0.5);
gl_FragColor = color;
}
`
};
const kaleidoscopePass = new ShaderPass(kaleidoscopeShader);
kaleidoscopePass.renderToScreen = true;
composer.addPass(kaleidoscopePass);
// Resize the renderer's drawing buffer to match the canvas's CSS display
// size. Returns true when a resize was actually performed so the caller can
// also update the camera aspect / composer size.
function resizeRendererToDisplaySize(renderer) {
  const el = renderer.domElement;
  const displayWidth = el.clientWidth;
  const displayHeight = el.clientHeight;
  if (el.width === displayWidth && el.height === displayHeight) {
    return false;
  }
  // false: let CSS keep controlling the element's layout size
  renderer.setSize(displayWidth, displayHeight, false);
  return true;
}
const gui = new GUI();
gui.add(kaleidoscopePass.uniforms.sides, 'value', 0, 20).name('sides');
gui.add(kaleidoscopePass.uniforms.angle, 'value', 0, 6.28, 0.01).name('angle');
let then = 0;
function render(now) {
now *= 0.001; // convert to seconds
const deltaTime = now - then;
then = now;
if (resizeRendererToDisplaySize(renderer)) {
const canvas = renderer.domElement;
camera.aspect = canvas.clientWidth / canvas.clientHeight;
camera.updateProjectionMatrix();
composer.setSize(canvas.width, canvas.height);
}
cubes.forEach((cube, ndx) => {
const speed = 1 + ndx * .1;
const rot = now * speed;
cube.rotation.x = rot;
cube.rotation.y = rot;
});
composer.render(deltaTime);
requestAnimationFrame(render);
}
requestAnimationFrame(render);
}
main();
</script>
There is a texture wrap mode that does mirroring.
texture.wrapS = texture.wrapT = THREE.MirroredRepeatWrapping
Does that help?
edit: Here's an example showing mirroredrepeatwrapping on both axis:
https://glitch.com/~three-mirroredrepeatwrapping

How to get the global position of a vertex of a skinned mesh in Three.js?

In Three.js, we are now able to get the global position of a vertex of a non-skinned mesh thanks to this question, but how can I get the global position of a vertex of a skinned mesh with bones and morph targets?
For example, how can I print (2.5, 1.5, 0.5) in the following situation?
mesh.geometry.vertices[0] is originally at (0.5, 0.5, 0.5).
Then, bones[1] moves the vertex to (2.5, 0.5, 0.5).
Finally, morphing moves the vertex to (2.5, 1.5, 0.5).
const scene = new THREE.Scene();
const camera = new THREE.PerspectiveCamera(75, window.innerWidth / window.innerHeight, 0.1, 200);
camera.position.z = 3;
camera.position.y = 2;
camera.lookAt(0, 0, 0);
const renderer = new THREE.WebGLRenderer({antialias: true});
renderer.setPixelRatio(window.devicePixelRatio);
renderer.setSize(window.innerWidth, window.innerHeight);
document.body.appendChild(renderer.domElement);
const geometry = new THREE.BoxGeometry(1, 1, 1);
geometry.morphTargets.push({name: "morph", vertices: []});
for (const vertex of geometry.vertices) {
geometry.skinIndices.push(new THREE.Vector4(vertex.x < 0 ? 0 : 1, 0, 0, 0));
geometry.skinWeights.push(new THREE.Vector4(1, 0, 0, 0));
geometry.morphTargets[0].vertices.push(vertex.clone().add(new THREE.Vector3(0, 1, 0)));
}
const material = new THREE.MeshPhongMaterial({
skinning: true,
emissive: 0xffffff,
wireframe: true,
morphTargets: true
});
const mesh = new THREE.SkinnedMesh(geometry, material);
const bones = [new THREE.Bone(), new THREE.Bone()];
for (const bone of bones) {
mesh.add(bone);
}
const skeleton = new THREE.Skeleton(bones);
mesh.bind(skeleton);
bones[0].position.x = -2;
bones[1].position.x = 2;
mesh.morphTargetInfluences[0] = 1;
scene.add(mesh);
// This code assigns (0.5, 0.5, 0.5) to pos,
// but I want to know the code which assigns (2.5, 1.5, 0.5) to pos.
const pos = mesh.geometry.vertices[0].clone().applyMatrix4(mesh.matrixWorld);
console.log(`(${pos.x}, ${pos.y}, ${pos.z})`);
(function render() {
requestAnimationFrame(render);
renderer.render(scene, camera);
})();
body {
margin: 0;
overflow: hidden;
}
canvas {
width: 100%;
height: 100%;
}
<script src="https://cdnjs.cloudflare.com/ajax/libs/three.js/94/three.min.js"></script>
So this is what I did to try to figure this out. I'm too lazy to check if it works for all cases but ...
First I used the Shader Editor extension and then I ran a skinned example and using the shader editor I looked at the generated shader
Looking at the vertex shader the relevant part of the shader is
#ifdef USE_SKINNING
mat4 boneMatX = getBoneMatrix( skinIndex.x );
mat4 boneMatY = getBoneMatrix( skinIndex.y );
mat4 boneMatZ = getBoneMatrix( skinIndex.z );
mat4 boneMatW = getBoneMatrix( skinIndex.w );
#endif
#ifdef USE_SKINNING
mat4 skinMatrix = mat4( 0.0 );
skinMatrix += skinWeight.x * boneMatX;
skinMatrix += skinWeight.y * boneMatY;
skinMatrix += skinWeight.z * boneMatZ;
skinMatrix += skinWeight.w * boneMatW;
skinMatrix = bindMatrixInverse * skinMatrix * bindMatrix;
objectNormal = vec4( skinMatrix * vec4( objectNormal, 0.0 ) ).xyz;
#endif
vec3 transformedNormal = normalMatrix * objectNormal;
#ifdef FLIP_SIDED
transformedNormal = - transformedNormal;
#endif
vec3 transformed = vec3( position );
#ifdef USE_MORPHTARGETS
transformed += ( morphTarget0 - position ) * morphTargetInfluences[ 0 ];
transformed += ( morphTarget1 - position ) * morphTargetInfluences[ 1 ];
transformed += ( morphTarget2 - position ) * morphTargetInfluences[ 2 ];
transformed += ( morphTarget3 - position ) * morphTargetInfluences[ 3 ];
#ifndef USE_MORPHNORMALS
transformed += ( morphTarget4 - position ) * morphTargetInfluences[ 4 ];
transformed += ( morphTarget5 - position ) * morphTargetInfluences[ 5 ];
transformed += ( morphTarget6 - position ) * morphTargetInfluences[ 6 ];
transformed += ( morphTarget7 - position ) * morphTargetInfluences[ 7 ];
#endif
#endif
#ifdef USE_SKINNING
vec4 skinVertex = bindMatrix * vec4( transformed, 1.0 );
vec4 skinned = vec4( 0.0 );
skinned += boneMatX * skinVertex * skinWeight.x;
skinned += boneMatY * skinVertex * skinWeight.y;
skinned += boneMatZ * skinVertex * skinWeight.z;
skinned += boneMatW * skinVertex * skinWeight.w;
transformed = ( bindMatrixInverse * skinned ).xyz;
#endif
So translating that into JavaScript here's what I came up with
First off you need the scene to have all its world matrices
and the skeleton updated.
scene.updateMatrixWorld();
skeleton.update();
Then
// These are so we can avoid doing allocations
// in the inner loop.
const position = new THREE.Vector3();
const transformed = new THREE.Vector3();
const temp1 = new THREE.Vector3();
const tempBoneMatrix = new THREE.Matrix4();
const tempSkinnedVertex = new THREE.Vector3();
const tempSkinned = new THREE.Vector3();
const bindMatrix = mesh.bindMatrix;
const bindMatrixInverse = mesh.bindMatrixInverse;
for (let vndx = 0; vndx < mesh.geometry.vertices.length; ++vndx) {
position.copy(mesh.geometry.vertices[vndx]);
transformed.copy(position);
for (let i = 0; i < mesh.geometry.morphTargets.length; ++i) {
temp1.copy(mesh.geometry.morphTargets[i].vertices[vndx]);
transformed.add(temp1.sub(position).multiplyScalar(mesh.morphTargetInfluences[i]));
}
tempSkinnedVertex.copy(transformed).applyMatrix4(bindMatrix);
tempSkinned.set(0, 0, 0);
const skinIndices = geometry.skinIndices[vndx];
const skinWeights = geometry.skinWeights[vndx];
for (let i = 0; i < 4; ++i) {
const boneNdx = skinIndices.getComponent(i);
const weight = skinWeights.getComponent(i);
tempBoneMatrix.fromArray(skeleton.boneMatrices, boneNdx * 16);
temp1.copy(tempSkinnedVertex);
tempSkinned.add(temp1.applyMatrix4(tempBoneMatrix).multiplyScalar(weight));
}
transformed.copy(tempSkinned).applyMatrix4(bindMatrixInverse);
transformed.applyMatrix4(mesh.matrixWorld);
Example:
const scene = new THREE.Scene();
const camera = new THREE.PerspectiveCamera(75, 2, 0.1, 200);
camera.position.z = 3;
camera.position.y = 2;
camera.lookAt(0, 0, 0);
const renderer = new THREE.WebGLRenderer({canvas:document.querySelector('canvas')});
renderer.setSize(512, 256, false);
document.body.appendChild(renderer.domElement);
const geometry = new THREE.BoxGeometry(1, 1, 1);
geometry.morphTargets.push({name: "morph", vertices: []});
for (const vertex of geometry.vertices) {
geometry.skinIndices.push(new THREE.Vector4(vertex.x < 0 ? 0 : 1, 0, 0, 0));
geometry.skinWeights.push(new THREE.Vector4(1, 0, 0, 0));
geometry.morphTargets[0].vertices.push(vertex.clone().add(new THREE.Vector3(0, 1, 0)));
}
const material = new THREE.MeshPhongMaterial({
skinning: true,
emissive: 0xffffff,
wireframe: true,
morphTargets: true
});
const mesh = new THREE.SkinnedMesh(geometry, material);
const bones = [new THREE.Bone(), new THREE.Bone()];
for (const bone of bones) {
mesh.add(bone);
}
const skeleton = new THREE.Skeleton(bones);
mesh.bind(skeleton);
bones[0].position.x = -2;
bones[1].position.x = 2;
mesh.morphTargetInfluences[0] = 1;
scene.add(mesh);
const putMeshesAtSkinnedVertices = (function() {
// These are so we can avoid doing allocations
// in the inner loop.
const position = new THREE.Vector3();
const transformed = new THREE.Vector3();
const temp1 = new THREE.Vector3();
const tempBoneMatrix = new THREE.Matrix4();
const tempSkinnedVertex = new THREE.Vector3();
const tempSkinned = new THREE.Vector3();
return function putMarkersAtSkinnedVertices(mesh, scene, marker, markerMaterial, markers) {
const bindMatrix = mesh.bindMatrix;
const bindMatrixInverse = mesh.bindMatrixInverse;
const geometry = mesh.geometry;
const vertices = geometry.vertices;
const morphTargets = geometry.morphTargets;
const skeleton = mesh.skeleton;
for (let vndx = 0; vndx < vertices.length; ++vndx) {
position.copy(vertices[vndx]);
transformed.copy(position);
for (let i = 0; i < morphTargets.length; ++i) {
temp1.copy(morphTargets[i].vertices[vndx]);
transformed.add(temp1.sub(position).multiplyScalar(mesh.morphTargetInfluences[i]));
}
tempSkinnedVertex.copy(transformed).applyMatrix4(bindMatrix);
tempSkinned.set(0, 0, 0);
const skinIndices = geometry.skinIndices[vndx];
const skinWeights = geometry.skinWeights[vndx];
for (let i = 0; i < 4; ++i) {
const boneNdx = skinIndices.getComponent(i);
const weight = skinWeights.getComponent(i);
tempBoneMatrix.fromArray(skeleton.boneMatrices, boneNdx * 16);
temp1.copy(tempSkinnedVertex);
tempSkinned.add(temp1.applyMatrix4(tempBoneMatrix).multiplyScalar(weight));
}
transformed.copy(tempSkinned).applyMatrix4(bindMatrixInverse);
transformed.applyMatrix4(mesh.matrixWorld);
// create them the first time this is called
let markerMesh = markers[vndx];
if (!markerMesh) {
markerMesh = new THREE.Mesh(marker, markerMaterial);
markers[vndx] = markerMesh;
scene.add(markerMesh);
}
markerMesh.position.copy(transformed);
}
}
}());
// Make sure all matrices are up to date
scene.updateMatrixWorld();
skeleton.update();
const marker = new THREE.BoxBufferGeometry(0.1, 0.1, 0.1);
const markerMaterial = new THREE.MeshBasicMaterial({color: 0xFF0000});
const markers = [];
putMeshesAtSkinnedVertices(mesh, scene, marker, markerMaterial, markers);
(function render() {
// requestAnimationFrame(render);
renderer.render(scene, camera);
})();
<script src="https://cdnjs.cloudflare.com/ajax/libs/three.js/94/three.min.js"></script>
<canvas></canvas>
I did test it with this sample and it seemed to work.

How to make the camera look at a specific child in THREE.js

I'm using THREE v0.86 with React and I'm to trying to get the world position of a specific child of a parent, so when I click a button I can change its material color and make the camera face to it.
First, I store a reference for each of the children I'm interested, on the onLoad function:
const onLoad = object => {
object.name = "scene 1";
object.position.set(0, 5, 0);
obj3d.add(object);
object.traverse(
function(child) {
if (condition === true) {
let material = new THREE.MeshBasicMaterial({ color: 0xffff00 });
child.material = material;
this[child.name] = child; // <-- Here
}
}.bind(this)
);
};
Then, I've tried it two ways: first with controls enabled: TrackballControls
// Highlights the selected child in red and retargets the controls/camera
// toward it. (This is the questioner's attempt; the accepted answer below
// explains why the camera never turns.)
moveCamera(selectedChild) {
// Redundant: the material is replaced two lines down with a brand-new
// red MeshBasicMaterial, so this color change is immediately discarded.
this[selectedChild].material.color.setHex(0xff0000);
const material = new THREE.MeshBasicMaterial({ color: "#FF0000" });
this[selectedChild].material = material;
const newPosition = new THREE.Vector3();
// Extract the child's world-space position from its world matrix.
newPosition.setFromMatrixPosition(this[selectedChild].matrixWorld);
// I've tried this:
this.controls.target = this[selectedChild].getWorldPosition();
// OR this
this.controls.target = newPosition;
// NOTE(review): lookAt is passed the Object3D itself; per the answer below,
// Object3D.lookAt expects a world-space Vector3 — likely part of the bug.
this.camera.lookAt(this[selectedChild]);
}
Also, I've disabled the controls and tried this:
moveCamera(selectedChild) {
this[selectedChild].material.color.setHex(0xff0000);
const material = new THREE.MeshBasicMaterial({ color: "#FF0000" });
this[selectedChild].material = material;
this.camera.lookAt(this[selectedChild].getWorldPosition());
}
With this in the renderScene() function:
renderScene() {
this.renderer.autoClear = true;
this.renderer.setClearColor(0xfff0f0);
this.renderer.setClearAlpha(0.0);
this.scene.updateMatrixWorld(); // <- THIS
if (this.composer) {
this.composer.render();
} else {
this.renderer.render(this.scene, this.camera);
}
}
The moveCamera(childName) does change the specific child color but the problem is that it ALWAYS looks in the same direction, so I consoled log the parent object and took a glance at the children and they all have the same matrix, matrixAutoUpdate: true, matrixWorld, matrixWorldNeedsUpdate: false properties values between them and they are all the parent's values as well, so of course the newPosition.setFromMatrixPosition(this[selectedChild].matrixWorld); vector will always be the same. What am I missing? why aren't the different children objects positions relative to the world different?
This is my scene setup
componentDidMount() {
const { THREE, innerWidth, innerHeight } = window;
OBJLoader(THREE);
MTLLoader(THREE);
this.setScene();
this.setCamera();
this.setRenderer();
this.group = new THREE.Group();
this.selectedObjects = [];
this.mouse = new THREE.Vector2();
this.raycaster = new THREE.Raycaster();
this.enableControls();
this.start();
}
Where:
this.setScene()
setScene() {
const { THREE } = window;
const scene = new THREE.Scene();
scene.background = new THREE.Color(0xcccccc);
scene.fog = new THREE.FogExp2(0xcccccc, 0.002);
// LIGHTS
scene.add(new THREE.AmbientLight(0xaaaaaa, 0.2));
const light = new THREE.DirectionalLight(0xddffdd, 0.6);
light.position.set(1, 1, 1);
light.castShadow = true;
light.shadow.mapSize.width = 1024;
light.shadow.mapSize.height = 1024;
const d = 10;
light.shadow.camera.left = -d;
light.shadow.camera.right = d;
light.shadow.camera.top = d;
light.shadow.camera.bottom = -d;
light.shadow.camera.far = 1000;
scene.add(light);
const hemiLight = new THREE.HemisphereLight(0xffffff, 0xffffff, 0.5);
hemiLight.color.setHSL(0.6, 1, 0.6);
hemiLight.groundColor.setHSL(0.095, 1, 0.75);
hemiLight.position.set(0, 500, 0);
scene.add(hemiLight);
// GROUND
const groundGeo = new THREE.PlaneBufferGeometry(1000, 1000);
const groundMat = new THREE.MeshPhongMaterial({
color: 0xffffff,
specular: 0x050505
});
groundMat.color.setHSL(0.095, 1, 0.75);
const ground = new THREE.Mesh(groundGeo, groundMat);
ground.rotation.x = -Math.PI / 2;
ground.position.y = -0.5;
scene.add(ground);
// SKYDOME
const vertexShader =
"varying vec3 vWorldPosition; void main() { vec4 worldPosition = modelMatrix * vec4( position, 1.0 ); vWorldPosition = worldPosition.xyz; gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 ); }";
const fragmentShader =
"uniform vec3 topColor; uniform vec3 bottomColor; uniform float offset; uniform float exponent; varying vec3 vWorldPosition; void main() { float h = normalize( vWorldPosition + offset ).y; gl_FragColor = vec4( mix( bottomColor, topColor, max( pow( max( h , 0.0), exponent ), 0.0 ) ), 1.0 ); }";
const uniforms = {
topColor: { value: new THREE.Color(0x0077ff) },
bottomColor: { value: new THREE.Color(0xffffff) },
offset: { value: 0 },
exponent: { value: 0.6 }
};
uniforms.topColor.value.copy(hemiLight.color);
scene.fog.color.copy(uniforms.bottomColor.value);
const skyGeo = new THREE.SphereGeometry(300, 32, 15);
const skyMat = new THREE.ShaderMaterial({
vertexShader: vertexShader,
fragmentShader: fragmentShader,
uniforms: uniforms,
side: THREE.BackSide
});
const sky = new THREE.Mesh(skyGeo, skyMat);
scene.add(sky);
this.scene = scene;
}
this.setCamera
setCamera() {
const { THREE, innerWidth, innerHeight } = window;
const camera = new THREE.PerspectiveCamera(
45,
innerWidth / innerHeight,
0.1,
100
);
camera.position.set(-4.1, 7.2, 4.2);
this.camera = camera;
}
this.setRenderer
setRenderer() {
const { THREE, innerWidth, innerHeight } = window;
const renderer = new THREE.WebGLRenderer({ antialias: false });
renderer.setSize(innerWidth, innerHeight * this.windowMultiplier);
this.renderer = renderer;
}
this.start this.stop this.animate
start() {
if (!this.frameId) {
this.frameId = requestAnimationFrame(this.animate);
}
}
stop() {
cancelAnimationFrame(this.frameId);
}
animate() {
this.renderScene();
this.frameId = window.requestAnimationFrame(this.animate);
if (!!this.controls) {
this.controls.update();
}
}
The Object3D .lookAt method expects a position in world space:
var position = new THREE.Vector3().copy( child.position );
child.localToWorld( position );
camera.lookAt( position );
Using TrackballControls or OrbitControls would complicate this, I'm not sure whether lookAt will work properly with either of those involved.
three.js r89
I got it! All of my code works perfectly; the problem was in the .obj itself: all of the children's pivot points were the same as the parent's — they weren't at each object's center (as one would expect):

Hard Light material blending mode in Three.js?

I am currently using the MeshPhongMaterial provided by Three.js to create a simple scene with basic water. I would like for the water material to have the Hard Light blending mode that can be found in applications such as Photoshop. How can I achieve the Hard Light blending modes below on the right?
The right halves of the images above are set to Hard Light in Photoshop. I am trying to recreate that Hard Light blend mode in Three.js.
One lead I have come across is to completely reimplement the MeshPhongMaterial's fragment and vertex shader, but this will take me some time as I am quite new to this.
What is the way to implement a Hard Light blending mode for a material in Three.js?
/*
 * Scene configuration for the hard-light blending question:
 * renderer, camera, three light sources, and two jittered planes
 * (animated water above a sandy floor).
 */
const scene = new THREE.Scene();
const camera = new THREE.PerspectiveCamera(50, window.innerWidth / window.innerHeight, 0.1, 10000);
const renderer = new THREE.WebGLRenderer({ antialias: true });
renderer.setClearColor(0xffffff);
renderer.setSize(window.innerWidth, window.innerHeight);
document.body.appendChild(renderer.domElement);

// View the water from above and behind, aimed at the origin.
camera.position.set(0, 500, 1000);
camera.lookAt(scene.position);

/*
 * Lights
 */
const spotlight = new THREE.SpotLight(0x999999, 0.1);
spotlight.castShadow = true;
spotlight.shadowDarkness = 0.75; // r73-era API (removed in later three.js releases)
spotlight.position.set(0, 500, 0);
scene.add(spotlight);

const pointlight = new THREE.PointLight(0x999999, 0.5);
pointlight.position.set(75, 50, 0);
scene.add(pointlight);

const hemiLight = new THREE.HemisphereLight(0xffce7a, 0x000000, 1.25);
hemiLight.position.y = 75;
hemiLight.position.z = 500;
scene.add(hemiLight);

/*
 * Water: a 50x50-segment plane whose vertices are randomly jittered in
 * x/y so the wave animation (see render()) doesn't read as a regular grid.
 */
const waterGeo = new THREE.PlaneGeometry(1000, 1000, 50, 50);
const waterMat = new THREE.MeshPhongMaterial({
  color: 0x00aeff,
  emissive: 0x0023b9,
  shading: THREE.FlatShading,
  shininess: 60,
  specular: 30,
  transparent: true
});
for (const vertex of waterGeo.vertices) {
  vertex.x += (Math.random() * Math.random()) * 30;
  vertex.y += (Math.random() * Math.random()) * 20;
}
const waterObj = new THREE.Mesh(waterGeo, waterMat);
waterObj.rotation.x = -Math.PI / 2; // lay the plane horizontally
scene.add(waterObj);

/*
 * Floor: same construction as the water, but jittered on all three axes
 * and sunk 75 units below the surface.
 */
const floorGeo = new THREE.PlaneGeometry(1000, 1000, 50, 50);
const floorMat = new THREE.MeshPhongMaterial({
  color: 0xe9b379,
  emissive: 0x442c10,
  shading: THREE.FlatShading
});
for (const vertex of floorGeo.vertices) {
  vertex.x += (Math.random() * Math.random()) * 30;
  vertex.y += (Math.random() * Math.random()) * 20;
  vertex.z += (Math.random() * Math.random()) * 20;
}
const floorObj = new THREE.Mesh(floorGeo, floorMat);
floorObj.rotation.x = -Math.PI / 2;
floorObj.position.y = -75;
scene.add(floorObj);

/*
 * Animation state: wave phase, advanced each frame by render().
 */
let count = 0;
/*
 * Render loop: displaces each water vertex along z using two
 * phase-shifted trig waves, then draws the scene.
 *
 * Fixes vs. original: removed the unused `particle` variable, and moved
 * the `verticesNeedUpdate` flag out of the inner loop so it is set once
 * per frame instead of 2500 times.
 */
function render() {
  requestAnimationFrame(render);
  var i = 0;
  // Walk the vertex grid row by row.
  // NOTE(review): a 50x50-segment PlaneGeometry has 51x51 vertices, so
  // this 50x50 walk leaves the last row/column static — confirm whether
  // that is intentional before extending the bounds.
  for (var ix = 0; ix < 50; ix++) {
    for (var iy = 0; iy < 50; iy++) {
      waterObj.geometry.vertices[i++].z = (Math.sin((ix + count) * 2) * 3) +
        (Math.cos((iy + count) * 1.5) * 6);
    }
  }
  // Tell three.js to re-upload the modified positions this frame.
  waterObj.geometry.verticesNeedUpdate = true;
  count += 0.05;
  renderer.render(scene, camera);
}
render();
html,
body {
margin: 0;
padding: 0;
width: 100%;
height: 100%;
}
<script src="https://cdnjs.cloudflare.com/ajax/libs/three.js/r73/three.min.js"></script>
I don't think you're going to get the effect you want.
How do you generate the first image? I assume you just made fuzzy oval in photoshop and picked "hard light"?
If you want the same thing in three.js you'll need to generate a fuzzy oval and apply it in 2d using a post processing effect in three.js
You could generate such an oval by making a 2nd scene in three.js, adding the lights and shining them on a black plane that has no waves that's at the same position as the water is in the original scene. Render that to a rendertarget. You probably want only the spotlight and maybe point light in that scene. In your current scene remove the spotlight for sure. Render that to another render target.
When you're done combine the scenes using a post processing effect that implements hard light
// pseudo code
vec3 partA = texture2D(sceneTexture, texcoord).rgb;
vec3 partB = texture2D(lightTexture, texcoord).rgb;
vec3 line1 = 2.0 * partA * partB;
vec3 line2 = 1.0 - 2.0 * (1.0 - partA) * (1.0 - partB);
gl_FragColor = vec4(mix(line2, line1, step(0.5, partA)), 1);
I ended up doing it in the following way thanks to gman's excellent answer. View the code snippet below to see it in action.
As gman described:
I created a WebGLRenderTarget to which the scene is rendered to.
The WebGLRenderTarget is then passed to the ShaderMaterial's uniforms as a texture, together with the window.innerWidth, window.innerHeight and color.
The respective texture coordinates, in relation to the current fragment, are calculated by dividing gl_FragCoord by the window's width and height.
The fragment can now sample what is on screen from the WebGLRenderTarget texture and combine that with the color of the object to output the correct gl_FragColor.
So far it works great. The only thing I am currently looking into is to create a separate scene containing only the objects that are necessary for blending, perhaps cloned. I assume that would be more performant. Currently I am toggling the visibility of the object to be blended in the render loop, before and after it is sent to the WebGLRenderTarget. For a larger scene with more objects, that probably doesn't make much sense and would complicate things.
/*
 * dat.GUI bindings: one colour picker per plane. Picking a colour
 * installs a fresh `color` uniform on that plane's ShaderMaterial.
 */
var conf = {
  'Color A': '#cc6633',
  'Color B': '#0099ff'
};
var GUI = new dat.GUI();

// Returns an onChange handler that swaps in a new colour uniform on the
// mesh produced by `getMesh` (resolved lazily, since the meshes are
// created after these controls).
function makeColorHandler(getMesh) {
  return function (val) {
    var mesh = getMesh();
    mesh.material.uniforms.color = {
      type: 'c',
      value: new THREE.Color(val)
    };
    mesh.material.needsUpdate = true;
  };
}

var A_COLOR = GUI.addColor(conf, 'Color A');
A_COLOR.onChange(makeColorHandler(function () { return A_OBJ; }));

var B_COLOR = GUI.addColor(conf, 'Color B');
B_COLOR.onChange(makeColorHandler(function () { return B_OBJ; }));
/*
 * Scene: two overlapping planes. Plane A is drawn normally; plane B's
 * fragment shader samples what the off-screen pass rendered (`target`)
 * and hard-light blends its own colour over it.
 */
var scene = new THREE.Scene();
var camera = new THREE.PerspectiveCamera(50, window.innerWidth / window.innerHeight, 0.1, 100);

var renderer = new THREE.WebGLRenderer();
renderer.setClearColor(0x888888);
renderer.setSize(window.innerWidth, window.innerHeight);
document.body.appendChild(renderer.domElement);

// Off-screen buffer that plane B's shader reads back as a texture.
var target = new THREE.WebGLRenderTarget(window.innerWidth, window.innerHeight, { format: THREE.RGBFormat });

camera.position.set(0, 0, 50);
camera.lookAt(scene.position);

var A_GEO = new THREE.PlaneGeometry(20, 20);
var B_GEO = new THREE.PlaneGeometry(20, 20);

// Plane A: plain colour uniform only.
var A_MAT = new THREE.ShaderMaterial({
  uniforms: {
    color: { type: 'c', value: new THREE.Color(0xcc6633) }
  },
  vertexShader: document.getElementById('vertexShaderA').innerHTML,
  fragmentShader: document.getElementById('fragmentShaderA').innerHTML
});

// Plane B: also receives the canvas size (to normalise gl_FragCoord)
// and the render target to sample.
var B_MAT = new THREE.ShaderMaterial({
  uniforms: {
    color: { type: 'c', value: new THREE.Color(0x0099ff) },
    window: { type: 'v2', value: new THREE.Vector2(window.innerWidth, window.innerHeight) },
    target: { type: 't', value: target }
  },
  vertexShader: document.getElementById('vertexShaderB').innerHTML,
  fragmentShader: document.getElementById('fragmentShaderB').innerHTML
});

var A_OBJ = new THREE.Mesh(A_GEO, A_MAT);
var B_OBJ = new THREE.Mesh(B_GEO, B_MAT);
A_OBJ.position.set(-5, -5, 0);
B_OBJ.position.set(5, 5, 0);
scene.add(A_OBJ);
scene.add(B_OBJ);
/*
 * Two-pass render loop.
 * Pass 1: hide B_OBJ and render the rest of the scene into `target`
 * (the trailing `true` forces a clear of the render target first —
 * r74 signature: render(scene, camera, renderTarget, forceClear)).
 * Pass 2: restore B_OBJ and render to the canvas, where B's fragment
 * shader samples `target` to blend with what is "behind" it.
 * The statement order here is load-bearing: B must be invisible for
 * the off-screen pass so its shader never samples its own output.
 */
function render() {
requestAnimationFrame(render);
// Exclude B from the off-screen pass.
B_OBJ.visible = false;
renderer.render(scene, camera, target, true);
B_OBJ.visible = true;
renderer.render(scene, camera);
}
render();
body { margin: 0 }
canvas { display: block }
<script src="https://cdnjs.cloudflare.com/ajax/libs/dat-gui/0.5.1/dat.gui.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/three.js/r74/three.min.js"></script>
<script type="x-shader/x-vertex" id="vertexShaderA">
// Pass-through vertex shader for plane A.
// NOTE(review): `color` is declared but never read here — it is only
// used in the fragment shader; the declaration could be dropped.
uniform vec3 color;
void main() {
gl_Position = projectionMatrix * modelViewMatrix * vec4(position, 1.0);
}
</script>
<script type="x-shader/x-fragment" id="fragmentShaderA">
// Flat fill: plane A is painted with its `color` uniform, fully opaque.
uniform vec3 color;
void main() {
gl_FragColor = vec4(color, 1.0);
}
</script>
<script type="x-shader/x-vertex" id="vertexShaderB">
// Pass-through vertex shader for plane B.
// NOTE(review): `color` is declared but never read here — the blending
// happens entirely in the fragment shader.
uniform vec3 color;
void main() {
gl_Position = projectionMatrix * modelViewMatrix * vec4(position, 1.0);
}
</script>
<script type="x-shader/x-fragment" id="fragmentShaderB">
// Blends plane B's `color` over whatever the off-screen pass rendered
// into `target` at this pixel.
uniform vec3 color;
uniform vec2 window; // canvas size in pixels, used to normalise gl_FragCoord
uniform sampler2D target; // the scene rendered without plane B
void main() {
// Window-space fragment position -> [0,1] texture coordinates.
vec2 targetCoords = gl_FragCoord.xy / window.xy;
vec4 a = texture2D(target, targetCoords); // base: what's behind B
vec4 b = vec4(color, 1.0); // blend colour
// Per-channel choice between a multiply term and a screen-like term,
// keyed on the base sample `a`.
// NOTE(review): classic Photoshop hard light conditions on the blend
// colour `b` (multiply where b < 0.5) — this keys on `a` instead;
// confirm the inversion is intentional.
vec4 multiply = 2.0 * a * b;
vec4 screen = 1.0 - 2.0 * (1.0 - a) * (1.0 - b);
gl_FragColor = vec4(mix(screen, multiply, step(0.5, a)));
}
</script>

Categories