Three.js: Setting `texture.needsUpdate = true` is very slow - javascript

I'm working on a Three.js scene in which I'd like to update some textures after some time. I'm finding that updating the textures is very slow, however, dragging the frame rate down to 1-2 FPS for several seconds (when updating just a single texture).
Is there anything one can do to expedite texture updates? Any insights others can offer would be greatly appreciated.
To see this behavior, click the window of the example below. This will trigger the first texture update (another click will trigger the second). If you try to zoom after one of these clicks, you'll find the screen freezes and the FPS drops terribly. Does anyone know how to fix this problem?
<html>
<head>
<style>
html, body { width: 100%; height: 100%; background: #000; }
body { margin: 0; overflow: hidden; }
canvas { width: 100%; height: 100%; }
</style>
</head>
<body>
<script src='https://cdnjs.cloudflare.com/ajax/libs/three.js/88/three.min.js'></script>
<script src='https://rawgit.com/YaleDHLab/pix-plot/master/assets/js/trackball-controls.js'></script>
<script src='https://rawgit.com/mrdoob/stats.js/master/build/stats.min.js'></script>
<script type='x-shader/x-vertex' id='vertex-shader'>
precision highp float;
uniform mat4 modelViewMatrix;
uniform mat4 projectionMatrix;
uniform vec3 cameraPosition;
attribute vec3 position; // sets the blueprint's vertex positions
attribute vec3 translation; // x y translation offsets for an instance
attribute float texIdx; // the texture index to access
varying float vTexIdx;
void main() {
// set point position
vec3 pos = position + translation;
vec4 projected = projectionMatrix * modelViewMatrix * vec4(pos, 1.0);
gl_Position = projected;
// assign the varyings
vTexIdx = texIdx;
// use the delta between the point position and camera position to size point
float xDelta = pow(projected[0] - cameraPosition[0], 2.0);
float yDelta = pow(projected[1] - cameraPosition[1], 2.0);
float zDelta = pow(projected[2] - cameraPosition[2], 2.0);
float delta = pow(xDelta + yDelta + zDelta, 0.5);
gl_PointSize = 40000.0 / delta;
}
</script>
<script type='x-shader/x-fragment' id='fragment-shader'>
precision highp float;
uniform sampler2D a;
uniform sampler2D b;
varying float vTexIdx;
void main() {
int textureIndex = int(vTexIdx);
vec2 uv = vec2(gl_PointCoord.x, gl_PointCoord.y);
if (textureIndex == 0) {
gl_FragColor = texture2D(a, uv);
} else if (textureIndex == 1) {
gl_FragColor = texture2D(b, uv);
}
}
</script>
<script>
/**
* Generate a scene object with a background color
**/
function getScene() {
var scene = new THREE.Scene();
scene.background = new THREE.Color(0xaaaaaa);
return scene;
}
/**
* Generate the camera to be used in the scene
**/
function getCamera() {
var aspectRatio = window.innerWidth / window.innerHeight;
var camera = new THREE.PerspectiveCamera(75, aspectRatio, 0.1, 100000);
camera.position.set(0, 1, -6000);
return camera;
}
/**
* Generate the renderer to be used in the scene
**/
function getRenderer() {
// Create the canvas with a renderer
var renderer = new THREE.WebGLRenderer({antialias: true});
// Add support for retina displays
renderer.setPixelRatio(window.devicePixelRatio);
// Specify the size of the canvas
renderer.setSize(window.innerWidth, window.innerHeight);
// Add the canvas to the DOM
document.body.appendChild(renderer.domElement);
return renderer;
}
/**
* Generate the controls to be used in the scene
**/
function getControls(camera, renderer) {
var controls = new THREE.TrackballControls(camera, renderer.domElement);
controls.zoomSpeed = 0.4;
controls.panSpeed = 0.4;
return controls;
}
/**
* Generate the points for the scene
**/
function addPoints(scene) {
var BA = THREE.BufferAttribute;
var IBA = THREE.InstancedBufferAttribute;
var geometry = new THREE.InstancedBufferGeometry();
// add data for each observation
var n = 10000; // number of observations
var rootN = n**(1/2);
var cellSize = 20;
var translation = new Float32Array( n * 3 );
var texIdx = new Float32Array( n );
var translationIterator = 0;
var texIterator = 0;
for (var i=0; i<n; i++) {
var x = Math.random() * n - (n/2);
var y = Math.random() * n - (n/2);
translation[translationIterator++] = x;
translation[translationIterator++] = y;
translation[translationIterator++] = Math.random() * n - (n/2);
texIdx[texIterator++] = (x + y) > (n/8) ? 1 : 0;
}
var positionAttr = new BA(new Float32Array( [0, 0, 0] ), 3);
var translationAttr = new IBA(translation, 3, 1);
var texIdxAttr = new IBA(texIdx, 1, 1);
positionAttr.dynamic = true;
translationAttr.dynamic = true;
texIdxAttr.dynamic = true;
geometry.addAttribute('position', positionAttr);
geometry.addAttribute('translation', translationAttr);
geometry.addAttribute('texIdx', texIdxAttr);
var canvases = [
getElem('canvas', { width: 16384, height: 16384, }),
getElem('canvas', { width: 16384, height: 16384, }),
]
var textures = [
getTexture( canvases[0] ),
getTexture( canvases[1] ),
];
var material = new THREE.RawShaderMaterial({
uniforms: {
a: {
type: 't',
value: textures[0],
},
b: {
type: 't',
value: textures[1],
}
},
vertexShader: document.getElementById('vertex-shader').textContent,
fragmentShader: document.getElementById('fragment-shader').textContent,
});
var mesh = new THREE.Points(geometry, material);
mesh.frustumCulled = false; // prevent the mesh from being clipped on drag
scene.add(mesh);
// on the first window click, paint red points
// on the second window click, paint blue points
var clicks = 0;
window.addEventListener('click', function() {
if (clicks == 0 || clicks == 1) {
var canvas = canvases[clicks];
var ctx = canvas.getContext('2d');
ctx.fillStyle = clicks == 0 ? 'red' : 'blue';
ctx.rect(0, 0, 16384, 16384);
ctx.fill();
textures[clicks].needsUpdate = true;
clicks++;
}
})
}
function getTexture(canvas) {
var tex = new THREE.Texture(canvas);
tex.needsUpdate = true;
tex.flipY = false;
return tex;
}
/**
* Create an element
**/
function getElem(tag, obj) {
var obj = obj || {};
var elem = document.createElement(tag);
Object.keys(obj).forEach(function(attr) {
elem[attr] = obj[attr];
})
return elem;
}
/**
* Add stats
**/
function getStats() {
var stats = new Stats();
stats.domElement.style.position = 'absolute';
stats.domElement.style.top = '65px';
stats.domElement.style.right = '5px';
stats.domElement.style.left = 'initial';
document.body.appendChild(stats.domElement);
return stats;
}
/**
* Render!
**/
function render() {
requestAnimationFrame(render);
renderer.render(scene, camera);
controls.update();
stats.update();
};
/**
* Main
**/
var stats = getStats();
var scene = getScene();
var camera = getCamera();
var renderer = getRenderer();
var controls = getControls(camera, renderer);
addPoints(scene);
render();
</script>
</body>
</html>

Your canvases are 16384 by 16384. That's basically insanely large.
For RGBA format, that is 1,073,741,824 bytes: a gigabyte of texture data that gets sent from the CPU to your GPU every time you set that texture.needsUpdate = true.
You will definitely notice this getting uploaded to the card.
If your use case absolutely requires textures that large, then you may need to consider doing incremental updates via gl.texSubImage2D, using a bunch of smaller textures and only updating one of them per frame, or only updating those textures at the start of your app and not thereafter.
For reference, there are very few cases I've seen where textures larger than 4K per side are needed, and a 4096x4096 texture is only 1/16th the size of yours.
This has nothing to do with three.js, by the way. It's a fundamental characteristic of GPU/CPU interaction: uploads and state changes are slow and have to be carefully orchestrated and monitored.
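To make the "bunch of smaller textures" option concrete, here is a minimal sketch (not the asker's code, and reusing the getElem and getTexture helpers from the question); the 2048-pixel tile size and the markDirty/uploadOneTile helpers are assumptions for illustration, and the fragment shader would still need to be adapted to address the extra textures:
// Sketch: replace each 16384x16384 canvas with an 8x8 grid of 2048x2048 tiles,
// each backed by its own texture, and upload at most one dirty tile per frame.
var TILE = 2048;
var tiles = [];
for (var t = 0; t < 64; t++) {
var tileCanvas = getElem('canvas', { width: TILE, height: TILE });
tiles.push({ canvas: tileCanvas, texture: getTexture(tileCanvas), dirty: false });
}
// After drawing into a tile's 2D context, flag it instead of uploading immediately.
function markDirty(idx) { tiles[idx].dirty = true; }
// Call once per render loop iteration: each upload is ~16 MB instead of ~1 GB.
function uploadOneTile() {
for (var t = 0; t < tiles.length; t++) {
if (tiles[t].dirty) {
tiles[t].texture.needsUpdate = true;
tiles[t].dirty = false;
break;
}
}
}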

Related

Mouse movement doesn't appear to be updating fragment shader

I'm trying to implement the Voronoi shader in the Book of Shaders in three.js and cannot seem to wrap my head around why the mouse position is not having any effect on my visible output. (This is for the initial demo where the mouse moves and the Voronoi updates)
I'm logging the mouse position, checking that the uniform value is updating, yet the shader doesn't appear to be changing whatsoever on my end.
This is what I see (static image - no change at all) while animate is being called.
I'm seeing u_time update when I log it in animate so the callback is happening. The uniforms just don't appear to be updated and I thought I was updating them correctly.
Note: I followed this SO post regarding tracking mouse position as a basis, though I tried both that mouse position and the tweaked mouse handler I have below (mapping to [-1, 1]).
Full code:
<!--
* Based on Book of Shaders 12:
https://thebookofshaders.com/12/
-->
<!DOCTYPE HTML>
<html>
<head>
<title>WebGL Demo - Voronoi</title>
<meta charset="utf-8">
<style>
body {
margin: 0;
padding: 0;
overflow: hidden;
}
</style>
<script src="./libraries/threejs/three.min.js"></script>
<!-- shaders -->
<script type="x-shader/x-vertex" id="vertexShader">
void main() {
vec4 modelViewPosition = modelViewMatrix * vec4(position, 1.0);
gl_Position = projectionMatrix * modelViewPosition;
}
</script>
<script type="x-shader/x-fragment" id="fragmentShader">
uniform vec2 u_resolution;
uniform vec2 u_mouse;
uniform float u_time;
void main() {
vec2 st = gl_FragCoord.xy/u_resolution.xy;
st.x *= u_resolution.x/u_resolution.y;
vec3 color = vec3(.0);
// Cell positions
vec2 point[5];
point[0] = vec2(0.83,0.75);
point[1] = vec2(0.60,0.07);
point[2] = vec2(0.28,0.64);
point[3] = vec2(0.31,0.26);
point[4] = u_mouse/u_resolution;
float m_dist = 1.; // minimum distance
// Iterate through the points positions
for (int i = 0; i < 5; i++) {
float dist = distance(st, point[i]);
// Keep the closer distance
m_dist = min(m_dist, dist);
}
// Draw the min distance (distance field)
color += m_dist;
// Show isolines
// color -= step(.7,abs(sin(50.0*m_dist)))*.3;
gl_FragColor = vec4(color,1.0);
}
</script>
</head>
<body></body>
<script>
let camera, scene, renderer;
let uniforms, mesh;
init();
animate();
function init() {
scene = new THREE.Scene();
camera = new THREE.Camera();
camera.position.z = 1;
renderer = new THREE.WebGLRenderer({ antialias: true });
renderer.setClearColor(0x000000, 1);
renderer.setSize(window.innerWidth, window.innerHeight);
renderer.setPixelRatio(window.devicePixelRatio);
document.body.appendChild(renderer.domElement);
uniforms = {
u_resolution: { type: 'vec2', value: new THREE.Vector2() },
u_mouse: { type: 'vec2', value: new THREE.Vector2() },
u_time: { type: 'float', value: 1.0 }
};
let vShader = document.getElementById("vertexShader").textContent;
let fShader = document.getElementById("fragmentShader").textContent;
let geometry = new THREE.PlaneGeometry(2, 2);
// give it a material
let material = new THREE.ShaderMaterial({
uniforms: uniforms,
fragmentShader: fShader,
vertexShader: vShader,
});
// and now create the mesh (geom+mat)
mesh = new THREE.Mesh(geometry, material);
scene.add(mesh);
onWindowResize();
window.addEventListener('resize', onWindowResize, false);
}
function animate() {
requestAnimationFrame(animate);
render();
}
function render() {
uniforms.u_time.value += 0.05;
renderer.render(scene, camera);
}
function onWindowResize(e) {
renderer.setSize(window.innerWidth, window.innerHeight);
uniforms.u_resolution.value.x = renderer.domElement.width;
uniforms.u_resolution.value.y = renderer.domElement.height;
}
document.onmousemove = function (e) {
uniforms.u_mouse.value.x = (e.clientX / window.innerWidth) * 2 - 1;//e.pageX / window.innerWidth;
uniforms.u_mouse.value.y = -(e.clientY / window.innerHeight) * 2 + 1;//e.pageY / window.innerHeight;
}
</script>
</html>
The line point[4] = u_mouse/u_resolution; probably doesn't need the division by u_resolution, because your Vector2 is already mapped into the [-1, 1] range. Dividing it again just produces very small values, which is why the mouse movement is indistinguishable in the output.
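One minimal way to line the two up (a sketch, assuming you want the mouse in the same normalized space as st rather than the Book of Shaders' pixel coordinates) is to feed the mouse in [0, 1] and use it directly:
// Sketch: keep u_mouse in the same [0, 1] space as st = gl_FragCoord.xy / u_resolution.xy
document.onmousemove = function (e) {
uniforms.u_mouse.value.x = e.clientX / window.innerWidth;        // 0..1, left to right
uniforms.u_mouse.value.y = 1.0 - e.clientY / window.innerHeight; // 0..1, bottom to top
};
// and in the fragment shader, use the value without the division:
// point[4] = u_mouse;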

How to initialise a particles system as a spherical cap?

My Three.js project consists of making snow fall (with some particles) inside a snowball (a SphereGeometry), a classic Xmas snowball. I created the particles using THREE.BufferGeometry() and initialised them by giving an initial_position for each coordinate (x, y, z).
Is there a way to make them visible only inside the sphere? I worked around the problem by giving the particles outside the sphere the same colour as the background, but it doesn't work perfectly.
Is there a way to make the particles outside the sphere invisible? Maybe by making them transparent.
Otherwise, how could I initialise the particles as a spherical cap?
Thanks!
This is how I am initialising the particle positions (as a parallelepiped):
for(i=0; i<n; i++){
init_pos_y[i] = 50 + (Math.random()-0.5)*20;
init_pos_x[i] = (Math.random()-0.5)*100;
init_pos_z[i] = (Math.random()-0.5)*100;
acceleration[i] = Math.random()*1;
}
In the vertex shader, this is how I make the particles fall and give them colour (and change the opacity depending on whether the position is inside or outside the sphere):
void main(){
vec3 p = position;
p.x = initial_position_x;
p.z = initial_position_z;
if (initial_position_y - time * acceleration > -32.8 + min_level){
p.y = initial_position_y - time * acceleration;
}
else{
p.y = -33.8 + min_level;
}
float opacity;
if (p.x*p.x + p.y*p.y + p.z*p.z > 2490.0){
opacity = 0.40;
vColor = vec4( customColor, opacity );
}
else{
opacity = 1.0;
vColor = vec4( customColor2, opacity );
}
gl_Position = projectionMatrix * modelViewMatrix * vec4(p, 1.0);
vUv = projectionMatrix * vec4(p, 1.0);
gl_PointSize = 3.0*acceleration;
}
First, you need to arrange your points in a cylinder formation (random position inside a circle, random height). For that, see the setInCircle() function.
Then you need to modify the shader code. I prefer to do it with .onBeforeCompile(), so you can modify just the necessary parts while keeping all the other functionality of the material.
In the shader, you shift the y-value by the travelled distance (time * speed) and wrap it with mod() over a range of 10, so each point cycles from top (5) to bottom (-5).
The last thing is to check whether the length of the transformed vector is less than or equal to the given radius, passing the result as a varying to the fragment shader, where you discard the pixel if the value is less than 0.5.
var scene = new THREE.Scene();
var camera = new THREE.PerspectiveCamera(60, window.innerWidth / window.innerHeight, 1, 1000);
camera.position.setScalar(10);
var renderer = new THREE.WebGLRenderer();
renderer.setSize(window.innerWidth, window.innerHeight);
document.body.appendChild(renderer.domElement);
var controls = new THREE.OrbitControls(camera, renderer.domElement);
scene.add(new THREE.GridHelper(10, 10));
var pts = [];
var radius = 5;
var pointsCount = 5000;
for (let i = 0; i < pointsCount; i++) {
let v2 = setInCircle().multiplyScalar(radius);
let v3 = new THREE.Vector3(v2.x, THREE.Math.randFloat(-radius, radius), v2.y);
pts.push(v3);
}
var geom = new THREE.BufferGeometry().setFromPoints(pts);
var uniforms = {
speed: {
value: 1
},
time: {
value: 0
},
radius: {
value: radius
}
}
var mat = new THREE.PointsMaterial({
color: "magenta",
size: 0.05
});
mat.onBeforeCompile = shader => {
shader.uniforms.speed = uniforms.speed;
shader.uniforms.time = uniforms.time;
shader.uniforms.radius = uniforms.radius;
shader.vertexShader = `
uniform float speed;
uniform float time;
uniform float radius;
varying float vVisible;
` + shader.vertexShader;
//console.log(shader.vertexShader);
shader.vertexShader = shader.vertexShader.replace(
`#include <begin_vertex>`,
`#include <begin_vertex>
transformed.y = mod((transformed.y - speed * time) - 5., 10.) - 5.;
vVisible = length(transformed) <= radius ? 1.: 0.;
`
);
shader.fragmentShader = `
varying float vVisible;
` + shader.fragmentShader;
console.log(shader.fragmentShader);
shader.fragmentShader = shader.fragmentShader.replace(
`void main() {`,
`void main() {
if (vVisible < 0.5) discard;
`
);
}
var points = new THREE.Points(geom, mat);
scene.add(points);
function setInCircle() {
let v = new THREE.Vector2();
v.set(
THREE.Math.randFloat(-1, 1),
THREE.Math.randFloat(-1, 1)
)
return v.length() <= 1 ? v : setInCircle();
}
var clock = new THREE.Clock();
renderer.setAnimationLoop(() => {
uniforms.time.value = clock.getElapsedTime();
renderer.render(scene, camera)
});
body {
overflow: hidden;
margin: 0;
}
<script src="https://threejs.org/build/three.min.js"></script>
<script src="https://threejs.org/examples/js/controls/OrbitControls.js"></script>

Apply color gradient to material on mesh - three.js

I have an STL file loaded into my scene with a single colour applied to a Phong material.
I'd like a way of applying two colours to this mesh's material, with a gradient effect applied on the Z axis like the "Gradient Vase" example image below.
I have a feeling I may have to introduce shaders, but I've not gotten that far with three.js.
Simple gradient shader, based on uvs:
var scene = new THREE.Scene();
var camera = new THREE.PerspectiveCamera(60, 1, 1, 1000);
camera.position.set(13, 25, 38);
camera.lookAt(scene.position);
var renderer = new THREE.WebGLRenderer({
antialias: true
});
var canvas = renderer.domElement
document.body.appendChild(canvas);
var controls = new THREE.OrbitControls(camera, renderer.domElement);
var geometry = new THREE.CylinderBufferGeometry(2, 5, 20, 32, 1, true);
var material = new THREE.ShaderMaterial({
uniforms: {
color1: {
value: new THREE.Color("red")
},
color2: {
value: new THREE.Color("purple")
}
},
vertexShader: `
varying vec2 vUv;
void main() {
vUv = uv;
gl_Position = projectionMatrix * modelViewMatrix * vec4(position,1.0);
}
`,
fragmentShader: `
uniform vec3 color1;
uniform vec3 color2;
varying vec2 vUv;
void main() {
gl_FragColor = vec4(mix(color1, color2, vUv.y), 1.0);
}
`,
wireframe: true
});
var mesh = new THREE.Mesh(geometry, material);
scene.add(mesh);
render();
function resize(renderer) {
const canvas = renderer.domElement;
const width = canvas.clientWidth;
const height = canvas.clientHeight;
const needResize = canvas.width !== width || canvas.height !== height;
if (needResize) {
renderer.setSize(width, height, false);
}
return needResize;
}
function render() {
if (resize(renderer)) {
camera.aspect = canvas.clientWidth / canvas.clientHeight;
camera.updateProjectionMatrix();
}
renderer.render(scene, camera);
requestAnimationFrame(render);
}
html,
body {
height: 100%;
margin: 0;
overflow: hidden;
}
canvas {
width: 100%;
height: 100%;
display: block;
}
<script src="https://cdn.jsdelivr.net/npm/three#0.115.0/build/three.min.js"></script>
<script src="https://cdn.jsdelivr.net/npm/three#0.115.0/examples/js/controls/OrbitControls.js"></script>
Simple gradient shader, based on coordinates:
var scene = new THREE.Scene();
var camera = new THREE.PerspectiveCamera(60, 1, 1, 1000);
camera.position.set(13, 25, 38);
camera.lookAt(scene.position);
var renderer = new THREE.WebGLRenderer({
antialias: true
});
var canvas = renderer.domElement
document.body.appendChild(canvas);
var controls = new THREE.OrbitControls(camera, renderer.domElement);
var geometry = new THREE.CylinderBufferGeometry(2, 5, 20, 16, 4, true);
geometry.computeBoundingBox();
var material = new THREE.ShaderMaterial({
uniforms: {
color1: {
value: new THREE.Color("red")
},
color2: {
value: new THREE.Color("purple")
},
bboxMin: {
value: geometry.boundingBox.min
},
bboxMax: {
value: geometry.boundingBox.max
}
},
vertexShader: `
uniform vec3 bboxMin;
uniform vec3 bboxMax;
varying vec2 vUv;
void main() {
vUv.y = (position.y - bboxMin.y) / (bboxMax.y - bboxMin.y);
gl_Position = projectionMatrix * modelViewMatrix * vec4(position,1.0);
}
`,
fragmentShader: `
uniform vec3 color1;
uniform vec3 color2;
varying vec2 vUv;
void main() {
gl_FragColor = vec4(mix(color1, color2, vUv.y), 1.0);
}
`,
wireframe: true
});
var mesh = new THREE.Mesh(geometry, material);
scene.add(mesh);
render();
function resize(renderer) {
const canvas = renderer.domElement;
const width = canvas.clientWidth;
const height = canvas.clientHeight;
const needResize = canvas.width !== width || canvas.height !== height;
if (needResize) {
renderer.setSize(width, height, false);
}
return needResize;
}
function render() {
if (resize(renderer)) {
camera.aspect = canvas.clientWidth / canvas.clientHeight;
camera.updateProjectionMatrix();
}
renderer.render(scene, camera);
requestAnimationFrame(render);
}
html,
body {
height: 100%;
margin: 0;
overflow: hidden;
}
canvas {
width: 100%;
height: 100%;
display: block;
}
<script src="https://cdn.jsdelivr.net/npm/three#0.115.0/build/three.min.js"></script>
<script src="https://cdn.jsdelivr.net/npm/three#0.115.0/examples/js/controls/OrbitControls.js"></script>
Gradient with vertex colours:
var scene = new THREE.Scene();
var camera = new THREE.PerspectiveCamera(60, 1, 1, 1000);
camera.position.set(0, 0, 10);
var renderer = new THREE.WebGLRenderer({
antialias: true
});
var canvas = renderer.domElement
document.body.appendChild(canvas);
var geom = new THREE.TorusKnotGeometry(2.5, .5, 100, 16);
var rev = true;
var cols = [{
stop: 0,
color: new THREE.Color(0xf7b000)
}, {
stop: .25,
color: new THREE.Color(0xdd0080)
}, {
stop: .5,
color: new THREE.Color(0x622b85)
}, {
stop: .75,
color: new THREE.Color(0x007dae)
}, {
stop: 1,
color: new THREE.Color(0x77c8db)
}];
setGradient(geom, cols, 'z', rev);
function setGradient(geometry, colors, axis, reverse) {
geometry.computeBoundingBox();
var bbox = geometry.boundingBox;
var size = new THREE.Vector3().subVectors(bbox.max, bbox.min);
var vertexIndices = ['a', 'b', 'c'];
var face, vertex, normalized = new THREE.Vector3(),
normalizedAxis = 0;
for (var c = 0; c < colors.length - 1; c++) {
var colorDiff = colors[c + 1].stop - colors[c].stop;
for (var i = 0; i < geometry.faces.length; i++) {
face = geometry.faces[i];
for (var v = 0; v < 3; v++) {
vertex = geometry.vertices[face[vertexIndices[v]]];
normalizedAxis = normalized.subVectors(vertex, bbox.min).divide(size)[axis];
if (reverse) {
normalizedAxis = 1 - normalizedAxis;
}
if (normalizedAxis >= colors[c].stop && normalizedAxis <= colors[c + 1].stop) {
var localNormalizedAxis = (normalizedAxis - colors[c].stop) / colorDiff;
face.vertexColors[v] = colors[c].color.clone().lerp(colors[c + 1].color, localNormalizedAxis);
}
}
}
}
}
var mat = new THREE.MeshBasicMaterial({
vertexColors: THREE.VertexColors,
wireframe: true
});
var obj = new THREE.Mesh(geom, mat);
scene.add(obj);
render();
function resize(renderer) {
const canvas = renderer.domElement;
const width = canvas.clientWidth;
const height = canvas.clientHeight;
const needResize = canvas.width !== width || canvas.height !== height;
if (needResize) {
renderer.setSize(width, height, false);
}
return needResize;
}
function render() {
if (resize(renderer)) {
camera.aspect = canvas.clientWidth / canvas.clientHeight;
camera.updateProjectionMatrix();
}
renderer.render(scene, camera);
obj.rotation.y += .01;
requestAnimationFrame(render);
}
html,
body {
height: 100%;
margin: 0;
overflow: hidden;
}
canvas {
width: 100%;
height: 100%;
display: block;
}
<script src="https://cdn.jsdelivr.net/npm/three#0.115.0/build/three.min.js"></script>
Actually, it's up to you which approach to use: shaders, vertex colours, textures etc.
If you want to keep the functionality of the MeshPhongMaterial you can try extending the material.
This is a somewhat broad topic with several approaches, and you can read more about it in depth here.
There is a line in the Phong material's shader that looks like this:
vec4 diffuseColor = vec4( diffuse, opacity );
So after studying the Book of Shaders or some other tutorials, you will learn that you can mix two colors by using a normalized factor (a number between 0 and 1).
That means you could change this line to something like this:
vec4 diffuseColor = vec4( mix(diffuse, myColor, vec3(myFactor)), opacity);
You can extend the shader as such
const myFactor = { value: 0 }
const myColor = {value: new THREE.Color}
myMaterial.onBeforeCompile = shader=>{
shader.uniforms.myFactor = myFactor
shader.uniforms.myColor = myColor
shader.fragmentShader = `
uniform vec3 myColor;
uniform float myFactor;
${shader.fragmentShader.replace(
'vec4 diffuseColor = vec4( diffuse, opacity );',
'vec4 diffuseColor = vec4( mix(diffuse, myColor, vec3(myFactor)), opacity);'
)}
`
}
Now when you change myFactor.value the color of your object should change from myMaterial.color to myColor.value.
Now to actually make it into a gradient you would replace myFactor with something dynamic. I like prisoner's solution of using the UVs: it's entirely done in JavaScript, and very simple to hook up in this shader. Other approaches would probably require more shader work.
vec4 diffuseColor = vec4( mix(diffuse, myColor, vec3(vUv.y)), opacity);
Now, the problem you may encounter: if you call new PhongMaterial({color}), i.e. without providing any textures, the shader will be compiled without vUv.
There are many conditions that would cause it to be compiled and be useful to you, but I'm not sure if they break other stuff:
#if defined( USE_MAP ) || defined( USE_BUMPMAP ) || defined( USE_NORMALMAP ) || defined( USE_SPECULARMAP ) || defined( USE_ALPHAMAP ) || defined( USE_EMISSIVEMAP ) || defined( USE_ROUGHNESSMAP ) || defined( USE_METALNESSMAP )
So, adding something like
myMaterial.defines = {USE_MAP:''}
might make the vUv variable available to your shader. This way you get all the lights of the Phong material affecting the object; you just change the base color.
If you want your gradient to be static, you could just add a texture to your material using the .map property. Or you could assign it to the .emissiveMap property if you want it to "glow" without the need of lights.
However, if you want your gradient to change, and always fade in the z-axis, even after rotating the model or camera, you'd have to write a custom shader, which would require you to take some tutorials. You could look at this example for how to implement custom shaders in Three.js, and visit https://thebookofshaders.com/ to get a good understanding on how to write a simple gradient shader.
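For the static case, here is a minimal sketch of the .map approach (assuming the geometry actually has a uv attribute whose v coordinate runs along the axis you want; an STL loaded with STLLoader typically has no UVs, so you may need to generate them or fall back to the shader or vertex-colour approaches above):
// Sketch: paint a vertical two-colour gradient into a small canvas and use it as the map.
var gradientCanvas = document.createElement('canvas');
gradientCanvas.width = 1;
gradientCanvas.height = 256;
var ctx = gradientCanvas.getContext('2d');
var grad = ctx.createLinearGradient(0, 0, 0, gradientCanvas.height);
grad.addColorStop(0, 'red');
grad.addColorStop(1, 'purple');
ctx.fillStyle = grad;
ctx.fillRect(0, 0, gradientCanvas.width, gradientCanvas.height);
var material = new THREE.MeshPhongMaterial({ map: new THREE.CanvasTexture(gradientCanvas) });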

BufferGeometry create particles z-index

I created two particles with a three.js BufferGeometry; I want clicking on each particle to show the corresponding image.
But when I click on a particle, its image is shown and the other particle covers it.
I want to know how to take the particles out of the normal drawing order and keep the clicked particle always on top.
Code:
var scene, camera, renderer,controls;
var points;
var shaderMaterial;
var particleCount = 2;
function init () {
scene = new THREE.Scene();
camera = new THREE.PerspectiveCamera(45, window.innerWidth / window.innerHeight, 0.1, 1000);
camera.position.x = 15;
camera.position.y = 16;
camera.position.z = 35;
camera.lookAt(scene.position);
renderer = new THREE.WebGLRenderer();
renderer.setClearColor(0x000000, 1.0);
renderer.setSize(window.innerWidth, window.innerHeight);
var light = new THREE.AmbientLight( 0xcccccc );
scene.add(light);
document.body.appendChild(renderer.domElement);
createParticles();
createGrid();
render();
document.querySelector('canvas').addEventListener( 'click', interactive, false );
}
function createParticles () {
var geometry = new THREE.BufferGeometry();
var positions = new Float32Array( particleCount * 3 );
var sizes = new Float32Array( particleCount );
var pop = new Float32Array( particleCount);
for (var i = 0, i3 = 0; i < particleCount; i ++, i3 += 3) {
positions[i3 + 0] = i* 10;
positions[i3 + 1] = 0.1;
positions[i3 + 2] = 1;
sizes[i] = 15;
pop[i] = 0.0;
}
geometry.addAttribute( 'position', new THREE.BufferAttribute( positions, 3 ) );
geometry.addAttribute( 'size', new THREE.BufferAttribute( sizes, 1 ) );
geometry.addAttribute( 'pop', new THREE.BufferAttribute( pop, 1 ) );
shaderMaterial = new THREE.ShaderMaterial({
uniforms: {
'u_time': {type: 'f', value: 1.0},
'u_texture_0': { value: new THREE.TextureLoader().load('https://avatars2.githubusercontent.com/u/5829050?s=256&v=4') }},
vertexShader: document.getElementById( 'vs' ).textContent,
fragmentShader: document.getElementById( 'fs' ).textContent,
// blending: THREE.AdditiveBlending,
depthTest: false,
transparent: true
});
shaderMaterial.uniforms['u_texture_0'].value.flipY = false;
points = new THREE.Points(geometry, shaderMaterial);
scene.add(points);
}
var raycaster = new THREE.Raycaster();
raycaster.params.Points.threshold = 5;
var touch = new THREE.Vector2();
var intersects, INTERSECTED;
var beforeIndex;
function interactive (event) {
touch.x = ( event.clientX / window.innerWidth ) * 2 - 1;
touch.y = - ( event.clientY / window.innerHeight ) * 2 + 1;
points.geometry.computeBoundingSphere();
camera.updateMatrixWorld();
var vector = new THREE.Vector3(touch.x, touch.y, 0.5 ).unproject(camera);
raycaster.set(camera.position, vector.sub(camera.position ).normalize());
raycaster.setFromCamera( touch, camera );
intersects = raycaster.intersectObject(points);
if ( intersects.length > 0 ) {
if ( INTERSECTED != intersects[ 0 ].index ) {
INTERSECTED = intersects[ 0 ].index;
if (beforeIndex != INTERSECTED) {
points.geometry.attributes.pop.array[ beforeIndex ] = 0.0;
}
points.geometry.attributes.pop.array[ INTERSECTED ] = 1.0;
beforeIndex = INTERSECTED;
}
}
points.geometry.attributes.size.needsUpdate = true;
points.geometry.attributes.pop.needsUpdate = true;
}
function createGrid () {
var helper = new THREE.GridHelper( 100, 20, 0x303030, 0x303030 );
scene.add( helper );
}
function render () {
renderer.render(scene, camera);
requestAnimationFrame(render);
}
init();
* {
margin: 0;
padding: 0;
}
html, body {
width: 100%;
height: 100%;
background: #000;
}
canvas {
display: block;
}
<script src="https://threejs.org/build/three.js"></script>
<script id="fs" type="x-shader/x-fragment">
precision highp float;
uniform sampler2D u_texture_0;
uniform float u_time;
varying float u_pop;
void main () {
vec2 uv = gl_PointCoord.xy;
vec4 rval = texture2D(u_texture_0,uv);
vec2 posToCenter = (uv - vec2(.5, .5)) * 2.0;
float distanceToCenter = length(posToCenter);
float fadeOpacity = 1. - smoothstep(0.8, 1., distanceToCenter);
float opacity = (1. - step(0.8, distanceToCenter)) + fadeOpacity;
vec3 bgColor = mix(vec3(255., 255., 255.), vec3(252., 222., 184.), distanceToCenter) / 255.;
vec4 color = vec4(mix(bgColor, rval.rgb, u_pop), 1.);
color.a = opacity;
gl_FragColor = color;
}
</script>
<script type="x-shader/x-vertex" id="vs">
attribute float size;
attribute float pop;
varying float u_pop;
void main() {
vec4 mvPosition = modelViewMatrix * vec4( position, 1.0 );
gl_PointSize = size * ( 300.0 / -mvPosition.z );
gl_Position = projectionMatrix * mvPosition;
u_pop = pop;
}
</script>
You have a misunderstanding about how 3D works, and are therefore using the wrong concepts and terminology. There is no "z-index" in 3D. There is a Z-buffer or depth buffer (two names, same thing), which reflects each object's distance from the renderer's point of view (the camera, a virtual observer). Naturally, the purpose of the depth buffer with depth testing is to prevent farther objects from being rendered in front of closer ones (this also allows optimization, by preventing unseen pixels from being computed).
Also, the background-to-foreground display is not controlled by any hierarchy (unless the engine deliberately implements such a feature); objects are simply rendered in the order they are supplied. If depth testing is disabled, the most recently rendered object will be displayed in front of all previously rendered ones. In a 3D scene, the hierarchy is about transformations, not display order (unless objects are rendered in the scene's hierarchy order without depth testing).
To achieve what you want in a robust way, you'll have to disable depth testing and manually control the order in which the sprites are rendered, to ensure the one which must be in "front" is the last one rendered. This is a pretty low-level manipulation, and unless Three.js allows you to control that (which I doubt), you'll probably have to change your tactic or implement your own WebGL engine.
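For what it's worth, three.js does expose a renderOrder property on Object3D that can serve exactly this purpose when depth testing is off (as it already is on the question's material). A minimal sketch, assuming each clickable particle were split into its own THREE.Points object; the function and variable names here are hypothetical:
// Sketch: with depthTest: false, objects with a higher renderOrder are drawn later,
// so the clicked particle ends up on top of the others.
function bringToFront(clickedPoints, allPointObjects) {
allPointObjects.forEach(function (p) { p.renderOrder = 0; });
clickedPoints.renderOrder = 1; // rendered last, therefore displayed in front
}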

Hard Light material blending mode in Three.js?

I am currently using the MeshPhongMaterial provided by Three.js to create a simple scene with basic water. I would like for the water material to have the Hard Light blending mode that can be found in applications such as Photoshop. How can I achieve the Hard Light blending modes below on the right?
The right halves of the images above are set to Hard Light in Photoshop. I am trying to recreate that Hard Light blend mode in Three.js.
One lead I have come across is to completely reimplement the MeshPhongMaterial's fragment and vertex shader, but this will take me some time as I am quite new to this.
What is the way to implement a Hard Light blending mode for a material in Three.js?
/*
* Scene config
**/
var scene = new THREE.Scene();
var camera = new THREE.PerspectiveCamera(50, window.innerWidth / window.innerHeight, 0.1, 10000);
var renderer = new THREE.WebGLRenderer({
antialias: true
});
renderer.setClearColor(0xffffff);
renderer.setSize(window.innerWidth, window.innerHeight);
document.body.appendChild(renderer.domElement);
camera.position.set(0, 500, 1000);
camera.lookAt(scene.position);
/*
* Scene lights
**/
var spotlight = new THREE.SpotLight(0x999999, 0.1);
spotlight.castShadow = true;
spotlight.shadowDarkness = 0.75;
spotlight.position.set(0, 500, 0);
scene.add(spotlight);
var pointlight = new THREE.PointLight(0x999999, 0.5);
pointlight.position.set(75, 50, 0);
scene.add(pointlight);
var hemiLight = new THREE.HemisphereLight(0xffce7a, 0x000000, 1.25);
hemiLight.position.y = 75;
hemiLight.position.z = 500;
scene.add(hemiLight);
/*
* Scene objects
*/
/* Water */
var waterGeo = new THREE.PlaneGeometry(1000, 1000, 50, 50);
var waterMat = new THREE.MeshPhongMaterial({
color: 0x00aeff,
emissive: 0x0023b9,
shading: THREE.FlatShading,
shininess: 60,
specular: 30,
transparent: true
});
for (var j = 0; j < waterGeo.vertices.length; j++) {
waterGeo.vertices[j].x = waterGeo.vertices[j].x + ((Math.random() * Math.random()) * 30);
waterGeo.vertices[j].y = waterGeo.vertices[j].y + ((Math.random() * Math.random()) * 20);
}
var waterObj = new THREE.Mesh(waterGeo, waterMat);
waterObj.rotation.x = -Math.PI / 2;
scene.add(waterObj);
/* Floor */
var floorGeo = new THREE.PlaneGeometry(1000, 1000, 50, 50);
var floorMat = new THREE.MeshPhongMaterial({
color: 0xe9b379,
emissive: 0x442c10,
shading: THREE.FlatShading
});
for (var j = 0; j < floorGeo.vertices.length; j++) {
floorGeo.vertices[j].x = floorGeo.vertices[j].x + ((Math.random() * Math.random()) * 30);
floorGeo.vertices[j].y = floorGeo.vertices[j].y + ((Math.random() * Math.random()) * 20);
floorGeo.vertices[j].z = floorGeo.vertices[j].z + ((Math.random() * Math.random()) * 20);
}
var floorObj = new THREE.Mesh(floorGeo, floorMat);
floorObj.rotation.x = -Math.PI / 2;
floorObj.position.y = -75;
scene.add(floorObj);
/*
* Scene render
**/
var count = 0;
function render() {
requestAnimationFrame(render);
var particle, i = 0;
for (var ix = 0; ix < 50; ix++) {
for (var iy = 0; iy < 50; iy++) {
waterObj.geometry.vertices[i++].z = (Math.sin((ix + count) * 2) * 3) +
(Math.cos((iy + count) * 1.5) * 6);
waterObj.geometry.verticesNeedUpdate = true;
}
}
count += 0.05;
renderer.render(scene, camera);
}
render();
html,
body {
margin: 0;
padding: 0;
width: 100%;
height: 100%;
}
<script src="https://cdnjs.cloudflare.com/ajax/libs/three.js/r73/three.min.js"></script>
I don't think you're going to get the effect you want.
How do you generate the first image? I assume you just made a fuzzy oval in Photoshop and picked "hard light"?
If you want the same thing in three.js you'll need to generate a fuzzy oval and apply it in 2D using a post-processing effect in three.js.
You could generate such an oval by making a second scene in three.js, adding the lights, and shining them on a black, wave-free plane at the same position as the water in the original scene. You probably want only the spotlight and maybe the point light in that scene; render it to a render target. In your current scene, remove the spotlight for sure, and render that scene to another render target.
When you're done combine the scenes using a post processing effect that implements hard light
// pseudo code
vec3 partA = texture2D(sceneTexture, texcoord).rgb;
vec3 partB = texture2D(lightTexture, texcoord).rgb;
vec3 line1 = 2.0 * partA * partB;
vec3 line2 = 1.0 - 2.0 * (1.0 - partA) * (1.0 - partB);
gl_FragColor = vec4(mix(line2, line1, step(0.5, partA)), 1.0);
I ended up doing it in the following way thanks to gman's excellent answer. View the code snippet below to see it in action.
As gman described:
I created a WebGLRenderTarget to which the scene is rendered to.
The WebGLRenderTarget is then passed to the ShaderMaterial's uniforms as a texture, together with the window.innerWidth, window.innerHeight and color.
The respective texture coordinates, in relation to the current fragment, are calculated by dividing gl_FragCoord by the window's width and height.
The fragment can now sample what is on screen from the WebGLRenderTarget texture and combine that with the color of the object to output the correct gl_FragColor.
So far it works great. The only thing I am currently looking into is to create a separate scene containing only the objects that are necessary for blending, perhaps cloned. I assume that would be more performant. Currently I am toggling the visibility of the object to be blended in the render loop, before and after it is sent to the WebGLRenderTarget. For a larger scene with more objects, that probably doesn't make much sense and would complicate things.
var conf = {
'Color A': '#cc6633',
'Color B': '#0099ff'
};
var GUI = new dat.GUI();
var A_COLOR = GUI.addColor(conf, 'Color A');
A_COLOR.onChange(function(val) {
A_OBJ.material.uniforms.color = {
type: "c",
value: new THREE.Color(val)
};
A_OBJ.material.needsUpdate = true;
});
var B_COLOR = GUI.addColor(conf, 'Color B');
B_COLOR.onChange(function(val) {
B_OBJ.material.uniforms.color = {
type: "c",
value: new THREE.Color(val)
};
B_OBJ.material.needsUpdate = true;
});
var scene = new THREE.Scene();
var camera = new THREE.PerspectiveCamera(50, window.innerWidth / window.innerHeight, 0.1, 100);
var renderer = new THREE.WebGLRenderer();
renderer.setClearColor(0x888888);
renderer.setSize(window.innerWidth, window.innerHeight);
var target = new THREE.WebGLRenderTarget(window.innerWidth, window.innerHeight, {format: THREE.RGBFormat});
document.body.appendChild(renderer.domElement);
camera.position.set(0, 0, 50);
camera.lookAt(scene.position);
var A_GEO = new THREE.PlaneGeometry(20, 20);
var B_GEO = new THREE.PlaneGeometry(20, 20);
var A_MAT = new THREE.ShaderMaterial({
uniforms: {
color: {
type: "c",
value: new THREE.Color(0xcc6633)
}
},
vertexShader: document.getElementById('vertexShaderA').innerHTML,
fragmentShader: document.getElementById('fragmentShaderA').innerHTML
});
var B_MAT = new THREE.ShaderMaterial({
uniforms: {
color: {
type: "c",
value: new THREE.Color(0x0099ff)
},
window: {
type: "v2",
value: new THREE.Vector2(window.innerWidth, window.innerHeight)
},
target: {
type: "t",
value: target
}
},
vertexShader: document.getElementById('vertexShaderB').innerHTML,
fragmentShader: document.getElementById('fragmentShaderB').innerHTML
});
var A_OBJ = new THREE.Mesh(A_GEO, A_MAT);
var B_OBJ = new THREE.Mesh(B_GEO, B_MAT);
A_OBJ.position.set(-5, -5, 0);
B_OBJ.position.set(5, 5, 0);
scene.add(A_OBJ);
scene.add(B_OBJ);
function render() {
requestAnimationFrame(render);
B_OBJ.visible = false;
renderer.render(scene, camera, target, true);
B_OBJ.visible = true;
renderer.render(scene, camera);
}
render();
body { margin: 0 }
canvas { display: block }
<script src="https://cdnjs.cloudflare.com/ajax/libs/dat-gui/0.5.1/dat.gui.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/three.js/r74/three.min.js"></script>
<script type="x-shader/x-vertex" id="vertexShaderA">
uniform vec3 color;
void main() {
gl_Position = projectionMatrix * modelViewMatrix * vec4(position, 1.0);
}
</script>
<script type="x-shader/x-fragment" id="fragmentShaderA">
uniform vec3 color;
void main() {
gl_FragColor = vec4(color, 1.0);
}
</script>
<script type="x-shader/x-vertex" id="vertexShaderB">
uniform vec3 color;
void main() {
gl_Position = projectionMatrix * modelViewMatrix * vec4(position, 1.0);
}
</script>
<script type="x-shader/x-fragment" id="fragmentShaderB">
uniform vec3 color;
uniform vec2 window;
uniform sampler2D target;
void main() {
vec2 targetCoords = gl_FragCoord.xy / window.xy;
vec4 a = texture2D(target, targetCoords);
vec4 b = vec4(color, 1.0);
vec4 multiply = 2.0 * a * b;
vec4 screen = 1.0 - 2.0 * (1.0 - a) * (1.0 - b);
gl_FragColor = vec4(mix(screen, multiply, step(0.5, a)));
}
</script>
