I'm trying to implement the Voronoi shader from the Book of Shaders in three.js and can't work out why the mouse position has no effect on my visible output. (This is for the initial demo where the Voronoi diagram updates as the mouse moves.)
I'm logging the mouse position and checking that the uniform value is updating, yet the shader doesn't appear to change at all on my end.
This is what I see (a static image, no change at all) while animate is being called.
I can see u_time update when I log it in animate, so the callback is happening. The uniforms just don't appear to take effect, even though I thought I was updating them correctly.
Note: I followed this SO post on tracking the mouse position as a basis, though I tried both that mouse position and the tweaked mouse handler I have below (which maps to [-1, 1]).
Full code:
<!--
* Based on Book of Shaders 12:
https://thebookofshaders.com/12/
-->
<!DOCTYPE HTML>
<html>
<head>
<title>WebGL Demo - Voronoi</title>
<meta charset="utf-8">
<style>
body {
margin: 0;
padding: 0;
overflow: hidden;
}
</style>
<script src="./libraries/threejs/three.min.js"></script>
<!-- shaders -->
<script type="x-shader/x-vertex" id="vertexShader">
void main() {
vec4 modelViewPosition = modelViewMatrix * vec4(position, 1.0);
gl_Position = projectionMatrix * modelViewPosition;
}
</script>
<script type="x-shader/x-fragment" id="fragmentShader">
uniform vec2 u_resolution;
uniform vec2 u_mouse;
uniform float u_time;
void main() {
vec2 st = gl_FragCoord.xy/u_resolution.xy;
st.x *= u_resolution.x/u_resolution.y;
vec3 color = vec3(.0);
// Cell positions
vec2 point[5];
point[0] = vec2(0.83,0.75);
point[1] = vec2(0.60,0.07);
point[2] = vec2(0.28,0.64);
point[3] = vec2(0.31,0.26);
point[4] = u_mouse/u_resolution;
float m_dist = 1.; // minimum distance
// Iterate through the points positions
for (int i = 0; i < 5; i++) {
float dist = distance(st, point[i]);
// Keep the closer distance
m_dist = min(m_dist, dist);
}
// Draw the min distance (distance field)
color += m_dist;
// Show isolines
// color -= step(.7,abs(sin(50.0*m_dist)))*.3;
gl_FragColor = vec4(color,1.0);
}
</script>
</head>
<body></body>
<script>
let camera, scene, renderer;
let uniforms, mesh;
init();
animate();
function init() {
scene = new THREE.Scene();
camera = new THREE.Camera();
camera.position.z = 1;
renderer = new THREE.WebGLRenderer({ antialias: true });
renderer.setClearColor(0x000000, 1);
renderer.setSize(window.innerWidth, window.innerHeight);
renderer.setPixelRatio(window.devicePixelRatio);
document.body.appendChild(renderer.domElement);
uniforms = {
u_resolution: { type: 'vec2', value: new THREE.Vector2() },
u_mouse: { type: 'vec2', value: new THREE.Vector2() },
u_time: { type: 'float', value: 1.0 }
};
let vShader = document.getElementById("vertexShader").textContent;
let fShader = document.getElementById("fragmentShader").textContent;
let geometry = new THREE.PlaneGeometry(2, 2);
// give it a material
let material = new THREE.ShaderMaterial({
uniforms: uniforms,
fragmentShader: fShader,
vertexShader: vShader,
});
// and now create the mesh (geom+mat)
mesh = new THREE.Mesh(geometry, material);
scene.add(mesh);
onWindowResize();
window.addEventListener('resize', onWindowResize, false);
}
function animate() {
requestAnimationFrame(animate);
render();
}
function render() {
uniforms.u_time.value += 0.05;
renderer.render(scene, camera);
}
function onWindowResize(e) {
renderer.setSize(window.innerWidth, window.innerHeight);
uniforms.u_resolution.value.x = renderer.domElement.width;
uniforms.u_resolution.value.y = renderer.domElement.height;
}
document.onmousemove = function (e) {
uniforms.u_mouse.value.x = (e.clientX / window.innerWidth) * 2 - 1;//e.pageX / window.innerWidth;
uniforms.u_mouse.value.y = -(e.clientY / window.innerHeight) * 2 + 1;//e.pageY / window.innerHeight;
}
</script>
</html>
In the line point[4] = u_mouse/u_resolution;, the mouse value probably shouldn't be divided by u_resolution, because your Vector2 is already mapped to the [-1, 1] range. You're most likely just getting very small values, so the mouse movement is indistinguishable.
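A minimal sketch of two consistent options, assuming the rest of the setup above stays the same (and ignoring setPixelRatio, which would also scale gl_FragCoord and u_resolution by devicePixelRatio): either feed the shader raw pixel coordinates, the way the Book of Shaders does, and keep the division in GLSL, or feed it already-normalized [0, 1] coordinates and drop the division. Pick one of the two handlers, not both.

// Option 1: store pixel coordinates and keep point[4] = u_mouse/u_resolution in the shader
document.onmousemove = function (e) {
    uniforms.u_mouse.value.x = e.pageX;
    uniforms.u_mouse.value.y = window.innerHeight - e.pageY; // flip Y to match gl_FragCoord's bottom-left origin
};

// Option 2: store [0, 1] coordinates and change the shader to point[4] = u_mouse;
document.onmousemove = function (e) {
    uniforms.u_mouse.value.x = e.pageX / window.innerWidth;
    uniforms.u_mouse.value.y = 1.0 - e.pageY / window.innerHeight;
};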
So I am trying to make a cloud generation script that generates clouds infinitely when moving forward, left, or right.
Here is an example of how it currently looks. I would like it to generate clouds infinitely when moving left or right as well, but currently it only generates clouds infinitely when moving forward. Arrows are provided in the example for better understanding. I apologize for not adding an image directly, but I am unable to as I am a new user:
https://i.stack.imgur.com/4XzpZ.jpg
I would like to modify my script to make the clouds generate forward, left, and right.
Here is my script:
https://pastebin.com/raw/vkTVrybQ
// HTML:
<!DOCTYPE html>
<html lang="en" >
<head>
<meta charset="utf-8" />
<title>cloud generation</title>
<link href="css/main.css" rel="stylesheet" type="text/css" />
<script src="js/ThreeWebGL.js"></script>
<script src="js/ThreeExtras.js"></script>
</head>
<body>
<script id="vs" type="x-shader/x-vertex">
varying vec2 vUv;
void main() {
vUv = uv;
gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );
}
</script>
<script id="fs" type="x-shader/x-fragment">
uniform sampler2D map;
uniform vec3 fogColor;
uniform float fogNear;
uniform float fogFar;
varying vec2 vUv;
void main() {
float depth = gl_FragCoord.z / gl_FragCoord.w;
float fogFactor = smoothstep( fogNear, fogFar, depth );
gl_FragColor = texture2D( map, vUv );
gl_FragColor.w *= pow( gl_FragCoord.z, 20.0 );
gl_FragColor = mix( gl_FragColor, vec4( fogColor, gl_FragColor.w ), fogFactor );
}
</script>
<div class="container">
<canvas id="panel" width="10" height="1"></canvas>
</div>
<script type="text/javascript" src="js/script.js"></script>
</body>
</html>
// CSS:
*{
margin:0;
padding:0;
}
body {
color:#fff;
font:14px/1.3 Arial,sans-serif;
background-image: url(../images/sky.jpg);
}
.container {
height:1px;
}
// Javascript:
// inner variables
var canvas, ctx;
var camera, scene, renderer, meshMaterial, mesh, geometry, i, f;
var mouseX = 0, mouseY = 0;
var startTime = new Date().getTime();
var windowHalfX = window.innerWidth / 2;
var windowHalfY = window.innerHeight / 2;
if (window.attachEvent) {
window.attachEvent('onload', main_init);
} else {
if(window.onload) {
var curronload = window.onload;
var newonload = function() {
curronload();
main_init();
};
window.onload = newonload;
} else {
window.onload = main_init;
}
}
function main_init() {
// creating canvas and context objects
canvas = document.getElementById('panel');
var ctx = canvas.getContext('2d');
// preparing camera
camera = new THREE.Camera(30, window.innerWidth / window.innerHeight, 1, 5000);
camera.position.z = 6000;
// preparing scene
scene = new THREE.Scene();
// preparing geometry
geometry = new THREE.Geometry();
// loading texture
var texture = THREE.ImageUtils.loadTexture('../images/clouds.png');
texture.magFilter = THREE.LinearMipMapLinearFilter;
texture.minFilter = THREE.LinearMipMapLinearFilter;
// preparing fog
var fog = new THREE.Fog(0x251d32, - 100, 5000);
// preparing material
meshMaterial = new THREE.MeshShaderMaterial({
uniforms: {
'map': {type: 't', value:2, texture: texture},
'fogColor' : {type: 'c', value: fog.color},
'fogNear' : {type: 'f', value: fog.near},
'fogFar' : {type: 'f', value: fog.far},
},
vertexShader: document.getElementById('vs').textContent,
fragmentShader: document.getElementById('fs').textContent,
depthTest: false
});
// preparing planeMesh
var planeMesh = new THREE.Mesh(new THREE.PlaneGeometry(64, 64));
for (i = 0; i < 10000; i++) {
planeMesh.position.x = Math.random() * 1000 - 500;
planeMesh.position.y = - Math.random() * Math.random() * 200 - 15;
planeMesh.position.z = i;
planeMesh.rotation.z = Math.random() * Math.PI;
planeMesh.scale.x = planeMesh.scale.y = Math.random() * Math.random() * 1.5 + 0.5;
THREE.GeometryUtils.merge(geometry, planeMesh);
}
mesh = new THREE.Mesh(geometry, meshMaterial);
scene.addObject(mesh);
mesh = new THREE.Mesh(geometry, meshMaterial);
mesh.position.z = - 10000;
scene.addObject(mesh);
// preparing new renderer and drawing it
renderer = new THREE.WebGLRenderer({ antialias: false });
renderer.setSize(window.innerWidth, window.innerHeight);
document.body.appendChild(renderer.domElement);
// change positions by mouse
document.addEventListener('mousemove', onMousemove, false);
// change canvas size on resize
window.addEventListener('resize', onResize, false);
setInterval(drawScene, 30); // loop drawScene
}
function onMousemove(event) {
mouseX = (event.clientX - windowHalfX) * 0.3;
mouseY = (event.clientY - windowHalfY) * 0.2;
}
function onResize(event) {
camera.aspect = window.innerWidth / window.innerHeight;
camera.updateProjectionMatrix();
renderer.setSize(window.innerWidth, window.innerHeight);
}
function drawScene() {
position = ((new Date().getTime() - startTime) * 0.1) % 10000;
camera.position.x += mouseX * 0.01;
camera.position.y += - mouseY * 0.01;
camera.position.z = - position + 10000;
renderer.render(scene, camera);
}
// ^ Please note I also use ThreeWebGL.js and ThreeExtras.js, but these scripts are too large to paste here as they are APIs
I'm working on a Three.js scene in which I'd like to update some textures after some time. I'm finding that updating the textures is very slow, however, and it drags the frame rate down to 1-2 FPS for several seconds (when updating just a single texture).
Is there anything one can do to speed up texture updates? Any insights others can offer on this question would be very appreciated.
To see this behavior, click the window of the example below. This will trigger the first texture update (another click will trigger the second texture update). If you try to zoom after one of these clicks, you'll find the screen freezes and the FPS drops terribly. Does anyone know how to fix this problem?
<html>
<head>
<style>
html, body { width: 100%; height: 100%; background: #000; }
body { margin: 0; overflow: hidden; }
canvas { width: 100%; height: 100%; }
</style>
</head>
<body>
<script src='https://cdnjs.cloudflare.com/ajax/libs/three.js/88/three.min.js'></script>
<script src='https://rawgit.com/YaleDHLab/pix-plot/master/assets/js/trackball-controls.js'></script>
<script src='https://rawgit.com/mrdoob/stats.js/master/build/stats.min.js'></script>
<script type='x-shader/x-vertex' id='vertex-shader'>
precision highp float;
uniform mat4 modelViewMatrix;
uniform mat4 projectionMatrix;
uniform vec3 cameraPosition;
attribute vec3 position; // sets the blueprint's vertex positions
attribute vec3 translation; // x y translation offsets for an instance
attribute float texIdx; // the texture index to access
varying float vTexIdx;
void main() {
// set point position
vec3 pos = position + translation;
vec4 projected = projectionMatrix * modelViewMatrix * vec4(pos, 1.0);
gl_Position = projected;
// assign the varyings
vTexIdx = texIdx;
// use the delta between the point position and camera position to size point
float xDelta = pow(projected[0] - cameraPosition[0], 2.0);
float yDelta = pow(projected[1] - cameraPosition[1], 2.0);
float zDelta = pow(projected[2] - cameraPosition[2], 2.0);
float delta = pow(xDelta + yDelta + zDelta, 0.5);
gl_PointSize = 40000.0 / delta;
}
</script>
<script type='x-shader/x-fragment' id='fragment-shader'>
precision highp float;
uniform sampler2D a;
uniform sampler2D b;
varying float vTexIdx;
void main() {
int textureIndex = int(vTexIdx);
vec2 uv = vec2(gl_PointCoord.x, gl_PointCoord.y);
if (textureIndex == 0) {
gl_FragColor = texture2D(a, uv);
} else if (textureIndex == 1) {
gl_FragColor = texture2D(b, uv);
}
}
</script>
<script>
/**
* Generate a scene object with a background color
**/
function getScene() {
var scene = new THREE.Scene();
scene.background = new THREE.Color(0xaaaaaa);
return scene;
}
/**
* Generate the camera to be used in the scene
**/
function getCamera() {
var aspectRatio = window.innerWidth / window.innerHeight;
var camera = new THREE.PerspectiveCamera(75, aspectRatio, 0.1, 100000);
camera.position.set(0, 1, -6000);
return camera;
}
/**
* Generate the renderer to be used in the scene
**/
function getRenderer() {
// Create the canvas with a renderer
var renderer = new THREE.WebGLRenderer({antialias: true});
// Add support for retina displays
renderer.setPixelRatio(window.devicePixelRatio);
// Specify the size of the canvas
renderer.setSize(window.innerWidth, window.innerHeight);
// Add the canvas to the DOM
document.body.appendChild(renderer.domElement);
return renderer;
}
/**
* Generate the controls to be used in the scene
**/
function getControls(camera, renderer) {
var controls = new THREE.TrackballControls(camera, renderer.domElement);
controls.zoomSpeed = 0.4;
controls.panSpeed = 0.4;
return controls;
}
/**
* Generate the points for the scene
**/
function addPoints(scene) {
var BA = THREE.BufferAttribute;
var IBA = THREE.InstancedBufferAttribute;
var geometry = new THREE.InstancedBufferGeometry();
// add data for each observation
var n = 10000; // number of observations
var rootN = n**(1/2);
var cellSize = 20;
var translation = new Float32Array( n * 3 );
var texIdx = new Float32Array( n );
var translationIterator = 0;
var texIterator = 0;
for (var i=0; i<n*3; i++) {
var x = Math.random() * n - (n/2);
var y = Math.random() * n - (n/2);
translation[translationIterator++] = x;
translation[translationIterator++] = y;
translation[translationIterator++] = Math.random() * n - (n/2);
texIdx[texIterator++] = (x + y) > (n/8) ? 1 : 0;
}
var positionAttr = new BA(new Float32Array( [0, 0, 0] ), 3);
var translationAttr = new IBA(translation, 3, 1);
var texIdxAttr = new IBA(texIdx, 1, 1);
positionAttr.dynamic = true;
translationAttr.dynamic = true;
texIdxAttr.dynamic = true;
geometry.addAttribute('position', positionAttr);
geometry.addAttribute('translation', translationAttr);
geometry.addAttribute('texIdx', texIdxAttr);
var canvases = [
getElem('canvas', { width: 16384, height: 16384, }),
getElem('canvas', { width: 16384, height: 16384, }),
]
var textures = [
getTexture( canvases[0] ),
getTexture( canvases[1] ),
];
var material = new THREE.RawShaderMaterial({
uniforms: {
a: {
type: 't',
value: textures[0],
},
b: {
type: 't',
value: textures[1],
}
},
vertexShader: document.getElementById('vertex-shader').textContent,
fragmentShader: document.getElementById('fragment-shader').textContent,
});
var mesh = new THREE.Points(geometry, material);
mesh.frustumCulled = false; // prevent the mesh from being clipped on drag
scene.add(mesh);
// on the first window click, paint red points
// on the second window click, paint blue points
var clicks = 0;
window.addEventListener('click', function() {
if (clicks == 0 || clicks == 1) {
var canvas = canvases[clicks];
var ctx = canvas.getContext('2d');
ctx.fillStyle = clicks == 0 ? 'red' : 'blue';
ctx.rect(0, 0, 16384, 16384);
ctx.fill();
textures[clicks].needsUpdate = true;
clicks++;
}
})
}
function getTexture(canvas) {
var tex = new THREE.Texture(canvas);
tex.needsUpdate = true;
tex.flipY = false;
return tex;
}
/**
* Create an element
**/
function getElem(tag, obj) {
var obj = obj || {};
var elem = document.createElement(tag);
Object.keys(obj).forEach(function(attr) {
elem[attr] = obj[attr];
})
return elem;
}
/**
* Add stats
**/
function getStats() {
var stats = new Stats();
stats.domElement.style.position = 'absolute';
stats.domElement.style.top = '65px';
stats.domElement.style.right = '5px';
stats.domElement.style.left = 'initial';
document.body.appendChild(stats.domElement);
return stats;
}
/**
* Render!
**/
function render() {
requestAnimationFrame(render);
renderer.render(scene, camera);
controls.update();
stats.update();
};
/**
* Main
**/
var stats = getStats();
var scene = getScene();
var camera = getCamera();
var renderer = getRenderer();
var controls = getControls(camera, renderer);
addPoints(scene);
render();
</script>
</body>
</html>
Your canvases are 16384 by 16384. That is extremely large.
In RGBA format that is 1,073,741,824 bytes: a gigabyte of texture data that gets sent from the CPU to your GPU when you set texture.needsUpdate = true.
You will definitely notice that upload to the card.
If your use case absolutely requires textures that large, then you may need to consider doing incremental updates via gl.texSubImage2D, using a bunch of smaller textures and only updating one of them per frame, or only updating those textures at the start of your app and not thereafter.
For reference, there are very few cases I've seen where textures larger than 4K per side are needed, and a 4K texture is about 1/16th the size of yours.
This has nothing to do with three.js, by the way. It's a fundamental characteristic of GPU/CPU interaction: uploads and state changes are slow and have to be carefully orchestrated and monitored.
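A minimal sketch of the "bunch of smaller textures" idea, assuming hypothetical tile helpers rather than the asker's actual code: back the atlas with 4096x4096 per-tile canvases and flag at most one tile texture per rendered frame, so no single frame has to upload a gigabyte.

// Split the big atlas into 4096x4096 tiles, each backed by its own canvas texture.
var TILE_SIZE = 4096;
var TILE_COUNT = 16; // 4 x 4 tiles cover the same area as one 16384x16384 canvas
var tileCanvases = [];
var tileTextures = [];
var dirtyTiles = [];
for (var t = 0; t < TILE_COUNT; t++) {
  var c = document.createElement('canvas');
  c.width = TILE_SIZE;
  c.height = TILE_SIZE;
  tileCanvases.push(c);
  var tex = new THREE.CanvasTexture(c);
  tex.flipY = false;
  tileTextures.push(tex);
}
// After drawing into tileCanvases[i], mark that tile as needing an upload.
function markTileDirty(i) {
  if (dirtyTiles.indexOf(i) === -1) dirtyTiles.push(i);
}
// Call this once per frame (e.g. just before renderer.render) so at most one
// tile is re-uploaded per frame instead of one enormous texture all at once.
function flushOneDirtyTile() {
  if (dirtyTiles.length > 0) {
    tileTextures[dirtyTiles.shift()].needsUpdate = true;
  }
}

The shader would then sample from whichever tile texture a point belongs to, in the same spirit as the existing texIdx attribute.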
I created two particles with a three.js BufferGeometry, and I want clicking on each particle to show the corresponding image.
But when I click on a particle, the image is shown and the other particle covers it.
I want to know how to take the particles out of the hierarchy's control and keep the clicked particle always on top.
code:
var scene, camera, renderer,controls;
var points;
var shaderMaterial;
var particleCount = 2;
function init () {
scene = new THREE.Scene();
camera = new THREE.PerspectiveCamera(45, window.innerWidth / window.innerHeight, 0.1, 1000);
camera.position.x = 15;
camera.position.y = 16;
camera.position.z = 35;
camera.lookAt(scene.position);
renderer = new THREE.WebGLRenderer();
renderer.setClearColor(0x000000, 1.0);
renderer.setSize(window.innerWidth, window.innerHeight);
var light = new THREE.AmbientLight( 0xcccccc );
scene.add(light);
document.body.appendChild(renderer.domElement);
createParticles();
createGrid();
render();
document.querySelector('canvas').addEventListener( 'click', interactive, false );
}
function createParticles () {
var geometry = new THREE.BufferGeometry();
var positions = new Float32Array( particleCount * 3 );
var sizes = new Float32Array( particleCount );
var pop = new Float32Array( particleCount);
for (var i = 0, i3 = 0; i < particleCount; i ++, i3 += 3) {
positions[i3 + 0] = i* 10;
positions[i3 + 1] = 0.1;
positions[i3 + 2] = 1;
sizes[i] = 15;
pop[i] = 0.0;
}
geometry.addAttribute( 'position', new THREE.BufferAttribute( positions, 3 ) );
geometry.addAttribute( 'size', new THREE.BufferAttribute( sizes, 1 ) );
geometry.addAttribute( 'pop', new THREE.BufferAttribute( pop, 1 ) );
shaderMaterial = new THREE.ShaderMaterial({
uniforms: {
'u_time': {type: 'f', value: 1.0},
'u_texture_0': { value: new THREE.TextureLoader().load('https://avatars2.githubusercontent.com/u/5829050?s=256&v=4') }},
vertexShader: document.getElementById( 'vs' ).textContent,
fragmentShader: document.getElementById( 'fs' ).textContent,
// blending: THREE.AdditiveBlending,
depthTest: false,
transparent: true
});
shaderMaterial.uniforms['u_texture_0'].value.flipY = false;
points = new THREE.Points(geometry, shaderMaterial);
scene.add(points);
}
var raycaster = new THREE.Raycaster();
raycaster.params.Points.threshold = 5;
var touch = new THREE.Vector2();
var intersects, INTERSECTED;
var beforeIndex;
function interactive (event) {
touch.x = ( event.clientX / window.innerWidth ) * 2 - 1;
touch.y = - ( event.clientY / window.innerHeight ) * 2 + 1;
points.geometry.computeBoundingSphere();
camera.updateMatrixWorld();
var vector = new THREE.Vector3(touch.x, touch.y, 0.5 ).unproject(camera);
raycaster.set(camera.position, vector.sub(camera.position ).normalize());
raycaster.setFromCamera( touch, camera );
intersects = raycaster.intersectObject(points);
if ( intersects.length > 0 ) {
if ( INTERSECTED != intersects[ 0 ].index ) {
INTERSECTED = intersects[ 0 ].index;
if (beforeIndex != INTERSECTED) {
points.geometry.attributes.pop.array[ beforeIndex ] = 0.0;
}
points.geometry.attributes.pop.array[ INTERSECTED ] = 1.0;
beforeIndex = INTERSECTED;
}
}
points.geometry.attributes.size.needsUpdate = true;
points.geometry.attributes.pop.needsUpdate = true;
}
function createGrid () {
var helper = new THREE.GridHelper( 100, 20, 0x303030, 0x303030 );
scene.add( helper );
}
function render () {
renderer.render(scene, camera);
requestAnimationFrame(render);
}
init();
* {
margin: 0;
padding: 0;
}
html, body {
width: 100%;
height: 100%;
background: #000;
}
canvas {
display: block;
}
<script src="https://threejs.org/build/three.js"></script>
<script id="fs" type="x-shader/x-fragment">
precision highp float;
uniform sampler2D u_texture_0;
uniform float u_time;
varying float u_pop;
void main () {
vec2 uv = gl_PointCoord.xy;
vec4 rval = texture2D(u_texture_0,uv);
vec2 posToCenter = (uv - vec2(.5, .5)) * 2.0;
float distanceToCenter = length(posToCenter);
float fadeOpacity = 1. - smoothstep(0.8, 1., distanceToCenter);
float opacity = (1. - step(0.8, distanceToCenter)) + fadeOpacity;
vec3 bgColor = mix(vec3(255., 255., 255.), vec3(252., 222., 184.), distanceToCenter) / 255.;
vec4 color = vec4(mix(bgColor, rval.rgb, u_pop), 1.);
color.a = opacity;
gl_FragColor = color;
}
</script>
<script type="x-shader/x-vertex" id="vs">
attribute float size;
attribute float pop;
varying float u_pop;
void main() {
vec4 mvPosition = modelViewMatrix * vec4( position, 1.0 );
gl_PointSize = size * ( 300.0 / -mvPosition.z );
gl_Position = projectionMatrix * mvPosition;
u_pop = pop;
}
</script>
You have a misunderstanding about how 3D works, and therefore you are using the wrong concepts and terminology. There is no "z-index" in 3D. There is a Z-buffer or depth buffer (two names, same thing), which reflects an object's distance from the renderer's point of view (camera, virtual observer). Naturally, the purpose of the depth buffer with depth testing is to prevent farther objects from being rendered in front of closer ones (this also allows an optimization, by preventing hidden pixels from being computed).
Also, the background-to-foreground display is not controlled by any hierarchy (unless the engine deliberately implements such a feature); objects are simply rendered in the order they are supplied. If depth testing is disabled, the last rendered object is displayed in front of all previously rendered ones. In a 3D scene, the hierarchy relates to transformations, not display order (unless objects are rendered in the scene's hierarchy order without depth testing).
To achieve what you want in a robust way, you'll have to disable depth testing and manually control the order in which the sprites are rendered, to ensure the one which must be "in front" is rendered last. This is a fairly low-level manipulation, and unless Three.js lets you control that (which I doubt), you'll probably have to change your tactics or implement your own WebGL engine.
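For what it's worth, three.js does expose a per-object renderOrder property that controls draw order. A minimal sketch of that route, assuming each particle is split into its own THREE.Points object (one point per object) sharing the same depthTest: false material, rather than both living in a single BufferGeometry as in the question:

// particleObjects: an array of THREE.Points, one per particle, all added to the scene.
// Higher renderOrder values are drawn later, so the clicked particle ends up on top.
function bringToFront(clickedIndex, particleObjects) {
  particleObjects.forEach(function (p, i) {
    p.renderOrder = (i === clickedIndex) ? 1 : 0;
  });
}

You would then call bringToFront(INTERSECTED, particleObjects) inside the click handler, alongside updating the pop attribute.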
I'm trying to establish a full-screen quad using a pass-through vertex shader in THREE.js. The quad itself is a plane geometry with dimensions (2, 2) located at the origin. It is assigned the ShaderMaterial. The camera is at z = 1, aiming at the quad.
The shaders are quite simple:
Vertex Shader:
void main() {
gl_Position = vec4( position, 1.0 );
}
Fragment Shader:
void main() {
gl_FragColor = vec4(0.0, 1.0, 0.0, 1.0);
}
But nothing shows up on screen. This setup is a standard way of doing render-to-texture, so why is it not working in THREE.js?
I've tried plane.frustumCulled = false and changing the clip planes of the camera to no avail.
Any help is appreciated.
Upon further investigation, the reason for not seeing the rendering result is more involved and points to some odd behavior in three.js.
I am using a PlaneGeometry with a rotation matrix applied, which is then wrapped by an Object3D with a counter rotation.
var geometry = new THREE.PlaneGeometry(2, 2);
var m4 = new THREE.Matrix4().makeRotationX(Math.PI * 0.5);
geometry.applyMatrix(m4);
var mesh = new THREE.Mesh(geometry, material);
var obj = new THREE.Object3D();
obj.add(mesh);
obj.rotation.x = -Math.PI * 0.5;
scene.add(obj);
This setup seems to throw three.js off and no rendering is shown.
OK, the rotation got thrown away because it is part of the model-view matrix, which I ignore in the vertex shader. I'll have to refactor what I'm currently doing.
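For reference, a minimal sketch of the two ways this can be made consistent, assuming the same material and scene setup as above: either keep the geometry and Object3D untransformed and use the pure pass-through shader, or keep the rotations and multiply by modelViewMatrix in the vertex shader so they are not discarded.

// Variant A: true full-screen quad; leave the PlaneGeometry and Object3D unrotated.
var passThroughVS = 'void main() { gl_Position = vec4(position, 1.0); }';
// Variant B: keep the rotations; let the vertex shader apply the object's transform.
var transformedVS = [
  'void main() {',
  '  gl_Position = projectionMatrix * modelViewMatrix * vec4(position, 1.0);',
  '}'
].join('\n');
var material = new THREE.ShaderMaterial({
  vertexShader: passThroughVS, // or transformedVS
  fragmentShader: 'void main() { gl_FragColor = vec4(0.0, 1.0, 0.0, 1.0); }',
  depthTest: false,
  depthWrite: false
});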
I'm not sure of the exact problem you're having, but here is an example of a working full-screen quad using the same technique.
var canvas = document.getElementById('canvas');
var scene = new THREE.Scene();
var renderer = new THREE.WebGLRenderer({canvas: canvas, antialias: true});
var camera = new THREE.PerspectiveCamera(45, canvas.clientWidth / canvas.clientWidth, 1, 1000);
var clock = new THREE.Clock();
var quad = new THREE.Mesh(
new THREE.PlaneGeometry(2, 2),
new THREE.ShaderMaterial({
vertexShader: document.getElementById('vertex-shader').textContent,
fragmentShader: document.getElementById('fragment-shader').textContent,
depthWrite: false,
depthTest: false
})
);
scene.add(quad);
var box = new THREE.Mesh(
new THREE.BoxGeometry(50, 50, 50),
new THREE.MeshBasicMaterial({color: '#000', wireframe: true})
);
scene.add(box);
camera.position.z = 200;
render();
function render() {
requestAnimationFrame(render);
if (canvas.width !== canvas.clientWidth || canvas.height !== canvas.clientHeight) {
renderer.setSize(canvas.clientWidth, canvas.clientHeight, false);
camera.aspect = canvas.clientWidth / canvas.clientHeight;
camera.updateProjectionMatrix();
}
var dt = clock.getDelta();
box.rotation.x += dt * 2 * Math.PI / 5;
box.rotation.y += dt * 2 * Math.PI / 7;
box.rotation.z += dt * 2 * Math.PI / 11;
renderer.render(scene, camera);
}
html, body, #canvas {
margin: 0;
padding: 0;
width: 100%;
height: 100%;
display: block;
}
<script src="https://cdnjs.cloudflare.com/ajax/libs/three.js/r73/three.min.js"></script>
<canvas id="canvas"></canvas>
<script id="vertex-shader" type="x-shader/x-vertex">
varying vec2 vUv;
void main() {
vUv = uv;
gl_Position = vec4(position, 1.0);
}
</script>
<script id="fragment-shader" type="x-shader/x-fragment">
varying vec2 vUv;
void main() {
gl_FragColor = vec4(vUv, 0.0, 1.0);
}
</script>
How can I achieve an x-ray-style effect in three.js / WebGL? Something like this:
UPD
I need a real-time render with this stuff, not a still image. This can be done with shaders that change density in a non-linear way on overlaps, based on distance. I roughly understand the theory, but have no practice, which is why I need help with this.
This is the same as Владимир Корнилов's example, except I changed the shader a little.
I'm not sure what he was going for with the dot(vNormal, vNormel). Doing abs(dot(vNormal, vec3(0, 0, 1))) will give you something that is brighter when facing toward or away from the view. Making it 1.0 - abs(dot(vNormal, vec3(0, 0, 1))) flips that, so surfaces perpendicular to the view are brighter. Then add the pow and it looks better to me, but I guess that's subjective.
var human;
var $ = document.querySelector.bind(document);
var scene = new THREE.Scene();
var camera = new THREE.PerspectiveCamera(45, 1, 0.1, 1000);
var renderer = new THREE.WebGLRenderer({antialias: true, alpha: true});
renderer.setClearColor(0x000000, 1.0);
lookAt = scene.position;
lookAt.y = 15;
camera.lookAt(lookAt);
document.body.appendChild(renderer.domElement);
var customMaterial = new THREE.ShaderMaterial(
{
uniforms: {
p: { type: "f", value: 2 },
glowColor: { type: "c", value: new THREE.Color(0x84ccff) },
},
vertexShader: $('#vertexShader').text,
fragmentShader: $('#fragmentShader').text,
side: THREE.DoubleSide,
blending: THREE.AdditiveBlending,
transparent: true,
depthWrite: false
});
var loader = new THREE.ColladaLoader();
loader.options.convertUpAxis = true;
loader.load('http://greggman.github.io/doodles/assets/woman.dae', function (collada) {
dae = collada.scene;
dae.traverse( function ( child ) {
if (child instanceof THREE.Mesh) {
console.log(child);
child.material = customMaterial;
}
} );
dae.scale.x = 0.2;
dae.scale.y = 0.2;
dae.scale.z = 0.2;
human = dae;
scene.add(human);
});
function resize() {
var canvas = renderer.domElement;
var width = canvas.clientWidth;
var height = canvas.clientHeight;
if (canvas.width !== width || canvas.height !== height) {
renderer.setSize(width, height, false);
camera.aspect = width / height;
camera.updateProjectionMatrix();
}
}
// call the render function
function render(time) {
time *= 0.001;
resize();
camera.position.x = -20 * (Math.cos(time));
camera.position.z = (20 * (Math.sin(time)));
camera.position.y = 20;
camera.lookAt(lookAt);
renderer.render(scene, camera);
requestAnimationFrame(render);
}
requestAnimationFrame(render);
<script src="//cdnjs.cloudflare.com/ajax/libs/three.js/r123/three.min.js"></script>
<script src="//greggman.github.io/doodles/js/three/js/loaders/ColladaLoader.js"></script>
<script id="vertexShader" type="x-shader/x-vertex">
uniform float p;
varying float intensity;
void main()
{
vec3 vNormal = normalize( normalMatrix * normal );
intensity = pow(1.0 - abs(dot(vNormal, vec3(0, 0, 1))), p);
gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );
}
</script>
<!-- fragment shader a.k.a. pixel shader -->
<script id="fragmentShader" type="x-shader/x-vertex">
uniform vec3 glowColor;
varying float intensity;
void main()
{
vec3 glow = glowColor * intensity;
gl_FragColor = vec4( glow, 1.0 );
}
</script>
<style>
html, body {
margin: 0;
overflow: hidden;
height: 100%;
}
canvas {
width: 100%;
height: 100%;
}
</style>
OK, I got an acceptable result with this:
<!DOCTYPE html>
<html>
<head>
<title>X-ray</title>
<script type="text/javascript" src="js/three.js/build/three.js"></script>
<script type="text/javascript" src="js/three.js/examples/js/loaders/OBJLoader.js"></script>
<script src="http://ajax.googleapis.com/ajax/libs/jquery/2.1.1/jquery.min.js"></script>
<script type="text/javascript" src="js/stats.min.js"></script>
<script type="text/javascript" src="js/three.js/examples/js/renderers/SVGRenderer.js"></script>
<script id="vertexShader" type="x-shader/x-vertex">
uniform vec3 viewVector;
uniform float c;
uniform float p;
varying float intensity;
void main()
{
vec3 vNormal = normalize( normalMatrix * normal );
vec3 vNormel = normalize( normalMatrix * viewVector );
intensity = pow( c - dot(vNormal, vNormel), p );
gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );
}
</script>
<!-- fragment shader a.k.a. pixel shader -->
<script id="fragmentShader" type="x-shader/x-vertex">
uniform vec3 glowColor;
varying float intensity;
void main()
{
vec3 glow = glowColor * intensity;
gl_FragColor = vec4( glow, 1.0 );
}
</script>
<style>
body {
/* set margin to 0 and overflow to hidden, to go fullscreen */
margin: 0;
overflow: hidden;
}
</style>
</head>
<body>
<div id="Stats-output">
</div>
<!-- Div which will hold the Output -->
<div id="WebGL-output">
</div>
<!-- Javascript code that runs our Three.js examples -->
<script type="text/javascript">
// once everything is loaded, we run our Three.js stuff.
$(function () {
var mouseX = 0, mouseY = 0;
var human;
camstep = 0;
var stats = initStats();
// create a scene, that will hold all our elements such as objects, cameras and lights.
var scene = new THREE.Scene();
// create a camera, which defines where we're looking at.
var camera = new THREE.PerspectiveCamera(45, window.innerWidth / window.innerHeight, 0.1, 1000);
// create a render and set the size
var renderer = new THREE.WebGLRenderer({antialias: true, alpha: true});
renderer.setClearColor(0x000000, 1.0);
renderer.setSize(window.innerWidth, window.innerHeight);
renderer.shadowMapEnabled = true;
renderer.shadowMapType = THREE.PCFShadowMap;
materialCameraPosition = camera.position.clone();
materialCameraPosition.z += 10;
// position and point the camera to the center of the scene
camera.position.x = -10;
camera.position.y = 0;
camera.position.z = 15;
lookAt = scene.position;
lookAt.y = 15;
camera.lookAt(lookAt);
// add subtle ambient lighting
var ambientLight = new THREE.AmbientLight(0x0c0c0c);
//scene.add(ambientLight);
// add the output of the renderer to the html element
$("#WebGL-output").append(renderer.domElement);
var customMaterial = new THREE.ShaderMaterial(
{
uniforms: {
"c": { type: "f", value: 1.0 },
"p": { type: "f", value: 3 },
glowColor: { type: "c", value: new THREE.Color(0x84ccff) },
viewVector: { type: "v3", value: materialCameraPosition }
},
vertexShader: document.getElementById('vertexShader').textContent,
fragmentShader: document.getElementById('fragmentShader').textContent,
side: THREE.FrontSide,
blending: THREE.AdditiveBlending,
transparent: true,
//opacity: 0.5,
depthWrite: false
});
var manager = new THREE.LoadingManager();
manager.onProgress = function (item, loaded, total) {
console.log(item, loaded, total);
};
var loader = new THREE.OBJLoader(manager);
loader.load('body_anatomy3.obj', function (object) {
console.log(object);
object.traverse(function (child) {
if (child instanceof THREE.Mesh) {
console.log(child);
child.material = customMaterial;
}
});
object.position.y = 4;
object.scale.x = 0.01;
object.scale.y = 0.01;
object.scale.z = 0.01;
human = object;
scene.add(human);
});
// call the render function
var step = 0;
render();
function render() {
stats.update();
camstep += 0.02;
camera.position.x = -20 * (Math.cos(camstep));
camera.position.z = (20 * (Math.sin(camstep)));
camera.position.y = 20;
camera.lookAt(lookAt);
if (human) {
//human.rotation.y += 0.02;
materialCameraPosition = camera.position.clone();
materialCameraPosition.z += 10;
human.traverse(function (child) {
if (child instanceof THREE.Mesh) {
//console.log(child.material.uniforms.viewVector);
child.material.uniforms.viewVector.value =
new THREE.Vector3().subVectors(camera.position, human.position);
}
});
}
//sphere.material.uniforms.viewVector.value = new THREE.Vector3().subVectors(camera.position, sphere.position);
// render using requestAnimationFrame
requestAnimationFrame(render);
renderer.render(scene, camera);
}
function initStats() {
var stats = new Stats();
stats.setMode(0); // 0: fps, 1: ms
// Align top-left
stats.domElement.style.position = 'absolute';
stats.domElement.style.left = '0px';
stats.domElement.style.top = '0px';
$("#Stats-output").append(stats.domElement);
return stats;
}
});
</script>
</body>
</html>
At the moment I've got a close result with a glow shader based on this demo: http://stemkoski.github.io/Three.js/Shader-Glow.html