I'm hoping to use the data from an AnalyserNode in WebAudio as the input for my vertex shader in WebGL, but I'm really at a loss for how I'd go about doing this. I've got it working for a simple .jpg using the following code, so I'm wondering how best to pass frequency and waveform data to my fragment shader.
Here's my HTML code:
<body>
<div id="container"></div>
</body>
<script type="text/javascript" src="https://cdnjs.cloudflare.com/ajax/libs/three.js/r71/three.min.js"></script>
<script type="x-shader/x-vertex" id='vertexShader'>
uniform sampler2D iChannel0;
void main() {
vec4 textureDisplacement = texture2D(iChannel0, uv);
vec3 newPosition = position + normal * (textureDisplacement.xyz * 10.0);
gl_Position = projectionMatrix * modelViewMatrix * vec4(newPosition, 1.);
}
</script>
Here's the JavaScript:
var container,
material,
renderer,
scene,
camera,
mesh,
start = Date.now(),
fov = 30;
window.addEventListener('load', function () {
// grab the container from the DOM
container = document.getElementById("container");
// create a scene
scene = new THREE.Scene();
// create a camera the size of the browser window
// and place it 100 units away, looking towards the center of the scene
camera = new THREE.PerspectiveCamera(
fov,
window.innerWidth / window.innerHeight,
1,
10000);
camera.position.z = 100;
camera.target = new THREE.Vector3(0, 0, 0);
scene.add(camera);
// create a wireframe material
material = new THREE.ShaderMaterial({
uniforms: {
iChannel0: {
type: 't',
value: THREE.ImageUtils.loadTexture('simplex.jpg')
}
},
vertexShader: document.getElementById('vertexShader').textContent,
shading: THREE.SmoothShading
});
// create a sphere and assign the material
mesh = new THREE.Mesh(
new THREE.IcosahedronGeometry( 20, 4 ),
material);
scene.add(mesh);
// create the renderer and attach it to the DOM
renderer = new THREE.WebGLRenderer({
antialias: true,
alpha: true
});
renderer.setClearColor(0x222222, 1);
renderer.setSize(window.innerWidth, window.innerHeight);
container.appendChild(renderer.domElement);
animate();
});
function animate() {
requestAnimationFrame(animate);
// let there be light
renderer.render(scene, camera);
}
This is the image I'm using as my sampler2D input:
http://members.gamedev.net/vertexnormal/techimages/seamless.jpg
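A minimal sketch of one way to do this (assuming an existing AudioContext named audioCtx and a connected source node — both hypothetical names): copy the analyser's frequency data into a Uint8Array every frame, wrap it in a THREE.DataTexture, and bind that to the iChannel0 uniform in place of the .jpg; getByteTimeDomainData works the same way for waveform data.
var analyser = audioCtx.createAnalyser();
analyser.fftSize = 1024;
source.connect(analyser); // hypothetical audio source node

// one row of frequency bins, one byte per bin
var freqData = new Uint8Array(analyser.frequencyBinCount);
var audioTexture = new THREE.DataTexture(
    freqData,
    analyser.frequencyBinCount, 1, // width x height
    THREE.LuminanceFormat,         // single channel, read via .x in GLSL
    THREE.UnsignedByteType);
material.uniforms.iChannel0.value = audioTexture;

// inside animate(), before rendering:
analyser.getByteFrequencyData(freqData);
audioTexture.needsUpdate = true;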
I'm trying to implement the Voronoi shader from the Book of Shaders in three.js and cannot seem to wrap my head around why the mouse position is not having any effect on my visible output. (This is for the initial demo, where the mouse moves and the Voronoi updates.)
I'm logging the mouse position, checking that the uniform value is updating, yet the shader doesn't appear to be changing whatsoever on my end.
This is what I see (static image - no change at all) while animate is being called.
I'm seeing u_time update when I log it in animate so the callback is happening. The uniforms just don't appear to be updated and I thought I was updating them correctly.
Note - I followed this SO post regarding tracking mouse position as a basis, though I tried both that mouse position and the tweaked mouse handler I have below (which maps to [-1, 1]).
Full code:
<!--
* Based on Book of Shaders 12:
https://thebookofshaders.com/12/
-->
<!DOCTYPE HTML>
<html>
<head>
<title>WebGL Demo - Voronoi</title>
<meta charset="utf-8">
<style>
body {
margin: 0;
padding: 0;
overflow: hidden;
}
</style>
<script src="./libraries/threejs/three.min.js"></script>
<!-- shaders -->
<script type="x-shader/x-vertex" id="vertexShader">
void main() {
vec4 modelViewPosition = modelViewMatrix * vec4(position, 1.0);
gl_Position = projectionMatrix * modelViewPosition;
}
</script>
<script type="x-shader/x-fragment" id="fragmentShader">
uniform vec2 u_resolution;
uniform vec2 u_mouse;
uniform float u_time;
void main() {
vec2 st = gl_FragCoord.xy/u_resolution.xy;
st.x *= u_resolution.x/u_resolution.y;
vec3 color = vec3(.0);
// Cell positions
vec2 point[5];
point[0] = vec2(0.83,0.75);
point[1] = vec2(0.60,0.07);
point[2] = vec2(0.28,0.64);
point[3] = vec2(0.31,0.26);
point[4] = u_mouse/u_resolution;
float m_dist = 1.; // minimum distance
// Iterate through the points positions
for (int i = 0; i < 5; i++) {
float dist = distance(st, point[i]);
// Keep the closer distance
m_dist = min(m_dist, dist);
}
// Draw the min distance (distance field)
color += m_dist;
// Show isolines
// color -= step(.7,abs(sin(50.0*m_dist)))*.3;
gl_FragColor = vec4(color,1.0);
}
</script>
</head>
<body></body>
<script>
let camera, scene, renderer;
let uniforms, mesh;
init();
animate();
function init() {
scene = new THREE.Scene();
camera = new THREE.Camera();
camera.position.z = 1;
renderer = new THREE.WebGLRenderer({ antialias: true });
renderer.setClearColor(0x000000, 1);
renderer.setSize(window.innerWidth, window.innerHeight);
renderer.setPixelRatio(window.devicePixelRatio);
document.body.appendChild(renderer.domElement);
uniforms = {
u_resolution: { type: 'vec2', value: new THREE.Vector2() },
u_mouse: { type: 'vec2', value: new THREE.Vector2() },
u_time: { type: 'float', value: 1.0 }
};
let vShader = document.getElementById("vertexShader").textContent;
let fShader = document.getElementById("fragmentShader").textContent;
let geometry = new THREE.PlaneGeometry(2, 2);
// give it a material
let material = new THREE.ShaderMaterial({
uniforms: uniforms,
fragmentShader: fShader,
vertexShader: vShader,
});
// and now create the mesh (geom+mat)
mesh = new THREE.Mesh(geometry, material);
scene.add(mesh);
onWindowResize();
window.addEventListener('resize', onWindowResize, false);
}
function animate() {
requestAnimationFrame(animate);
render();
}
function render() {
uniforms.u_time.value += 0.05;
renderer.render(scene, camera);
}
function onWindowResize(e) {
renderer.setSize(window.innerWidth, window.innerHeight);
uniforms.u_resolution.value.x = renderer.domElement.width;
uniforms.u_resolution.value.y = renderer.domElement.height;
}
document.onmousemove = function (e) {
uniforms.u_mouse.value.x = (e.clientX / window.innerWidth) * 2 - 1;//e.pageX / window.innerWidth;
uniforms.u_mouse.value.y = -(e.clientY / window.innerHeight) * 2 + 1;//e.pageY / window.innerHeight;
}
</script>
</html>
The line point[4] = u_mouse/u_resolution; probably doesn't need to be divided by u_resolution, because the mousemove handler already maps the Vector2 into the [-1, 1] range. Dividing again just yields very small values, so the resulting point barely moves and the mouse movement is indistinguishable.
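A minimal sketch of the suggested fix: keep the mouse in [0, 1] in the handler so it matches the normalized st coordinates, and drop the division in the shader.
document.onmousemove = function (e) {
    uniforms.u_mouse.value.x = e.clientX / window.innerWidth;
    uniforms.u_mouse.value.y = 1.0 - e.clientY / window.innerHeight; // flip y to match gl_FragCoord
};
Then in the fragment shader, use the value directly:
point[4] = u_mouse;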
I'm completely new to Three.js and use it for now only for a side feature on a website.
I'm trying to render a paper cup (or something that reminds a paper cup) with different textures on the outer side and inner side.
So far I've managed to do something like that; it rotates left <--> right using the keyboard arrows.
Simple Demo
How can I add a different texture to the inner side of the cup (where the red arrow points)?
Without a texture, the rotation looks a bit strange, so I'll be happy to hear any suggestions on how to make it look better visually.
Thank you
Just a concept of how you can do it:
var scene = new THREE.Scene();
var camera = new THREE.PerspectiveCamera(60, window.innerWidth / window.innerHeight, 1, 1000);
camera.position.set(0, 3, 5);
var renderer = new THREE.WebGLRenderer({
antialias: true
});
renderer.setClearColor(0xffffff);
renderer.setSize(window.innerWidth, window.innerHeight);
document.body.appendChild(renderer.domElement);
var controls = new THREE.OrbitControls(camera, renderer.domElement);
var geometry = new THREE.CylinderGeometry(2, 1.5, 4, 32, 1, true);
var materialOuter = new THREE.MeshBasicMaterial({
map: new THREE.TextureLoader().load("https://threejs.org/examples/textures/758px-Canestra_di_frutta_(Caravaggio).jpg")
});
var materialInner = new THREE.MeshBasicMaterial({
map: new THREE.TextureLoader().load("https://threejs.org/examples/textures/UV_Grid_Sm.jpg"),
side: THREE.BackSide
});
var meshOuter = new THREE.Mesh(geometry, materialOuter);
var meshInner = new THREE.Mesh(geometry, materialInner);
meshOuter.add(meshInner);
scene.add(meshOuter);
render();
function render() {
requestAnimationFrame(render);
renderer.render(scene, camera);
}
body {
overflow: hidden;
margin: 0;
}
<script src="https://cdnjs.cloudflare.com/ajax/libs/three.js/93/three.min.js"></script>
<script src="https://threejs.org/examples/js/controls/OrbitControls.js"></script>
As an alternative to #prisoner849's answer, you can use a custom shader material, check whether the rendered fragment is back-facing, and give it its own color (or texture):
vec3 frontMaterial = vec3(1.0, 0.0, 0.0); // red
vec3 backMaterial = vec3(0.0, 1.0, 0.0); // green
void main() {
// make every fragment red by default...
gl_FragColor = vec4(frontMaterial, 1.0);
// ...and green when it lies on the inner (back-facing) side of the model
if (!gl_FrontFacing) gl_FragColor = vec4(backMaterial, 1.0);
}
Here's a live example
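For context, here is a minimal sketch of how that shader might be wired up, assuming it lives in a script tag with the hypothetical id fragmentShader. side: THREE.DoubleSide is required so the inner faces are rasterized at all (otherwise gl_FrontFacing is never false):
var material = new THREE.ShaderMaterial({
    side: THREE.DoubleSide, // render inner faces too
    fragmentShader: document.getElementById('fragmentShader').textContent
    // no vertexShader given: three.js falls back to its default pass-through
});
var cup = new THREE.Mesh(new THREE.CylinderGeometry(2, 1.5, 4, 32, 1, true), material);
scene.add(cup);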
I'm trying to establish a fullscreen quad using a pass-through vertex shader in THREE.js. The quad itself is a plane geometry with dimensions (2, 2) located at the origin. It is assigned the ShaderMaterial. The camera is at z = 1, aiming at the quad.
The shaders are quite simple:
Vertex Shader:
void main() {
gl_Position = vec4( position, 1.0 );
}
Fragment Shader:
void main() {
gl_FragColor = vec4(0.0, 1.0, 0.0, 1.0);
}
But nothing shows up on screen. This setup is a standard way to render to a texture, so why is it not working in THREE.js?
I've tried plane.frustumCulled = false and changing the clip planes of the camera to no avail.
Any help is appreciated.
Upon further investigation, the reason for not seeing the rendering result is more involved and points to some odd behavior in three.js.
I am using a PlaneGeometry with a rotation matrix applied, which is then wrapped by an Object3D with a counter rotation.
var geometry = new THREE.PlaneGeometry(2, 2);
var m4 = new THREE.Matrix4().makeRotationX(Math.PI * 0.5);
geometry.applyMatrix(m4);
var mesh = new THREE.Mesh(geometry, material);
var obj = new THREE.Object3D();
obj.add(mesh);
obj.rotation.x = -Math.PI * 0.5;
scene.add(obj);
This setup seems to throw three.js off and no rendering is shown.
OK, the rotation got thrown away because it is part of the model-view matrix, which I ignore in the vertex shader. I'll have to refactor what I'm currently doing.
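In other words, since the pass-through shader ignores every matrix, the rotate/counter-rotate dance contributes nothing and can simply be dropped. A sketch of the reduced setup, reusing the material from above:
var geometry = new THREE.PlaneGeometry(2, 2);
var mesh = new THREE.Mesh(geometry, material);
mesh.frustumCulled = false; // culling tests use transforms the shader bypasses
scene.add(mesh);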
I'm not sure the exact problem you're having, but here is an example of a working fullscreen quad using the same technique.
var canvas = document.getElementById('canvas');
var scene = new THREE.Scene();
var renderer = new THREE.WebGLRenderer({canvas: canvas, antialias: true});
var camera = new THREE.PerspectiveCamera(45, canvas.clientWidth / canvas.clientHeight, 1, 1000);
var clock = new THREE.Clock();
var quad = new THREE.Mesh(
new THREE.PlaneGeometry(2, 2),
new THREE.ShaderMaterial({
vertexShader: document.getElementById('vertex-shader').textContent,
fragmentShader: document.getElementById('fragment-shader').textContent,
depthWrite: false,
depthTest: false
})
);
scene.add(quad);
var box = new THREE.Mesh(
new THREE.BoxGeometry(50, 50, 50),
new THREE.MeshBasicMaterial({color: '#000', wireframe: true})
);
scene.add(box);
camera.position.z = 200;
render();
function render() {
requestAnimationFrame(render);
if (canvas.width !== canvas.clientWidth || canvas.height !== canvas.clientHeight) {
renderer.setSize(canvas.clientWidth, canvas.clientHeight, false);
camera.aspect = canvas.clientWidth / canvas.clientHeight;
camera.updateProjectionMatrix();
}
var dt = clock.getDelta();
box.rotation.x += dt * 2 * Math.PI / 5;
box.rotation.y += dt * 2 * Math.PI / 7;
box.rotation.z += dt * 2 * Math.PI / 11;
renderer.render(scene, camera);
}
html, body, #canvas {
margin: 0;
padding: 0;
width: 100%;
height: 100%;
display: block;
}
<script src="https://cdnjs.cloudflare.com/ajax/libs/three.js/r73/three.min.js"></script>
<canvas id="canvas"></canvas>
<script id="vertex-shader" type="x-shader/x-vertex">
varying vec2 vUv;
void main() {
vUv = uv;
gl_Position = vec4(position, 1.0);
}
</script>
<script id="fragment-shader" type="x-shader/x-fragment">
varying vec2 vUv;
void main() {
gl_FragColor = vec4(vUv, 0.0, 1.0);
}
</script>
Hey I have just seen this short video: https://vine.co/v/hxHz5H2Q07q and I am wondering how to achieve this scanning effect.
For a start, two groups are needed: one holding the cubes with a material, and a corresponding one with the same cubes but wireframed, so it is possible to get these layers.
But how do you make the wireframed one appear in such a "scanner" way? Is it done via shaders, or is there some masking method in three.js for moving a mask across the render and displaying the given object linked with the mask?
Copied from the description of the original demo:
This experiment was inspired by real-world projection mapping onto physical shapes. The field is created by random generation and merged into a single geometry. It is duplicated and the copy is rendered with a ShaderMaterial that looks at a uniform point and makes most pixels transparent other than those near the light point. Moving the point through the field creates the appearance of scanning.
So, make some geometry and draw it twice: once flat-shaded, once wireframe. For the wireframe version, make a custom shader that takes a single point uniform. If the vertex (or pixel) is close to that point, draw a color; if not, draw transparent (or discard).
var camera = new THREE.PerspectiveCamera( 20, 1, 1, 10000 );
camera.position.z = 1800;
var scene = new THREE.Scene();
var light = new THREE.DirectionalLight( 0xffffff );
light.position.set( 1, 1, 1 );
scene.add( light );
var flatMaterial = new THREE.MeshLambertMaterial( { color: 0x606060, shading: THREE.FlatShading } );
var geometry = new THREE.IcosahedronGeometry( 200, 1 );
//var wireMaterial = new THREE.MeshBasicMaterial( { color: 0x00FF00, wireframe:true } );
var uniforms = {
color: { type: "c", value: new THREE.Color(0x00FF00) },
lightPos: { type: "v3", value: new THREE.Vector3(0, 0, 0) },
range: { type: "f", value: 150, },
};
var wireMaterial = new THREE.ShaderMaterial({
wireframe: true,
uniforms: uniforms,
attributes: {
},
vertexShader: document.getElementById('vertexshader').text,
fragmentShader: document.getElementById('fragmentshader').text,
depthTest: true,
transparent: true,
});
var mesh = new THREE.Mesh( geometry, flatMaterial );
scene.add( mesh );
var mesh = new THREE.Mesh( geometry, wireMaterial );
scene.add( mesh );
renderer = new THREE.WebGLRenderer( { antialias: true } );
document.body.appendChild( renderer.domElement );
function resize() {
var canvas = renderer.context.canvas
var width = canvas.clientWidth;
var height = canvas.clientHeight;
if (width != canvas.width || height != canvas.height) {
renderer.setSize( width, height, false );
camera.aspect = width / height;
camera.updateProjectionMatrix();
}
}
function render() {
resize();
var time = Date.now() * 0.001;
uniforms.lightPos.value.x = Math.sin(time) * 200;
uniforms.lightPos.value.y = Math.cos(time) * 200;
camera.lookAt( scene.position );
renderer.render( scene, camera );
requestAnimationFrame( render );
}
render();
html, body {
margin: 0px;
width: 100%;
height: 100%;
overflow: hidden;
}
canvas {
width: 100%;
height: 100%;
}
<script src="//cdnjs.cloudflare.com/ajax/libs/three.js/r70/three.min.js"></script>
<body>
</body>
<script type="not-js" id="vertexshader">
varying vec4 v_position;
void main() {
vec4 pos = vec4(position, 1.0);
gl_Position = projectionMatrix * modelViewMatrix * pos;
v_position = modelMatrix * pos;
}
</script>
<script type="not-js" id="fragmentshader">
uniform vec3 color;
uniform vec3 lightPos;
uniform float range;
varying vec4 v_position;
void main() {
float distanceToLight = distance(lightPos, v_position.xyz);
gl_FragColor = mix(vec4(color, 1), vec4(0,0,0,0), step(range, distanceToLight));
}
</script>
An alternative idea: you have a bunch of parallel rays, maybe 100 or however many you want, that shoot towards your scene and intersect it while moving up the y-direction over time; see the sketch below.
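A rough sketch of that ray idea (the meshes array of scan targets and all the numbers are assumptions): fire a row of parallel rays at height scanY, collect the hit points, and raise scanY each frame.
var raycaster = new THREE.Raycaster();
var dir = new THREE.Vector3(0, 0, -1); // all rays travel along -z
function scanRow(scanY, meshes) {
    var hits = [];
    for (var i = 0; i < 100; i++) {
        // spread ray origins along x, all at the current scan height
        raycaster.set(new THREE.Vector3(i * 4 - 200, scanY, 500), dir);
        var intersects = raycaster.intersectObjects(meshes);
        if (intersects.length) hits.push(intersects[0].point);
    }
    return hits; // e.g. feed these points into a particle system to visualize the scan line
}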
Scenario:
In my scene I implemented a vertex shader that positions a plane mesh on the xz-plane at the position of the camera.
So if the camera moves, the plane mesh moves with it. This leads to the visual effect that, while moving the camera, the plane mesh seems to stay fixed in place. This seems to work correctly.
Problem:
If I move the camera (and therefore the plane mesh) to a certain extent, the mesh suddenly disappears.
I realized that there seems to be a relationship between the disappearance and the size of the plane, i.e. the larger the plane, the more I can move the camera before the plane mesh disappears.
Also, in my test scene the plane mesh only disappears when moving on the negative x-axis, positive x-axis or negative z-axis. It does NOT disappear when moving on the positive z-axis.
I assume it has something to do with some kind of clipping, but I may be wrong. Recomputing the bounding box of the plane mesh had no effect.
Any ideas?
Cheers
Fiddle:
I created a fiddle that shows the problem: http://jsfiddle.net/p8wZ6/10/
In the fiddle I added an additional box mesh to better visualize that the camera actually moves.
- To change the axis the camera moves on (negative z-axis by default), (un-)comment the appropriate line in the onTick method.
- To change the size of the plane change the size value in the createPlane method.
Shader source code:
<script id="vertexShader" type="x-shader/x-vertex">
void main() {
vec4 pos = vec4( position, 1.0 );
vec4 wPos = modelMatrix * pos;
wPos.x += cameraPosition.x;
wPos.z += cameraPosition.z;
// standard
// vec4 pPos = projectionMatrix * modelViewMatrix * pos;
// keep fixed
vec4 pPos = projectionMatrix * viewMatrix * wPos;
gl_Position = pPos;
}
</script>
<script id="fragmentShader" type="x-shader/x-fragment">
void main() {
gl_FragColor.rgb = vec3(0.7, 0.7, 0.7);
gl_FragColor.a = 1.0;
}
</script>
JS source code:
var scene;
var camera;
var light;
var renderer;
var controls;
var onTick;
var planeMesh;
var boxMesh;
var heightmap;
var clock;
function createPlane(){
// disappearance seems related to size of geometry.
// the larger the longer it takes until disappearance.
var size = 20;
var geom = new THREE.PlaneGeometry(size, size, 20, 20);
return geom;
}
function createBox(){
var geom = new THREE.CubeGeometry(2, 2, 4);
return geom;
}
function createMesh(){
// plane
var geom = createPlane();
var shaderMaterial = new THREE.ShaderMaterial({
vertexShader: document.getElementById( 'vertexShader' ).textContent,
fragmentShader: document.getElementById( 'fragmentShader' ).textContent,
side: THREE.DoubleSide,
wireframe: true
});
planeMesh = new THREE.Mesh(geom, shaderMaterial);
var axis = new THREE.AxisHelper(4);
planeMesh.rotation.x = -90 * (Math.PI / 180);
planeMesh.add(axis);
scene.add(planeMesh);
// box
geom = createBox();
var material = new THREE.MeshBasicMaterial( {
color: 0xff00ff,
});
boxMesh = new THREE.Mesh(geom, material);
boxMesh.position.x = 5;
boxMesh.position.z = -15;
axis = new THREE.AxisHelper(4);
boxMesh.add(axis);
scene.add(boxMesh);
}
function startRendering(){
onTick();
}
function onTick(){
// move camera
// causes disappearance
// neg. z
camera.position.z -= .1;
// pos. x
// camera.position.x += .1;
// neg. x
// camera.position.x -= .1;
// causes no disappearance
// pos. z
// camera.position.z += .1;
requestAnimationFrame(onTick);
//controls.update(clock.getDelta());
renderer.render(scene, camera);
}
function init(){
renderer = new THREE.WebGLRenderer();
renderer.setClearColor( 0xffffff, 1 );
renderer.setSize(window.innerWidth, window.innerHeight);
document.body.appendChild(renderer.domElement);
scene = new THREE.Scene();
scene.add(new THREE.AxisHelper(4));
camera = new THREE.PerspectiveCamera(65, window.innerWidth / window.innerHeight, 0.1, 1000);
camera.position.set(0, 1, 0);
light = new THREE.DirectionalLight(0xffffff, 1);
light.shadowCameraVisible = true;
light.position.set(0, 0, 100);
scene.add(light);
//clock = new THREE.Clock();
//controls = new THREE.FirstPersonControls(camera);
//controls.movementSpeed = 20;
//controls.lookSpeed = .1;
}
init();
createMesh();
startRendering();
You have a fundamental misunderstanding.
You are moving the camera in the CPU. You are moving the vertices of the plane in the GPU.
The camera's frustum calculation knows nothing about the vertex displacements in the vertex shader.
As a work-around, you can set
planeMesh.frustumCulled = false;
A better solution is to just add the plane as a child of the camera, and omit vertex displacements.
planeMesh.position.set( 0, -1, 0 );
camera.add( planeMesh );
scene.add( camera );
You must add the camera to the scene graph if you use the second approach.
three.js r.65
When you define your camera in r73, the last two parameters allow you to specify the camera's near and far z clipping distances.
Taken from this link: http://threejs.org/docs/#Manual/Introduction/Creating_a_scene
var camera = new THREE.PerspectiveCamera( 75, window.innerWidth / window.innerHeight, 0.1, 1000 );
The third parameter of THREE.PerspectiveCamera defines the camera's near clipping distance and the fourth parameter defines the camera's far clipping distance.