This is my first time playing around with vertex shaders in a WebGL context. I want to texture a primitive with a video, but instead of just mapping the video onto the surface I'm trying to translate the luma of the video into vertex displacement. This is kind of like a Rutt/Etra video synthesizer, but in a digital format. A bright pixel should push the vertex forward, while a darker pixel does the inverse. Can anyone tell me what I'm doing wrong? I can't find a reference for this error.
When compiling my code, I get the following errors when using sampler2D and texture2D:
Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/29.0.1547.65 Safari/537.36 | WebGL 1.0 (OpenGL ES 2.0 Chromium) | WebKit | WebKit WebGL | WebGL GLSL ES 1.0 (OpenGL ES GLSL ES 1.0 Chromium) Three.js:264
ERROR: 0:57: 'ftransform' : no matching overloaded function found
ERROR: 0:57: 'assign' : cannot convert from 'const mediump float' to 'Position highp 4-component vector of float'
ERROR: 0:60: 'gl_TextureMatrix' : undeclared identifier
ERROR: 0:60: 'gl_TextureMatrix' : left of '[' is not of type array, matrix, or vector
ERROR: 0:60: 'gl_MultiTexCoord0' : undeclared identifier
Three.js:257
<!doctype html>
<html>
<head>
  <title>boiler plate for three.js</title>
  <meta charset="utf-8">
  <meta name="viewport" content="width=device-width, user-scalable=no, minimum-scale=1.0, maximum-scale=1.0">
  <script src="vendor/three.js/Three.js"></script>
  <script src="vendor/three.js/Detector.js"></script>
  <script src="vendor/three.js/Stats.js"></script>
  <script src="vendor/threex/THREEx.screenshot.js"></script>
  <script src="vendor/threex/THREEx.FullScreen.js"></script>
  <script src="vendor/threex/THREEx.WindowResize.js"></script>
  <script src="vendor/threex.dragpancontrols.js"></script>
  <script src="vendor/headtrackr.js"></script>
  <style>
    body {
      overflow: hidden;
      padding: 0;
      margin: 0;
      color: #222;
      background-color: #BBB;
      font-family: arial;
      font-size: 100%;
    }
    #info .top {
      position: absolute;
      top: 0px;
      width: 100%;
      padding: 5px;
      text-align: center;
    }
    #info a {
      color: #66F;
      text-decoration: none;
    }
    #info a:hover {
      text-decoration: underline;
    }
    #info .bottom {
      position: absolute;
      bottom: 0px;
      right: 5px;
      padding: 5px;
    }
  </style>
</head>
<body>
  <!-- three.js container -->
  <div id="container"></div>
  <!-- info on screen display -->
  <div id="info">
    <!--<div class="top">
      LearningThree.js
      boiler plate for
      three.js
    </div>-->
    <div class="bottom" id="inlineDoc">
      - <i>p</i> for screenshot
    </div>
  </div>
  <canvas id="compare" width="320" height="240" style="display:none"></canvas>
  <video id="vid" autoplay loop></video>
  <script type="x-shader/x-vertex" id="vertexShader">
    varying vec2 texcoord0;
    void main()
    {
      // perform standard transform on vertex
      gl_Position = ftransform();
      // transform texcoords
      texcoord0 = vec2(gl_TextureMatrix[0] * gl_MultiTexCoord0);
    }
  </script>
  <script type="x-shader/x-fragment" id="fragmentShader">
    varying vec2 texcoord0;
    uniform sampler2D tex0;
    uniform vec2 imageSize;
    uniform float coef;
    const vec4 lumcoeff = vec4(0.299, 0.587, 0.114, 0.);
    void main (void)
    {
      vec4 pixel = texture2D(tex0, texcoord0);
      float luma = dot(lumcoeff, pixel);
      gl_FragColor = vec4((texcoord0.x / imageSize.x), luma, (texcoord0.y / imageSize.y), 1.0);
    }
  </script>
  <script type="text/javascript">
    var stats, scene, renderer;
    var camera, cameraControls;
    var videoInput = document.getElementById('vid');
    var canvasInput = document.getElementById('compare');
    var projector = new THREE.Projector();
    var gl;
    var mesh,
        cube,
        attributes,
        uniforms,
        material,
        materials;
    var videoTexture = new THREE.Texture( videoInput );
    if( !init() ) animate();

    // init the scene
    function init(){
      if( Detector.webgl ){
        renderer = new THREE.WebGLRenderer({
          antialias: true,             // to get smoother output
          preserveDrawingBuffer: true  // to allow screenshot
        });
        renderer.setClearColorHex( 0xBBBBBB, 1 );
      // uncomment if webgl is required
      //}else{
      //  Detector.addGetWebGLMessage();
      //  return true;
      }else{
        renderer = new THREE.CanvasRenderer();
        gl = renderer;
      }
      renderer.setSize( window.innerWidth, window.innerHeight );
      document.getElementById('container').appendChild(renderer.domElement);
      // create a scene
      scene = new THREE.Scene();
      // put a camera in the scene
      camera = new THREE.PerspectiveCamera( 23, window.innerWidth / window.innerHeight, 1, 100000 );
      camera.position.z = 0;
      scene.add( camera );
      // // create a camera control
      // cameraControls = new THREEx.DragPanControls(camera)
      // transparently support window resize
      // THREEx.WindowResize.bind(renderer, camera);
      // allow 'p' to make screenshot
      THREEx.Screenshot.bindKey(renderer);
      // allow 'f' to go fullscreen where this feature is supported
      if( THREEx.FullScreen.available() ){
        THREEx.FullScreen.bindKey();
        document.getElementById('inlineDoc').innerHTML += "- <i>f</i> for fullscreen";
      }
      materials = new THREE.MeshLambertMaterial({
        map: videoTexture
      });
      attributes = {};
      uniforms = {
        tex0: { type: 'mat2', value: materials },
        imageSize: { type: 'f', value: [] },
        coef: { type: 'f', value: 1.0 }
      };
      // Adding a directional light source to see anything..
      var directionalLight = new THREE.DirectionalLight(0xffffff);
      directionalLight.position.set(1, 1, 1).normalize();
      scene.add(directionalLight);
      // video styling
      videoInput.style.position = 'absolute';
      videoInput.style.top = '50px';
      videoInput.style.zIndex = '100001';
      videoInput.style.display = 'block';
      // set up camera controller
      headtrackr.controllers.three.realisticAbsoluteCameraControl(camera, 1, [0,0,0], new THREE.Vector3(0,0,0), {damping : 1.1});
      var htracker = new headtrackr.Tracker();
      htracker.init(videoInput, canvasInput);
      htracker.start();
      // var stats = new Stats();
      // stats.domElement.style.position = 'absolute';
      // stats.domElement.style.top = '0px';
      // document.body.appendChild( stats.domElement );
      document.addEventListener('headtrackrStatus',
        function (event) {
          if (event.status == "found") {
            addCube();
          }
        }
      );
    }

    // animation loop
    function animate() {
      // loop on request animation loop
      // - it has to be at the beginning of the function
      // - see details at http://my.opera.com/emoller/blog/2011/12/20/requestanimationframe-for-smart-er-animating
      requestAnimationFrame( animate );
      // do the render
      render();
      // update stats
      //stats.update();
    }

    function render() {
      // convert matrix of every frame of video -> texture
      uniforms.tex0 = materials;
      uniforms.coef = 0.2;
      uniforms.imageSize.x = window.innerWidth;
      uniforms.imageSize.y = window.innerHeight;
      // update camera controls
      // cameraControls.update();
      if( videoInput.readyState === videoInput.HAVE_ENOUGH_DATA ){
        videoTexture.needsUpdate = true;
      }
      // actually render the scene
      renderer.render( scene, camera );
    }

    function addCube(){
      material = new THREE.ShaderMaterial({
        uniforms: uniforms,
        attributes: attributes,
        vertexShader: document.getElementById('vertexShader').textContent,
        fragmentShader: document.getElementById('fragmentShader').textContent,
        transparent: true
      });
      // The cube
      cube = new THREE.Mesh(new THREE.CubeGeometry(40, 30, 10, 1, 1, 1, material), new THREE.MeshFaceMaterial());
      cube.overdraw = true;
      scene.add(cube);
    }
  </script>
</body>
</html>
The primary problem here is that you are using old GLSL reserved words that were intended for programmable / fixed-function interop. In OpenGL ES 2.0, identifiers like gl_MultiTexCoord0 and gl_TextureMatrix[n] are not defined, because ES completely removed the legacy fixed-function vertex array baggage that regular OpenGL still has to deal with. In OpenGL, those reserved words gave you per-texture-unit matrix and vertex array state; in OpenGL ES they simply do not exist.
To get around this, you have to use generic vertex attributes (e.g. attribute vec2 tex_st) instead of relying on a 1:1 mapping between texture coordinate pointers and texture units. Likewise, there is no texture matrix associated with each texture unit; to duplicate that functionality, you need to pass your own matrix uniforms to the vertex/fragment shader.
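For example, a GLSL ES 1.0 vertex shader equivalent to the one in the question might look like the sketch below. Note that textureMatrix is a uniform you would create and upload yourself (it is not a built-in), and that in raw WebGL you declare position and uv yourself, while three.js's ShaderMaterial prepends those declarations for you:
attribute vec3 position;       // generic attribute replacing gl_Vertex
attribute vec2 uv;             // generic attribute replacing gl_MultiTexCoord0
uniform mat4 modelViewMatrix;  // replaces the fixed-function modelview matrix
uniform mat4 projectionMatrix; // replaces the fixed-function projection matrix
uniform mat3 textureMatrix;    // your own uniform, replacing gl_TextureMatrix[0]
varying vec2 texcoord0;
void main()
{
  // replaces texcoord0 = vec2(gl_TextureMatrix[0] * gl_MultiTexCoord0);
  texcoord0 = (textureMatrix * vec3(uv, 1.0)).xy;
  // replaces gl_Position = ftransform();
  gl_Position = projectionMatrix * modelViewMatrix * vec4(position, 1.0);
}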
To be honest, I cannot remember the last time I actually found it useful to have a separate texture matrix / texture coordinate pointer for each texture unit when using shaders. I often have 4 or 5 different textures and only need maybe 1 or 2 sets of texture coordinates. It is no big loss.
The kicker here is ftransform(). It was intended to make it possible to write 1-line vertex shaders in OpenGL that behave the same way as the fixed-function pipeline. You must have copied and pasted a shader that was written for OpenGL 2.x or 3.x (compatibility profile). Explaining how to fix everything in this shader could be a real chore; you may need to learn more about GLSL before most of what I just wrote makes sense :-\
Related
I have a scene where I want to add a background. I have a PNG image (2K resolution); on a PC it displays at the right size, but on mobile it comes out badly distorted.
My code is the following:
var texture = THREE.ImageUtils.loadTexture('img/texture.png');
And I add it as the background like this:
scene = new THREE.Scene();
scene.background = texture;
I've seen from some searching that I may have to create a separate scene for the background, but that doesn't seem like the easiest solution. Is there a better way to do this?
(As always, sorry for my bad English)
You can try approaching this with THREE.ShaderMaterial:
class MyBackgroundPlane extends THREE.Mesh {
  constructor() {
    super(
      new THREE.PlaneBufferGeometry(2, 2, 1, 1),
      new THREE.ShaderMaterial({
        uniforms: {
          uTexture: { value: null },
          uAspect: { value: 1 }
        },
        vertexShader: `
          varying vec2 vUv;
          uniform float uAspect;
          void main(){
            vUv = uv;         // pass coordinates to screen
            vUv.x *= uAspect; // scale the coordinates
            gl_Position = vec4(position.xy, 1., 1.);
          }
        `,
        fragmentShader: `
          varying vec2 vUv;
          uniform sampler2D uTexture;
          void main(){
            gl_FragColor = texture2D( uTexture, vUv );
          }
        `
      })
    )
    this.frustumCulled = false
  }
  setAspect( aspect ) {
    this.material.uniforms.uAspect.value = aspect
  }
  setTexture( texture ) {
    this.material.uniforms.uTexture.value = texture
  }
}
You kinda have to figure out what needs to happen when it's portrait and when it's landscape.
One approach could be to use uniform vec2 uScale; and then set the vertical and horizontal aspects differently depending on the orientation.
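A sketch of that variant (uScale is a hypothetical uniform you would set from JavaScript on every resize, e.g. [1, aspect] in landscape and [1 / aspect, 1] in portrait):
varying vec2 vUv;
uniform vec2 uScale;
void main(){
  vUv = uv * uScale; // scale each axis independently
  gl_Position = vec4(position.xy, 1., 1.);
}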
The same thing could be done with the scene graph, by attaching a regular plane to the camera for example, and then managing its scale.
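A rough sketch of that scene-graph variant (the names and the distance here are illustrative, not from the answer above):
// attach a textured plane to the camera so it always fills the view
var bgPlane = new THREE.Mesh(
  new THREE.PlaneBufferGeometry(1, 1),
  new THREE.MeshBasicMaterial({ map: texture, depthWrite: false })
);
var dist = 10;
bgPlane.position.z = -dist; // keep the plane behind the scene content
camera.add(bgPlane);        // the plane now follows the camera
scene.add(camera);          // the camera itself must be in the scene graph

function resizeBackground() {
  // scale the plane so it exactly fills the frustum at distance `dist`
  var height = 2 * dist * Math.tan(THREE.Math.degToRad(camera.fov / 2));
  bgPlane.scale.set(height * camera.aspect, height, 1);
}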
As an alternative, you can use a CSS based background:
#background {
  background-image: url('http://youring.com/test/img/texture.png');
  position: fixed;
  top: 0;
  left: 0;
  height: 100%;
  width: 100%;
  z-index: -1;
}
Just create your renderer like this so it's possible to see through the canvas:
renderer = new THREE.WebGLRenderer( { antialias: true, alpha: true } );
DEMO: https://jsfiddle.net/f2Lommf5/5052/
I need some help with WebGL.
I have to open the mouth of a face model (Lee Perry Smith) from code, but I don't know how to identify the correct vertexes to do it.
For my task I'm not allowed to use three.js.
I've tried to get the indices from Blender, but I had no luck for some reason (it's as if the vertices identified in Blender do not correspond to the JSON that I generated for WebGL).
Does someone have any idea..?
More info:
I've used this snippet in Blender to get the indices: http://blenderscripting.blogspot.it/2011/07/getting-index-of-currently-selected.html
Then I went into my JavaScript and used this function to edit the vertex coordinates (just to see if they were right, even though this is not the real transformation wanted):
function move_vertex(indices, x, y, z) {
  // grab the (flattened) vertex position array for the model
  var vertex = headObject.vertices[0];
  indices.forEach(function(index) {
    vertex[3 * index]     += x;
    vertex[3 * index + 1] += y;
    vertex[3 * index + 2] += z;
  });
  // re-upload the modified positions to the GPU
  gl.bindBuffer(gl.ARRAY_BUFFER, headObject.modelVertexBuffer[0]);
  gl.bufferSubData(gl.ARRAY_BUFFER, 0, new Float32Array(vertex));
  gl.bindBuffer(gl.ARRAY_BUFFER, null);
}
There are basically unlimited ways to do this; which one fits your situation I have no idea.
One would be to use a skinning system. Attach the mouth vertices to bones and move the bones.
Another would be to use morph targets. Basically, save the mesh once with the mouth open and once with the mouth closed. Load both meshes in WebGL, pass both to your shader, and lerp between them:
attribute vec4 position1; // data from mouth closed model
attribute vec4 position2; // data from mouth open model
uniform float mixAmount;
uniform mat4 worldViewProjection;
...
// compute the position to use based on the mixAmount
// 0 = close mouth
// 1 = open mouth
// 0.5 = 50% between open and closed mouth etc..
vec4 position = mix(position1, position2, mixAmount);
// use the result in the standard way
gl_Position = worldViewProjection * position;
You'd do a similar mix for normals, though you'd want to normalize the result.
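For example, assuming matching normal1 and normal2 attributes from the two models:
// blend the normals the same way, then renormalize the result
vec3 normal = normalize(mix(normal1, normal2, mixAmount));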
Most modeling packages support using morph targets inside the package; whether or not that data gets exported is up to the file format and the exporter. The easy way to just hack something together would be to export the face twice and load the 2 files with the code you have.
Another might be to use vertex colors. In your modeling program, color the lip vertices a distinct color, then find those vertices by color in your code.
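A sketch of that lookup, assuming colors is a flat array of per-vertex RGB values parallel to your positions, and the lips were painted pure red:
var lipIndices = [];
for (var i = 0; i < colors.length; i += 3) {
  // the "distinct color" test; the tolerances guard against quantization
  if (colors[i] > 0.99 && colors[i + 1] < 0.01 && colors[i + 2] < 0.01) {
    lipIndices.push(i / 3); // record the vertex index
  }
}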
Another would be to assign the lips a different material, then use the material to find the vertices.
Some 3D modeling programs let you add metadata to vertices; that's basically a variation of the vertex colors method. You'd probably need to write your own exporter, as few third-party formats support extra data, and even if a format could theoretically support it, most exporters don't export it.
Similarly, some 3D modeling programs let you add vertices to selections/clusters/groups, which you can then reference to find the lips. Again, this method probably requires your own exporter, as most formats don't support this data.
One other way is really hacky but will get the job done in a pinch: select the lip vertices and move them 1000 units to the right. Then, in your program, find all the vertices that are too far to the right and subtract 1000 units from each one to put them back where they originally were. This might mess up your normals, but you can recompute normals afterwards.
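A sketch of that recovery pass, assuming vertices is the flat position array (x for vertex i at 3 * i):
var OFFSET = 1000;
var lipIndices = [];
for (var i = 0; i < vertices.length; i += 3) {
  if (vertices[i] > OFFSET / 2) { // "too far to the right"
    vertices[i] -= OFFSET;        // put the vertex back where it belongs
    lipIndices.push(i / 3);       // remember: this is a lip vertex
  }
}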
Yet another would be to use the data you have and program an interface to highlight each vertex one at a time, then write down which vertices are the mouth.
For example, put an <input type="number"> on the screen. Based on the number, do something visible with that vertex: set a vertex color or tweak its position. Then write down which vertices are the mouth. If you're lucky they're in a contiguous range, so you only have to write down the first and last ones.
const m4 = twgl.m4;
const v3 = twgl.v3;
const gl = document.querySelector("canvas").getContext("webgl");

const vs = `
attribute vec4 a_position;
attribute vec4 a_normal;
uniform mat4 u_matrix;
varying vec4 v_color;
void main() {
  // Multiply the position by the matrix.
  gl_Position = u_matrix * a_position;
  // Pass the normal as a color to the fragment shader.
  v_color = a_normal * .5 + .5;
}
`;

const fs = `
precision mediump float;
// Passed in from the vertex shader.
varying vec4 v_color;
void main() {
  gl_FragColor = v_color;
}
`;

// Yes, this sample is using TWGL (https://twgljs.org).
// You should be able to tell what it's doing from the names
// of the functions and be able to easily translate that to raw WebGL
const programInfo = twgl.createProgramInfo(gl, [vs, fs]);
const bufferInfo = twgl.createBufferInfoFromArrays(gl, {
  a_position: HeadData.positions,
  a_normal: HeadData.normals,
});
const numVertices = bufferInfo.numElements;
let vertexId = 0;      // id of vertex we're inspecting
let newVertexId = 251; // id of vertex we want to inspect

// these are normals and get converted to colors in the shader
const black = new Float32Array([-1, -1, -1]);
const red   = new Float32Array([ 1, -1, -1]);
const white = new Float32Array([ 1,  1,  1]);
const colors = [
  black,
  red,
  white,
];

const numElem = document.querySelector("#number");
numElem.textContent = newVertexId;

document.querySelector("#prev").addEventListener('click', e => {
  newVertexId = (newVertexId + numVertices - 1) % numVertices;
  numElem.textContent = newVertexId;
});

document.querySelector("#next").addEventListener('click', e => {
  newVertexId = (newVertexId + 1) % numVertices;
  numElem.textContent = newVertexId;
});

let frameCount = 0;
function render(time) {
  ++frameCount;
  twgl.resizeCanvasToDisplaySize(gl.canvas);
  gl.viewport(0, 0, gl.canvas.width, gl.canvas.height);
  gl.enable(gl.DEPTH_TEST);
  gl.enable(gl.CULL_FACE);

  // restore the previous vertex's original normal (one normal = 3 floats)
  // for what's in bufferInfo see
  // http://twgljs.org/docs/module-twgl.html#.BufferInfo
  const origData = new Float32Array(
      HeadData.normals.slice(vertexId * 3, (vertexId + 1) * 3));
  const oldOffset = vertexId * 3 * 4; // 4 bytes per float
  gl.bindBuffer(gl.ARRAY_BUFFER, bufferInfo.attribs.a_normal.buffer);
  gl.bufferSubData(gl.ARRAY_BUFFER, oldOffset, origData);

  // set new vertex to a color
  const newOffset = newVertexId * 3 * 4; // 4 bytes per float
  gl.bufferSubData(
      gl.ARRAY_BUFFER,
      newOffset,
      colors[(frameCount / 3 | 0) % colors.length]);
  vertexId = newVertexId;

  const fov = 45 * Math.PI / 180;
  const aspect = gl.canvas.clientWidth / gl.canvas.clientHeight;
  const zNear = 0.1;
  const zFar = 50;
  const projection = m4.perspective(fov, aspect, zNear, zFar);
  const eye = [0, 0, 25];
  const target = [0, 0, 0];
  const up = [0, 1, 0];
  const camera = m4.lookAt(eye, target, up);
  const view = m4.inverse(camera);
  const viewProjection = m4.multiply(projection, view);
  const world = m4.identity();
  const worldViewProjection = m4.multiply(viewProjection, world);

  gl.useProgram(programInfo.program);
  twgl.setBuffersAndAttributes(gl, programInfo, bufferInfo);
  twgl.setUniforms(programInfo, {
    u_matrix: worldViewProjection,
  });
  gl.drawArrays(gl.TRIANGLES, 0, numVertices);

  requestAnimationFrame(render);
}
requestAnimationFrame(render);
body { margin: 0; }
canvas { width: 100vw; height: 100vh; display: block; }
.ui {
  position: absolute;
  left: 1em;
  top: 1em;
  background: rgba(0,0,0,0.9);
  padding: 1em;
  font-size: large;
  color: white;
  font-family: monospace;
}
#number {
  display: inline-block;
  text-align: center;
}
<script src="https://twgljs.org/dist/2.x/twgl-full.min.js"></script>
<script src="https://webglfundamentals.org/webgl/resources/headdata.js"></script>
<canvas></canvas>
<div class="ui">
  <button id="prev">⬅</button>
  <span>vert ndx:</span><span id="number"></span>
  <button id="next">➡</button>
</div>
I want to get the best performance possible when rendering simple textured shapes. The problem with the Phong model is that it requires extra lighting (which involves calculations), and the colors come out different from the ones desired and need some tweaking.
To simplify the case I've decided to use a simple flat shader, but some problems occur:
<script id="vertShader" type="shader">
  varying vec2 vUv;
  void main() {
    vUv = uv;
    gl_Position = projectionMatrix * modelViewMatrix * vec4(position, 1.0);
  }
</script>
<script id="fragShader" type="shader">
  varying vec2 vUv;
  uniform sampler2D material;
  void main() {
    gl_FragColor = texture2D(material, vUv);
  }
</script>
Under certain camera angles, some of the shelves disappear (you can notice the darker places and see through them), which does not occur with the Phong material:
It happens with the shadow texture put inside each shelf. It's a textured cube with a shadow texture placed inside each space (don't ask me why, this is just the task I got :)).
I don't know what may be causing this. Maybe the loading?
I'm using the standard OBJ loader and adding textures. The OBJ loader sets the material to Phong and I'm switching it to my custom shader like this:
var objLoader = new THREE.OBJLoader( manager );
objLoader.load( obj, function ( model ) {
  elements[name] = model;
  console.log('loaded ', name);
  var img = THREE.ImageUtils.loadTexture(mat);
  elements[name].traverse( function ( child ) {
    if ( child instanceof THREE.Mesh ) {
      child.material = new THREE.ShaderMaterial( {
        uniforms: {
          color: { type: 'f', value: 0.0 },
          material: { type: 't', value: img }
        },
        fragmentShader: document.getElementById('fragShader').text,
        vertexShader: document.getElementById('vertShader').text,
      } );
    }
  });
}); // end of the load() callback
Any suggestions would be helpful.
Every surface is drawn in one direction (clockwise or counter-clockwise). If you are viewing a surface from the other side, it will "disappear". I think this is the problem in your own shader: you should either render the faces from both sides (worse performance) or calculate which side they should be rendered from.
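For the ShaderMaterial in the question, rendering both sides is just an extra material flag; a sketch of the relevant change (uniforms abbreviated from the question's code):
child.material = new THREE.ShaderMaterial({
  uniforms: {
    color: { type: 'f', value: 0.0 },
    material: { type: 't', value: img }
  },
  fragmentShader: document.getElementById('fragShader').text,
  vertexShader: document.getElementById('vertShader').text,
  side: THREE.DoubleSide // draw back faces too, at some performance cost
});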
To optimize the performance slightly you should use a standard material from THREE. You can use them without writing your own shader.
something like:
child.material = new THREE.MeshBasicMaterial({
  side: THREE.DoubleSide,
  color: 0x000000
  // ...
});
I created a skybox material with textures in a project of my own:
function getSkyboxMaterial() {
  var faceMaterials = getSkyboxFaces();
  var skyboxMaterial = new THREE.MeshFaceMaterial(faceMaterials);
  return skyboxMaterial;
}

function getSkyboxFaces() {
  var NUMBER_OF_FACES = 6, faces = [], texture, faceMaterial, texturePath, i;
  for (i = 0; i < NUMBER_OF_FACES; i++) {
    texturePath = IMAGE_PREFIX + DIRECTIONS[i] + IMAGE_SUFFIX;
    texture = loadFlippedTexture( texturePath );
    faceMaterial = getFaceMaterial( texture );
    faces.push( faceMaterial );
  }
  return faces;
}

function loadFlippedTexture(texturePath) {
  var texture = loadTexture(texturePath);
  flipTexture(texture); // This is necessary, because the skybox-textures are mirrored.
  return texture;
}

function loadTexture(path) {
  return THREE.ImageUtils.loadTexture(path);
}

function flipTexture(texture) {
  texture.repeat.set(-1, 1);
  texture.offset.set(1, 0);
  return texture;
}

function getFaceMaterial(texture) {
  var faceMaterial = new THREE.MeshBasicMaterial({
    map: texture,
    side: THREE.DoubleSide
  });
  return faceMaterial;
}
I'm new to WebGL and I'm trying to create a black ring in the middle of this green circle without making additional circles. I believe I can do this by making the normals of those triangles go the other way, but I'm not sure exactly how to do that. My friend suggested changing the texture coordinates, but I don't really understand how that would help. Can anyone shed some light on these ideas and the intuition behind them?
_______HTML File__________
<!DOCTYPE html>
<html>
<head>
  <script id="vertex-shader" type="x-shader/x-vertex">
    attribute vec4 vPosition;
    void main()
    {
      gl_Position = vPosition;
    }
  </script>
  <script id="fragment-shader" type="x-shader/x-fragment">
    precision mediump float;
    void main()
    {
      gl_FragColor = vec4( 0.0, 1.0, 0.0, 1.0 );
    }
  </script>
  <script type="text/javascript" src="../Common/webgl-utils.js"></script>
  <script type="text/javascript" src="../Common/initShaders.js"></script>
  <script type="text/javascript" src="../Common/MV.js"></script>
  <script type="text/javascript" src="Circle.js"></script>
</head>
<body>
  <canvas id="gl-canvas" width="512" height="512">
    Oops ... your browser doesn't support the HTML5 canvas element
  </canvas>
</body>
</html>
_____Javascript File______
var gl;
var points;

window.onload = function init()
{
  var canvas = document.getElementById( "gl-canvas" );
  gl = WebGLUtils.setupWebGL( canvas );
  if ( !gl ) { alert( "WebGL isn't available" ); }

  // The Vertices
  var pi = 3.14159;
  var x = 2*pi/100;
  var y = 2*pi/100;
  var r = 0.9;
  points = [ vec2(0.0, 0.0) ]; // establish origin
  // for loop to push points
  for (var i = 0; i < 100; i++) {
    points.push(vec2(r*Math.cos(x*i), r*Math.sin(y*i)));
    points.push(vec2(r*Math.cos(x*(i+1)), r*Math.sin(y*(i+1))));
  }

  //
  // Configure WebGL
  //
  gl.viewport( 0, 0, canvas.width, canvas.height );
  gl.clearColor( 0.3, 0.3, 0.3, 1.0 );

  // Load shaders and initialize attribute buffers
  var program = initShaders( gl, "vertex-shader", "fragment-shader" );
  gl.useProgram( program );

  // Load the data into the GPU
  var bufferId = gl.createBuffer();
  gl.bindBuffer( gl.ARRAY_BUFFER, bufferId );
  gl.bufferData( gl.ARRAY_BUFFER, flatten(points), gl.STATIC_DRAW );

  // Associate our shader variables with our data buffer
  var vPosition = gl.getAttribLocation( program, "vPosition" );
  gl.vertexAttribPointer( vPosition, 2, gl.FLOAT, false, 0, 0 );
  gl.enableVertexAttribArray( vPosition );

  render();
};

function render() {
  gl.clear( gl.COLOR_BUFFER_BIT );
  gl.drawArrays( gl.TRIANGLE_FAN, 0, points.length );
}
I put together part of your task as you requested. I tried not to change your code much, so you can understand all the changes I made. First, a small demo:
Triangle with your code
Circle made out of 3 points
You made the circle out of 100 points (vertices). Now you want to make another shape inside, which would mean another 100 points, and that is probably what you don't want to do. Instead, you would like to use normals. But from the point of view of the shaders (which are responsible for drawing), normals, vertices and other things like texture coordinates are just data, and you are the one who decides whether that data means vertices, normals, texture coordinates or anything else.
If I understand correctly, you want to customize your object without adding too much additional data. I don't think normals or textures can help you.
There are a few problems you will have to face with textures:
First, if the circle is too big (close to you), it will not look that nice with just 100 points.
If the circle is too small (far from you) but there are a lot of circles, you will use too many points for nothing, which will lower performance.
If you use a texture for the black ring inside, it will be fuzzy when you get closer.
And if you use too large a texture for a lot of small circles, it will again lower performance.
... and normals are used for light reflection, like this.
The way I think about the problem: you can define a circle with just a few parameters, a radius and a center. With WebGL, you can only draw triangles (and points), but you can, for example, customize the shader to draw the inscribed circle of each triangle.
So I defined just radius and center:
var r = 0.9;
var middle = vec2(0.0, 0.0);
Then I generate the 3 points of a triangle around the circle (the circle is the inscribed circle of this new triangle):
function buildCircle(center, r) {
  var points = [];
  points.push(vec2((r * TRI_HEIGHT_MOD * Math.cos(0 * DEG_TO_RAD)) + center[0], (r * TRI_HEIGHT_MOD * Math.sin(0 * DEG_TO_RAD)) + center[1]));
  points.push(vec2((r * TRI_HEIGHT_MOD * Math.cos(120 * DEG_TO_RAD)) + center[0], (r * TRI_HEIGHT_MOD * Math.sin(120 * DEG_TO_RAD)) + center[1]));
  points.push(vec2((r * TRI_HEIGHT_MOD * Math.cos(240 * DEG_TO_RAD)) + center[0], (r * TRI_HEIGHT_MOD * Math.sin(240 * DEG_TO_RAD)) + center[1]));
  vertexPositions = points;
}
Then I pass middle, radius and triangle to my shader:
var vPosition = gl.getAttribLocation(program, "vPosition");
gl.vertexAttribPointer(vPosition, 2, gl.FLOAT, false, 0, 0);
gl.enableVertexAttribArray(vPosition);
program.middle = gl.getUniformLocation(program, "middle");
gl.uniform2f(program.middle, middle[0], middle[1]);
program.r = gl.getUniformLocation(program, "r");
gl.uniform1f(program.r, r);
And then I render it the same way you do, except I need to enable alpha blending, because some parts of the triangle will be invisible, so the result looks like a circle:
gl.blendFunc(gl.SRC_ALPHA, gl.ONE);
gl.enable(gl.BLEND);
gl.disable(gl.DEPTH_TEST);
OK, now the shaders.
There are a few things you really need to know before continuing, so please read about them here: http://webglfundamentals.org/webgl/lessons/webgl-how-it-works.html
My vertex shader is the same as yours, except I pass the interpolated vertex position to the fragment shader:
varying vec4 pos;
...
void main() {
pos = vPosition;
My fragment shader needs to do only one thing: decide whether the pixel is inside the circle or not. The circle equation, for a center middle and radius r, is:
(pos.x - middle.x)^2 + (pos.y - middle.y)^2 < r^2
If the left side is smaller than the right side, the pixel is inside the circle. If not, it is outside, so invisible:
float inside = pow(pos.r - middle.r, 2.0) + pow(pos.g - middle.g, 2.0);
if (inside < pow(r, 2.0)) {
  gl_FragColor = vec4(0.0, 1.0, 0.0, 1.0);
} else {
  gl_FragColor = vec4(0.0, 0.0, 0.0, 0.0);
}
End
So now you might know how to make a circle from just a few points. You can use a similar approach to draw the ring inside. Then you can draw thousands of them at any distance and make them move; the program will still be fast and the shapes will be as sharp as possible.
Just one last thing: usually you don't simplify shapes like this, but sometimes you might. A good example is the Bézier curve, which can help you draw crazy sharp shapes with just a few points. It all depends on what you would like to do. One technique can't solve all problems, and you have to keep looking for more solutions.
EDIT 1: "What is var middle = vec2(0.0, 0.0)? I mean, vec2?"
There are 3 other scripts in the question that I replicated in my solution (in the jsfiddle, on the left under External Resources). It wasn't part of this question, but it was easy to find their origin:
<script type="text/javascript" src="../Common/webgl-utils.js"></script>
<script type="text/javascript" src="../Common/initShaders.js"></script>
<script type="text/javascript" src="../Common/MV.js"></script>
MV.js is a supporting JavaScript file with basic math, or rather algebraic constructs like vectors and matrices. vec2 is a function that returns an array of length 2, so var middle = [0.0, 0.0]; is exactly the same thing. This is not part of native JavaScript, so you need some library for it (you don't strictly need one, but it is very useful). I use glMatrix.
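In other words, roughly this (a guess at the helper's behavior, not MV.js's actual source):
function vec2(x, y) {
  // default missing components to 0.0, like the GLSL constructor
  return [x !== undefined ? x : 0.0, y !== undefined ? y : 0.0];
}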
On the other hand, in shaders, vectors and matrices are native types. Find out more on your own in chapter 4.1, Basic Types.
I have started working my way through a book called WebGL: Up and Running, which uses my preferred solution of Three.js for rendering 3D objects in the browser. I'm trying to emulate the part of the book where the author introduces shaders to make a dynamic, lit sun, but all I get when I try is a black sphere. Making the material something simpler (like a Lambert material) works fine, so it doesn't seem to be the lighting; it's just the way the shader is implemented.
So what am I doing wrong? My JavaScript code to set up the scene is:
var renderer, scene, camera, sunMesh, clock, uniforms;

$(function () {
  // set scene size
  var WIDTH = 1200,
      HEIGHT = 800;

  // set some camera attributes
  var VIEW_ANGLE = 45,
      ASPECT = WIDTH / HEIGHT,
      NEAR = 0.1,
      FAR = 10000;

  // get the DOM element to attach to
  // - assume we've got jQuery to hand
  var $container = $('#container');

  // create a WebGL renderer, camera
  // and a scene
  renderer = new THREE.WebGLRenderer();
  camera = new THREE.PerspectiveCamera( VIEW_ANGLE,
                                        ASPECT,
                                        NEAR,
                                        FAR );
  scene = new THREE.Scene();

  // the camera starts at 0,0,0 so pull it back
  camera.position.z = 300;

  // start the renderer
  renderer.setSize(WIDTH, HEIGHT);

  // attach the render-supplied DOM element
  $container.append(renderer.domElement);

  // Create a group to hold our sun mesh and light
  var sunGroup = new THREE.Object3D();

  var SUNMAP = "./images/lavatile.jpg";
  var NOISEMAP = "./images/cloud.png";
  uniforms = {
    time: { type: "f", value: 1.0 },
    texture1: { type: "t", value: 0, texture: THREE.ImageUtils.loadTexture( NOISEMAP ) },
    texture2: { type: "t", value: 1, texture: THREE.ImageUtils.loadTexture( SUNMAP ) }
  };
  uniforms.texture1.texture.wrapS = uniforms.texture1.texture.wrapT = THREE.Repeat;
  uniforms.texture2.texture.wrapS = uniforms.texture2.texture.wrapT = THREE.Repeat;

  var material = new THREE.ShaderMaterial( {
    uniforms: uniforms,
    vertexShader: document.getElementById( 'vertexShader' ).textContent,
    fragmentShader: document.getElementById( 'fragmentShader' ).textContent
  } );

  // USING THIS MATERIAL WORKS, SO ITS DEFINITELY THE SHADER!
  // var material = new THREE.MeshLambertMaterial(
  // {
  //   color: 0x5B92E5
  // });

  // Create our sun mesh
  var geometry = new THREE.SphereGeometry(50, 64, 64);
  sunMesh = new THREE.Mesh( geometry, material );

  // Tuck away the uniforms so that we can animate them over time
  // Set up a clock to drive the animation
  clock = new THREE.Clock();

  // Create a point light to show off our solar system
  var light = new THREE.PointLight( 0xffffff );
  light.position.set(0, 0, 100);

  sunGroup.add(sunMesh);
  sunGroup.add(light);
  scene.add(sunGroup);

  // and the camera
  scene.add(camera);

  renderer.render(scene, camera);

  // setup an interval to loop the game loop
  setInterval(gameloop, 50);
});

function gameloop() {
  var delta = clock.getDelta();
  uniforms.time.value += delta;
  renderer.render(scene, camera);
}
I'll save the full HTML (listed at http://pastebin.com/PGLXkzkA), but it contains my shaders, which I've lifted from the source for the WebGL book. It may be that these are wrong, but I suspect it is the way I'm using the shaders. From what I understand, I should see a sun with the textures changing based on a noise occlusion map, with some movement in the vertex positions to make the sun pulse. As I say, I can see a sphere, but it is unlit and black.
Where have I gone wrong?
What a shame. This book appears to be out-of-date already.
Three.js is in alpha and is changing rapidly. Learn from the three.js examples instead; they are always current.
Check out the Migration Wiki for help in upgrading to the current version.
Some obvious errors in your code:
texture1: { type: "t", value: 0, texture: THREE.ImageUtils.loadTexture( NOISEMAP ) },
should be
texture1: { type: "t", value: THREE.ImageUtils.loadTexture( NOISEMAP ) },
THREE.Repeat should be THREE.RepeatWrapping, and uniforms.texture1.texture should be uniforms.texture1.value.
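Putting those fixes together, the uniforms setup would look something like this (a sketch against the r.54-era API described in this answer):
uniforms = {
  time:     { type: "f", value: 1.0 },
  texture1: { type: "t", value: THREE.ImageUtils.loadTexture( NOISEMAP ) },
  texture2: { type: "t", value: THREE.ImageUtils.loadTexture( SUNMAP ) }
};
// the loaded textures now live in .value, not .texture
uniforms.texture1.value.wrapS = uniforms.texture1.value.wrapT = THREE.RepeatWrapping;
uniforms.texture2.value.wrapS = uniforms.texture2.value.wrapT = THREE.RepeatWrapping;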
Sorry, you may have other issues, but I can only help you with the current version of three.js.
three.js r.54