Three.js: Passing an array of textures to ShaderMaterial

Short question: How can I pass a list of textures to shaders and access the nth texture within a fragment shader (where n is a value passed as a varying from the vertex shader)?
Longer question: I'm working on a Three.js scene that represents multiple images. Each image uses one of several textures, and each texture is an atlas containing several thumbnails. I'm implementing a custom ShaderMaterial to optimize performance, but I'm confused about how to use multiple textures in the shaders.
My goal is to pass a list of textures and a number that represents the number of vertices per texture so that I can identify the texture that should be used for each image's vertices/pixels. I thought I could accomplish this by passing the following data:
// Create a texture loader so we can load our image file
var loader = new THREE.TextureLoader();
// specify the urls of the textures
var catUrl = 'https://s3.amazonaws.com/duhaime/blog/tsne-webgl/assets/cat.jpg';
var dogUrl = 'https://s3.amazonaws.com/duhaime/blog/tsne-webgl/assets/dog.jpg';
var material = new THREE.ShaderMaterial({
  uniforms: {
    verticesPerTexture: {
      type: 'f',
      value: 4.0, // count of vertices per texture
    },
    textures: {
      type: 'tv', // type for texture array
      value: [loader.load(catUrl), loader.load(dogUrl)],
    },
  },
  vertexShader: document.getElementById('vertex-shader').textContent,
  fragmentShader: document.getElementById('fragment-shader').textContent
});
However, if I do this, the vertex shader can't seem to use the uniforms to tell the fragment shader which texture it should use, as vertex shaders evidently can't pass sampler2D objects as varyings to the fragment shader. How can I pass a list of textures to the shaders?
Full code (which doesn't successfully pass a list of textures):
/**
* Generate a scene object with a background color
**/
function getScene() {
  var scene = new THREE.Scene();
  scene.background = new THREE.Color(0xffffff);
  return scene;
}
/**
* Generate the camera to be used in the scene. Camera args:
* [0] field of view: identifies the portion of the scene
* visible at any time (in degrees)
* [1] aspect ratio: identifies the aspect ratio of the
* scene in width/height
* [2] near clipping plane: objects closer than the near
* clipping plane are culled from the scene
* [3] far clipping plane: objects farther than the far
* clipping plane are culled from the scene
**/
function getCamera() {
  var aspectRatio = window.innerWidth / window.innerHeight;
  var camera = new THREE.PerspectiveCamera(75, aspectRatio, 0.1, 1000);
  camera.position.set(0, 1, 10);
  return camera;
}
/**
* Generate the renderer to be used in the scene
**/
function getRenderer() {
  // Create the canvas with a renderer
  var renderer = new THREE.WebGLRenderer({antialias: true});
  // Add support for retina displays
  renderer.setPixelRatio(window.devicePixelRatio);
  // Specify the size of the canvas
  renderer.setSize(window.innerWidth, window.innerHeight);
  // Add the canvas to the DOM
  document.body.appendChild(renderer.domElement);
  return renderer;
}
/**
* Generate the controls to be used in the scene
* @param {obj} camera: the three.js camera for the scene
* @param {obj} renderer: the three.js renderer for the scene
**/
function getControls(camera, renderer) {
  var controls = new THREE.TrackballControls(camera, renderer.domElement);
  controls.zoomSpeed = 0.4;
  controls.panSpeed = 0.4;
  return controls;
}
/**
* Load image
**/
function loadImage() {
  var geometry = new THREE.BufferGeometry();
  /*
  Now we need to push some vertices into that geometry to identify the coordinates the geometry should cover
  */
  // Identify the image size
  var imageSize = {width: 10, height: 7.5};
  // Identify the x, y, z coords where the image should be placed
  var coords = {x: -5, y: -3.75, z: 0};
  // Add one vertex for each corner of the image, using the
  // following order: lower left, lower right, upper right, upper left
  var vertices = new Float32Array([
    coords.x, coords.y, coords.z, // bottom left
    coords.x+imageSize.width, coords.y, coords.z, // bottom right
    coords.x+imageSize.width, coords.y+imageSize.height, coords.z, // upper right
    coords.x, coords.y+imageSize.height, coords.z, // upper left
  ])
  // set the uvs for this box; these identify the following corners:
  // lower-left, lower-right, upper-right, upper-left
  var uvs = new Float32Array([
    0.0, 0.0,
    1.0, 0.0,
    1.0, 1.0,
    0.0, 1.0,
  ])
  // store the texture index of each object to be rendered
  var textureIndices = new Float32Array([0.0, 0.0, 0.0, 0.0]);
  // indices = sequence of index positions in `vertices` to use as vertices;
  // we make two triangles but only use 4 distinct vertices in the object.
  // The second argument to THREE.BufferAttribute is the number of elements
  // in the first argument per vertex
  geometry.setIndex([0,1,2, 2,3,0])
  geometry.addAttribute('position', new THREE.BufferAttribute(vertices, 3));
  geometry.addAttribute('uv', new THREE.BufferAttribute(uvs, 2));
  // Create a texture loader so we can load our image file
  var loader = new THREE.TextureLoader();
  // specify the urls of the textures
  var catUrl = 'https://s3.amazonaws.com/duhaime/blog/tsne-webgl/assets/cat.jpg';
  var dogUrl = 'https://s3.amazonaws.com/duhaime/blog/tsne-webgl/assets/dog.jpg';
  // specify custom uniforms and attributes for shaders
  // Uniform types: https://github.com/mrdoob/three.js/wiki/Uniforms-types
  var material = new THREE.ShaderMaterial({
    uniforms: {
      verticesPerTexture: {
        type: 'f',
        value: 4.0, // store the count of vertices per texture
      },
      cat_texture: {
        type: 't',
        value: loader.load(catUrl),
      },
      dog_texture: {
        type: 't',
        value: loader.load(dogUrl),
      },
      textures: {
        type: 'tv', // type for texture array
        value: [loader.load(catUrl), loader.load(dogUrl)],
      }
    },
    vertexShader: document.getElementById('vertex-shader').textContent,
    fragmentShader: document.getElementById('fragment-shader').textContent
  });
  // Combine our image geometry and material into a mesh
  var mesh = new THREE.Mesh(geometry, material);
  // Set the position of the image mesh in the x,y,z dimensions
  mesh.position.set(0,0,0)
  // Add the image to the scene
  scene.add(mesh);
}
/**
* Render!
**/
function render() {
  requestAnimationFrame(render);
  renderer.render(scene, camera);
  controls.update();
};
var scene = getScene();
var camera = getCamera();
var renderer = getRenderer();
var controls = getControls(camera, renderer);
loadImage();
render();
html, body { width: 100%; height: 100%; background: #000; }
body { margin: 0; overflow: hidden; }
canvas { width: 100%; height: 100%; }
<script src='https://cdnjs.cloudflare.com/ajax/libs/three.js/92/three.min.js'></script>
<script src='https://threejs.org/examples/js/controls/TrackballControls.js'></script>
<script type='x-shader/x-vertex' id='vertex-shader'>
/**
* The vertex shader's main() function must define `gl_Position`,
* which describes the position of each vertex in the space.
*
* To do so, we can use the following variables defined by Three.js:
*
* uniform mat4 modelViewMatrix - combines:
* model matrix: maps a point's local coordinate space into world space
* view matrix: maps world space into camera space
*
* uniform mat4 projectionMatrix - maps camera space into screen space
*
* attribute vec3 position - sets the position of each vertex
*
* attribute vec2 uv - determines the relationship between vertices and textures
*
* `uniforms` are constant across all vertices
*
* `attributes` can vary from vertex to vertex and are defined as arrays
* with length equal to the number of vertices. Each index in the array
* is an attribute for the corresponding vertex
*
* `varyings` are values passed from the vertex to the fragment shader
*
* Specifying attributes that are not passed to the vertex shader will not prevent shader compiling
**/
// declare uniform vals
uniform float verticesPerTexture; // store the vertices per texture
// declare variables to pass to fragment shaders
varying vec2 vUv; // pass the uv coordinates of each vertex to the frag shader
varying float textureIndex; // pass the texture idx
// initialize counters
float vertexIdx = 0.0; // stores the index position of the current vertex
float textureIdx = 1.0; // store the index position of the current texture
void main() {
  // keep track of which texture each vertex belongs to
  vertexIdx = vertexIdx + 1.0;
  if (vertexIdx == verticesPerTexture) {
    textureIdx = textureIdx + 1.0;
    vertexIdx = 0.0;
  }
  vUv = uv;
  gl_Position = projectionMatrix * modelViewMatrix * vec4(position, 1.0);
}
</script>
<script type='x-shader/x-fragment' id='fragment-shader'>
/**
* The fragment shader's main() function must define `gl_FragColor`,
* which describes the pixel color of each pixel on the screen.
*
* To do so, we can use uniforms passed into the shader and varyings
* passed from the vertex shader
*
* Attempting to read a varying not generated by the vertex shader will
* throw a warning but won't prevent shader compiling
*
* Each attribute must contain n_vertices * n_components, where n_components
* is the length of the given datatype (e.g. vec2 n_components = 2;
* float n_components = 1)
**/
precision highp float; // set float precision (optional)
varying vec2 vUv; // identify the uv values as a varying attribute
varying float textureIndex; // identify the texture indices as a varying attribute
uniform sampler2D cat_texture; // identify the texture as a uniform argument
uniform sampler2D dog_texture; // identify the texture as a uniform argument
//uniform sampler2D textures;
// TODO pluck out textures[textureIndex];
//uniform sampler2D textures[int(textureIndex)];
void main() {
  int textureIdx = int(textureIndex);
  // floating-point arithmetic prevents strict equality checking
  if ( (textureIndex - 1.0) < 0.1 ) {
    gl_FragColor = texture2D(cat_texture, vUv);
  } else {
    gl_FragColor = texture2D(dog_texture, vUv);
  }
}
</script>

Having slept on it, here's another method you can try, more akin to how you'd do this with built-in materials:
function createMaterial ( texture ) {
  return new ShaderMaterial({
    uniforms: {
      texture: { value: texture }
    }
  })
}
var mat1 = createMaterial( dogTexture );
var mat2 = createMaterial( catTexture );
geometry.faces[ 0 ].materialIndex = 0;
geometry.faces[ 1 ].materialIndex = 0;
geometry.faces[ 2 ].materialIndex = 1;
geometry.faces[ 3 ].materialIndex = 1;
var mesh = new Mesh( geometry, [ mat1, mat2 ] );
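Note that the faces array only exists on the legacy Geometry class; if you're using BufferGeometry (as the question does), a hedged equivalent uses geometry groups, assuming an indexed geometry containing both quads:
// Each group draws `count` indices, starting at `start`,
// with the material at `materialIndex` in the material array
geometry.addGroup(0, 6, 0); // first quad (two triangles) -> mat1
geometry.addGroup(6, 6, 1); // second quad -> mat2
var mesh = new Mesh( geometry, [ mat1, mat2 ] );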

You’ve written the vertex shader as if main is a for loop and it will iterate through all the vertices and update vertexIdx and textureIdx as it goes along, but that’s not how shaders work. Shaders run in parallel, processing every vertex at the same time. So you can’t share what the shader computes about one vertex with another vertex.
Use an attribute on the geometry instead:
geometry.addAttribute( 'texIndex', new THREE.BufferAttribute( new Float32Array([ 0, 0, 0, 0, 1, 1, 1, 1 ]), 1 ) )
I’m getting a little out of my depth here but I think you then pass it through the vertex shader to a varying:
attribute float texIndex; // GLSL ES 1.0 doesn't allow int attributes or varyings
varying float vTexIndex;
void main () { vTexIndex = texIndex; }
Finally, in the fragment shader:
varying float vTexIndex;
uniform sampler2D textures[ 2 ];
...
// WebGL1 only allows constant indices into sampler arrays,
// so pick the texture with a branch rather than textures[vTexIndex]:
vec4 texColor = vTexIndex < 0.5 ? texture2D(textures[0], vUv) : texture2D(textures[1], vUv);
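Putting those pieces together, here's a hedged sketch of the complete shader pair. GLSL ES 1.0 (what WebGL1 uses) allows neither int attributes/varyings nor dynamically indexed sampler arrays, so the index travels as a float and the fragment shader branches:
// vertex shader
attribute float texIndex; // 0.0 for the first texture, 1.0 for the second
varying float vTexIndex;
varying vec2 vUv;
void main() {
  vTexIndex = texIndex;
  vUv = uv;
  gl_Position = projectionMatrix * modelViewMatrix * vec4(position, 1.0);
}
// fragment shader
precision highp float;
varying float vTexIndex;
varying vec2 vUv;
uniform sampler2D textures[2];
void main() {
  // sampler arrays accept only constant indices in WebGL1,
  // so branch on the float index instead
  if (vTexIndex < 0.5) {
    gl_FragColor = texture2D(textures[0], vUv);
  } else {
    gl_FragColor = texture2D(textures[1], vUv);
  }
}
On the JavaScript side, the matching uniform would be textures: { value: [catTexture, dogTexture] }.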

Related

Point cloud shader with colors assigned from a model's texture map

I'm working on a project using three.js to make a 3D environment to navigate in space. I integrated my environment as a GLTF object and instanced a sphere on its surface. Now I would like to use a texture to assign colors to each instance. I think using a shader would be the most performant method.
Does someone have a clue how to do this?
Here is an image of the concept I would like to recreate (image omitted).
Here is the basic point where I'm stuck:
// create the model that the points will be placed on
const model = new THREE.Mesh(
  new THREE.BoxGeometry(1, 1, 1),
  new THREE.MeshBasicMaterial({
    color: 0x00ff00
  })
);
scene.add(model);
// create the instanced geometry to hold the points
const geometry = new THREE.InstancedBufferGeometry();
// set the attributes for the points (such as position, color, etc.)
const positions = [];
const colors = [];
const modelViewMatrices = [];
const projectionMatrices = [];
for (let i = 0; i < 100; i++) {
  positions.push(Math.random() * 2 - 1, Math.random() * 2 - 1, Math.random() * 2 - 1);
  colors.push(Math.random(), Math.random(), Math.random());
  modelViewMatrices.push(camera.matrixWorldInverse.elements);
  projectionMatrices.push(camera.projectionMatrix.elements);
}
const positionAttribute = new THREE.Float32BufferAttribute(positions, 3);
geometry.setAttribute('position', positionAttribute);
const colorAttribute = new THREE.Float32BufferAttribute(colors, 3);
geometry.setAttribute('color', colorAttribute);
const modelViewMatrixAttribute = new THREE.InstancedBufferAttribute(new Float32Array(modelViewMatrices.flat()), 4);
geometry.setAttribute('modelViewMatrix', modelViewMatrixAttribute);
const projectionMatrixAttribute = new THREE.InstancedBufferAttribute(new Float32Array(projectionMatrices.flat()), 4);
geometry.setAttribute('projectionMatrix', projectionMatrixAttribute);
// create the material for the points,using the modelViewMatrix and projectionMatrix attributes in the shader
const material = new THREE.ShaderMaterial({
  uniforms: {},
  attributes: {
    modelViewMatrix: { value: null },
    projectionMatrix: { value: null }
  },
  vertexShader: `
    attribute vec3 pointPosition;
    attribute vec3 color;
    attribute mat4 modelViewMatrix;
    attribute mat4 projectionMatrix;
    varying vec3 vColor;
    void main() {
      vColor = color;
      gl_Position = projectionMatrix * modelViewMatrix * vec4(pointPosition, 1.0);
    }
  `,
  fragmentShader: `
    varying vec3 vColor;
    void main() {
      gl_FragColor = vec4(vColor, 1.0);
    }
  `
});
// create the mesh to hold the points and add it to the scene
const mesh = new THREE.Mesh(geometry, material);
scene.add(mesh);
// set the number of instances (points) to render
geometry.setDrawRange(0, positions.length / 3); // setDrawRange takes (start, count)
// animate the scene
const animate = function () {
  requestAnimationFrame(animate);
  // update the modelViewMatrix and projectionMatrix uniforms
  material.uniforms.modelViewMatrix.value = camera.matrixWorldInverse;
  material.uniforms.projectionMatrix.value = camera.projectionMatrix;
  renderer.render(scene, camera);
};
animate();
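A hedged sketch of the instanced setup with the stuck points fixed (the attribute names offset and aColor are made up for illustration). Three.js already supplies modelViewMatrix and projectionMatrix as built-in uniforms to every ShaderMaterial, so they shouldn't be passed as attributes, and the instance count is set via geometry.instanceCount (maxInstancedCount in older releases) rather than setDrawRange. This doesn't yet sample colors from the model's texture, but it gets per-instance colors on screen:
const count = 100;
const geometry = new THREE.InstancedBufferGeometry();
// one point per instance; `position` is a built-in attribute
geometry.setAttribute('position', new THREE.Float32BufferAttribute([0, 0, 0], 3));
const offsets = new Float32Array(count * 3);
const instColors = new Float32Array(count * 3);
for (let i = 0; i < count; i++) {
  offsets.set([Math.random() * 2 - 1, Math.random() * 2 - 1, Math.random() * 2 - 1], i * 3);
  instColors.set([Math.random(), Math.random(), Math.random()], i * 3);
}
geometry.setAttribute('offset', new THREE.InstancedBufferAttribute(offsets, 3));
geometry.setAttribute('aColor', new THREE.InstancedBufferAttribute(instColors, 3));
geometry.instanceCount = count;
const pointsMaterial = new THREE.ShaderMaterial({
  vertexShader: `
    attribute vec3 offset;
    attribute vec3 aColor;
    varying vec3 vColor;
    void main() {
      vColor = aColor;
      gl_PointSize = 4.0;
      // modelViewMatrix and projectionMatrix are provided by three.js
      gl_Position = projectionMatrix * modelViewMatrix * vec4(position + offset, 1.0);
    }
  `,
  fragmentShader: `
    varying vec3 vColor;
    void main() {
      gl_FragColor = vec4(vColor, 1.0);
    }
  `
});
scene.add(new THREE.Points(geometry, pointsMaterial));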

How can I delete a specific object in WebGL (without a library)?

I'm studying WebGL.
Right now I scatter food at random locations on the canvas, and when the mouse pointer and a piece of food collide, I want to delete that food.
(The collision detection for the mouse cursor is a separate issue and isn't needed right now.)
However, no matter how much I searched, I only found explanations of how to draw things, never how to erase a specific object. Is there a way to delete only certain objects from the canvas without a library?
The full text of my code is as follows.
var gl;
var points;
window.onload = function init()
{
  var canvas = document.getElementById( "gl-canvas" );
  gl = WebGLUtils.setupWebGL( canvas );
  if ( !gl ) { alert( "WebGL isn't available" ); }
  // Four Vertices
  var vertices = [
    vec2( -0.5, -0.5 ),
    vec2( -0.5, 0.5 ),
    vec2( 0.5, 0.5 ),
    vec2( 0.5, -0.5 )
  ];
  //
  // Configure WebGL
  //
  gl.viewport( 0, 0, canvas.width, canvas.height );
  gl.clearColor( 0.0, 0.0, 0.0, 1.0 );
  // Load shaders and initialize attribute buffers
  var program = initShaders( gl, "vertex-shader", "fragment-shader" );
  gl.useProgram( program );
  // Load the data into the GPU
  var bufferId = gl.createBuffer();
  gl.bindBuffer( gl.ARRAY_BUFFER, bufferId );
  //gl.bufferData( gl.ARRAY_BUFFER, flatten(vertices), gl.STATIC_DRAW );
  // Associate our shader variables with our data buffer
  var foodX, foodY;
  var foodSize = 20;
  var foodNumber = 50;
  var vPosition = gl.getAttribLocation( program, "vPosition" );
  // Tell the attribute how to get data out of positionBuffer (ARRAY_BUFFER)
  var size = 2;          // 2 components per iteration
  var type = gl.FLOAT;   // the data is 32bit floats
  var normalize = false; // don't normalize the data
  var stride = 0;        // 0 = move forward size * sizeof(type) each iteration to get the next position
  var offset = 0;        // start at the beginning of the buffer
  gl.vertexAttribPointer( vPosition, size, type, normalize, stride, offset );
  gl.enableVertexAttribArray( vPosition );
  // we added a uniform called vResolution.
  var vResolution = gl.getUniformLocation(program, "vResolution");
  var fColor = gl.getUniformLocation(program, "fColor");
  // set the resolution
  gl.uniform2f(vResolution, gl.canvas.width, gl.canvas.height);
  // draw 50 random rectangles in random colors
  while (foodNumber > 0) {
    // Setup a random rectangle
    // This will write to positionBuffer because
    // it's the last thing we bound on the ARRAY_BUFFER
    // bind point
    // Keep the food coordinates between the canvas width and height,
    // so the food doesn't escape the canvas.
    foodX = randomInt(canvas.width - foodSize);
    foodY = randomInt(canvas.height - foodSize);
    setRectangle(gl, foodX, foodY, foodSize, foodSize);
    foodNumber = foodNumber - 1;
    // Set a random color.
    gl.uniform4f(fColor, Math.random(), Math.random(), Math.random(), 1);
    // Draw the rectangle.
    var primitiveType = gl.TRIANGLES;
    var offset = 0;
    var count = 6;
    gl.drawArrays(primitiveType, offset, count);
  }
};
// Returns a random integer from 0 to range - 1.
function randomInt(range) {
  return Math.floor(Math.random() * range);
}
// Fills the buffer with the values that define a rectangle.
function setRectangle(gl, x, y, width, height) {
  var x1 = x;
  var x2 = x + width;
  var y1 = y;
  var y2 = y + height;
  // NOTE: gl.bufferData(gl.ARRAY_BUFFER, ...) will affect
  // whatever buffer is bound to the `ARRAY_BUFFER` bind point
  // but so far we only have one buffer. If we had more than one
  // buffer we'd want to bind that buffer to `ARRAY_BUFFER` first.
  gl.bufferData(gl.ARRAY_BUFFER, new Float32Array([
    x1, y1,
    x2, y1,
    x1, y2,
    x1, y2,
    x2, y1,
    x2, y2]), gl.STATIC_DRAW);
}
function pop(bufferName){
  gl.deleteBuffer(bufferName)
}
<!DOCTYPE html>
<html>
<head>
<script id="vertex-shader" type="x-shader/x-vertex">
//attribute vec4 vPosition;
attribute vec2 vPosition;
uniform vec2 vResolution;
void
main()
{
// convert the position from pixels to 0.0 to 1.0
vec2 zeroToOne = vPosition / vResolution;
// convert from 0->1 to 0->2
vec2 zeroToTwo = zeroToOne * 2.0;
// convert from 0->2 to -1->+1 (clip space)
vec2 clipSpace = zeroToTwo - 1.0;
//gl_Position = vec4(clipSpace, 0.0, 1.0);
// To get it to be the more traditional top left corner used for 2d graphics APIs we can just flip the clip space y coordinate.
gl_Position = vec4(clipSpace * vec2(1, -1), 0, 1);
}
</script>
<script id="fragment-shader" type="x-shader/x-fragment">
precision mediump float;
uniform vec4 fColor;
void
main()
{
gl_FragColor = fColor;
}
</script>
<script type="text/javascript" src="../Common/webgl-utils.js"></script>
<script type="text/javascript" src="../Common/initShaders.js"></script>
<script type="text/javascript" src="../Common/MV.js"></script>
<script type="text/javascript" src="snakeGame.js"></script>
</head>
<body>
<canvas id="gl-canvas" width="1024" height="800">
Oops ... your browser doesn't support the HTML5 canvas element
</canvas>
<script>
var canvas =
</script>
</body>
</html>
There is no such thing as "deleting an object" at the WebGL level. WebGL is just an API that draws pixels into a canvas. "Objects" are a higher-level concept that your code deals with.
You generally keep your own list of things to draw (whether that is the same as your list of objects or not is up to you).
Every "frame" you clear the canvas and then draw all the things you want to draw:
render function:
  clear the canvas
  for each thing to draw
    draw thing
So "deleting" an object is a matter of not drawing it.
As an example
const thingsToDraw = [
  { color: [1, 0, 0, 1], center: [0.2, 0.3], },
  { color: [0, 1, 0, 1], center: [0.0, 0.1], },
  { color: [0, 0, 1, 1], center: [-0.5, -0.4], },
  { color: [1, 0.5, 0, 1], center: [-0.2, 0.3], },
  { color: [0, 1, 1, 1], center: [0.7, -0.1], },
  { color: [1, 0, 1, 1], center: [-0.5, 0.4], },
];
const gl = document.querySelector('canvas').getContext('webgl');
const prg = twgl.createProgram(gl, [`
  uniform vec4 position;
  void main() {
    gl_PointSize = 20.0;
    gl_Position = position;
  }`,`
  precision mediump float;
  uniform vec4 color;
  void main() {
    gl_FragColor = color;
  }
`]);
const positionLoc = gl.getUniformLocation(prg, "position");
const colorLoc = gl.getUniformLocation(prg, "color");
function drawThing(color, position) {
  gl.useProgram(prg);
  gl.uniform4f(positionLoc, ...position, 0, 1);
  gl.uniform4fv(colorLoc, color);
  gl.drawArrays(gl.POINTS, 0, 1); // draw 1 point
}
function render(time) {
  time *= 0.001;
  gl.clear(gl.COLOR_BUFFER_BIT);
  thingsToDraw.forEach((thing, i) => {
    const t = time * 10 + i;
    const position = [
      thing.center[0] + Math.cos(t) * 0.1,
      thing.center[1] + Math.sin(t) * 0.1,
    ];
    drawThing(thing.color, position);
  });
  requestAnimationFrame(render);
}
requestAnimationFrame(render);
document.querySelector('button').addEventListener('click', () => {
  thingsToDraw.splice(0, 1);
});
canvas { border: 1px solid black; }
<canvas></canvas>
<button type="button">remove first thing</button>
<script src="https://twgljs.org/dist/4.x/twgl.min.js"></script>
How you decide to track and organize your "objects" or your "things to draw" is entirely up to you. Many 3D systems use a scene graph and then draw the entire graph every frame, so two ways of not drawing something are to either remove it from the graph or add a flag to each node saying whether or not to draw it.
In other systems the scene graph is separate from the list of things to draw.
For small programs people might just use an array (like the example above).
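For instance, a hedged sketch of the flag approach, assuming each entry in thingsToDraw is given a visible property:
// mark a thing as "deleted" instead of splicing it out of the array
thingsToDraw[0].visible = false;
// then skip invisible things in the render loop
thingsToDraw.forEach((thing) => {
  if (thing.visible === false) return; // not drawn = effectively deleted
  drawThing(thing.color, thing.center);
});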

ThreeJS- Adding bloom pass affects canvas transparency

For the renderer I have this setup:
renderer = new THREE.WebGLRenderer( { antialias: true, preserveDrawingBuffer:true, alpha:true } );
For the bloom pass (post-processing):
var renderPass = new RenderPass( scene, camera );
var bloomPass = new UnrealBloomPass( new THREE.Vector2( window.innerWidth, window.innerHeight ), 1.5, 0.4, 0.85 );
bloomPass.exposure =0.2;
bloomPass.threshold =0;
bloomPass.strength = 0.2;
bloomPass.radius = 0.1;
composer.addPass( renderPass );
composer.addPass( bloomPass );
and while rendering I'm using
composer.render()
but this affects the transparency of the canvas by darkening it (the scene).
I had the same issue. Your code creating the UnrealBloomPass is right; the problem is in the shader the UnrealBloomPass builds in its getSeperableBlurMaterial method.
Replace that fragmentShader with the code below and the pass's background will respect the alpha channel:
fragmentShader:
  "#include <common>\
  varying vec2 vUv;\n\
  uniform sampler2D colorTexture;\n\
  uniform vec2 texSize;\
  uniform vec2 direction;\
  \
  float gaussianPdf(in float x, in float sigma) {\
    return 0.39894 * exp( -0.5 * x * x/( sigma * sigma))/sigma;\
  }\
  void main() {\n\
    vec2 invSize = 1.0 / texSize;\
    float fSigma = float(SIGMA);\
    float weightSum = gaussianPdf(0.0, fSigma);\
    float alphaSum = 0.0;\
    vec3 diffuseSum = texture2D( colorTexture, vUv).rgb * weightSum;\
    for( int i = 1; i < KERNEL_RADIUS; i ++ ) {\
      float x = float(i);\
      float w = gaussianPdf(x, fSigma);\
      vec2 uvOffset = direction * invSize * x;\
      vec4 sample1 = texture2D( colorTexture, vUv + uvOffset);\
      vec4 sample2 = texture2D( colorTexture, vUv - uvOffset);\
      diffuseSum += (sample1.rgb + sample2.rgb) * w;\
      alphaSum += (sample1.a + sample2.a) * w;\
      weightSum += 2.0 * w;\
    }\
    gl_FragColor = vec4(diffuseSum/weightSum, alphaSum/weightSum);\n\
  }"
A bloom pass mixes the image with a blurred version of itself, which causes colors to change. You should consider setting the WebGLRenderer tone mapping property to get a good color dynamic range.
Tone mapping definition (Wikipedia):
Tone mapping is a technique used in image processing and computer graphics to map one set of colors to another to approximate the appearance of high-dynamic-range images in a medium that has a more limited dynamic range.
Add this line to your init routine:
renderer.toneMapping = THREE.ReinhardToneMapping
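If the result then looks too dark or too bright, the renderer's matching exposure control can compensate (the value here is just an assumed starting point to tune):
renderer.toneMapping = THREE.ReinhardToneMapping;
renderer.toneMappingExposure = 1.0; // raise to brighten, lower to darken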

Is passing just the lengths of the sides of a triangle sufficient to draw a triangle in WebGL?

The question may not be clear, so let me clarify it here. Consider an array which has the coordinates of a triangle in clip space:
var coor = [
  -0.4,  0.6, 0.0,
  -0.5, -0.5, 0.0,
   0.5, -0.5, 0.0,
];
So, the coor array lets us draw a triangle using WebGL. But instead of that, I want to do something like this:
var coor2 = [ 100, 100, 100 ]
In coor2 I gave the lengths of the sides of the triangle to draw. It would be an equilateral triangle. So, can I do something where I enter the three side lengths of a triangle, and a script converts them to coordinates in clip space that WebGL can read?
Thanks In Advance
WebGL only cares that you set gl_Position to clip space values. It doesn't care how you do it. It's up to you to decide how to do that. Inside the vertex shader, at least in WebGL1, things are stateless. Meaning when you draw a triangle your vertex shader will be called 3 times. It needs to set gl_Position to 3 different values in clip space with no state between calls. So given 100 three times, how would you compute a different value each time your vertex shader is called?
Just imagine it in JavaScript
const gl_Position1 = vertexShader(100);
const gl_Position2 = vertexShader(100);
const gl_Position3 = vertexShader(100);
How is the function vertexShader supposed to produce 3 different values with no other input? Unless there is some other state it can't produce a different value. vertex shaders in WebGL1 don't have any other state.
You need to pass in some data that changes every iteration.
Of course, if the values changed, then you could produce a triangle. Example:
function vertexShader(v) {
  const angle = v / 3 * Math.PI * 2;
  return [Math.cos(angle), Math.sin(angle), 0, 1];
}
const gl_Position1 = vertexShader(0);
const gl_Position2 = vertexShader(1);
const gl_Position3 = vertexShader(2);
That would produce a triangle. Converting it to GLSL and WebGL:
const gl = document.querySelector('canvas').getContext('webgl');
const vs = `
attribute float v;
#define PI radians(180.0)
void main() {
  float angle = v / 3.0 * PI * 2.0;
  gl_Position = vec4(cos(angle), sin(angle), 0, 1);
}
`;
const fs = `
precision mediump float;
void main() {
  gl_FragColor = vec4(1, 0, 0, 1);
}
`;
const program = twgl.createProgram(gl, [vs, fs]);
const vLocation = gl.getAttribLocation(program, 'v');
const buf = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, buf);
gl.bufferData(gl.ARRAY_BUFFER, new Float32Array([0, 1, 2]), gl.STATIC_DRAW);
gl.enableVertexAttribArray(vLocation);
gl.vertexAttribPointer(vLocation, 1, gl.FLOAT, false, 0, 0);
gl.useProgram(program);
gl.drawArrays(gl.TRIANGLES, 0, 3);
canvas { border: 1px solid black; }
<script src="https://twgljs.org/dist/4.x/twgl.min.js"></script>
<canvas></canvas>
Of course, given we only passed in [0, 1, 2], we can't easily specify a position, but the point is that now that we have a value that changes per vertex, we can produce a triangle. Which is just to reiterate: WebGL only cares that you set gl_Position to clip space values. It doesn't care how you do it.
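That said, the side-length-to-coordinate conversion itself is plain JavaScript you'd run before uploading data. A hedged sketch using the law of cosines (sidesToCoords and the scale factor are made up for illustration):
// Given side lengths a, b, c, place A = (0, 0) and B = (c, 0) on the x axis,
// then solve for C from the two circle equations |AC| = b and |BC| = a.
function sidesToCoords(a, b, c, scale) {
  var x = (b * b - a * a + c * c) / (2 * c); // x coordinate of vertex C
  var y = Math.sqrt(Math.max(0, b * b - x * x)); // y coordinate of vertex C
  // divide by `scale` so the result lands inside clip space
  return [
    0, 0,
    c / scale, 0,
    x / scale, y / scale,
  ];
}
var coor = sidesToCoords(100, 100, 100, 200); // the equilateral triangle from coor2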

The Fastest Way to Batch Calls in WebGL

I'm trying to rewrite my canvas-based rendering for my 2d game engine. I've made good progress and can render textures to the webgl context fine, complete with scaling, rotation and blending. But my performance sucks. On my test laptop, I can get 30 fps in vanilla 2d canvas with 1,000 entities on screen at once; in WebGL, I get 30 fps with 500 entities on screen. I'd expect the situation to be reversed!
I have a sneaking suspicion that the culprit is all this Float32Array buffer garbage I'm tossing around. Here's my render code:
// boilerplate code and obj coordinates
// grab gl context
var canvas = sys.canvas;
var gl = sys.webgl;
var program = sys.glProgram;
// width and height
var scale = sys.scale;
var tileWidthScaled = Math.floor(tileWidth * scale);
var tileHeightScaled = Math.floor(tileHeight * scale);
var normalizedWidth = tileWidthScaled / this.width;
var normalizedHeight = tileHeightScaled / this.height;
var worldX = targetX * scale;
var worldY = targetY * scale;
this.bindGLBuffer(gl, this.vertexBuffer, sys.glWorldLocation);
this.bufferGLRectangle(gl, worldX, worldY, tileWidthScaled, tileHeightScaled);
gl.activeTexture(gl.TEXTURE0);
gl.bindTexture(gl.TEXTURE_2D, this.texture);
var frameX = (Math.floor(tile * tileWidth) % this.width) * scale;
var frameY = (Math.floor(tile * tileWidth / this.width) * tileHeight) * scale;
// fragment (texture) shader
this.bindGLBuffer(gl, this.textureBuffer, sys.glTextureLocation);
this.bufferGLRectangle(gl, frameX, frameY, normalizedWidth, normalizedHeight);
gl.drawArrays(gl.TRIANGLES, 0, 6);
bufferGLRectangle: function (gl, x, y, width, height) {
  var left = x;
  var right = left + width;
  var top = y;
  var bottom = top + height;
  gl.bufferData(gl.ARRAY_BUFFER, new Float32Array([
    left, top,
    right, top,
    left, bottom,
    left, bottom,
    right, top,
    right, bottom
  ]), gl.STATIC_DRAW);
},
bindGLBuffer: function (gl, buffer, location) {
  gl.bindBuffer(gl.ARRAY_BUFFER, buffer);
  gl.vertexAttribPointer(location, 2, gl.FLOAT, false, 0, 0);
},
And here are my simple test shaders (these are missing blending, scaling & rotation):
// fragment (texture) shader
precision mediump float;
uniform sampler2D image;
varying vec2 texturePosition;
void main() {
  gl_FragColor = texture2D(image, texturePosition);
}
// vertex shader
attribute vec2 worldPosition;
attribute vec2 vertexPosition;
uniform vec2 canvasResolution;
varying vec2 texturePosition;
void main() {
  vec2 zeroToOne = worldPosition / canvasResolution;
  vec2 zeroToTwo = zeroToOne * 2.0;
  vec2 clipSpace = zeroToTwo - 1.0;
  gl_Position = vec4(clipSpace * vec2(1, -1), 0, 1);
  texturePosition = vertexPosition;
}
Any ideas on how to get better performance? Is there a way to batch my drawArrays? Is there a way to cut down on the buffer garbage?
Thanks!
There are two big issues I can see here that will adversely affect your performance.
You're creating a lot of temporary Float32Arrays, which are currently expensive to construct (That should get better in the future). It would be far better in this case to create a single array and set the vertices each time like so:
verts[0] = left; verts[1] = top;
verts[2] = right; verts[3] = top;
// etc...
gl.bufferData(gl.ARRAY_BUFFER, verts, gl.STATIC_DRAW);
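For completeness, a hedged version of that idea applied to the bufferGLRectangle method above, with the array allocated once outside the per-frame path (DYNAMIC_DRAW is arguably a better usage hint than STATIC_DRAW for data rewritten every frame):
// Allocate once: 6 vertices * 2 components each.
var quadVerts = new Float32Array(12);
function bufferGLRectangle(gl, x, y, width, height) {
  var left = x, right = x + width, top = y, bottom = y + height;
  quadVerts[0] = left;   quadVerts[1] = top;
  quadVerts[2] = right;  quadVerts[3] = top;
  quadVerts[4] = left;   quadVerts[5] = bottom;
  quadVerts[6] = left;   quadVerts[7] = bottom;
  quadVerts[8] = right;  quadVerts[9] = top;
  quadVerts[10] = right; quadVerts[11] = bottom;
  gl.bufferData(gl.ARRAY_BUFFER, quadVerts, gl.DYNAMIC_DRAW);
}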
The bigger issue by far, however, is that you're only drawing a single quad at a time. 3D APIs simply aren't designed to do this efficiently. What you want to do is try and squeeze as many triangles as possible into each drawArrays/drawElements call you make.
There are several ways to do that, the most straightforward being to fill up a buffer with as many quads as you can that share the same texture, then draw them all in one go. In pseudocode:
var MAX_QUADS_PER_BATCH = 100;
var VERTS_PER_QUAD = 6;
var FLOATS_PER_VERT = 2;
var verts = new Float32Array(MAX_QUADS_PER_BATCH * VERTS_PER_QUAD * FLOATS_PER_VERT);
var quadCount = 0;
function addQuad(left, top, right, bottom) {
  var offset = quadCount * VERTS_PER_QUAD * FLOATS_PER_VERT;
  verts[offset] = left; verts[offset+1] = top;
  verts[offset+2] = right; verts[offset+3] = top;
  // etc...
  quadCount++;
  if (quadCount == MAX_QUADS_PER_BATCH) {
    flushQuads();
  }
}
function flushQuads() {
  gl.bindBuffer(gl.ARRAY_BUFFER, vertsBuffer);
  gl.bufferData(gl.ARRAY_BUFFER, verts, gl.STATIC_DRAW); // Copy the buffer we've been building to the GPU.
  // Make sure vertexAttribPointers are set, etc...
  gl.drawArrays(gl.TRIANGLES, 0, quadCount * VERTS_PER_QUAD);
  quadCount = 0; // start the next batch at the beginning of the array
}
// In your render loop
for (sprite in spriteTypes) {
  gl.bindTexture(gl.TEXTURE_2D, sprite.texture);
  for (instance in sprite.instances) {
    addQuad(instance.left, instance.top, instance.right, instance.bottom);
  }
  flushQuads();
}
That's an oversimplification, and there's ways to batch even more, but hopefully that gives you an idea of how to start batching your calls for better performance.
If you use WebGL Inspector you'll see in the trace if you do any unnecessary GL instructions (they're marked with bright yellow background). This might give you an idea on how to optimize your rendering.
Generally speaking, sort your draw calls so that all calls using the same program run together, then group by attributes, then by textures, and then by uniforms. This way you'll issue as few GL instructions (and JS instructions) as possible.
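A hedged sketch of that ordering (sprites, programId, textureId, and drawSprite are assumed bookkeeping on your side, not WebGL APIs):
// Sort so draws sharing a program run together, then by texture within a program.
sprites.sort(function (a, b) {
  return (a.programId - b.programId) || (a.textureId - b.textureId);
});
var lastProgram = null;
var lastTexture = null;
sprites.forEach(function (s) {
  if (s.program !== lastProgram) {
    gl.useProgram(s.program); // only switch programs when it actually changes
    lastProgram = s.program;
  }
  if (s.texture !== lastTexture) {
    gl.bindTexture(gl.TEXTURE_2D, s.texture); // likewise for textures
    lastTexture = s.texture;
  }
  drawSprite(s); // set remaining uniforms and issue the draw call
});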
