WebGL with offscreen canvas won't render my vertices

I'm trying to do some behind-the-scenes rendering work with WebGL and then copy the canvas data to an image.
I'm able to accomplish this successfully with a physical canvas element on the page. However, once I try to replace my
const canvas = document.getElementById("glcanvas") with either
const canvas = document.createElement("canvas") or
const canvas = new OffscreenCanvas(640, 480)
my render no longer works.
Interestingly, I still see my WebGL clear color, so I know it's rendering something. If I go back to the getElementById method with a physical canvas in the DOM, everything works fine. Any ideas?
Code (I pretty much just followed the Mozilla WebGL tutorial; some code is omitted because it is not important):
function main() {
  // Initialize the GL context
  const canvas = new OffscreenCanvas(640, 480)
  const gl = canvas.getContext("webgl2")

  // Only continue if WebGL is available and working
  if (gl === null) {
    alert("Unable to initialize WebGL. Your browser or machine may not support it.")
    return
  }

  // Set clear color to black, fully opaque
  gl.clearColor(0.0, 0.0, 0.0, 1.0);
  // Clear the color buffer with specified clear color
  gl.clear(gl.COLOR_BUFFER_BIT);

  // Initialize a shader program; this is where all the lighting
  // for the vertices and so forth is established.
  const shaderProgram = initShaderProgram(gl, vsSource, fsSource);

  // Collect all the info needed to use the shader program.
  // Look up which attributes our shader program is using
  // for aVertexPosition, aVertexColor and also
  // look up uniform locations.
  const programInfo = {
    program: shaderProgram,
    attribLocations: {
      vertexPosition: gl.getAttribLocation(shaderProgram, "aVertexPosition"),
      vertexColor: gl.getAttribLocation(shaderProgram, "aVertexColor"),
    },
    uniformLocations: {
      projectionMatrix: gl.getUniformLocation(
        shaderProgram,
        "uProjectionMatrix"
      ),
      modelViewMatrix: gl.getUniformLocation(shaderProgram, "uModelViewMatrix"),
    },
  };

  const objects = [
    initCuboid(
      gl,
      {x: 0, y: 0, z: 0},
      {x: 1, y: 1, z: 1},
      {pitch: 0, yaw: 0, roll: 0}
    ),
    initCuboid(
      gl,
      {x: 0, y: 2, z: 0},
      {x: .5, y: .5, z: .5},
      {pitch: 0, yaw: 0, roll: 0}
    ),
  ]

  function render() {
    drawScene(gl, programInfo, objects);
    canvas.convertToBlob().then((res) => {
      document.getElementById("glimg").src = URL.createObjectURL(res)
    })
    requestAnimationFrame(render)
  }
  requestAnimationFrame(render)
}
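Aside: the blob APIs differ between the two canvas types mentioned above, so the copy-to-image step has to change depending on which replacement is used. A minimal sketch (the variable names offscreen and htmlCanvas are placeholders):

// OffscreenCanvas: promise-based convertToBlob()
offscreen.convertToBlob().then((blob) => {
  document.getElementById("glimg").src = URL.createObjectURL(blob);
});

// HTMLCanvasElement (document.createElement("canvas")): callback-based toBlob()
htmlCanvas.toBlob((blob) => {
  document.getElementById("glimg").src = URL.createObjectURL(blob);
});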

Related

WebGL gl.triangle make square

gl.bufferData(gl.ARRAY_BUFFER, new Float32Array([
  // position
  -0.9,  0.9,
   0.9,  0.9,
  -0.9, -0.9,
   0.9,  0.9,
  // color
  1, 0, 0, 1,
  0, 1, 0, 1,
  1, 0, 1, 1,
  1, 0, 0, 1
]), gl.STATIC_DRAW);

gl.enableVertexAttribArray(positionLocation);
gl.enableVertexAttribArray(colorLocation);

var size = 2;
var type = gl.FLOAT;
var normalize = false;
var stride = 0;
var offset = 0;
gl.vertexAttribPointer(positionLocation, size, type, normalize, stride, offset);

var size = 4;
var type = gl.FLOAT;
var normalize = false;
var stride = 0;
var offset = Float32Array.BYTES_PER_ELEMENT * 8;
gl.vertexAttribPointer(colorLocation, size, type, normalize, stride, offset);
This is only a portion of the code (the full code is too long to fit), so I'm showing the part I'm curious about: why did it still draw a triangle and not a square? I know that I used gl.TRIANGLES, but I want to try using gl.TRIANGLES to draw a square. I'm not sure which part of this is wrong, and I have searched about it, but no one does the same thing as I do (putting the positions and the colors in the same array).
There's also this part where count is 3, and I'm not sure what it does (this code was given by my professor so I can turn it into a colored square by changing a few settings; I do not know how to code OpenGL yet):
// Draw the geometry.
var offset = 0;
var count = 3;
gl.drawArrays(gl.TRIANGLES, offset, count);
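For reference (not part of the original assignment code), the signature is gl.drawArrays(mode, first, count), so the call above processes exactly 3 vertices, i.e. one triangle:

// mode:  primitive type to assemble, e.g. gl.TRIANGLES
// first: index of the first vertex to read from the enabled attributes
// count: number of vertices to process (3 vertices = 1 triangle)
gl.drawArrays(gl.TRIANGLES, 0, 3);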
Below is the full code:
<!DOCTYPE html>
<html>
<head>
  <title>CS299 - Assignment 1.1</title>
  <link type="text/css" href="https://webgl2fundamentals.org/webgl/resources/webgl-tutorials.css" rel="stylesheet" />
  <!-- css override -->
  <style type="text/css">
    body { background-color: #CCCCCC; }
    #group { background-color: #E8F49F; }
    canvas { background-color: #4DC72F; width: 300px; height: 300px; border: 0px; }
    .gman-widget-slider { min-width: 200px; }
  </style>
</head>
<body>
  <canvas id="canvas"></canvas>
</body>
<!-- util functions -->
<script src="https://webgl2fundamentals.org/webgl/resources/webgl-utils.js"></script>
<!-- main WebGL2 code -->
<script>
"use strict";

var vs = `#version 300 es
// an attribute is an input (in) to a vertex shader.
// It will receive data from a buffer
in vec2 a_position;
in vec4 a_color;

// color output from vertex shader to fragment shader
out vec4 v_color;

// all shaders have a main function.
void main() {
  // default position output variable
  // convert vec2 to vec4
  gl_Position = vec4(a_position, 0, 1);
  // color passthrough
  v_color = a_color;
}
`;

var fs = `#version 300 es
precision highp float;

// color passthrough
in vec4 v_color;
// output color
out vec4 outColor;

void main() {
  outColor = v_color;
}
`;

function main() {
  // Get A WebGL context
  /** @type {HTMLCanvasElement} */
  var canvas = document.querySelector("#canvas");
  var gl = canvas.getContext("webgl2");
  if (!gl) {
    return;
  }

  // setup GLSL program
  var program = webglUtils.createProgramFromSources(gl, [vs, fs]);

  // look up where the vertex data needs to go.
  var positionLocation = gl.getAttribLocation(program, "a_position");
  var colorLocation = gl.getAttribLocation(program, "a_color");

  // Create set of attributes
  var vao = gl.createVertexArray();
  gl.bindVertexArray(vao);

  // Create a buffer (formerly called "vertex buffer object", now just "buffer").
  var vbo = gl.createBuffer();

  // Set Geometry.
  gl.bindBuffer(gl.ARRAY_BUFFER, vbo);
  // [40%] Modify the code to draw a square instead of a triangle.
  // Assign C,M,Y, and K colors to the 4 vertices of the square.
  gl.bufferData(gl.ARRAY_BUFFER, new Float32Array([
    // position
    -0.9,  0.9,
     0.9,  0.9,
    -0.9, -0.9,
     0.9,  0.9,
    // color
    1, 0, 0, 1,
    0, 1, 0, 1,
    1, 0, 1, 1,
    1, 0, 0, 1
  ]), gl.STATIC_DRAW);

  // tell the position attribute how to pull data out of the current ARRAY_BUFFER
  gl.enableVertexAttribArray(positionLocation);
  gl.enableVertexAttribArray(colorLocation);

  var size = 2;
  var type = gl.FLOAT;
  var normalize = false;
  var stride = 0;
  var offset = 0;
  gl.vertexAttribPointer(positionLocation, size, type, normalize, stride, offset);

  var size = 4;
  var type = gl.FLOAT;
  var normalize = false;
  var stride = 0;
  var offset = Float32Array.BYTES_PER_ELEMENT * 8; // must be in bytes
  gl.vertexAttribPointer(colorLocation, size, type, normalize, stride, offset);

  // Draw the scene.
  function drawScene() {
    webglUtils.resizeCanvasToDisplaySize(gl.canvas);
    gl.viewport(0, 0, gl.canvas.width, gl.canvas.height);

    // Clear the canvas
    gl.clearColor(0.15, 0.15, 0.15, 1.0);
    gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT);

    // Tell it to use our program (pair of shaders)
    gl.useProgram(program);

    // Bind the attribute/buffer set we want.
    gl.bindVertexArray(vao);

    // Draw the geometry.
    var offset = 0;
    var count = 3;
    // [1.5 points] Use gl.TRIANGLE_STRIP instead of gl.TRIANGLES
    gl.drawArrays(gl.TRIANGLES, offset, count);
  }
  drawScene();
}
main();
</script>
<p id="group">Group: 4DC72F</p>
</html>
I would like some hints instead of the answer, if that is OK, because I am trying to learn, but I can't find this method anywhere on the internet.
In your vertex specification, the coordinate (0.9, 0.9) is duplicated; however, that's not the only problem.
See Triangle primitives. The primitive type gl.TRIANGLES renders, as the name suggests, triangles. For 2 triangles you need 6 vertices (2*3). Each triangle consists of 3 vertices, and the triangles are completely independent and have no common vertices, e.g.:
gl.bufferData(gl.ARRAY_BUFFER, new Float32Array([
  // position
  // triangle 1
  -0.9,  0.9,
   0.9,  0.9,
  -0.9, -0.9,
  // triangle 2
   0.9,  0.9,
   0.9, -0.9,
  -0.9, -0.9,
  // color
  // [...]
]), gl.STATIC_DRAW);
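With six vertices in the buffer, the count argument of the draw call has to match (and the elided color data needs six entries as well):

gl.drawArrays(gl.TRIANGLES, 0, 6);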
However, you can use the primitive type gl.TRIANGLE_STRIP to draw a single quad:
gl.bufferData(gl.ARRAY_BUFFER, new Float32Array([
  // position
  -0.9,  0.9,
   0.9,  0.9,
  -0.9, -0.9,
   0.9, -0.9,
  // color
  1, 0, 0, 1,
  0, 1, 0, 1,
  1, 0, 1, 1,
  1, 0, 0, 1
]), gl.STATIC_DRAW);

gl.drawArrays(gl.TRIANGLE_STRIP, 0, 4);

Is passing just the lengths of the sides of a triangle sufficient to draw a triangle in WebGL?

The question may not be clear, so let me clarify it here. Consider an array which holds the coordinates of a triangle in clip space:
var coor = [
  -0.4,  0.6, 0.0,
  -0.5, -0.5, 0.0,
   0.5, -0.5, 0.0
]
So the coor array lets us draw a triangle using WebGL. But instead of that, I want to do something like this:
var coor2 = [ 100, 100, 100 ]
In coor2 I gave the measures of the sides to draw a triangle. It would be an equilateral triangle. So, can I do something such that I enter the three sides of a triangle and a script converts them to coordinates in clip space that can be read by WebGL?
Thanks in advance.
WebGL only cares that you set gl_Position to clip space values. It doesn't care how you do it; it's up to you to decide how to do that. Inside the vertex shader, at least in WebGL1, things are stateless, meaning when you draw a triangle your vertex shader will be called 3 times. It needs to set gl_Position to 3 different values in clip space with no state between calls. So, given 100 three times, how would you compute a different value each time your vertex shader is called?
Just imagine it in JavaScript:
const gl_Position1 = vertexShader(100);
const gl_Position2 = vertexShader(100);
const gl_Position3 = vertexShader(100);
How is the function vertexShader supposed to produce 3 different values with no other input? Unless there is some other state, it can't produce a different value. Vertex shaders in WebGL1 don't have any other state.
You need to pass in some data that changes every iteration.
Of course, if the values changed, then you could produce a triangle. Example:
function vertexShader(v) {
  const angle = v / 3 * Math.PI * 2;
  return [Math.cos(angle), Math.sin(angle), 0, 1];
}

const gl_Position1 = vertexShader(0);
const gl_Position2 = vertexShader(1);
const gl_Position3 = vertexShader(2);
Would produce a triangle. Converting that to GLSL and WebGL:
const gl = document.querySelector('canvas').getContext('webgl');

const vs = `
attribute float v;

#define PI radians(180.0)

void main() {
  float angle = v / 3.0 * PI * 2.0;
  gl_Position = vec4(cos(angle), sin(angle), 0, 1);
}
`;

const fs = `
precision mediump float;
void main() {
  gl_FragColor = vec4(1, 0, 0, 1);
}
`;

const program = twgl.createProgram(gl, [vs, fs]);
const vLocation = gl.getAttribLocation(program, 'v');
const buf = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, buf);
gl.bufferData(gl.ARRAY_BUFFER, new Float32Array([0, 1, 2]), gl.STATIC_DRAW);
gl.enableVertexAttribArray(vLocation);
gl.vertexAttribPointer(vLocation, 1, gl.FLOAT, false, 0, 0);
gl.useProgram(program);
gl.drawArrays(gl.TRIANGLES, 0, 3);

canvas { border: 1px solid black; }

<script src="https://twgljs.org/dist/4.x/twgl.min.js"></script>
<canvas></canvas>
Of course, given we only passed in [0, 1, 2], we can't easily specify a position, but the point is that now that we have a value that changes, we can produce a triangle. Which is just to reiterate: WebGL only cares that you set gl_Position to clip space values. It doesn't care how you do it.
See this article for more

WebGL 2D camera zoom to mouse point

I'm currently building a 2D drawing app in WebGL. I want to implement zoom to the point under the mouse cursor, similar to the example here, but I can't figure out how to apply the solution from that answer in my case.
I have done basic zoom by scaling the camera matrix. But it zooms to the top-left corner of the canvas, due to that being the origin (0, 0) set by the projection (as far as I understand).
I have basic pan & zoom implemented.
My draw function (including matrix computations) looks like this:
var projection = null;
var view = null;
var viewProjection = null;

function draw(gl, camera, sceneTree) {
  // projection matrix
  projection = new Float32Array(9);
  mat3.projection(projection, gl.canvas.clientWidth, gl.canvas.clientHeight);

  // camera matrix
  view = new Float32Array(9);
  mat3.fromTranslation(view, camera.translation);
  mat3.rotate(view, view, toRadians(camera.rotation));
  mat3.scale(view, view, camera.scale);

  // view matrix
  mat3.invert(view, view)

  // VP matrix
  viewProjection = new Float32Array(9);
  mat3.multiply(viewProjection, projection, view);

  // go through scene tree:
  // - build final matrix for each object
  //   e.g: u_matrix = VP x Model (translate x rotate x scale)
  // draw each object in scene tree
  // ...
}
Vertex shader:
attribute vec2 a_position;
uniform mat3 u_matrix;

void main() {
  gl_Position = vec4((u_matrix * vec3(a_position, 1)).xy, 0, 1);
}
Zoom function:
function screenToWorld(screenPos) {
  // normalized screen position
  let nsp = [
     2.0 * screenPos[0] / this.gl.canvas.width - 1,
    -2.0 * screenPos[1] / this.gl.canvas.height + 1
  ];
  let inverseVP = new Float32Array(9);
  mat3.invert(inverseVP, viewProjection);
  let worldPos = [0, 0];
  return vec2.transformMat3(worldPos, nsp, inverseVP);
}

var zoomRange = [0.01, 2];
canvas.addEventListener('wheel', (e) => {
  let oldZoom = camera.scale[0];
  let zoom = Math.min(Math.max(oldZoom + e.deltaX / 100, zoomRange[0]), zoomRange[1]);
  camera.scale = [zoom, zoom];

  let zoomPoint = screenToWorld([e.clientX, e.clientY]);
  // totally breaks if enable this line
  //vec2.copy(camera.translation, zoomPoint);

  // call draw function again
  draw();
}, false);
If I apply zoomPoint to the camera translation, the values of zoomPoint (and the camera position accordingly) grow uncontrollably with every zoom event (no matter whether I zoom in or out), and the objects drawn in the scene immediately go out of view.
I would greatly appreciate any insights or suggestions about what I am doing wrong here. Thanks.
Since you didn't post a minimal reproducible example in the question itself, I couldn't test with your math library. Using my own, though, I was able to zoom like this:
const [clipX, clipY] = getClipSpaceMousePosition(e);

// position before zooming
const [preZoomX, preZoomY] = m3.transformPoint(
    m3.inverse(viewProjectionMat),
    [clipX, clipY]);

// multiply the wheel movement by the current zoom level
// so we zoom less when zoomed in and more when zoomed out
const newZoom = camera.zoom * Math.pow(2, e.deltaY * -0.01);
camera.zoom = Math.max(0.02, Math.min(100, newZoom));

updateViewProjection();

// position after zooming
const [postZoomX, postZoomY] = m3.transformPoint(
    m3.inverse(viewProjectionMat),
    [clipX, clipY]);

// camera needs to be moved the difference of before and after
camera.x += preZoomX - postZoomX;
camera.y += preZoomY - postZoomY;
Note that zoom is the opposite of scale. If zoom = 2, then I want everything to appear 2x larger. Doing that requires shrinking the camera space, so we scale that space by 1 / zoom.
Example:
const canvas = document.querySelector('canvas');
const gl = canvas.getContext('webgl');

const vs = `
attribute vec2 a_position;
uniform mat3 u_matrix;
void main() {
  gl_Position = vec4((u_matrix * vec3(a_position, 1)).xy, 0, 1);
}
`;

const fs = `
precision mediump float;
uniform vec4 u_color;
void main() {
  gl_FragColor = u_color;
}
`;

// compiles shaders, links program, looks up locations
const programInfo = twgl.createProgramInfo(gl, [vs, fs]);

// calls gl.createBuffer, gl.bindBuffer, gl.bufferData
const bufferInfo = twgl.createBufferInfoFromArrays(gl, {
  a_position: {
    numComponents: 2,
    data: [
       0,  0,  // 0----1
      40,  0,  // |    |
      40, 10,  // |  3--2
      10, 10,  // |  |
      10, 20,  // |  4-5
      30, 20,  // |    |
      30, 30,  // |  7-6
      10, 30,  // |  |
      10, 50,  // 9-8
       0, 50,
    ],
  },
  indices: [
    0, 1, 2,
    0, 2, 3,
    0, 3, 8,
    0, 8, 9,
    4, 5, 6,
    4, 6, 7,
  ],
});

const camera = {
  x: 0,
  y: 0,
  rotation: 0,
  zoom: 1,
};

const scene = [
  { x:  20, y:  20, rotation: 0,       scale: 1,   color: [1, 0, 0, 1],   bufferInfo },
  { x: 100, y:  50, rotation: Math.PI, scale: 0.5, color: [0, 0.5, 0, 1], bufferInfo },
  { x: 100, y:  50, rotation: 0,       scale: 2,   color: [0, 0, 1, 1],   bufferInfo },
  { x: 200, y: 100, rotation: 0.7,     scale: 1,   color: [1, 0, 1, 1],   bufferInfo },
];

let viewProjectionMat;

function makeCameraMatrix() {
  const zoomScale = 1 / camera.zoom;
  let cameraMat = m3.identity();
  cameraMat = m3.translate(cameraMat, camera.x, camera.y);
  cameraMat = m3.rotate(cameraMat, camera.rotation);
  cameraMat = m3.scale(cameraMat, zoomScale, zoomScale);
  return cameraMat;
}

function updateViewProjection() {
  // same as ortho(0, width, height, 0, -1, 1)
  const projectionMat = m3.projection(gl.canvas.width, gl.canvas.height);
  const cameraMat = makeCameraMatrix();
  let viewMat = m3.inverse(cameraMat);
  viewProjectionMat = m3.multiply(projectionMat, viewMat);
}

function draw() {
  gl.clear(gl.COLOR_BUFFER_BIT);

  updateViewProjection();

  gl.useProgram(programInfo.program);

  for (const {x, y, rotation, scale, color, bufferInfo} of scene) {
    // calls gl.bindBuffer, gl.enableVertexAttribArray, gl.vertexAttribPointer
    twgl.setBuffersAndAttributes(gl, programInfo, bufferInfo);

    let mat = m3.identity();
    mat = m3.translate(mat, x, y);
    mat = m3.rotate(mat, rotation);
    mat = m3.scale(mat, scale, scale);

    // calls gl.uniformXXX
    twgl.setUniforms(programInfo, {
      u_matrix: m3.multiply(viewProjectionMat, mat),
      u_color: color,
    });

    // calls gl.drawArrays or gl.drawElements
    twgl.drawBufferInfo(gl, bufferInfo);
  }
}
draw();

function getClipSpaceMousePosition(e) {
  // get canvas relative css position
  const rect = canvas.getBoundingClientRect();
  const cssX = e.clientX - rect.left;
  const cssY = e.clientY - rect.top;

  // get normalized 0 to 1 position across and down canvas
  const normalizedX = cssX / canvas.clientWidth;
  const normalizedY = cssY / canvas.clientHeight;

  // convert to clip space
  const clipX = normalizedX *  2 - 1;
  const clipY = normalizedY * -2 + 1;

  return [clipX, clipY];
}

canvas.addEventListener('wheel', (e) => {
  e.preventDefault();
  const [clipX, clipY] = getClipSpaceMousePosition(e);

  // position before zooming
  const [preZoomX, preZoomY] = m3.transformPoint(
      m3.inverse(viewProjectionMat),
      [clipX, clipY]);

  // multiply the wheel movement by the current zoom level
  // so we zoom less when zoomed in and more when zoomed out
  const newZoom = camera.zoom * Math.pow(2, e.deltaY * -0.01);
  camera.zoom = Math.max(0.02, Math.min(100, newZoom));

  updateViewProjection();

  // position after zooming
  const [postZoomX, postZoomY] = m3.transformPoint(
      m3.inverse(viewProjectionMat),
      [clipX, clipY]);

  // camera needs to be moved the difference of before and after
  camera.x += preZoomX - postZoomX;
  camera.y += preZoomY - postZoomY;

  draw();
});

canvas { border: 1px solid black; display: block; }

<canvas></canvas>
<script src="https://twgljs.org/dist/4.x/twgl-full.min.js"></script>
<script src="https://webglfundamentals.org/webgl/resources/m3.js"></script>
Note that I included camera.rotation just to make sure things worked if rotated. They seem to. Here's one with zoom, pan, and rotate.

Is there a bug in WebGL polygonOffset or am I missing something?

I am seeing very odd behavior where polygonOffset initially works, but if I re-render it stops working.
I made a simple example to illustrate it. I started with the z-fighting example from Ch7 of the WebGL Programming Guide (https://sites.google.com/site/webglbook/). I then separated out just the rendering portion and wrapped it in a function. I then hooked up an HTML button to call the render() function when clicked. On the first click, the triangles render correctly with no issues. On the second click, it is like polygonOffset is turned off again.
I've tried a number of different variations, including re-enabling every time, disabling and re-enabling, changing the offsets, but I keep getting the same behavior. Any ideas?
I'm including the code, though the snippet won't run without the book's libraries.
// Zfighting.js (c) 2012 matsuda
// Vertex shader program
var VSHADER_SOURCE =
  'attribute vec4 a_Position;\n' +
  'attribute vec4 a_Color;\n' +
  'uniform mat4 u_ViewProjMatrix;\n' +
  'varying vec4 v_Color;\n' +
  'void main() {\n' +
  '  gl_Position = u_ViewProjMatrix * a_Position;\n' +
  '  v_Color = a_Color;\n' +
  '}\n';

// Fragment shader program
var FSHADER_SOURCE =
  '#ifdef GL_ES\n' +
  'precision mediump float;\n' +
  '#endif\n' +
  'varying vec4 v_Color;\n' +
  'void main() {\n' +
  '  gl_FragColor = v_Color;\n' +
  '}\n';

function main() {
  // Retrieve <canvas> element
  var canvas = document.getElementById('webgl');

  // Get the rendering context for WebGL
  var gl = getWebGLContext(canvas);
  if (!gl) {
    console.log('Failed to get the rendering context for WebGL');
    return;
  }

  // Initialize shaders
  if (!initShaders(gl, VSHADER_SOURCE, FSHADER_SOURCE)) {
    console.log('Failed to initialize shaders.');
    return;
  }

  // Set the vertex coordinates and color (the blue triangle is in the front)
  var n = initVertexBuffers(gl);
  if (n < 0) {
    console.log('Failed to set the vertex information');
    return;
  }

  // Set clear color and enable the hidden surface removal function
  gl.clearColor(0, 0, 0, 1);
  gl.enable(gl.DEPTH_TEST);

  // Get the storage location of u_ViewProjMatrix
  var u_ViewProjMatrix = gl.getUniformLocation(gl.program, 'u_ViewProjMatrix');
  if (!u_ViewProjMatrix) {
    console.log('Failed to get the storage location of u_ViewProjMatrix');
    return;
  }

  var viewProjMatrix = new Matrix4();
  // Set the eye point, look-at point, and up vector.
  viewProjMatrix.setPerspective(30, canvas.width/canvas.height, 1, 100);
  viewProjMatrix.lookAt(3.06, 2.5, 10.0, 0, 0, -2, 0, 1, 0);

  // Pass the view projection matrix to u_ViewProjMatrix
  gl.uniformMatrix4fv(u_ViewProjMatrix, false, viewProjMatrix.elements);

  // Enable the polygon offset function
  gl.enable(gl.POLYGON_OFFSET_FILL);

  function render() {
    // Clear color and depth buffer
    gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT);

    // Draw the triangles
    gl.drawArrays(gl.TRIANGLES, 0, n/2);   // The green triangle
    gl.polygonOffset(1.0, 1.0);            // Set the polygon offset
    gl.drawArrays(gl.TRIANGLES, n/2, n/2); // The yellow triangle
  }
  document.getElementById("button").onclick = render;
}

function initVertexBuffers(gl) {
  var verticesColors = new Float32Array([
    // Vertex coordinates and color
     0.0,  2.5, -5.0,  0.4, 1.0, 0.4, // The green triangle
    -2.5, -2.5, -5.0,  0.4, 1.0, 0.4,
     2.5, -2.5, -5.0,  1.0, 0.4, 0.4,
     0.0,  3.0, -5.0,  1.0, 0.4, 0.4, // The yellow triangle
    -3.0, -3.0, -5.0,  1.0, 1.0, 0.4,
     3.0, -3.0, -5.0,  1.0, 1.0, 0.4,
  ]);
  var n = 6;

  // Create a buffer object
  var vertexColorbuffer = gl.createBuffer();
  if (!vertexColorbuffer) {
    console.log('Failed to create the buffer object');
    return -1;
  }

  // Write the vertex coordinates and color to the buffer object
  gl.bindBuffer(gl.ARRAY_BUFFER, vertexColorbuffer);
  gl.bufferData(gl.ARRAY_BUFFER, verticesColors, gl.STATIC_DRAW);

  var FSIZE = verticesColors.BYTES_PER_ELEMENT;

  // Assign the buffer object to a_Position and enable the assignment
  var a_Position = gl.getAttribLocation(gl.program, 'a_Position');
  if (a_Position < 0) {
    console.log('Failed to get the storage location of a_Position');
    return -1;
  }
  gl.vertexAttribPointer(a_Position, 3, gl.FLOAT, false, FSIZE * 6, 0);
  gl.enableVertexAttribArray(a_Position);

  // Assign the buffer object to a_Color and enable the assignment
  var a_Color = gl.getAttribLocation(gl.program, 'a_Color');
  if (a_Color < 0) {
    console.log('Failed to get the storage location of a_Color');
    return -1;
  }
  gl.vertexAttribPointer(a_Color, 3, gl.FLOAT, false, FSIZE * 6, FSIZE * 3);
  gl.enableVertexAttribArray(a_Color);

  return n;
}

<canvas id="webgl" width="400" height="400">
  Please use a browser that supports "canvas"
</canvas>
<input type="button" id="button" />
You need to reset polygonOffset or disable/re-enable it; otherwise both triangles are offset by the same amount.
GPUs are state machines, and you're in charge of managing the state (variables):
function render() {
  // Clear color and depth buffer
  gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT);

  // Draw the triangles
  gl.polygonOffset(0.0, 0.0);            // Reset the polygon offset
  gl.drawArrays(gl.TRIANGLES, 0, n/2);   // The green triangle
  gl.polygonOffset(1.0, 1.0);            // Set the polygon offset
  gl.drawArrays(gl.TRIANGLES, n/2, n/2); // The yellow triangle
}
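Or, taking the disable/re-enable route mentioned above, a sketch of the same render function:

function render() {
  gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT);

  gl.disable(gl.POLYGON_OFFSET_FILL);    // draw the green triangle without offset
  gl.drawArrays(gl.TRIANGLES, 0, n/2);

  gl.enable(gl.POLYGON_OFFSET_FILL);     // offset only the yellow triangle
  gl.polygonOffset(1.0, 1.0);
  gl.drawArrays(gl.TRIANGLES, n/2, n/2);
}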

webgl, texture coordinates and obj

I'm finding it difficult to understand the correlation between vertex and texture coordinates when the data is rendered. I have a cube being drawn using drawElements from data parsed from an OBJ file. I got textures somewhere close to working with a simple plane, where the numbers of vertices for positions and for texture coordinates matched, but once I use a more complex model, or even just a more complex UV unwrap, I end up with the texture going all wrong.
From what I've read, there doesn't seem to be a way of using texture coordinate indices the same way you would for vertex positions, which is unfortunate because the OBJ has that information. The way I've gotten it close to working was by building an array of texture coordinates from the index data in the OBJ. But because the lengths of the vertex and texture coordinate arrays differ (for example, in an OBJ for a cube there are 8 vertices and up to 36 texture coordinates, depending on how the mesh is unwrapped), they don't correlate.
What is the correct workflow for using drawElements and mapping each vertex to its correct texture coordinates?
You are correct, you can not easily use different indices for different attributes (in your case positions and texture coordinates).
A common example is a cube. If you want to render a cube with lighting you need normals. There are only 8 positions on a cube but each face of the cube needs 3 different normals for the same positions, one normal for each face that shares that position. That means you need 24 vertices total, 4 for each of the 6 faces of the cube.
If you have a file format that has separate indices for different attributes you'll need to expand them out so that each unique combination of attributes (position, normal, texture coord, etc..) is in your buffers.
Most game engines would do this kind of thing offline. In other words, they'd write some tool that reads the OBJ file, expands the various attributes, and then writes the data back out pre-expanded. That's because generating the expanded data can be time consuming at runtime for a large model if you're trying to optimize the data and only keep unique vertices.
If you don't care about optimal data then just expand based on the indices. The number of indices for each type of attribute should be the same.
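As a rough sketch of that expansion (not from the original answer; it assumes OBJ-style faces given as position/texcoord index pairs):

// Expand separate per-attribute indices into flat buffers where each
// unique position/texcoord combination becomes its own vertex.
function expandVertices(positions, texcoords, faceIndices) {
  const outPositions = [];
  const outTexcoords = [];
  for (const [posIndex, uvIndex] of faceIndices) {
    outPositions.push(...positions.slice(posIndex * 3, posIndex * 3 + 3)); // xyz
    outTexcoords.push(...texcoords.slice(uvIndex * 2, uvIndex * 2 + 2));   // uv
  }
  return { positions: outPositions, texcoords: outTexcoords };
}

// usage: one triangle reusing positions 0, 1, 2 but texcoords 4, 7, 2
// const expanded = expandVertices(objPositions, objTexcoords, [[0, 4], [1, 7], [2, 2]]);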
Note: positions are not special. I bring this up because you said there doesn't seem to be a way of using texture coordinate indices the same way you would for vertex positions. WebGL has no concept of "positions". It just has attributes which describe how to pull data out of buffers. What's in those attributes (positions, normals, random data, whatever) is up to you. gl.drawElements indexes the entire combination of attributes you supply. If you pass in an index of 7, it's going to give you element 7 of each attribute.
Note that the above describes how pretty much all 3D engines written in WebGL work. That said, you can get creative if you really want to.
Here's a program that stores positions and normals in textures and puts the indices in buffers. Because textures are random access, it can have different indices for positions and normals:
var canvas = document.getElementById("c");
var gl = canvas.getContext("webgl");
var ext = gl.getExtension("OES_texture_float");
if (!ext) {
  alert("need OES_texture_float extension cause I'm lazy");
  //return;
}
if (gl.getParameter(gl.MAX_VERTEX_TEXTURE_IMAGE_UNITS) < 2) {
  alert("need to be able to access textures from vertex shaders");
  //return;
}
var m4 = twgl.m4;
var v3 = twgl.v3;
var programInfo = twgl.createProgramInfo(gl, ["vshader", "fshader"]);

// Cube data
var positions = [
  -1, -1, -1, // 0 lbb
  +1, -1, -1, // 1 rbb      2---3
  -1, +1, -1, // 2 ltb     /|  /|
  +1, +1, -1, // 3 rtb    6---7 |
  -1, -1, +1, // 4 lbf    | | | |
  +1, -1, +1, // 5 rbf    | 0-|-1
  -1, +1, +1, // 6 ltf    |/  |/
  +1, +1, +1, // 7 rtf    4---5
];
var positionIndices = [
  3, 7, 5,  3, 5, 1, // right
  6, 2, 0,  6, 0, 4, // left
  6, 7, 3,  6, 3, 2, // top
  0, 1, 5,  0, 5, 4, // bottom
  7, 6, 4,  7, 4, 5, // front
  2, 3, 1,  2, 1, 0, // back
];
var normals = [
  +1,  0,  0,
  -1,  0,  0,
   0, +1,  0,
   0, -1,  0,
   0,  0, +1,
   0,  0, -1,
]
var normalIndices = [
  0, 0, 0, 0, 0, 0, // right
  1, 1, 1, 1, 1, 1, // left
  2, 2, 2, 2, 2, 2, // top
  3, 3, 3, 3, 3, 3, // bottom
  4, 4, 4, 4, 4, 4, // front
  5, 5, 5, 5, 5, 5, // back
];

function degToRad(deg) {
  return deg * Math.PI / 180;
}

var bufferInfo = twgl.createBufferInfoFromArrays(gl, {
  a_positionIndex: { size: 1, data: positionIndices },
  a_normalIndex: { size: 1, data: normalIndices },
});
var textures = twgl.createTextures(gl, {
  positions: {
    format: gl.RGB,
    type: gl.FLOAT,
    height: 1,
    src: positions,
    min: gl.NEAREST,
    mag: gl.NEAREST,
    wrap: gl.CLAMP_TO_EDGE,
  },
  normals: {
    format: gl.RGB,
    type: gl.FLOAT,
    height: 1,
    src: normals,
    min: gl.NEAREST,
    mag: gl.NEAREST,
    wrap: gl.CLAMP_TO_EDGE,
  },
});

var xRot = degToRad(30);
var yRot = degToRad(20);
var lightDir = v3.normalize([-0.2, -0.1, 0.5]);

function draw(time) {
  time *= 0.001; // convert to seconds

  twgl.resizeCanvasToDisplaySize(gl.canvas);
  gl.viewport(0, 0, gl.canvas.width, gl.canvas.height);

  yRot = time;

  gl.enable(gl.DEPTH_TEST);
  gl.enable(gl.CULL_FACE);

  gl.useProgram(programInfo.program);

  var persp = m4.perspective(
      degToRad(45),
      gl.canvas.clientWidth / gl.canvas.clientHeight,
      0.1, 100.0);
  var mat = m4.identity();
  mat = m4.translate(mat, [0.0, 0.0, -5.0]);
  mat = m4.rotateX(mat, xRot);
  mat = m4.rotateY(mat, yRot);

  var uniforms = {
    u_positions: textures.positions,
    u_positionsSize: [positions.length / 3, 1],
    u_normals: textures.normals,
    u_normalsSize: [normals.length / 3, 1],
    u_mvpMatrix: m4.multiply(persp, mat),
    u_mvMatrix: mat,
    u_color: [0.5, 0.8, 1, 1],
    u_lightDirection: lightDir,
  };

  twgl.setBuffersAndAttributes(gl, programInfo, bufferInfo);
  twgl.setUniforms(programInfo, uniforms);
  twgl.drawBufferInfo(gl, bufferInfo);

  requestAnimationFrame(draw);
}
requestAnimationFrame(draw);

body { margin: 0; }
canvas { width: 100vw; height: 100vh; display: block; }

<script src="//twgljs.org/dist/2.x/twgl-full.min.js"></script>
<script id="vshader" type="whatever">
attribute float a_positionIndex;
attribute float a_normalIndex;
attribute vec4 a_pos;

uniform sampler2D u_positions;
uniform vec2 u_positionsSize;
uniform sampler2D u_normals;
uniform vec2 u_normalsSize;
uniform mat4 u_mvpMatrix;
uniform mat4 u_mvMatrix;

varying vec3 v_normal;

// to index the value in the texture we need to
// compute a texture coordinate that will access
// the correct texel. To do that we need access from
// the middle of the first texel to the middle of the
// last texel.
//
// In other words if we had 3 values (and therefore
// 3 texels) we'd have something like this
//
//     -------- 3x1 texels --------
//     [      ][      ][      ]
// 0.0 |<------------------------>| 1.0
//
// If we just did index / numValues we'd get
//
//     [      ][      ][      ]
//     |       |       |
//    0.0    0.333   0.666
//
// Which is right between texels so we add
// a halfTexel to get this
//
//     [      ][      ][      ]
//        |       |       |
//      0.167    0.5    0.833
//
// note: In WebGL2 we could just use `texelFetch`
// which takes integer pixel locations
vec2 texCoordFromIndex(const float index, const vec2 textureSize) {
  vec2 colRow = vec2(
      mod(index, textureSize.x),      // column
      floor(index / textureSize.x));  // row
  return vec2((colRow + 0.5) / textureSize);
}

void main() {
  vec2 ptc = texCoordFromIndex(a_positionIndex, u_positionsSize);
  vec3 position = texture2D(u_positions, ptc).rgb;

  vec2 ntc = texCoordFromIndex(a_normalIndex, u_normalsSize);
  vec3 normal = texture2D(u_normals, ntc).rgb;

  gl_Position = u_mvpMatrix * vec4(position, 1);
  v_normal = (u_mvMatrix * vec4(normal, 0)).xyz;
}
</script>
<script id="fshader" type="whatever">
precision mediump float;

uniform vec4 u_color;
uniform vec3 u_lightDirection;

varying vec3 v_normal;

void main() {
  float light = dot(
      normalize(v_normal), u_lightDirection) * 0.5 + 0.5;
  gl_FragColor = vec4(u_color.rgb * light, u_color.a);
}
</script>
<canvas id="c"></canvas>
