draw point on circumference of circle in webgl - javascript

I am able to draw a circle.
I want to pick an arbitrary point on the circle and draw a shape, like a triangle or a simple point, on the circumference.
From what I understand, the vertexData array holds those points, so I can pick a point from vertexData.
But how do I proceed with drawing the point at that location?
If it were only about drawing a point on the canvas, I understand that in the vertex shader I can declare
attribute vec4 a_Position
and then set gl_Position = a_Position,
but I don't understand how to do this on the circumference of the circle.
Please guide me here.
Thanks
<script>
var vertexShaderText = [
'uniform vec2 u_resolution;',
'',
'attribute vec2 a_position;',
'',
'void main()',
'{',
'',
'vec2 clipspace = a_position / u_resolution * 1.0 ;',
'',
'gl_Position = vec4(clipspace * vec2(1, -1), 0, 1);',
'}'
].join("\n");
var fragmentShaderText = [
'precision mediump float;',
'',
'void main(void)',
'{',
'',
'gl_FragColor = vec4(1.0, 0, 0, 0);',
'',
'}'
].join("\n");
var uni = function(){
var canvas = document.getElementById("game-surface");
var gl = canvas.getContext("webgl",{antialias: true});
console.log("This is working");
gl.clearColor(0.412,0.412,0.412,1);
gl.clear(gl.COLOR_BUFFER_BIT);
var vertextShader = gl.createShader(gl.VERTEX_SHADER);
var fragmentShader = gl.createShader(gl.FRAGMENT_SHADER);
gl.shaderSource(vertextShader,vertexShaderText);
gl.shaderSource(fragmentShader,fragmentShaderText);
gl.compileShader(vertextShader);
gl.compileShader(fragmentShader);
if(!gl.getShaderParameter(vertextShader,gl.COMPILE_STATUS)){
console.error("Error with vertexshader",gl.getShaderInfoLog(vertextShader));
return;
}
if(!gl.getShaderParameter(fragmentShader,gl.COMPILE_STATUS)){
console.error("Error with fragmentShader",gl.getShaderInfoLog(fragmentShader));
return;
}
var program =gl.createProgram();
gl.attachShader(program,vertextShader);
gl.attachShader(program,fragmentShader);
gl.linkProgram(program);
gl.useProgram(program);
if(!gl.getProgramParameter(program,gl.LINK_STATUS)){
console.error("Error linking program",gl.getProgramInfoLog(program));
return;
}
gl.validateProgram(program);
if(!gl.getProgramParameter(program,gl.VALIDATE_STATUS)){
console.error("Error validating",gl.getProgramInfoLog(program));
}
var circle = {x: 0, y:0, r: 500};
var ATTRIBUTES = 2;
var numFans = 64;
var degreePerFan = (2* Math.PI) / numFans;
var vertexData = [circle.x, circle.y];
// console.log(gl_Position)
for(var i = 0; i <= numFans; i++) {
var index = ATTRIBUTES * i + 2; // there is already 2 items in array
var angle = degreePerFan * (i+0.1);
//console.log(angle)
vertexData[index] = circle.x + Math.cos(angle) * circle.r;
vertexData[index + 1] = circle.y + Math.sin(angle) * circle.r;
}
//console.log(vertexData);
var vertexDataTyped = new Float32Array(vertexData);
var buffer = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, buffer);
gl.bufferData(gl.ARRAY_BUFFER, vertexDataTyped, gl.STATIC_DRAW);
var resolutionLocation = gl.getUniformLocation(program, "u_resolution");
gl.uniform2f(resolutionLocation, canvas.width, canvas.height);
var positionLocation = gl.getAttribLocation(program, "a_position");
gl.enableVertexAttribArray(positionLocation);
gl.vertexAttribPointer(positionLocation, 2, gl.FLOAT, false, ATTRIBUTES * Float32Array.BYTES_PER_ELEMENT, 0);
gl.drawArrays(gl.TRIANGLE_FAN, 0, vertexData.length/ATTRIBUTES);
};
uni();
</script>

You're using a triangle fan to draw this circle, so drawing an additional shape requires a second draw call. This isn't going to scale well, since draw calls are expensive; more likely you're going to want some method of drawing multiple shapes in a single draw call.
That said, as a simple example, you can add the following code to the bottom of your uni function, after the existing draw call, to place a second, smaller circle on the circumference of the first one using a second draw call. Given your fragment shader, this will also be a red circle, so you may want to modify the shader to use a different color.
// Insert this code at the end of the uni() function, it will make
// use of variables and GL state already declared earlier in that function.
// Pick a point along the circumference (any circumference vertex, indices 1 to 65)
var selectedPointIndex = 8;
circle.x = vertexData[selectedPointIndex * 2];
circle.y = vertexData[selectedPointIndex * 2 + 1];
circle.r = 50;
vertexData = [circle.x, circle.y];
for(var i = 0; i <= numFans; i++) {
var index = ATTRIBUTES * i + 2; // there is already 2 items in array
var angle = degreePerFan * (i+0.1);
vertexData[index] = circle.x + Math.cos(angle) * circle.r;
vertexData[index + 1] = circle.y + Math.sin(angle) * circle.r;
}
vertexDataTyped = new Float32Array(vertexData);
buffer = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, buffer);
gl.bufferData(gl.ARRAY_BUFFER, vertexDataTyped, gl.STATIC_DRAW);
gl.vertexAttribPointer(positionLocation, 2, gl.FLOAT, false, ATTRIBUTES * Float32Array.BYTES_PER_ELEMENT, 0);
gl.drawArrays(gl.TRIANGLE_FAN, 0, vertexData.length/ATTRIBUTES);
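If all you actually need is a single point rather than a second circle, here is a minimal sketch (not part of the original answer) that reuses the degreePerFan, positionLocation and ATTRIBUTES variables already declared in uni(); note that gl.POINTS also needs gl_PointSize set in the vertex shader (e.g. 'gl_PointSize = 10.0;') to be visible:
// Sketch: draw a single GL point on the big circle's circumference.
var bigCircle = {x: 0, y: 0, r: 500};          // same values as the original circle
var pickedAngle = degreePerFan * 8;            // any angle along the circumference
var pointData = new Float32Array([
    bigCircle.x + Math.cos(pickedAngle) * bigCircle.r,
    bigCircle.y + Math.sin(pickedAngle) * bigCircle.r
]);
var pointBuffer = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, pointBuffer);
gl.bufferData(gl.ARRAY_BUFFER, pointData, gl.STATIC_DRAW);
gl.vertexAttribPointer(positionLocation, 2, gl.FLOAT, false, ATTRIBUTES * Float32Array.BYTES_PER_ELEMENT, 0);
gl.drawArrays(gl.POINTS, 0, 1);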

Related

How to texture map a sphere in WebGL

I'd like to be able to wrap an image of the Earth around my sphere, using any stock image off of Google.
I've been looking around and I found only answers with three.js, and I'm looking for the simplest way to achieve my goal without it. Below is all of the code for the sphere I'd like to texture. If you copy and paste it, you should be able to see a regular sphere with some shading on it.
From my WebGL book,
.html
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8" />
<title>Point lighted sphere</title>
</head>
<body onload="main()">
<canvas id="webgl" width="400" height="400">
Please use a browser that supports "canvas"
</canvas>
<script src="../lib/webgl-utils.js"></script>
<script src="../lib/webgl-debug.js"></script>
<script src="../lib/cuon-utils.js"></script>
<script src="../lib/cuon-matrix.js"></script>
<script src="PointLightedSphere.js"></script>
</body>
</html>
.js
var VSHADER_SOURCE =
'attribute vec4 a_Position;\n' +
// 'attribute vec4 a_Color;\n' + // Defined constant in main()
'attribute vec4 a_Normal;\n' +
'uniform mat4 u_MvpMatrix;\n' +
'uniform mat4 u_ModelMatrix;\n' + // Model matrix
'uniform mat4 u_NormalMatrix;\n' + // Transformation matrix of the normal
'uniform vec3 u_LightColor;\n' + // Light color
'uniform vec3 u_LightPosition;\n' + // Position of the light source
'uniform vec3 u_AmbientLight;\n' + // Ambient light color
'varying vec4 v_Color;\n' +
'void main() {\n' +
' vec4 color = vec4(1.0, 1.0, 1.0, 1.0);\n' + // Sphere color
' gl_Position = u_MvpMatrix * a_Position;\n' +
// Calculate a normal to be fit with a model matrix, and make it 1.0 in length
' vec3 normal = normalize(vec3(u_NormalMatrix * a_Normal));\n' +
// Calculate world coordinate of vertex
' vec4 vertexPosition = u_ModelMatrix * a_Position;\n' +
// Calculate the light direction and make it 1.0 in length
' vec3 lightDirection = normalize(u_LightPosition - vec3(vertexPosition));\n' +
// The dot product of the light direction and the normal
' float nDotL = max(dot(lightDirection, normal), 0.0);\n' +
// Calculate the color due to diffuse reflection
' vec3 diffuse = u_LightColor * color.rgb * nDotL;\n' +
// Calculate the color due to ambient reflection
' vec3 ambient = u_AmbientLight * color.rgb;\n' +
// Add the surface colors due to diffuse reflection and ambient reflection
' v_Color = vec4(diffuse + ambient, color.a);\n' +
'}\n';
// Fragment shader program
var FSHADER_SOURCE =
'#ifdef GL_ES\n' +
'precision mediump float;\n' +
'#endif\n' +
'varying vec4 v_Color;\n' +
'void main() {\n' +
' gl_FragColor = v_Color;\n' +
'}\n';
function main() {
// Retrieve <canvas> element
var canvas = document.getElementById('webgl');
// Get the rendering context for WebGL
var gl = getWebGLContext(canvas);
if (!gl) {
console.log('Failed to get the rendering context for WebGL');
return;
}
// Initialize shaders
if (!initShaders(gl, VSHADER_SOURCE, FSHADER_SOURCE)) {
console.log('Failed to intialize shaders.');
return;
}
// Set the vertex coordinates, the color and the normal
var n = initVertexBuffers(gl);
if (n < 0) {
console.log('Failed to set the vertex information');
return;
}
// Set the clear color and enable the depth test
gl.clearColor(0, 0, 0, 1);
gl.enable(gl.DEPTH_TEST);
// Get the storage locations of uniform variables and so on
var u_ModelMatrix = gl.getUniformLocation(gl.program, 'u_ModelMatrix');
var u_MvpMatrix = gl.getUniformLocation(gl.program, 'u_MvpMatrix');
var u_NormalMatrix = gl.getUniformLocation(gl.program, 'u_NormalMatrix');
var u_LightColor = gl.getUniformLocation(gl.program, 'u_LightColor');
var u_LightPosition = gl.getUniformLocation(gl.program, 'u_LightPosition');
var u_AmbientLight = gl.getUniformLocation(gl.program, 'u_AmbientLight');
if (!u_MvpMatrix || !u_NormalMatrix || !u_LightColor || !u_LightPosition || !u_AmbientLight) {
console.log('Failed to get the storage location');
return;
}
// Set the light color (white)
gl.uniform3f(u_LightColor, 0.8, 0.8, 0.8);
// Set the light direction (in the world coordinate)
gl.uniform3f(u_LightPosition, 5.0, 8.0, 7.0);
// Set the ambient light
gl.uniform3f(u_AmbientLight, 0.2, 0.2, 0.2);
var modelMatrix = new Matrix4(); // Model matrix
var mvpMatrix = new Matrix4();   // Model view projection matrix
var normalMatrix = new Matrix4(); // Transformation matrix for normals
// Pass the model matrix to u_ModelMatrix
gl.uniformMatrix4fv(u_ModelMatrix, false, modelMatrix.elements);
// Calculate the view projection matrix
mvpMatrix.setPerspective(30, canvas.width/canvas.height, 1, 100);
mvpMatrix.lookAt(0, 0, 6, 0, 0, 0, 0, 1, 0);
mvpMatrix.multiply(modelMatrix);
// Pass the model view projection matrix to u_MvpMatrix
gl.uniformMatrix4fv(u_MvpMatrix, false, mvpMatrix.elements);
// Calculate the matrix to transform the normal based on the model matrix
normalMatrix.setInverseOf(modelMatrix);
normalMatrix.transpose();
// Pass the transformation matrix for normals to u_NormalMatrix
gl.uniformMatrix4fv(u_NormalMatrix, false, normalMatrix.elements);
// Clear color and depth buffer
gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT);
// Draw the sphere (note that the 3rd argument is gl.UNSIGNED_SHORT)
gl.drawElements(gl.TRIANGLES, n, gl.UNSIGNED_SHORT, 0);
}
function initVertexBuffers(gl) { // Create a sphere
var SPHERE_DIV = 13;
var i, ai, si, ci;
var j, aj, sj, cj;
var p1, p2;
var positions = [];
var indices = [];
// Generate coordinates
for (j = 0; j <= SPHERE_DIV; j++) {
aj = j * Math.PI / SPHERE_DIV;
sj = Math.sin(aj);
cj = Math.cos(aj);
for (i = 0; i <= SPHERE_DIV; i++) {
ai = i * 2 * Math.PI / SPHERE_DIV;
si = Math.sin(ai);
ci = Math.cos(ai);
positions.push(si * sj); // X
positions.push(cj); // Y
positions.push(ci * sj); // Z
}
}
// Generate indices
for (j = 0; j < SPHERE_DIV; j++) {
for (i = 0; i < SPHERE_DIV; i++) {
p1 = j * (SPHERE_DIV+1) + i;
p2 = p1 + (SPHERE_DIV+1);
indices.push(p1);
indices.push(p2);
indices.push(p1 + 1);
indices.push(p1 + 1);
indices.push(p2);
indices.push(p2 + 1);
}
}
// Write the vertex property to buffers (coordinates and normals)
// Same data can be used for vertex and normal
// In order to make it intelligible, another buffer is prepared separately
if (!initArrayBuffer(gl, 'a_Position', new Float32Array(positions), gl.FLOAT, 3)) return -1;
if (!initArrayBuffer(gl, 'a_Normal', new Float32Array(positions), gl.FLOAT, 3)) return -1;
// Unbind the buffer object
gl.bindBuffer(gl.ARRAY_BUFFER, null);
// Write the indices to the buffer object
var indexBuffer = gl.createBuffer();
if (!indexBuffer) {
console.log('Failed to create the buffer object');
return -1;
}
gl.bindBuffer(gl.ELEMENT_ARRAY_BUFFER, indexBuffer);
gl.bufferData(gl.ELEMENT_ARRAY_BUFFER, new Uint16Array(indices), gl.STATIC_DRAW);
return indices.length;
}
function initArrayBuffer(gl, attribute, data, type, num) {
// Create a buffer object
var buffer = gl.createBuffer();
if (!buffer) {
console.log('Failed to create the buffer object');
return false;
}
// Write data into the buffer object
gl.bindBuffer(gl.ARRAY_BUFFER, buffer);
gl.bufferData(gl.ARRAY_BUFFER, data, gl.STATIC_DRAW);
// Assign the buffer object to the attribute variable
var a_attribute = gl.getAttribLocation(gl.program, attribute);
if (a_attribute < 0) {
console.log('Failed to get the storage location of ' + attribute);
return false;
}
gl.vertexAttribPointer(a_attribute, num, type, false, 0, 0);
// Enable the assignment of the buffer object to the attribute variable
gl.enableVertexAttribArray(a_attribute);
gl.bindBuffer(gl.ARRAY_BUFFER, null);
return true;
}
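One common approach, sketched here under the assumption that the cuon-utils helpers above are available, is to generate a texture coordinate per vertex while building the sphere, upload it with the existing initArrayBuffer, and sample it in the fragment shader; a_TexCoord, u_Sampler and 'earth.jpg' are names chosen for illustration, not from the book.
// Hedged sketch: add UVs and a texture to the sphere above.
// Shader changes (illustrative): in VSHADER_SOURCE add
//   'attribute vec2 a_TexCoord;'  and  'varying vec2 v_TexCoord;'
//   and, inside main():  'v_TexCoord = a_TexCoord;'
// In FSHADER_SOURCE add
//   'uniform sampler2D u_Sampler;'  and  'varying vec2 v_TexCoord;'
//   and replace the output with
//   'gl_FragColor = texture2D(u_Sampler, v_TexCoord) * v_Color;'  // keeps the lighting
// 1) Inside initVertexBuffers, build texture coordinates next to the positions:
var texCoords = [];
for (var j = 0; j <= SPHERE_DIV; j++) {
  for (var i = 0; i <= SPHERE_DIV; i++) {
    texCoords.push(i / SPHERE_DIV);       // U wraps around the equator
    texCoords.push(1 - j / SPHERE_DIV);   // V runs from north pole to south pole
  }
}
if (!initArrayBuffer(gl, 'a_TexCoord', new Float32Array(texCoords), gl.FLOAT, 2)) return -1;
// 2) In main(), load the image and redraw once it is ready:
var texture = gl.createTexture();
var u_Sampler = gl.getUniformLocation(gl.program, 'u_Sampler');
var image = new Image();
image.onload = function() {
  gl.pixelStorei(gl.UNPACK_FLIP_Y_WEBGL, 1);
  gl.activeTexture(gl.TEXTURE0);
  gl.bindTexture(gl.TEXTURE_2D, texture);
  // settings that also work for non-power-of-two images in WebGL1
  gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.LINEAR);
  gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE);
  gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE);
  gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGB, gl.RGB, gl.UNSIGNED_BYTE, image);
  gl.uniform1i(u_Sampler, 0);             // use texture unit 0
  gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT);
  gl.drawElements(gl.TRIANGLES, n, gl.UNSIGNED_SHORT, 0);
};
image.src = 'earth.jpg';                  // any equirectangular Earth image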

Get 3D coordinates of a mouse click in WebGL

Since there is surprisingly almost no information on WebGL (or I just don't know how to search for it), I have a question about how to transform mouse coordinates to 3D coordinates, so I can see where exactly I am clicking.
So my case is that I have a very simple skybox, the camera is positioned at [0, 0, 0] and I can look around it by clicking and dragging. What I want to do is be able to click somewhere on that skybox and know where I have clicked, as I need to put an annotation (some text, or an HTML element) at that position. And that HTML element must move and go out of view as I turn to another side. So what I need is a way to take a mouse click and find out which side of the cube I am clicking on and at what coordinates, so I can place the annotations correctly.
I am using plain WebGL; I don't use THREE.js or anything like that. Since it's just one cube, I can only assume finding the intersection won't be that hard and won't require extra libraries.
Well you're certainly right that it's hard to find an example 😭
A common webgl shader projects in 3D using code like either
gl_Position = matrix * position;
or
gl_Position = projection * modelView * position;
or
gl_Position = projection * view * world * position;
which are all basically the same thing. They take position and multiply it by a matrix to convert to clip space. You need to do the opposite to go the other way: take a position in clip space and convert it back to position space, which is
inverse (projection * view * world) * clipSpacePosition
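As an aside, m4.transformPoint used below multiplies by the matrix and then divides by w. A hypothetical plain-JS equivalent, assuming a column-major 4x4 matrix stored as a flat 16-element array (the usual WebGL convention), looks like this:
// Transform a 3D point by a 4x4 column-major matrix, including the perspective divide.
function transformPoint(m, [x, y, z]) {
  const tx = m[0] * x + m[4] * y + m[8]  * z + m[12];
  const ty = m[1] * x + m[5] * y + m[9]  * z + m[13];
  const tz = m[2] * x + m[6] * y + m[10] * z + m[14];
  const tw = m[3] * x + m[7] * y + m[11] * z + m[15];
  return [tx / tw, ty / tw, tz / tw];  // divide by w to undo the projection
}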
So, take your 3D library and compute the inverse of the matrix you're passing to WebGL. For example, here is some code that computes matrices to draw something using twgl's math library:
const fov = 30 * Math.PI / 180;
const aspect = gl.canvas.clientWidth / gl.canvas.clientHeight;
const zNear = 0.5;
const zFar = 10;
const projection = m4.perspective(fov, aspect, zNear, zFar);
const eye = [1, 4, -6];
const target = [0, 0, 0];
const up = [0, 1, 0];
const camera = m4.lookAt(eye, target, up);
const view = m4.inverse(camera);
const viewProjection = m4.multiply(projection, view);
const world = m4.rotationY(time);
For a shader that is effectively doing this
gl_Position = viewProjection * world * position
So we need the inverse
const invMat = m4.inverse(m4.multiply(viewProjection, world));
Then we need a clip space ray. We're going from 2D to 3D, so we'll make a ray that cuts through the frustum, starting at zNear and ending at zFar, by using -1 and +1 as our Z values:
canvas.addEventListener('mousemove', (e) => {
const rect = canvas.getBoundingClientRect();
const x = e.clientX - rect.left;
const y = e.clientY - rect.top;
const clipX = x / rect.width * 2 - 1;
const clipY = y / rect.height * -2 + 1;
const start = m4.transformPoint(invMat, [clipX, clipY, -1]);
const end = m4.transformPoint(invMat, [clipX, clipY, 1]);
... do something with start/end
});
start and end are now relative to position (the data in your geometry), so you now have to use some ray-to-triangle code in JavaScript to walk through all your triangles and see if the ray from start to end intersects one or more of them.
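For reference, here is a minimal sketch (not from the original answer) of the standard Möller-Trumbore ray/triangle test in plain JavaScript; the function name and the [x, y, z] array representation are just illustrative choices:
// Ray/triangle intersection (Möller-Trumbore).
// ro = ray origin, rd = ray direction, a/b/c = triangle vertices, each an [x, y, z] array.
// Returns the distance t along rd, or null on a miss.
function intersectRayTriangle(ro, rd, a, b, c) {
  const EPS = 1e-6;
  const sub = (p, q) => [p[0] - q[0], p[1] - q[1], p[2] - q[2]];
  const dot = (p, q) => p[0] * q[0] + p[1] * q[1] + p[2] * q[2];
  const cross = (p, q) => [
    p[1] * q[2] - p[2] * q[1],
    p[2] * q[0] - p[0] * q[2],
    p[0] * q[1] - p[1] * q[0],
  ];
  const ab = sub(b, a);
  const ac = sub(c, a);
  const pvec = cross(rd, ac);
  const det = dot(ab, pvec);
  if (Math.abs(det) < EPS) return null;   // ray is parallel to the triangle
  const invDet = 1 / det;
  const tvec = sub(ro, a);
  const u = dot(tvec, pvec) * invDet;
  if (u < 0 || u > 1) return null;
  const qvec = cross(tvec, ab);
  const v = dot(rd, qvec) * invDet;
  if (v < 0 || u + v > 1) return null;
  const t = dot(ac, qvec) * invDet;
  return t >= 0 ? t : null;               // hit distance along the ray
}
Calling this for every triangle with start as the origin and end - start as the (unnormalized) direction, and keeping the smallest t, gives the nearest hit; with that direction a hit on the segment has t between 0 and 1.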
Note if all you want is a ray in world space, not position space then you'd use
const invMat = m4.inverse(viewProjection);
"use strict";
const vs = `
uniform mat4 u_world;
uniform mat4 u_viewProjection;
attribute vec4 position;
attribute vec2 texcoord;
attribute vec4 color;
varying vec4 v_position;
varying vec2 v_texcoord;
varying vec4 v_color;
void main() {
v_texcoord = texcoord;
v_color = color;
gl_Position = u_viewProjection * u_world * position;
}
`;
const fs = `
precision mediump float;
varying vec2 v_texcoord;
varying vec4 v_color;
uniform sampler2D tex;
void main() {
gl_FragColor = texture2D(tex, v_texcoord) * v_color;
}
`;
const m4 = twgl.m4;
const gl = document.querySelector("#c").getContext("webgl");
// compiles shaders, links, looks up locations
const programInfo = twgl.createProgramInfo(gl, [vs, fs]);
const cubeArrays = twgl.primitives.createCubeVertices(1);
cubeArrays.color = {value: [0.2, 0.3, 1, 1]};
// calls gl.createBuffer, gl.bindBuffer, gl.bufferData
// for each array
const cubeBufferInfo = twgl.createBufferInfoFromArrays(gl, cubeArrays);
const numLines = 50;
const positions = new Float32Array(numLines * 3 * 2);
const colors = new Float32Array(numLines * 4 * 2);
// calls gl.createBuffer, gl.bindBuffer, gl.bufferData
// for each array
const linesBufferInfo = twgl.createBufferInfoFromArrays(gl, {
position: positions,
color: colors,
texcoord: { value: [0, 0], },
});
const tex = twgl.createTexture(gl, {
minMag: gl.NEAREST,
format: gl.LUMINANCE,
src: [
255, 192,
192, 255,
],
});
let clipX = 0;
let clipY = 0;
let lineNdx = 0;
function render(time) {
time *= 0.001;
twgl.resizeCanvasToDisplaySize(gl.canvas);
gl.viewport(0, 0, gl.canvas.width, gl.canvas.height);
gl.enable(gl.DEPTH_TEST);
gl.enable(gl.CULL_FACE);
gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT);
const fov = 30 * Math.PI / 180;
const aspect = gl.canvas.clientWidth / gl.canvas.clientHeight;
const zNear = 1;
const zFar = 10;
const projection = m4.perspective(fov, aspect, zNear, zFar);
const eye = [Math.cos(time), Math.sin(time), 6];
const target = [0, 0, 0];
const up = [0, 1, 0];
const camera = m4.lookAt(eye, target, up);
const view = m4.inverse(camera);
const viewProjection = m4.multiply(projection, view);
const world = m4.rotateX(m4.rotationY(1), 1);
gl.useProgram(programInfo.program);
// calls gl.bindBuffer, gl.enableVertexAttribArray, gl.vertexAttribPointer
twgl.setBuffersAndAttributes(gl, programInfo, cubeBufferInfo);
twgl.setUniformsAndBindTextures(programInfo, {
tex,
u_world: world,
u_viewProjection: viewProjection,
color: [0.2, 0.3, 1, 1],
});
// calls gl.drawArrays or gl.drawElements
twgl.drawBufferInfo(gl, cubeBufferInfo);
// add a line in world space
const invMat = m4.inverse(viewProjection);
const start = m4.transformPoint(invMat, [clipX, clipY, -1]);
const end = m4.transformPoint(invMat, [clipX, clipY, 1]);
const poffset = lineNdx * 3 * 2;
const coffset = lineNdx * 4 * 2;
const color = [Math.random(), Math.random(), Math.random(), 1];
positions.set(start, poffset);
positions.set(end, poffset + 3);
colors.set(color, coffset);
colors.set(color, coffset + 4);
gl.bindBuffer(gl.ARRAY_BUFFER, linesBufferInfo.attribs.position.buffer);
gl.bufferSubData(gl.ARRAY_BUFFER, 0, positions);
gl.bindBuffer(gl.ARRAY_BUFFER, linesBufferInfo.attribs.color.buffer);
gl.bufferSubData(gl.ARRAY_BUFFER, 0, colors);
lineNdx = (lineNdx + 1) % numLines;
// calls gl.bindBuffer, gl.enableVertexAttribArray, gl.vertexAttribPointer
twgl.setBuffersAndAttributes(gl, programInfo, linesBufferInfo);
twgl.setUniformsAndBindTextures(programInfo, {
tex,
u_world: m4.identity(),
u_viewProjection: viewProjection,
color: [1, 0, 0, 1],
});
// calls gl.drawArrays or gl.drawElements
twgl.drawBufferInfo(gl, linesBufferInfo, gl.LINES);
requestAnimationFrame(render);
}
requestAnimationFrame(render);
gl.canvas.addEventListener('mousemove', (e) => {
const canvas = gl.canvas;
const rect = canvas.getBoundingClientRect();
const x = e.clientX - rect.left;
const y = e.clientY - rect.top;
clipX = x / rect.width * 2 - 1;
clipY = y / rect.height * -2 + 1;
});
body { margin: 0; }
canvas { width: 100vw; height: 100vh; display: block; }
<canvas id="c"></canvas>
<script src="https://twgljs.org/dist/4.x/twgl-full.min.js"></script>
As for WebGL info there is some here

WebGL Resetting vertex positions

I am creating a simple WebGL program that puts 3 random vertices on the canvas and connects them into a triangle. I tried to add translation to move the triangle to the right (increase the X value of each vertex), but of course if it goes on forever, the triangle will go out of the canvas. Does anyone know how to detect if a vertex has an x value above 1 and, if so, reset the position of that vertex? My solution doesn't seem to do anything; it's like it doesn't even trigger.
var canvas = document.getElementById("canvas");
var gl = canvas.getContext("webgl");
gl.clearColor(0.1, 0.2, 0.2, 1.0);
gl.clear(gl.DEPTH_BUFFER_BIT | gl.COLOR_BUFFER_BIT);
var indices = [0, 0, 0, 0, 0, 0];
for (var points = 0; points < 6; points++) {
indices[points] = (Math.random() * 2) - 1;
//indices[points + 1] = Math.random() < 0.5 ? -1 : 1;
}
var buf = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, buf);
gl.bufferData(gl.ARRAY_BUFFER, new Float32Array(indices),
gl.STATIC_DRAW);
var vert = gl.createShader(gl.VERTEX_SHADER);
gl.shaderSource(vert, `
precision mediump float;
attribute vec2 position;
uniform vec2 translation;
void main(){
gl_Position = vec4(position + translation, 0.0, 1.0);
}
`);
gl.compileShader(vert);
var success1 = gl.getShaderParameter(vert, gl.COMPILE_STATUS);
if (!success1) {
// Something went wrong during compilation; get the error
throw gl.getShaderInfoLog(vert);
}
var frag = gl.createShader(gl.FRAGMENT_SHADER);
gl.shaderSource(frag, `
precision mediump float;
void main(){
gl_FragColor = vec4(0.3, 0.6, 0.4, 1.0);
}
`);
gl.compileShader(frag);
var success2 = gl.getShaderParameter(frag, gl.COMPILE_STATUS);
if (!success2) {
// Something went wrong during compilation; get the error
throw gl.getShaderInfoLog(frag);
}
var program = gl.createProgram();
gl.attachShader(program, vert);
gl.attachShader(program, frag);
gl.linkProgram(program);
var vertLoc = gl.getAttribLocation(program, "position");
gl.vertexAttribPointer(vertLoc, 2, gl.FLOAT, gl.FALSE, 0, 0);
gl.enableVertexAttribArray(vertLoc);
gl.useProgram(program);
var trans = gl.getUniformLocation(program, "translation");
var translation = [0.0, 0.0];
gl.uniform2fv(trans, translation);
gl.drawArrays(gl.TRIANGLES, 0, 3);
function loop() {
gl.clearColor(0.1, 0.2, 0.2, 1.0);
gl.clear(gl.DEPTH_BUFFER_BIT | gl.COLOR_BUFFER_BIT);
translation[0] += 0.01;
gl.uniform2fv(trans, translation);
gl.drawArrays(gl.TRIANGLES, 0, 3);
for (var points = 0; points < 6; points++) {
if (indices[points] % 2 == 0) {
if (indices[points] + translation[0] > 1) {
indices[points] = (Math.random() * 2) - 1;
}
}
//indices[points + 1] = Math.random() < 0.5 ? -1 : 1;
}
requestAnimationFrame(loop);
}
loop();
<canvas id="canvas"></canvas>
To achieve this, consider making the following changes to your code:
Remove the placement of vertices via translation in your vertex shader, to give you "per-vertex" control over placement of the geometry (the translation effectively means "object-level" placement, which isn't what you want here)
When you iterate over the points in your loop(), you're checking the modulo of the vertex coordinate. You should be performing that check on the iteration index, like this: if (points % 2 == 0)
Now that the translation concept is gone, update the vertex coordinate itself, rather than the translation array, after the modulo check: indices[points] += 0.01;
Finally, since you're updating the indices vertex data, you'll need to update the WebGL buffer buf to ensure your changes are reflected when the next frame is rendered:
gl.bindBuffer(gl.ARRAY_BUFFER, buf);
gl.bufferData(gl.ARRAY_BUFFER, new Float32Array(indices), gl.STATIC_DRAW);
Here's the updated script in full:
var canvas = document.getElementById("canvas");
var gl = canvas.getContext("webgl");
gl.clearColor(0.1, 0.2, 0.2, 1.0);
gl.clear(gl.DEPTH_BUFFER_BIT | gl.COLOR_BUFFER_BIT);
var indices = [0, 0, 0, 0, 0, 0];
for (var points = 0; points < 6; points++) {
indices[points] = (Math.random() * 2) - 1;
}
var buf = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, buf);
gl.bufferData(gl.ARRAY_BUFFER, new Float32Array(indices),
gl.STATIC_DRAW);
var vert = gl.createShader(gl.VERTEX_SHADER);
gl.shaderSource(vert, `
precision mediump float;
attribute vec2 position;
void main(){
gl_Position = vec4(position, 0.0, 1.0);
}
`);
gl.compileShader(vert);
var success1 = gl.getShaderParameter(vert, gl.COMPILE_STATUS);
if (!success1) {
throw gl.getShaderInfoLog(vert);
}
var frag = gl.createShader(gl.FRAGMENT_SHADER);
gl.shaderSource(frag, `
precision mediump float;
void main(){
gl_FragColor = vec4(0.3, 0.6, 0.4, 1.0);
}
`);
gl.compileShader(frag);
var success2 = gl.getShaderParameter(frag, gl.COMPILE_STATUS);
if (!success2) {
throw gl.getShaderInfoLog(frag);
}
var program = gl.createProgram();
gl.attachShader(program, vert);
gl.attachShader(program, frag);
gl.linkProgram(program);
var vertLoc = gl.getAttribLocation(program, "position");
gl.vertexAttribPointer(vertLoc, 2, gl.FLOAT, gl.FALSE, 0, 0);
gl.enableVertexAttribArray(vertLoc);
gl.useProgram(program);
gl.drawArrays(gl.TRIANGLES, 0, 3);
function loop() {
gl.clearColor(0.1, 0.2, 0.2, 1.0);
gl.clear(gl.DEPTH_BUFFER_BIT | gl.COLOR_BUFFER_BIT);
gl.drawArrays(gl.TRIANGLES, 0, 3);
// Update the vertex data, causing the vertex x coordinate to increase per-frame
for (var points = 0; points < 6; points++) {
// Only process x coordinate
if (points % 2 == 0) {
// Increase x coordinate per-frame
indices[points] += 0.01;
// If x position > 1 reset it to a new random value
if (indices[points] > 1) {
indices[points] = (Math.random() * 2) - 1;
}
}
}
// Update webgl vertex buffer so that updated indices data is rendered
gl.bindBuffer(gl.ARRAY_BUFFER, buf);
gl.bufferData(gl.ARRAY_BUFFER, new Float32Array(indices), gl.STATIC_DRAW);
requestAnimationFrame(loop);
}
loop();
<canvas id="canvas"></canvas>
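As a side note (not part of the original answer), because the vertex data changes every frame you could also allocate the buffer once with a gl.DYNAMIC_DRAW usage hint and overwrite it in place with gl.bufferSubData instead of reallocating it with gl.bufferData each frame. A minimal sketch against the variables above:
// One-time allocation (replaces the gl.STATIC_DRAW upload outside loop()):
gl.bindBuffer(gl.ARRAY_BUFFER, buf);
gl.bufferData(gl.ARRAY_BUFFER, new Float32Array(indices), gl.DYNAMIC_DRAW);
// Per-frame update inside loop(), after mutating the indices array:
gl.bindBuffer(gl.ARRAY_BUFFER, buf);
gl.bufferSubData(gl.ARRAY_BUFFER, 0, new Float32Array(indices));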

On the browser, how to plot 100k series with 64-128 points each?

I want to graph about 120k series, each of them having 64 points (down sampled, 128-512 points if using the real sampling rate, which is even larger)
I have attempted to do it with dygraph, but it seems to be very slow if I use more than 1000 series.
I have attempted to use vanilla WebGL; it drew really fast, but then my problem was getting the mouse click and deciding which series it was - any strategies on this? (I believe it's called unprojecting?) Since there are 100k+ series, using a different color for each series and then using the color of the pixel at the click coordinate to determine the series is impractical. Any other strategies?
My current design draws the graph as a large PNG atlas containing all the graphs. This is fast to load, but on changes to the data I have to redraw the PNG on the server and then show it again; "unprojecting" is also an issue here - any ideas on how to solve it, if possible?
The data is already quite down-sampled; further down-sampling will probably result in loss of details I would like to show the end user.
Drawing 120k * 64 things means every single pixel of a 2700x2700 canvas could be covered. In other words, you're probably trying to display too much data? It's also a very large number of things and likely to be slow.
In any case, drawing and picking via WebGL is relatively easy. You draw your scene using whatever techniques you want. Then, separately, when the user clicks the mouse (or always, under the mouse) you draw the entire scene again to an offscreen framebuffer, giving every selectable thing a different color. Given there are 32 bits of color by default (8 bits red, 8 bits green, 8 bits blue, 8 bits alpha) you can count 2^32-1 things. Of course with other buffer formats you could count even higher, or draw to multiple buffers, but storing the data for 2^32 things is probably the larger limit.
In any case, here's an example. This one makes 1000 cubes (cubes are used just because this sample already existed). You can consider each cube one of your "series" with 8 points, although the code is actually drawing 24 points per cube (set primType = gl.TRIANGLES to see the cubes). It puts all the cubes in the same buffer so that a single draw call draws all the cubes. This makes it much faster than if we drew each cube with a separate draw call.
The important part is making a series ID per series. In the code below all points of one cube have the same ID.
The code draws the scene twice: once with each cube's color, and again with each cube's ID into an offscreen texture (as a framebuffer attachment). To know which cube is under the mouse we look up the pixel under the mouse, convert its color back into an ID, and update that cube's vertex colors to highlight it.
const gl = document.querySelector('canvas').getContext("webgl");
const m4 = twgl.m4;
const v3 = twgl.v3;
// const primType = gl.TRIANGLES;
const primType = gl.POINTS;
const renderVS = `
attribute vec4 position;
attribute vec4 color;
uniform mat4 u_projection;
uniform mat4 u_modelView;
varying vec4 v_color;
void main() {
gl_PointSize = 10.0;
gl_Position = u_projection * u_modelView * position;
v_color = color;
}
`;
const renderFS = `
precision mediump float;
varying vec4 v_color;
void main() {
gl_FragColor = v_color;
}
`;
const idVS = `
attribute vec4 position;
attribute vec4 id;
uniform mat4 u_projection;
uniform mat4 u_modelView;
varying vec4 v_id;
void main() {
gl_PointSize = 10.0;
gl_Position = u_projection * u_modelView * position;
v_id = id; // pass the id to the fragment shader
}
`;
const idFS = `
precision mediump float;
varying vec4 v_id;
void main() {
gl_FragColor = v_id;
}
`;
// creates shaders, programs, looks up attribute and uniform locations
const renderProgramInfo = twgl.createProgramInfo(gl, [renderVS, renderFS]);
const idProgramInfo = twgl.createProgramInfo(gl, [idVS, idFS]);
// create one set of geometry with a bunch of cubes
// for each cube give it random color (so every vertex
// that cube will have the same color) and give it an id (so
// every vertex for that cube will have the same id)
const numCubes = 1000;
const positions = [];
const normals = [];
const colors = [];
const timeStamps = [];
const ids = [];
// Save the color of each cube so we can restore it after highlighting
const cubeColors = [];
const radius = 25;
// adapted from http://stackoverflow.com/a/26127012/128511
// used to space the cubes around the sphere
function fibonacciSphere(samples, i) {
const rnd = 1.;
const offset = 2. / samples;
const increment = Math.PI * (3. - Math.sqrt(5.));
// for i in range(samples):
const y = ((i * offset) - 1.) + (offset / 2.);
const r = Math.sqrt(1. - Math.pow(y ,2.));
const phi = ((i + rnd) % samples) * increment;
const x = Math.cos(phi) * r;
const z = Math.sin(phi) * r;
return [x, y, z];
}
const addCubeVertexData = (function() {
const CUBE_FACE_INDICES = [
[3, 7, 5, 1], // right
[6, 2, 0, 4], // left
[6, 7, 3, 2], // ??
[0, 1, 5, 4], // ??
[7, 6, 4, 5], // front
[2, 3, 1, 0], // back
];
const cornerVertices = [
[-1, -1, -1],
[+1, -1, -1],
[-1, +1, -1],
[+1, +1, -1],
[-1, -1, +1],
[+1, -1, +1],
[-1, +1, +1],
[+1, +1, +1],
];
const faceNormals = [
[+1, +0, +0],
[-1, +0, +0],
[+0, +1, +0],
[+0, -1, +0],
[+0, +0, +1],
[+0, +0, -1],
];
const quadIndices = [0, 1, 2, 0, 2, 3];
return function addCubeVertexData(id, matrix, color) {
for (let f = 0; f < 6; ++f) {
const faceIndices = CUBE_FACE_INDICES[f];
for (let v = 0; v < 6; ++v) {
const ndx = faceIndices[quadIndices[v]];
const position = cornerVertices[ndx];
const normal = faceNormals[f];
positions.push(...m4.transformPoint(matrix, position));
normals.push(...m4.transformDirection(matrix, normal));
colors.push(color);
ids.push(id);
timeStamps.push(-1000);
}
}
};
}());
for (let i = 0; i < numCubes; ++i) {
const direction = fibonacciSphere(numCubes, i);
const cubePosition = v3.mulScalar(direction, radius);
const target = [0, 0, 0];
const up = [0, 1, 0];
const matrix = m4.lookAt(cubePosition, target, up);
const color = (Math.random() * 0xFFFFFF | 0) + 0xFF000000;
cubeColors.push(color);
addCubeVertexData(i + 1, matrix, color);
}
const colorData = new Uint32Array(colors);
const cubeColorsAsUint32 = new Uint32Array(cubeColors);
const timeStampData = new Float32Array(timeStamps);
// pass color as Uint32. Example 0x0000FFFF; // blue with alpha 0
function setCubeColor(id, color) {
// we know each cube uses 36 vertices. If each model was different
// we need to save the offset and number of vertices for each model
const numVertices = 36;
const offset = (id - 1) * numVertices;
colorData.fill(color, offset, offset + numVertices);
}
function setCubeTimestamp(id, timeStamp) {
const numVertices = 36;
const offset = (id - 1) * numVertices;
timeStampData.fill(timeStamp, offset, offset + numVertices);
}
// calls gl.createBuffer, gl.bufferData
const bufferInfo = twgl.createBufferInfoFromArrays(gl, {
position: positions,
normal: normals,
color: new Uint8Array(colorData.buffer),
// the colors are stored as 32bit unsigned ints
// but we want them as 4 channel 8bit RGBA values
id: {
numComponents: 4,
data: new Uint8Array((new Uint32Array(ids)).buffer),
},
timeStamp: {
numComponents: 1,
data: timeStampData,
},
});
const lightDir = v3.normalize([3, 5, 10]);
// creates an RGBA/UNSIGNED_BYTE texture
// and a depth renderbuffer and attaches them
// to a framebuffer.
const fbi = twgl.createFramebufferInfo(gl);
// current mouse position in canvas relative coords
let mousePos = {x: 0, y: 0};
let lastHighlightedCubeId = 0;
let highlightedCubeId = 0;
let frameCount = 0;
function getIdAtPixel(x, y, projection, view, time) {
// calls gl.bindFramebuffer and gl.viewport
twgl.bindFramebufferInfo(gl, fbi);
// no reason to render 100000s of pixels when
// we're only going to read one
gl.enable(gl.SCISSOR_TEST);
gl.scissor(x, y, 1, 1);
gl.clearColor(0, 0, 0, 0);
gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT);
gl.enable(gl.DEPTH_TEST);
drawCubes(idProgramInfo, projection, view, time);
gl.disable(gl.SCISSOR_TEST);
const idPixel = new Uint8Array(4);
gl.readPixels(x, y, 1, 1, gl.RGBA, gl.UNSIGNED_BYTE, idPixel);
// convert from RGBA back into ID.
const id = (idPixel[0] << 0) +
(idPixel[1] << 8) +
(idPixel[2] << 16) +
(idPixel[3] << 24);
return id;
}
function drawCubes(programInfo, projection, modelView, time) {
gl.useProgram(programInfo.program);
// calls gl.bindBuffer, gl.enableVertexAttribArray, gl.vertexAttribPointer
twgl.setBuffersAndAttributes(gl, programInfo, bufferInfo);
// calls gl.uniformXXX
twgl.setUniforms(programInfo, {
u_projection: projection,
u_modelView: modelView, // drawing at origin so model is identity
});
gl.drawArrays(primType, 0, bufferInfo.numElements);
}
function render(time) {
time *= 0.001;
++frameCount;
if (twgl.resizeCanvasToDisplaySize(gl.canvas)) {
// resizes the texture and depth renderbuffer to
// match the new size of the canvas.
twgl.resizeFramebufferInfo(gl, fbi);
}
const fov = Math.PI * .35;
const aspect = gl.canvas.clientWidth / gl.canvas.clientHeight;
const zNear = 0.1;
const zFar = 1000;
const projection = m4.perspective(fov, aspect, zNear, zFar);
const radius = 45;
const angle = time * .2;
const eye = [
Math.cos(angle) * radius,
0,
Math.sin(angle) * radius,
];
const target = [0, 0, 0];
const up = [0, 1, 0];
const camera = m4.lookAt(eye, target, up);
const view = m4.inverse(camera);
if (lastHighlightedCubeId > 0) {
// restore the last highlighted cube's color
setCubeColor(
lastHighlightedCubeId,
cubeColorsAsUint32[lastHighlightedCubeId]);
lastHighlightedCubeId = -1;
}
{
const x = mousePos.x;
const y = gl.canvas.height - mousePos.y - 1;
highlightedCubeId = getIdAtPixel(x, y, projection, view, time);
}
if (highlightedCubeId > 0) {
const color = (frameCount & 0x2) ? 0xFF0000FF : 0xFFFFFFFF;
setCubeColor(highlightedCubeId, color);
setCubeTimestamp(highlightedCubeId, time);
lastHighlightedCubeId = highlightedCubeId;
}
highlightedCubeId = Math.random() * numCubes | 0;
// NOTE: We could use `gl.bufferSubData` and just upload
// the portion that changed.
// upload cube color data.
gl.bindBuffer(gl.ARRAY_BUFFER, bufferInfo.attribs.color.buffer);
gl.bufferData(gl.ARRAY_BUFFER, colorData, gl.DYNAMIC_DRAW);
// upload the timestamp
gl.bindBuffer(gl.ARRAY_BUFFER, bufferInfo.attribs.timeStamp.buffer);
gl.bufferData(gl.ARRAY_BUFFER, timeStampData, gl.DYNAMIC_DRAW);
// calls gl.bindFramebuffer and gl.viewport
twgl.bindFramebufferInfo(gl, null);
gl.enable(gl.DEPTH_TEST);
drawCubes(renderProgramInfo, projection, view, time);
requestAnimationFrame(render);
}
requestAnimationFrame(render);
function getRelativeMousePosition(event, target) {
target = target || event.target;
const rect = target.getBoundingClientRect();
return {
x: event.clientX - rect.left,
y: event.clientY - rect.top,
}
}
// assumes target or event.target is canvas
function getNoPaddingNoBorderCanvasRelativeMousePosition(event, target) {
target = target || event.target;
const pos = getRelativeMousePosition(event, target);
pos.x = pos.x * target.width / target.clientWidth;
pos.y = pos.y * target.height / target.clientHeight;
return pos;
}
gl.canvas.addEventListener('mousemove', (event, target) => {
mousePos = getRelativeMousePosition(event, target);
});
body { margin: 0; }
canvas { width: 100vw; height: 100vh; display: block; }
<script src="https://twgljs.org/dist/4.x/twgl-full.min.js"></script>
<canvas></canvas>
The code above uses an offscreen framebuffer the same size as the canvas, but it uses the scissor test to draw only a single pixel (the one under the mouse). It would still work without the scissor test; it would just be slower.
We could also make it work using just a 1x1-pixel offscreen framebuffer, using projection math to build a frustum that covers only the pixel under the mouse.
const gl = document.querySelector('canvas').getContext("webgl");
const m4 = twgl.m4;
const v3 = twgl.v3;
// const primType = gl.TRIANGLES;
const primType = gl.POINTS;
const renderVS = `
attribute vec4 position;
attribute vec4 color;
uniform mat4 u_projection;
uniform mat4 u_modelView;
varying vec4 v_color;
void main() {
gl_PointSize = 10.0;
gl_Position = u_projection * u_modelView * position;
v_color = color;
}
`;
const renderFS = `
precision mediump float;
varying vec4 v_color;
void main() {
gl_FragColor = v_color;
}
`;
const idVS = `
attribute vec4 position;
attribute vec4 id;
uniform mat4 u_projection;
uniform mat4 u_modelView;
varying vec4 v_id;
void main() {
gl_PointSize = 10.0;
gl_Position = u_projection * u_modelView * position;
v_id = id; // pass the id to the fragment shader
}
`;
const idFS = `
precision mediump float;
varying vec4 v_id;
void main() {
gl_FragColor = v_id;
}
`;
// creates shaders, programs, looks up attribute and uniform locations
const renderProgramInfo = twgl.createProgramInfo(gl, [renderVS, renderFS]);
const idProgramInfo = twgl.createProgramInfo(gl, [idVS, idFS]);
// create one set of geometry with a bunch of cubes
// for each cube give it random color (so every vertex
// that cube will have the same color) and give it an id (so
// every vertex for that cube will have the same id)
const numCubes = 1000;
const positions = [];
const normals = [];
const colors = [];
const timeStamps = [];
const ids = [];
// Save the color of each cube so we can restore it after highlighting
const cubeColors = [];
const radius = 25;
// adapted from http://stackoverflow.com/a/26127012/128511
// used to space the cubes around the sphere
function fibonacciSphere(samples, i) {
const rnd = 1.;
const offset = 2. / samples;
const increment = Math.PI * (3. - Math.sqrt(5.));
// for i in range(samples):
const y = ((i * offset) - 1.) + (offset / 2.);
const r = Math.sqrt(1. - Math.pow(y ,2.));
const phi = ((i + rnd) % samples) * increment;
const x = Math.cos(phi) * r;
const z = Math.sin(phi) * r;
return [x, y, z];
}
const addCubeVertexData = (function() {
const CUBE_FACE_INDICES = [
[3, 7, 5, 1], // right
[6, 2, 0, 4], // left
[6, 7, 3, 2], // ??
[0, 1, 5, 4], // ??
[7, 6, 4, 5], // front
[2, 3, 1, 0], // back
];
const cornerVertices = [
[-1, -1, -1],
[+1, -1, -1],
[-1, +1, -1],
[+1, +1, -1],
[-1, -1, +1],
[+1, -1, +1],
[-1, +1, +1],
[+1, +1, +1],
];
const faceNormals = [
[+1, +0, +0],
[-1, +0, +0],
[+0, +1, +0],
[+0, -1, +0],
[+0, +0, +1],
[+0, +0, -1],
];
const quadIndices = [0, 1, 2, 0, 2, 3];
return function addCubeVertexData(id, matrix, color) {
for (let f = 0; f < 6; ++f) {
const faceIndices = CUBE_FACE_INDICES[f];
for (let v = 0; v < 6; ++v) {
const ndx = faceIndices[quadIndices[v]];
const position = cornerVertices[ndx];
const normal = faceNormals[f];
positions.push(...m4.transformPoint(matrix, position));
normals.push(...m4.transformDirection(matrix, normal));
colors.push(color);
ids.push(id);
timeStamps.push(-1000);
}
}
};
}());
for (let i = 0; i < numCubes; ++i) {
const direction = fibonacciSphere(numCubes, i);
const cubePosition = v3.mulScalar(direction, radius);
const target = [0, 0, 0];
const up = [0, 1, 0];
const matrix = m4.lookAt(cubePosition, target, up);
const color = (Math.random() * 0xFFFFFF | 0) + 0xFF000000;
cubeColors.push(color);
addCubeVertexData(i + 1, matrix, color);
}
const colorData = new Uint32Array(colors);
const cubeColorsAsUint32 = new Uint32Array(cubeColors);
const timeStampData = new Float32Array(timeStamps);
// pass color as Uint32. Example 0x0000FFFF; // blue with alpha 0
function setCubeColor(id, color) {
// we know each cube uses 36 vertices. If each model was different
// we need to save the offset and number of vertices for each model
const numVertices = 36;
const offset = (id - 1) * numVertices;
colorData.fill(color, offset, offset + numVertices);
}
function setCubeTimestamp(id, timeStamp) {
const numVertices = 36;
const offset = (id - 1) * numVertices;
timeStampData.fill(timeStamp, offset, offset + numVertices);
}
// calls gl.createBuffer, gl.bufferData
const bufferInfo = twgl.createBufferInfoFromArrays(gl, {
position: positions,
normal: normals,
color: new Uint8Array(colorData.buffer),
// the colors are stored as 32bit unsigned ints
// but we want them as 4 channel 8bit RGBA values
id: {
numComponents: 4,
data: new Uint8Array((new Uint32Array(ids)).buffer),
},
timeStamp: {
numComponents: 1,
data: timeStampData,
},
});
const lightDir = v3.normalize([3, 5, 10]);
// creates an 1x1 pixel RGBA/UNSIGNED_BYTE texture
// and a depth renderbuffer and attaches them
// to a framebuffer.
const fbi = twgl.createFramebufferInfo(gl, [
{ format: gl.RGBA, type: gl.UNSIGNED_BYTE, minMag: gl.NEAREST, wrap: gl.CLAMP_TO_EDGE, },
{ format: gl.DEPTH_STENCIL, },
], 1, 1);
// current mouse position in canvas relative coords
let mousePos = {x: 0, y: 0};
let lastHighlightedCubeId = 0;
let highlightedCubeId = 0;
let frameCount = 0;
function getIdAtPixel(x, y, projectionInfo, view, time) {
// calls gl.bindFramebuffer and gl.viewport
twgl.bindFramebufferInfo(gl, fbi);
gl.clearColor(0, 0, 0, 0);
gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT);
gl.enable(gl.DEPTH_TEST);
drawCubes(idProgramInfo, projectionInfo, {
totalWidth: gl.canvas.width,
totalHeight: gl.canvas.height,
partWidth: 1,
partHeight: 1,
partX: x,
partY: y,
}, view, time);
const idPixel = new Uint8Array(4);
gl.readPixels(0, 0, 1, 1, gl.RGBA, gl.UNSIGNED_BYTE, idPixel);
// convert from RGBA back into ID.
const id = (idPixel[0] << 0) +
(idPixel[1] << 8) +
(idPixel[2] << 16) +
(idPixel[3] << 24);
return id;
}
function drawCubes(programInfo, projectionInfo, partInfo, modelView, time) {
const projection = projectionForPart(projectionInfo, partInfo);
gl.useProgram(programInfo.program);
// calls gl.bindBuffer, gl.enableVertexAttribArray, gl.vertexAttribPointer
twgl.setBuffersAndAttributes(gl, programInfo, bufferInfo);
// calls gl.uniformXXX
twgl.setUniforms(programInfo, {
u_projection: projection,
u_modelView: modelView, // drawing at origin so model is identity
});
gl.drawArrays(primType, 0, bufferInfo.numElements);
}
function projectionForPart(projectionInfo, partInfo) {
const {fov, zNear, zFar} = projectionInfo;
const {
totalWidth,
totalHeight,
partX,
partY,
partWidth,
partHeight,
} = partInfo;
const aspect = totalWidth / totalHeight;
// corners at zNear for total image
const zNearTotalTop = Math.tan(fov) * 0.5 * zNear;
const zNearTotalBottom = -zNearTotalTop;
const zNearTotalLeft = zNearTotalBottom * aspect;
const zNearTotalRight = zNearTotalTop * aspect;
// width, height at zNear for total image
const zNearTotalWidth = zNearTotalRight - zNearTotalLeft;
const zNearTotalHeight = zNearTotalTop - zNearTotalBottom;
const zNearPartLeft = zNearTotalLeft + partX * zNearTotalWidth / totalWidth;
const zNearPartRight = zNearTotalLeft + (partX + partWidth) * zNearTotalWidth / totalWidth;
const zNearPartBottom = zNearTotalBottom + partY * zNearTotalHeight / totalHeight;
const zNearPartTop = zNearTotalBottom + (partY + partHeight) * zNearTotalHeight / totalHeight;
return m4.frustum(zNearPartLeft, zNearPartRight, zNearPartBottom, zNearPartTop, zNear, zFar);
}
function render(time) {
time *= 0.001;
++frameCount;
twgl.resizeCanvasToDisplaySize(gl.canvas);
const projectionInfo = {
fov: Math.PI * .35,
zNear: 0.1,
zFar: 1000,
};
const radius = 45;
const angle = time * .2;
const eye = [
Math.cos(angle) * radius,
0,
Math.sin(angle) * radius,
];
const target = [0, 0, 0];
const up = [0, 1, 0];
const camera = m4.lookAt(eye, target, up);
const view = m4.inverse(camera);
if (lastHighlightedCubeId > 0) {
// restore the last highlighted cube's color
setCubeColor(
lastHighlightedCubeId,
cubeColorsAsUint32[lastHighlightedCubeId]);
lastHighlightedCubeId = -1;
}
{
const x = mousePos.x;
const y = gl.canvas.height - mousePos.y - 1;
highlightedCubeId = getIdAtPixel(x, y, projectionInfo, view, time);
}
if (highlightedCubeId > 0) {
const color = (frameCount & 0x2) ? 0xFF0000FF : 0xFFFFFFFF;
setCubeColor(highlightedCubeId, color);
setCubeTimestamp(highlightedCubeId, time);
lastHighlightedCubeId = highlightedCubeId;
}
highlightedCubeId = Math.random() * numCubes | 0;
// NOTE: We could use `gl.bufferSubData` and just upload
// the portion that changed.
// upload cube color data.
gl.bindBuffer(gl.ARRAY_BUFFER, bufferInfo.attribs.color.buffer);
gl.bufferData(gl.ARRAY_BUFFER, colorData, gl.DYNAMIC_DRAW);
// upload the timestamp
gl.bindBuffer(gl.ARRAY_BUFFER, bufferInfo.attribs.timeStamp.buffer);
gl.bufferData(gl.ARRAY_BUFFER, timeStampData, gl.DYNAMIC_DRAW);
// calls gl.bindFramebuffer and gl.viewport
twgl.bindFramebufferInfo(gl, null);
gl.enable(gl.DEPTH_TEST);
drawCubes(renderProgramInfo, projectionInfo, {
totalWidth: gl.canvas.width,
totalHeight: gl.canvas.height,
partWidth: gl.canvas.width,
partHeight: gl.canvas.height,
partX: 0,
partY: 0,
}, view, time);
requestAnimationFrame(render);
}
requestAnimationFrame(render);
function getRelativeMousePosition(event, target) {
target = target || event.target;
const rect = target.getBoundingClientRect();
return {
x: event.clientX - rect.left,
y: event.clientY - rect.top,
}
}
// assumes target or event.target is canvas
function getNoPaddingNoBorderCanvasRelativeMousePosition(event, target) {
target = target || event.target;
const pos = getRelativeMousePosition(event, target);
pos.x = pos.x * target.width / target.clientWidth;
pos.y = pos.y * target.height / target.clientHeight;
return pos;
}
gl.canvas.addEventListener('mousemove', (event, target) => {
mousePos = getRelativeMousePosition(event, target);
});
body { margin: 0; }
canvas { width: 100vw; height: 100vh; display: block; }
<script src="https://twgljs.org/dist/4.x/twgl-full.min.js"></script>
<canvas></canvas>
Note that drawing POINTS in WebGL is generally slower than drawing 2 TRIANGLES of the same size. If I set the number of cubes to 100k and set primType to TRIANGLES, it draws 100k cubes. On my integrated GPU, the snippet runs at about 10-20fps. Of course with that many cubes it's impossible to pick one. If I set the radius to 250 I can at least see that the picking is still working.
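For reference, here is a hedged sketch of the ID/color round trip the snippets rely on; idToRGBA and rgbaToId are hypothetical helper names that mirror what the Uint32Array/Uint8Array views and the readPixels decoding above already do:
// Split a 32-bit id into 4 bytes for an RGBA/UNSIGNED_BYTE attribute
// (little-endian order, matching the typed-array view trick used above).
function idToRGBA(id) {
  return [
    (id >>  0) & 0xFF,
    (id >>  8) & 0xFF,
    (id >> 16) & 0xFF,
    (id >> 24) & 0xFF,
  ];
}
// Reassemble the id from the 4 bytes returned by gl.readPixels.
// The trailing >>> 0 keeps the result unsigned for ids above 0x7FFFFFFF.
function rgbaToId(pixel) {
  return ((pixel[0] << 0) +
          (pixel[1] << 8) +
          (pixel[2] << 16) +
          (pixel[3] << 24)) >>> 0;
}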

Three.js Multiple UV maps on a single object

I am trying to figure out how to use two different textures on the front and back of a box.
Whenever I scale my box (ExtrudeGeometry), the UV maps do not seem to update. Therefore I am defining my own UV maps for the front and back of the box.
To define the front UV map I use:
geometry.faceVertexUvs[0]
which works accordingly.
For the back UV map I use:
geometry.faceVertexUvs[1];
However I am not able to access this 'second' layer UV map.
So my question is:
Is it possible to update the UV maps accordingly to the scale of the object?
Is it possible to access a 'second' layer UV map within a material?
I created an example here: jsfiddle.
I created three different boxes, with 3 different scales. From left to right: 0.01 x 2.97 x 2.1, 0.01 x 1 x 1 and 0.01 x 0.297 x 0.21. On the leftmost box, the textures only cover a small portion of the box. The middle box has correct texturing. The right box has the updated UV map (otherwise only a small portion of the texture would show up).
(I am required to use the small scale on the box!)
I hope someone can help me out!
Three.js 84
You can define attribute vec2 uv2 in your vertex shader, and then access it as normal.
precision highp int;
precision highp float;
uniform mat4 modelViewMatrix;
uniform mat4 projectionMatrix;
attribute vec2 uv;
attribute vec2 uv2;
attribute vec3 normal;
attribute vec3 position;
void main() {
gl_Position = projectionMatrix * modelViewMatrix * vec4(position, 1.0);
}
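Below is a sketch (not from the original answer) of how that could look with THREE.RawShaderMaterial, assuming the geometry actually exposes a uv2 attribute (for example via a populated faceVertexUvs[1] or a BufferGeometry 'uv2' attribute); map2, vUv2, texture1 and texture2 are illustrative names:
// Hedged sketch: blend two textures using the geometry's two UV sets.
var vertexShader = [
'precision highp float;',
'uniform mat4 modelViewMatrix;',
'uniform mat4 projectionMatrix;',
'attribute vec3 position;',
'attribute vec2 uv;',
'attribute vec2 uv2;',
'varying vec2 vUv;',
'varying vec2 vUv2;',
'void main() {',
'  vUv = uv;',
'  vUv2 = uv2; // forward the second UV set to the fragment shader',
'  gl_Position = projectionMatrix * modelViewMatrix * vec4(position, 1.0);',
'}'
].join('\n');
var fragmentShader = [
'precision highp float;',
'uniform sampler2D map;',
'uniform sampler2D map2;',
'varying vec2 vUv;',
'varying vec2 vUv2;',
'void main() {',
'  // sample each texture with its own UV set and blend them',
'  gl_FragColor = mix(texture2D(map, vUv), texture2D(map2, vUv2), 0.5);',
'}'
].join('\n');
var material = new THREE.RawShaderMaterial({
  uniforms: {
    map:  { value: texture1 },  // texture1/texture2: any THREE.Texture instances
    map2: { value: texture2 },
  },
  vertexShader: vertexShader,
  fragmentShader: fragmentShader,
});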
In response to @danyim:
In the end I also avoided using shader code. I chose to completely uv-unwrap my object and draw my own texture with WebGL (as I only needed a simple front and back texture).
In the function below I draw an object with a variable number of subdivisions and unwrap it.
With the function drawTexture (further below) I provide an 'O' and an 'X' as textures for the front and rear of the object.
Hopefully this helps you (and possibly others) out. If you have any further questions, do not hesitate to ask.
drawObject() {
//Draw a shape with the measurements of the object.
var length = 0.01, width = 0.297;
var shape = new THREE.Shape();
shape.moveTo(0, 0);
shape.lineTo(0, width);
shape.lineTo(length, width);
shape.lineTo(length, 0);
shape.lineTo(0, 0);
var canvasTexture = drawTexture('x', 'o');
var textures = [
new THREE.MeshPhongMaterial({ color: 0xf9f9f9, side: THREE.BackSide, overdraw: 0.5 }),//GENERAL 0
new THREE.MeshPhongMaterial({ map: canvasTexture, side: THREE.BackSide, overdraw: 0.5 }), //FRONT 1
new THREE.MeshPhongMaterial({ map: canvasTexture, side: THREE.BackSide, overdraw: 0.5 }), //BACK 2
new THREE.MeshPhongMaterial({ color: 0x00ff00, side: THREE.BackSide, overdraw: 0.5 })
];
var material = new THREE.MultiMaterial(textures);
subDivs = parseInt(objectLength / 40); //Amount of subdivision (more provides a smoother result)
subDivs = 5;
var extrudeSettings = {
amount: 0.21,
steps: this.subDivs,
bevelEnabled: false
};
//Create a UVMap(u,v).
var uvMap = [];
for (let i = 0; i <= (subDivs) * 2; i++) {
var u = i / (subDivs * 2);
uvMap.push(new THREE.Vector2(u, 0));
uvMap.push(new THREE.Vector2(u, 1));
}
//Create the object.
var geometry = new THREE.ExtrudeGeometry(shape, extrudeSettings);
var tempI = (uvMap.length - 2) / 2;
//Map the vertices to the UVMap (only mapping top and bottom of the object (for 'x' and 'o' texture))
for (let i = 0; i < geometry.faceVertexUvs[0].length; i++) {
if (i >= 4 && i < 4 + (subDivs * 2)) {
if (isOdd(i)) {
geometry.faceVertexUvs[0][i] = [uvMap[i - 4], uvMap[i - 2], uvMap[i - 3]];
} else {
geometry.faceVertexUvs[0][i] = [uvMap[i - 4], uvMap[i - 3], uvMap[i - 2]];
}
}
else if (i >= 4 + (subDivs * 4) && i < 4 + (subDivs * 4) + (subDivs * 2)) {
if (isOdd(i)) {
geometry.faceVertexUvs[0][i] = [uvMap[tempI], uvMap[tempI + 2], uvMap[tempI + 1]];
} else {
geometry.faceVertexUvs[0][i] = [uvMap[tempI], uvMap[tempI + 1], uvMap[tempI + 2]];
}
tempI++;
}
}
//Assigning different materialIndices to different faces
for (var i = 4; i <= 13; i++) { //Front
geometry.faces[i].materialIndex = 1;
}
for (var i = 4 + (subDivs * 4); i < 4 + (subDivs * 4) + (subDivs * 2); i++) { //Back
geometry.faces[i].materialIndex = 2;
}
for (var i = 0; i <= 1; i++) {
geometry.faces[i].materialIndex = 3;
}
var plane = new THREE.Mesh(geometry, material);
return plane;
function drawTexture (msg1, msg2) {
var canvas = document.createElement('canvas'); //Create a canvas element.
var size = 128;
canvas.width = size;
canvas.height = size;
var ctx = canvas.getContext('2d');
//Draw a white background
ctx.beginPath();
ctx.rect(0, 0, size, size);
ctx.fillStyle = 'white';
ctx.fill();
//Draw the message (e.g. 'x' or 'o')
ctx.fillStyle = 'black';
ctx.font = '64px Arial';
//Determine the size of the letters.
var metrics1 = ctx.measureText(msg1);
var textWidth1 = metrics1.width;
var textHeight = parseInt(ctx.font);
var metrics2 = ctx.measureText(msg2);
var textWidth2 = metrics2.width;
ctx.fillText(msg1, size / 4 - textWidth1 / 2, size / 2 + (textHeight / 4));
ctx.fillText(msg2, (3 * size / 4) - textWidth2 / 2, size / 2 + (textHeight / 4));
//Store the canvas in a THREE.js texture.
var texture = new THREE.Texture(canvas);
texture.needsUpdate = true;
return texture; //Return Three.Texture
}
}
