Resizing on-screen WebGL canvas in iOS Safari causes memory leak - javascript

I have an HTML canvas on-screen that renders using a WebGL context; memory will leak when I resize that canvas’ width and height properties in iOS Safari. This behavior does not happen on any desktop browser.
I made a barebones example, using the snippet below, which reproduces the issue on iOS Safari.
The example automatically resizes a WebGL canvas every 1 second using two different sets of dimensions.
Every frame it renders a quad – all I do is call a handful of GL routines to get the quad on the screen (I do not create any new GL objects explicitly after the initial setup).
When this sample runs in iOS Safari on an iPad Air 3 running iOS 14.2, the page's memory usage steadily increases with every resize.
After letting it run for a while, the browser process gets terminated once it reaches 1.25 GB of memory; it started at less than 300 MB.
If I run this same example in a desktop browser, the memory usage is steady the entire time.
The issue appears to be limited to canvases that use WebGL contexts. Canvases using 2D rendering contexts appear unaffected.
Anyone else seeing this or know of a workaround?
Rendering to an offscreen WebGL canvas and using drawImage() to transport it to an on-screen 2d canvas is a solution, but I see lower performance in some browsers, including iOS Safari.
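For reference, a minimal sketch of that workaround (the names are illustrative): keep the WebGL canvas off-DOM at a fixed size and blit each frame into the resizable on-screen 2D canvas.
var glCanvas = document.createElement("canvas"); // stays off-DOM and is never resized
glCanvas.width = 1024;
glCanvas.height = 1024;
var glCtx = glCanvas.getContext("webgl");        // render the scene with this context
var screenCanvas = document.getElementsByTagName("canvas")[0];
var ctx2d = screenCanvas.getContext("2d");
function presentFrame() {
    // ...issue the WebGL draw calls into glCanvas here...
    // Only the 2D canvas is ever resized; the WebGL drawing buffer is untouched.
    screenCanvas.width = window.innerWidth;
    screenCanvas.height = window.innerHeight;
    ctx2d.drawImage(glCanvas, 0, 0, screenCanvas.width, screenCanvas.height);
}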
var _canvas;
var _gl;
var _buffer;
var _shaderProgram;
var _coordinatesVar;
var _widths = [768, 1024];
var _heights = [1024, 768];
var _dimIndex = 0;
var _resizeStartTime = undefined;
function _setupScene() {
    _canvas = document.getElementsByTagName("canvas")[0];
    _gl = _canvas.getContext("webgl") || _canvas.getContext("experimental-webgl");
    if (!_gl)
        throw new Error("Cannot create WebGL context!");
    var vertices = [
        -0.5, 0.5,
        -0.5, -0.5,
        0.5, -0.5,
        0.5, -0.5,
        -0.5, 0.5,
        0.5, 0.5,
    ];
    _buffer = _gl.createBuffer();
    _gl.bindBuffer(_gl.ARRAY_BUFFER, _buffer);
    _gl.bufferData(_gl.ARRAY_BUFFER, new Float32Array(vertices), _gl.STATIC_DRAW);
    var vertCode =
        'attribute vec2 coordinates;' +
        'void main(void) {' +
        '  gl_Position = vec4(coordinates, 0.0, 1.0);' +
        '}';
    var vertShader = _gl.createShader(_gl.VERTEX_SHADER);
    _gl.shaderSource(vertShader, vertCode);
    _gl.compileShader(vertShader);
    if (!_gl.getShaderParameter(vertShader, _gl.COMPILE_STATUS))
        throw new Error(_gl.getShaderInfoLog(vertShader));
    var fragCode =
        'void main(void) {' +
        '  gl_FragColor = vec4(1.0, 0.0, 0.0, 1.0);' +
        '}';
    var fragShader = _gl.createShader(_gl.FRAGMENT_SHADER);
    _gl.shaderSource(fragShader, fragCode);
    _gl.compileShader(fragShader);
    if (!_gl.getShaderParameter(fragShader, _gl.COMPILE_STATUS))
        throw new Error(_gl.getShaderInfoLog(fragShader));
    _shaderProgram = _gl.createProgram();
    _gl.attachShader(_shaderProgram, vertShader);
    _gl.attachShader(_shaderProgram, fragShader);
    _gl.linkProgram(_shaderProgram);
    if (!_gl.getProgramParameter(_shaderProgram, _gl.LINK_STATUS))
        throw new Error(_gl.getProgramInfoLog(_shaderProgram));
    _gl.useProgram(_shaderProgram);
    _coordinatesVar = _gl.getAttribLocation(_shaderProgram, "coordinates");
    _gl.enableVertexAttribArray(_coordinatesVar);
    _gl.bindBuffer(_gl.ARRAY_BUFFER, _buffer);
    _gl.vertexAttribPointer(_coordinatesVar, 2, _gl.FLOAT, false, 0, 0);
    /* initial auto-resize */
    _canvas.width = _widths[_dimIndex];
    _canvas.height = _heights[_dimIndex];
    _dimIndex = _dimIndex > 0 ? 0 : 1;
}
function _drawScene() {
    _gl.drawArrays(_gl.TRIANGLES, 0, 6);
}
function _render(timestamp) {
    /* The following makes the canvas resize based on the browser window (fullscreen):
    _canvas.width = window.innerWidth;
    _canvas.height = window.innerHeight;
    */
    /* The following automatically switches the canvas dimensions back and forth between two sizes every second: */
    if (undefined === _resizeStartTime)
        _resizeStartTime = timestamp;
    const elapsed = timestamp - _resizeStartTime;
    if (elapsed > 1000) {
        _canvas.width = _widths[_dimIndex];
        _canvas.height = _heights[_dimIndex];
        _dimIndex = _dimIndex > 0 ? 0 : 1;
        _resizeStartTime = undefined;
    }
    _gl.viewport(0, 0, _canvas.width, _canvas.height);
    _gl.clearColor(0.8, 0.8, 0.8, 1.0);
    _gl.clear(_gl.COLOR_BUFFER_BIT);
    _drawScene();
    window.requestAnimationFrame(_render);
}
window.addEventListener("load", function() {
    _setupScene();
    window.requestAnimationFrame(_render);
}, false);
* { margin:0; padding:0; }
html, body { width: 100%; height: 100%; }
canvas { display: block; }
<canvas>Your browser does not support HTML5 canvas.</canvas>

Related

generating a texture to pull values from during fragment shading yields blank screen for correct width and height

I would like to create a texture in code consisting of an array of RGBA color values and use those values to determine the colors of tiles that I'm generating in a fragment shader. I got the idea, and much of the code to do this, from the top solution provided to this SO question: Index expression must be constant - WebGL/GLSL error
However, if I create the texture using the height and width that correspond to my color array, I don't see anything render to the canvas. If I hardcode different values, I sometimes get an image, but that image doesn't place the tile colors in the desired positions, of course, and they move around as I change my viewPos variables.
From trial and error testing with a handful of handpicked values, it seems that I MIGHT only be getting an image when gl.texImage2D() receives a height and a width equal to a power of 2, though I don't see anything about this in documentation. 32 was the largest width I could produce an image with, and 16 was the largest height I could produce an image with. 1, 2, 4, and 8 also work. (the texture size should be 27 by 20 for the window size I'm testing with)
Note that the fragment shader still receives the uTileColorSampSize vector that relates to the size of the color array. I only need the gl.texImage2D() width and height values to be hardcoded to produce an image. In fact, every value I've tried for the uniform has produced an image, though each with different tile color patterns.
I've included a slightly simplified version of my Gfx class (the original is kinda messy, and includes a lot of stuff not relevant to this issue) below. I'd imagine the problem is above line 186 or so, but I've included a few additional functions below that in case those happen to be relevant.
class Gfx {
constructor() {
this.canvas = document.getElementById("canvas");
this.gl = canvas.getContext("webgl");
//viewPos changes as you drag your cursor across the canvas
this.x_viewPos = 0;
this.y_viewPos = 0;
}
init() {
this.resizeCanvas(window.innerWidth, window.innerHeight);
const vsSource = `
attribute vec4 aVertPos;
uniform mat4 uMVMat;
uniform mat4 uProjMat;
void main() {
gl_Position = uProjMat * uMVMat * aVertPos;
}
`;
//my tiles get drawn in the frag shader below
const fsSource = `
precision mediump float;
uniform vec2 uViewPos;
uniform vec2 uTileColorSampSize;
uniform sampler2D uTileColorSamp;
void main() {
//tile width and height are both 33px including a 1px border
const float lineThickness = (1.0/33.0);
//gridMult components will either be 0.0 or 1.0. This is used to place the grid lines
vec2 gridMult = vec2(
ceil(max(0.0, fract((gl_FragCoord.x-uViewPos.x)/33.0) - lineThickness)),
ceil(max(0.0, fract((gl_FragCoord.y-uViewPos.y)/33.0) - lineThickness))
);
//tileIndex is used to pull color data from the sampler texture
//add 0.5 due to pixel coords being off in gl
vec2 tileIndex = vec2(
floor((gl_FragCoord.x-uViewPos.x)/33.0) + 0.5,
floor((gl_FragCoord.y-uViewPos.y)/33.0) + 0.5
);
//divide by samp size as tex coords are 0.0 to 1.0
vec4 tileColor = texture2D(uTileColorSamp, vec2(
tileIndex.x/uTileColorSampSize.x,
tileIndex.y/uTileColorSampSize.y
));
gl_FragColor = vec4(
tileColor.x * gridMult.x * gridMult.y,
tileColor.y * gridMult.x * gridMult.y,
tileColor.z * gridMult.x * gridMult.y,
1.0 //the 4th rgba in our sampler is always 1.0 anyway
);
}
`;
const shader = this.buildShader(vsSource, fsSource);
this.programInfo = {
program: shader,
attribLocs: {
vertexPosition: this.gl.getAttribLocation(shader, 'aVertPos')
},
uniformLocs: {
projMat: this.gl.getUniformLocation(shader, 'uProjMat'),
MVMat: this.gl.getUniformLocation(shader, 'uMVMat'),
viewPos: this.gl.getUniformLocation(shader, 'uViewPos'),
tileColorSamp: this.gl.getUniformLocation(shader, 'uTileColorSamp'),
tileColorSampSize: this.gl.getUniformLocation(shader, 'uTileColorSampSize')
}
};
const buffers = this.initBuffers();
//check and enable OES_texture_float to allow us to create our sampler tex
if (!this.gl.getExtension("OES_texture_float")) {
alert("Sorry, your browser/GPU/driver doesn't support floating point textures");
}
this.gl.clearColor(0.0, 0.0, 0.15, 1.0);
this.gl.clearDepth(1.0);
this.gl.enable(this.gl.DEPTH_TEST);
this.gl.depthFunc(this.gl.LEQUAL);
const FOV = 45 * Math.PI / 180; // in radians
const aspect = this.gl.canvas.width / this.gl.canvas.height;
this.projMat = glMatrix.mat4.create();
glMatrix.mat4.perspective(this.projMat, FOV, aspect, 0.0, 100.0);
this.MVMat = glMatrix.mat4.create();
glMatrix.mat4.translate(this.MVMat, this.MVMat, [-0.0, -0.0, -1.0]);
this.gl.bindBuffer(this.gl.ARRAY_BUFFER, buffers.position);
this.gl.vertexAttribPointer(this.programInfo.attribLocs.vertPos, 2, this.gl.FLOAT, false, 0, 0);
this.gl.enableVertexAttribArray(this.programInfo.attribLocs.vertPos);
this.glDraw();
}
//glDraw() gets called once above, as well as in every frame of my render loop
//(not included here as I have it in a seperate Timing class)
glDraw() {
this.gl.clear(this.gl.COLOR_BUFFER_BIT | this.gl.DEPTH_BUFFER_BIT);
this.gl.useProgram(this.programInfo.program);
//X and Y TILE_COUNTs verified to correspond to colorArray size in testing
//(colorArray.length = rgbaLength * X_TILE_COUNT * Y_TILE_COUNT)
//(colorArray.length = rgbaLength * widthInTiles * heightInTiles)
//(colorArray.length = 4 * 27 * 20)
let x_tileColorSampSize = X_TILE_COUNT;
let y_tileColorSampSize = Y_TILE_COUNT;
//getTileColorArray() produces a flat array of floats between 0.0 and 1.0
//equal in length to rgbaLength * X_TILE_COUNT * Y_TILE_COUNT
//every 4th value is 1.0, representing tile alpha
let colorArray = this.getTileColorArray();
let colorTex = this.colorMapTexFromArray(
x_tileColorSampSize,
y_tileColorSampSize,
colorArray
);
//SO solution said to use anything between 0 and 15 for texUnit, they used 3
//I imagine this is just an arbitrary location in memory to hold a texture
let texUnit = 3;
this.gl.activeTexture(this.gl.TEXTURE0 + texUnit);
this.gl.bindTexture(this.gl.TEXTURE_2D, colorTex);
this.gl.uniform1i(
this.programInfo.uniformLocs.tileColorSamp,
texUnit
);
this.gl.uniform2fv(
this.programInfo.uniformLocs.tileColorSampSize,
[x_tileColorSampSize, y_tileColorSampSize]
);
this.gl.uniform2fv(
this.programInfo.uniformLocs.viewPos,
[-this.x_viewPos, this.y_viewPos] //these change as you drag your cursor across the canvas
);
this.gl.uniformMatrix4fv(
this.programInfo.uniformLocs.projMat,
false,
this.projMat
);
this.gl.uniformMatrix4fv(
this.programInfo.uniformLocs.MVMat,
false,
this.MVMat
);
this.gl.drawArrays(this.gl.TRIANGLE_STRIP, 0, 4);
}
colorMapTexFromArray(width, height, colorArray) {
let float32Arr = Float32Array.from(colorArray);
let oldActive = this.gl.getParameter(this.gl.ACTIVE_TEXTURE);
//SO solution said "working register 31, thanks", next to next line
//not sure what that means but I think they're just looking for any
//arbitrary place to store the texture?
this.gl.activeTexture(this.gl.TEXTURE15);
var texture = this.gl.createTexture();
this.gl.bindTexture(this.gl.TEXTURE_2D, texture);
this.gl.texImage2D(
this.gl.TEXTURE_2D, 0, this.gl.RGBA,
//if I replace width and height with certain magic numbers
//like 4 or 8 (all the way up to 32 for width and 16 for height)
//I will see colored tiles, though obviously they don't map correctly.
//I THINK I've only seen it work with a widths and heights that are
//a power of 2... could the issue be that I need my texture to have
//width and height equal to a power of 2?
width, height, 0,
this.gl.RGBA, this.gl.FLOAT, float32Arr
);
//use gl.NEAREST to prevent gl from blurring texture
this.gl.texParameteri(this.gl.TEXTURE_2D, this.gl.TEXTURE_MAG_FILTER, this.gl.NEAREST);
this.gl.texParameteri(this.gl.TEXTURE_2D, this.gl.TEXTURE_MIN_FILTER, this.gl.NEAREST);
this.gl.bindTexture(this.gl.TEXTURE_2D, null);
this.gl.activeTexture(oldActive);
return texture;
}
//I don't think the issue would be in the functions below, but I included them anyway
resizeCanvas(baseWidth, baseHeight) {
let widthMod = 0;
let heightMod = 0;
//...some math is done here to account for some DOM elements that consume window space...
this.canvas.width = baseWidth + widthMod;
this.canvas.height = baseHeight + heightMod;
this.gl.viewport(0, 0, this.gl.canvas.width, this.gl.canvas.height);
}
initBuffers() {
const posBuff = this.gl.createBuffer();
this.gl.bindBuffer(this.gl.ARRAY_BUFFER, posBuff);
const positions = [
-1.0, 1.0,
1.0, 1.0,
-1.0, -1.0,
1.0, -1.0,
];
this.gl.bufferData(
this.gl.ARRAY_BUFFER,
new Float32Array(positions),
this.gl.STATIC_DRAW
);
return {
position: posBuff
};
}
buildShader(vsSource, fsSource) {
const vertShader = this.loadShader(this.gl.VERTEX_SHADER, vsSource);
const fragShader = this.loadShader(this.gl.FRAGMENT_SHADER, fsSource);
const shaderProg = this.gl.createProgram();
this.gl.attachShader(shaderProg, vertShader);
this.gl.attachShader(shaderProg, fragShader);
this.gl.linkProgram(shaderProg);
if (!this.gl.getProgramParameter(shaderProg, this.gl.LINK_STATUS)) {
console.error('Unable to initialize the shader program: ' + gl.getProgramInfoLog(shaderProg));
return null;
}
return shaderProg;
}
loadShader(type, source) {
const shader = this.gl.createShader(type);
this.gl.shaderSource(shader, source);
this.gl.compileShader(shader);
if (!this.gl.getShaderParameter(shader, this.gl.COMPILE_STATUS)) {
console.error('An error occurred compiling the shaders: ' + this.gl.getShaderInfoLog(shader));
this.gl.deleteShader(shader);
return null;
}
return shader;
}
//getTileColorArray as it appears in my code, in case you want to take a peek.
//every tileGrid[i][j] has a color, which is an array of 4 values between 0.0 and 1.0
//the fourth (last) value in tileGrid[i][j].color is always 1.0
getTileColorArray() {
let i_min = Math.max(0, Math.floor(this.x_pxPosToTilePos(this.x_viewPos)));
let i_max = Math.min(GLOBAL.map.worldWidth-1, i_min + Math.ceil(this.x_pxPosToTilePos(this.canvas.width)) + 1);
let j_min = Math.max(0, Math.floor(this.y_pxPosToTilePos(this.y_viewPos)));
let j_max = Math.min(GLOBAL.map.worldHeight-1, j_min + Math.ceil(this.y_pxPosToTilePos(this.canvas.height)) + 1);
let colorArray = [];
for (let i=i_min; i <= i_max; i++) {
for (let j=j_min; j <= j_max; j++) {
colorArray = colorArray.concat(GLOBAL.map.tileGrid[i][j].color);
}
}
return colorArray;
}
}
I've also included a pastebin of my full unaltered Gfx class in case you would like to look at that as well: https://pastebin.com/f0erR9qG
And a pastebin of my simplified code for the line numbers: https://pastebin.com/iB1pUZJa
WebGL 1.0 does not support texture wrapping on textures with non-power-of-two dimensions (NPOT textures also can't use mipmapped minification filters, but your code already sets gl.NEAREST). There are two ways to solve this issue: one is to pad the texture with enough extra data to give it power-of-two dimensions (a sketch of this follows the snippet), and the other is to simply turn off texture wrapping, like so:
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE);
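For the first option, a hedged sketch of padding the RGBA float data out to the next power-of-two size before the texImage2D call (it assumes the data is already a Float32Array; the names are illustrative, and the shader would then divide by the padded size when sampling):
function nextPow2(n) {
    return Math.pow(2, Math.ceil(Math.log2(n)));
}
function padToPow2(data, width, height) {
    const w2 = nextPow2(width), h2 = nextPow2(height);
    const padded = new Float32Array(w2 * h2 * 4); // zero-filled by default
    for (let y = 0; y < height; y++) {
        // copy each source row into the start of the wider destination row
        padded.set(data.subarray(y * width * 4, (y + 1) * width * 4), y * w2 * 4);
    }
    return { data: padded, width: w2, height: h2 };
}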
I'm still getting strange behavior in my frag shader, but it's at least showing tiles now. I think the additional strange behavior is just a result of my shader algorithm not matching what I had envisioned.

Face animation in webgl

I need some help with webgl.
I have to open the mouth of a face model (Lee Perry Smith) from code, but I don't know how to identify the correct vertexes to do it.
For my task I'm not allowed to use three.js.
I've tried to get the indexes from Blender but I had no luck for some reason (it's like the identified vertexes in Blender do not correspond to the JSON that I generated for WebGL).
Does someone have any idea..?
More infos:
I've used this snippet in blender to get the indices: http://blenderscripting.blogspot.it/2011/07/getting-index-of-currently-selected.html
then went into my JavaScript and used this function to edit the vertex coordinates (just to see if they were right, even though this is not the real transformation wanted):
function move_vertex(indices, x, y, z) {
    vertex = headObject.vertices[0];
    indices.forEach(function(index) {
        vertex[3*index]   += x;
        vertex[3*index+1] += y;
        vertex[3*index+2] += z;
    });
    gl.bindBuffer(gl.ARRAY_BUFFER, headObject.modelVertexBuffer[0]);
    gl.bufferSubData(gl.ARRAY_BUFFER, 0, new Float32Array(vertex));
    gl.bindBuffer(gl.ARRAY_BUFFER, null);
}
There are basically unlimited ways to do this. Which one fits your situation, I have no idea.
One would be to use a skinning system. Attach the mouth vertices to bones and move the bones.
Another would be to use morph targets. Basically save the mesh once with mouth open and once with mouth closed. Load both meshes in webgl, pass both to your shader and lerp between them
attribute vec4 position1; // data from mouth closed model
attribute vec4 position2; // data from mouth open model
uniform float mixAmount;
uniform mat4 worldViewProjection;
...
// compute the position to use based on the mixAmount
// 0 = close mouth
// 1 = open mouth
// 0.5 = 50% between open and closed mouth etc..
vec4 position = mix(position1, position2, mixAmount);
// use the result in the standard way
gl_Position = worldViewProjection * position;
You'd do a similar mix for normals though you'd want to normalize the result.
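For example, a hedged one-liner for the normals (attribute names assumed to mirror the position attributes above):
attribute vec3 normal1; // normal from mouth closed model
attribute vec3 normal2; // normal from mouth open model
...
vec3 normal = normalize(mix(normal1, normal2, mixAmount));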
Most modeling packages support using morph targets inside the package. It's up to the file format and the exporter whether or not that data gets exported. The easy way to just hack something together would be to export the face twice and load the 2 files with the code you have.
Another might be to use vertex colors. In your modeling program color the lip vertices a distinct color then find those vertices by color in your code.
Another would be to assign the lips a different material then use the material to find the vertices.
Some 3d modeling programs let you add meta data to vertices. That's basically a variation of the vertex colors method. You'd probably need to write your own exporter as few 3rd party formats support extra data. Even if the format could theoretically support extra data most exporters don't export it.
Similarly, some 3d modeling programs let you add vertices to selections/clusters/groups which you can then reference to find the lips. Again, this method probably requires your own exporter, as most formats don't support this data.
One other way is really hacky but will get the job done in a pinch: select the lip vertices and move them 1000 units to the right. Then in your program you can find all the vertices that are too far to the right and subtract 1000 units from each one to put them back where they originally would have been. This might mess up your normals, but you can recompute them afterward.
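A hedged sketch of that recovery step, assuming positions is the flat [x, y, z, ...] array loaded from the exported model (all names here are illustrative):
const MARKER_OFFSET = 1000;
const mouthIndices = [];
for (let i = 0; i < positions.length; i += 3) {
    if (positions[i] > MARKER_OFFSET / 2) {   // anything this far right was marked
        positions[i] -= MARKER_OFFSET;        // put the vertex back where it belongs
        mouthIndices.push(i / 3);             // remember it as a mouth vertex
    }
}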
Yet another would be to use the data you have and program an interface to highlight each vertex one at a time, write down which vertices are the mouth.
For example put a <input type="number"> on the screen. Based on the number, do something with that vertex. Set a vertex color or tweak its position, something you can do to see it. Then write down which vertices are the mouth. If you're lucky they're in some range so you only have to write down the first and last ones.
const m4 = twgl.m4;
const v3 = twgl.v3;
const gl = document.querySelector("canvas").getContext("webgl");
const vs = `
attribute vec4 a_position;
attribute vec4 a_normal;
uniform mat4 u_matrix;
varying vec4 v_color;
void main() {
// Multiply the position by the matrix.
gl_Position = u_matrix * a_position;
// Pass the normal as a color to the fragment shader.
v_color = a_normal * .5 + .5;
}
`;
const fs = `
precision mediump float;
// Passed in from the vertex shader.
varying vec4 v_color;
void main() {
gl_FragColor = v_color;
}
`;
// Yes, this sample is using TWGL (https://twgljs.org).
// You should be able to tell what it's doing from the names
// of the functions and be able to easily translate that to raw WebGL
const programInfo = twgl.createProgramInfo(gl, [vs, fs]);
const bufferInfo = twgl.createBufferInfoFromArrays(gl, {
a_position: HeadData.positions,
a_normal: HeadData.normals,
});
const numVertices = bufferInfo.numElements;
let vertexId = 0; // id of vertex we're inspecting
let newVertexId = 251; // id of vertex we want to inspect
// these are normals and get converted to colors in the shader
const black = new Float32Array([-1, -1, -1]);
const red = new Float32Array([ 1, -1, -1]);
const white = new Float32Array([ 1, 1, 1]);
const colors = [
black,
red,
white,
];
const numElem = document.querySelector("#number");
numElem.textContent = newVertexId;
document.querySelector("#prev").addEventListener('click', e => {
newVertexId = (newVertexId + numVertices - 1) % numVertices;
numElem.textContent = newVertexId;
});
document.querySelector("#next").addEventListener('click', e => {
newVertexId = (newVertexId + 1) % numVertices;
numElem.textContent = newVertexId;
});
let frameCount = 0;
function render(time) {
++frameCount;
twgl.resizeCanvasToDisplaySize(gl.canvas);
gl.viewport(0, 0, gl.canvas.width, gl.canvas.height);
gl.enable(gl.DEPTH_TEST);
gl.enable(gl.CULL_FACE);
// restore old data
// for what's in bufferInfo see
// http://twgljs.org/docs/module-twgl.html#.BufferInfo
const origData = new Float32Array(
HeadData.normals.slice(vertexId * 3, (vertexId + 3) * 3));
const oldOffset = vertexId * 3 * 4; // 4 bytes per float
gl.bindBuffer(gl.ARRAY_BUFFER, bufferInfo.attribs.a_normal.buffer);
gl.bufferSubData(gl.ARRAY_BUFFER, oldOffset, origData);
// set new vertex to a color
const newOffset = newVertexId * 3 * 4; // 4 bytes per float
gl.bufferSubData(
gl.ARRAY_BUFFER,
newOffset,
colors[(frameCount / 3 | 0) % colors.length]);
vertexId = newVertexId;
const fov = 45 * Math.PI / 180;
const aspect = gl.canvas.clientWidth / gl.canvas.clientHeight;
const zNear = 0.1;
const zFar = 50;
const projection = m4.perspective(fov, aspect, zNear, zFar);
const eye = [0, 0, 25];
const target = [0, 0, 0];
const up = [0, 1, 0];
const camera = m4.lookAt(eye, target, up);
const view = m4.inverse(camera);
const viewProjection = m4.multiply(projection, view);
const world = m4.identity();
const worldViewProjection = m4.multiply(viewProjection, world);
gl.useProgram(programInfo.program);
twgl.setBuffersAndAttributes(gl, programInfo, bufferInfo);
twgl.setUniforms(programInfo, {
u_matrix: worldViewProjection,
});
gl.drawArrays(gl.TRIANGLES, 0, numVertices);
requestAnimationFrame(render);
}
requestAnimationFrame(render);
body { margin: 0; }
canvas { width: 100vw; height: 100vh; display: block; }
.ui {
position: absolute;
left: 1em;
top: 1em;
background: rgba(0,0,0,0.9);
padding: 1em;
font-size: large;
color: white;
font-family: monospace;
}
#number {
display: inline-block;
text-align: center;
}
<script src="https://twgljs.org/dist/2.x/twgl-full.min.js"></script>
<script src="https://webglfundamentals.org/webgl/resources/headdata.js"></script>
<canvas></canvas>
<div class="ui">
<button id="prev">⬅</button>
<span>vert ndx:</span><span id="number"></span>
<button id="next">➡</button>
</div>

WebGL using raw data slower than js Image()

We are writing a web client to stream video; one type of stream uses a proprietary library, so we cannot use existing plugins or the HTML tags that support standard URLs.
I have several years experience with OpenGL, so I decided to try out WebGL for rendering raw data frames.
Rendering using a js Image() that loads png files is extremely fast. But rendering with raw RGBA data is extremely slow.
I am putting the raw data in a Uint8Array() and using the gl.texImage2D() overload that accepts a width and height, while the PNG rendering uses the gl.texImage2D() version that accepts an Image and no width or height.
I would have assumed the raw data would be faster since it doesn't have to load and decode the png file, but it seems backwards.
My background is largely C++ and have a fair amount of experience with desktop OpenGL. HTML5 and javascript are still fairly new to me.
Why is WebGL rendering the Image() (1024x1024) much, much quicker, while even a small frame of raw data (32x32) is much, much slower? Is there a way to speed this up? I am running this on the newest version of Firefox.
Edit:
The problem was actually passing data from the plugin to JavaScript. I was profiling using Date.getTime(), but apparently that is not a good way to do it, since the timestamps before and after creating an array and getting data from the plugin were the same. I've switched to getting data from a local HTTP server, which has shown a great performance improvement when getting and rendering raw data.
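For what it's worth, a hedged sketch of timing one upload with performance.now(), which gives monotonic, sub-millisecond timestamps (w, h, and pixels are placeholders, and this only measures the CPU side of the call):
const t0 = performance.now();
gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGBA, w, h, 0,
              gl.RGBA, gl.UNSIGNED_BYTE, pixels);
const t1 = performance.now();
console.log('texImage2D took ' + (t1 - t0).toFixed(3) + ' ms');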
Hmm, let's test
var canvas = document.getElementById("c");
var gl = canvas.getContext("webgl");
var work = document.getElementById("w");
var fps = document.getElementById("f");
var imageData = new Uint8Array(canvas.width * canvas.height * 4);
var program = webglUtils.createProgramFromScripts(
gl, ["vshader", "fshader"], ["a_position"]);
gl.useProgram(program);
var verts = [
1, 1,
-1, 1,
-1, -1,
1, 1,
-1, -1,
1, -1,
];
var vertBuffer = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, vertBuffer);
gl.bufferData(gl.ARRAY_BUFFER, new Float32Array(verts), gl.STATIC_DRAW);
gl.enableVertexAttribArray(0);
gl.vertexAttribPointer(0, 2, gl.FLOAT, false, 0, 0);
var tex = gl.createTexture();
gl.bindTexture(gl.TEXTURE_2D, tex);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.NEAREST);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.NEAREST);
var adjust = 1;
var workAmount = adjust;
var oneFrame = 1 / 50; // shoot for 50fps since timing is poor
var then = Date.now() * 0.001;
var frameCount = 0;
var maxIndex = canvas.width * canvas.height;
function doStuff() {
var now = Date.now() * 0.001;
var deltaTime = now - then;
then = now;
++frameCount;
if (deltaTime < oneFrame) {
workAmount += adjust;
} else {
workAmount = Math.max(workAmount - adjust, adjust);
}
fps.innerHTML = (1 / deltaTime).toFixed(1);
work.innerHTML = workAmount;
var color = (frameCount & 1) ? 255 : 128;
for (var i = 0; i < workAmount; ++i) {
var index = (Math.random() * maxIndex | 0) * 4;
imageData[index + 0] = color;
imageData[index + 1] = color;
imageData[index + 2] = color;
imageData[index + 3] = 255;
gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGBA, canvas.width, canvas.height, 0,
gl.RGBA, gl.UNSIGNED_BYTE, imageData);
}
gl.drawArrays(gl.TRIANGLES, 0, 6);
requestAnimationFrame(doStuff);
}
doStuff();
body, document {
font-family: monospace;
}
#c {
width: 128px;
height: 128px;
border: 1px solid red;
}
#outer {
position: relative;
}
#info {
position: absolute;
left: 10px;
top: 10px;
background-color: white;
padding: 0.5em;
}
<div id="outer">
<canvas id="c" width="1024" height="1024"></canvas>
<div id="info">
<div>fps : <span id="f"></span></div>
<div>work: <span id="w"></span></div>
</div>
</div>
<script src="https://webglfundamentals.org/webgl/resources/webgl-utils.js"></script>
<script id="vshader" type="whatever">
attribute vec4 a_position;
varying vec2 v_texcoord;
void main() {
gl_Position = a_position;
v_texcoord = a_position.xy * 0.5 + 0.5;
}
</script>
<script id="fshader" type="whatever">
precision mediump float;
varying vec2 v_texcoord;
uniform sampler2D u_sampler;
void main() {
gl_FragColor = texture2D(u_sampler, v_texcoord);
}
</script>
On my 2014 MBP I get about 20 1024x1024 RGBA/UNSIGNED_BYTE uploads a frame at 50fps on Chrome and about the same on Firefox
What do you get? Are you sure your bottleneck is the texture uploads and not something else?
The slowness could come from (at least) 3 things:
WebGL has to do error checking with texImage2D. Perhaps the browser knows that data from an Image is always valid and can thus skip this check.
WebGL may need to do data conversion under the hood. For example, if you use premultiplied alpha, then WebGL may need to do the conversion on your raw data, whereas an Image element is (most likely) already premultiplied.
Browsers are still godly slow at creating TypedArrays. If you are creating a new TypedArray every frame, that will slow down performance (see the sketch below).
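A hedged sketch addressing the last two points: allocate the upload buffer once outside the frame loop, and opt out of premultiplication on upload (width, height, and newPixels are placeholders):
gl.pixelStorei(gl.UNPACK_PREMULTIPLY_ALPHA_WEBGL, false); // avoid a per-upload conversion
const frameBytes = new Uint8Array(width * height * 4);    // allocated once, reused every frame
function uploadFrame(newPixels) {
    frameBytes.set(newPixels); // copy into the long-lived buffer instead of allocating
    gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGBA, width, height, 0,
                  gl.RGBA, gl.UNSIGNED_BYTE, frameBytes);
}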

Passing WebRTC video into geometry with GLSL Shader

This is my first time playing around with vertex shaders in a WebGL context. I want to texture a primitive with a video, but instead of just mapping the video onto the surface I'm trying to translate the luma of the video into vertex displacement. This is kind of like the Rutt Etra, but in a digital format. A bright pixel should push the vertex forward, while a darker pixel does the inverse. Can anyone tell me what I'm doing wrong? I can't find a reference for this error.
When compiling my code, I get the following when using sampler2D and texture2D:
Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/29.0.1547.65 Safari/537.36 | WebGL 1.0 (OpenGL ES 2.0 Chromium) | WebKit | WebKit WebGL | WebGL GLSL ES 1.0 (OpenGL ES GLSL ES 1.0 Chromium) Three.js:264
ERROR: 0:57: 'ftransform' : no matching overloaded function found
ERROR: 0:57: 'assign' : cannot convert from 'const mediump float' to 'Position highp 4-component vector of float'
ERROR: 0:60: 'gl_TextureMatrix' : undeclared identifier
ERROR: 0:60: 'gl_TextureMatrix' : left of '[' is not of type array, matrix, or vector
ERROR: 0:60: 'gl_MultiTexCoord0' : undeclared identifier
Three.js:257
<!doctype html>
<html>
<head>
<title>boiler plate for three.js</title>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, user-scalable=no, minimum-scale=1.0, maximum-scale=1.0">
<script src="vendor/three.js/Three.js"></script>
<script src="vendor/three.js/Detector.js"></script>
<script src="vendor/three.js/Stats.js"></script>
<script src="vendor/threex/THREEx.screenshot.js"></script>
<script src="vendor/threex/THREEx.FullScreen.js"></script>
<script src="vendor/threex/THREEx.WindowResize.js"></script>
<script src="vendor/threex.dragpancontrols.js"></script>
<script src="vendor/headtrackr.js"></script>
<style>
body {
overflow : hidden;
padding : 0;
margin : 0;
color : #222;
background-color: #BBB;
font-family : arial;
font-size : 100%;
}
#info .top {
position : absolute;
top : 0px;
width : 100%;
padding : 5px;
text-align : center;
}
#info a {
color : #66F;
text-decoration : none;
}
#info a:hover {
text-decoration : underline;
}
#info .bottom {
position : absolute;
bottom : 0px;
right : 5px;
padding : 5px;
}
</style>
</head>
<body>
<!-- three.js container -->
<div id="container"></div>
<!-- info on screen display -->
<div id="info">
<!--<div class="top">
LearningThree.js
boiler plate for
three.js
</div>-->
<div class="bottom" id="inlineDoc" >
- <i>p</i> for screenshot
</div>
</div>
<canvas id="compare" width="320" height="240" style="display:none"></canvas>
<video id="vid" autoplay loop></video>
<script type="x-shader/x-vertex" id="vertexShader">
varying vec2 texcoord0;
void main()
{
// perform standard transform on vertex
gl_Position = ftransform();
// transform texcoords
texcoord0 = vec2(gl_TextureMatrix[0] * gl_MultiTexCoord0);
}
</script>
<script type="x-shader/x-vertex" id="fragmentShader">
varying vec2 texcoord0;
uniform sampler2D tex0;
uniform vec2 imageSize;
uniform float coef;
const vec4 lumcoeff = vec4(0.299,0.587,0.114,0.);
void main (void)
{
vec4 pixel = texture2D(tex0, texcoord0);
float luma = dot(lumcoeff, pixel);
gl_FragColor = vec4((texcoord0.x / imageSize.x), luma, (texcoord0.y / imageSize.y) , 1.0);
}
</script>
<script type="text/javascript">
var stats, scene, renderer;
var camera, cameraControls;
var videoInput = document.getElementById('vid');
var canvasInput = document.getElementById('compare');
var projector = new THREE.Projector();
var gl;
var mesh,
cube,
attributes,
uniforms,
material,
materials;
var videoTexture = new THREE.Texture( videoInput );
if( !init() ) animate();
// init the scene
function init(){
if( Detector.webgl ){
renderer = new THREE.WebGLRenderer({
antialias : true, // to get smoother output
preserveDrawingBuffer : true // to allow screenshot
});
renderer.setClearColorHex( 0xBBBBBB, 1 );
// uncomment if webgl is required
//}else{
// Detector.addGetWebGLMessage();
// return true;
}else{
renderer = new THREE.CanvasRenderer();
gl=renderer;
}
renderer.setSize( window.innerWidth, window.innerHeight );
document.getElementById('container').appendChild(renderer.domElement);
// create a scene
scene = new THREE.Scene();
// put a camera in the scene
camera = new THREE.PerspectiveCamera( 23, window.innerWidth / window.innerHeight, 1, 100000 );
camera.position.z = 0;
scene.add( camera );
//
// // create a camera contol
// cameraControls = new THREEx.DragPanControls(camera)
// transparently support window resize
// THREEx.WindowResize.bind(renderer, camera);
// allow 'p' to make screenshot
THREEx.Screenshot.bindKey(renderer);
// allow 'f' to go fullscreen where this feature is supported
if( THREEx.FullScreen.available() ){
THREEx.FullScreen.bindKey();
document.getElementById('inlineDoc').innerHTML += "- <i>f</i> for fullscreen";
}
materials = new THREE.MeshLambertMaterial({
map : videoTexture
});
attributes = {};
uniforms = {
tex0: {type: 'mat2', value: materials},
imageSize: {type: 'f', value: []},
coef: {type: 'f', value: 1.0}
};
//Adding a directional light source to see anything..
var directionalLight = new THREE.DirectionalLight(0xffffff);
directionalLight.position.set(1, 1, 1).normalize();
scene.add(directionalLight);
// video styling
videoInput.style.position = 'absolute';
videoInput.style.top = '50px';
videoInput.style.zIndex = '100001';
videoInput.style.display = 'block';
// set up camera controller
headtrackr.controllers.three.realisticAbsoluteCameraControl(camera, 1, [0,0,0], new THREE.Vector3(0,0,0), {damping : 1.1});
var htracker = new headtrackr.Tracker();
htracker.init(videoInput, canvasInput);
htracker.start();
// var stats = new Stats();
// stats.domElement.style.position = 'absolute';
// stats.domElement.style.top = '0px';
// document.body.appendChild( stats.domElement );
document.addEventListener('headtrackrStatus',
function (event) {
if (event.status == "found") {
addCube();
}
}
);
}
// animation loop
function animate() {
// loop on request animation loop
// - it has to be at the begining of the function
// - see details at http://my.opera.com/emoller/blog/2011/12/20/requestanimationframe-for-smart-er-animating
requestAnimationFrame( animate );
// do the render
render();
// update stats
//stats.update();
}
function render() {
// convert matrix of every frame of video -> texture
uniforms.tex0 = materials;
uniforms.coef = 0.2;
uniforms.imageSize.x = window.innerWidth;
uniforms.imageSize.y = window.innerHeight;
// update camera controls
// cameraControls.update();
if( videoInput.readyState === videoInput.HAVE_ENOUGH_DATA ){
videoTexture.needsUpdate = true;
}
// actually render the scene
renderer.render( scene, camera );
}
function addCube(){
material = new THREE.ShaderMaterial({
uniforms: uniforms,
attributes: attributes,
vertexShader: document.getElementById('vertexShader').textContent,
fragmentShader: document.getElementById('fragmentShader').textContent,
transparent: true
});
//The cube
cube = new THREE.Mesh(new THREE.CubeGeometry(40, 30, 10, 1, 1, 1, material), new THREE.MeshFaceMaterial());
cube.overdraw = true;
scene.add(cube);
}
</script>
</body>
</html>
The primary problem here is that you are using the old GLSL reserved words that were intended for programmable / fixed-function interop. In OpenGL ES 2.0, things like gl_MultiTexCoord0 and gl_TextureMatrix[n] are not defined, because OpenGL ES completely removed the legacy fixed-function vertex array baggage that regular OpenGL has to deal with. These reserved words existed to expose per-texture-unit matrix and vertex array state; that was their purpose in OpenGL, and they simply do not exist in OpenGL ES.
To get around this, you have to use generic vertex attributes (e.g. attribute vec2 tex_st) instead of having a 1:1 mapping between texture coordinate pointers and texture units. Likewise, there is no texture matrix associated with each texture unit. To duplicate the functionality of texture matrices, you need to use matrix uniforms in your vertex/fragment shader.
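A hedged GLSL ES sketch of the failing vertex shader rewritten along those lines (the attribute and uniform names are illustrative; the application must set up the attributes and supply the matrices as uniforms):
attribute vec4 a_position;
attribute vec2 a_texcoord;
uniform mat4 u_modelViewProjection;  // replaces ftransform()
uniform mat4 u_textureMatrix;        // replaces gl_TextureMatrix[0]
varying vec2 texcoord0;
void main() {
    gl_Position = u_modelViewProjection * a_position;
    texcoord0 = (u_textureMatrix * vec4(a_texcoord, 0.0, 1.0)).xy;
}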
To be honest, I cannot remember the last time I actually found it useful to have a separate texture matrix / texture coordinate pointer for each texture unit when using shaders... I often have 4 or 5 different textures and only need maybe 1 or 2 sets of texture coordinates. It is no big loss.
The kicker here is ftransform(). This is intended to make it possible to write one-line vertex shaders in OpenGL that behave the same way as the fixed-function pipeline. You must have copied and pasted a shader that was written for OpenGL 2.x or 3.x (compatibility). Explaining how to fix everything in this shader could be a real chore; you might have to learn more about GLSL before most of what I just wrote makes sense :-\

Preventing Canvas Clear when Resizing Window

I'm trying to create a simple app that draws rectangles within the Canvas tag. I've got the Canvas resizing to fullscreen, but whenever I resize the viewport, Canvas clears. I'm trying to prevent it from clearing and just keeping the content that's within it. Any ideas?
http://mediajux.com/experiments/canvas/drawing/
/*
 * This is the primary class used for the application
 * @author Alvin Crespo
 */
var app = (function(){
    var domBod = document.body;
    var canvas = null;
    var canvasWidth = null;
    var canvasHeight = null;
    return {
        //Runs after the DOM has achieved an onreadystatechange of "complete"
        initApplication: function()
        {
            //setup environment variables
            canvas = document.getElementById('canvas') || null;
            //we need to resize the canvas at the start of the app to be the full window
            this.windowResized();
            //only set the canvas height and width if it is not false/null
            if(canvas)
            {
                canvasWidth = canvas.offsetWidth;
                canvasHeight = canvas.offsetHeight;
            }
            //add window events
            window.onresize = this.windowResized;
            circles.canvas = canvas;
            circles.canvasWidth = canvasWidth;
            circles.canvasHeight = canvasHeight;
            circles.generateCircles(10);
            setInterval(function(){
                circles.animateCircles();
            }, 50);
        },
        /**
         * Executes Resizing procedures on the canvas element
         */
        windowResized: function()
        {
            (this.domBod === null) ? 'true' : 'false';
            try{
                console.log(canvas);
                canvas.setAttribute('width', document.body.clientWidth);
                canvas.setAttribute('height', document.body.clientHeight);
            }catch(e) {
                console.log(e.name + " :: " + e.message);
            }
        },
        /**
         * Returns the canvas element
         * @returns canvas
         */
        getCanvas: function()
        {
            return canvas;
        }
    };
})();
Setting the canvas width attribute will clear the canvas.
If you resize the style width (e.g. canvas.style.width), it will scale (usually not in such a pretty way).
If you want to make the canvas bigger but keep the elements in it as they are, I would suggest storing the canvas as an image -- e.g. call the toDataURL method to get the image, then draw that to the resized canvas with drawImage().
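A minimal sketch of that idea (assuming canvas and a 2D ctx already exist; resizeKeepingContent is a made-up name):
function resizeKeepingContent(newWidth, newHeight) {
    var snapshot = new Image();
    snapshot.src = canvas.toDataURL();    // serialize the current pixels
    snapshot.onload = function() {
        canvas.width = newWidth;          // changing the attributes clears the canvas...
        canvas.height = newHeight;
        ctx.drawImage(snapshot, 0, 0);    // ...so paint the snapshot back in
    };
}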
Here's how I solved this problem with JS3.
Internally, I store the main canvas and context as _canvas and _context respectively.
function resize(w, h) {
    // create a temporary canvas obj to cache the pixel data //
    var temp_cnvs = document.createElement('canvas');
    var temp_cntx = temp_cnvs.getContext('2d');
    // set it to the new width & height and draw the current canvas data into it //
    temp_cnvs.width = w;
    temp_cnvs.height = h;
    temp_cntx.fillStyle = _background; // the original canvas's background color
    temp_cntx.fillRect(0, 0, w, h);
    temp_cntx.drawImage(_canvas, 0, 0);
    // resize & clear the original canvas and copy back in the cached pixel data //
    _canvas.width = w;
    _canvas.height = h;
    _context.drawImage(temp_cnvs, 0, 0);
}
JS3 also provides an autoSize flag which will automatically resize your canvas to the browser window or the dimensions of its parent div.
Set the canvas size with style (CSS) and do not change the attributes.
After resizing to fullscreen, the canvas will be resized and not cleared, but it will be scaled; to prevent scaling you need to rescale after the resize. Here is the math:
var oldWidth = $("canvas").css("width").replace("px", "");
var oldHeight = $("canvas").css("height").replace("px", "");
$("canvas").css({
"width" : window.innerWidth,
"height": window.innerHeight
});
var ratio1 = oldWidth/window.innerWidth;
var ratio2 = oldHeight/window.innerHeight;
canvas.ctx.scale(ratio1, ratio2);
Please note that I copy-pasted this from my code and quickly changed some ids and variable names, so it could have some small mistakes, like "canvas.ctx" or the DOM calls.
one way I solved this was:
const canvas = document.getElementById('ctx')
const ctx = canvas.getContext('2d')
var W = canvas.width, H = canvas.height
function resize() {
    let temp = ctx.getImageData(0, 0, W, H)
    ctx.canvas.width = window.innerWidth - 99;
    ctx.canvas.height = window.innerHeight - 99;
    W = canvas.width, H = canvas.height
    ctx.putImageData(temp, 0, 0)
}
The only issue is that on zooming back out you lose the data that was outside the canvas.
I believe you have to implement a listener for window resize and redraw the canvas content when that listener fires.
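A minimal sketch of that approach; redrawScene() is a hypothetical function that re-issues all of your drawing commands from your own scene data:
window.addEventListener('resize', function() {
    canvas.width = document.body.clientWidth;    // resizing clears the canvas...
    canvas.height = document.body.clientHeight;
    redrawScene();                               // ...so immediately redraw everything
});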
I had the same issue with my canvas and I have resolved it; please refer to the code below. I hope it helps you resolve the issue too.
Note: set alwaysDraw: true in the parameters.
HTML
<div id="top-wraper">
<div id="canvas"></div>
</div>
<!-- div used to create our plane -->
<div class="plane" data-vs-id="plane-vs" data-fs-id="plane-fs">
<!-- image that will be used as a texture by our plane -->
<img src="texture-img.png" alt="Leo Music - Music from the heart of a Lion"/>
</div>
JS
<script>
function loadAnimation() {
// set up our WebGL context and append the canvas to our wrapper
var webGLCurtain = new Curtains("canvas");
webGLCurtain.width = 50;
// if there's any error during init, we're going to catch it here
webGLCurtain.onError(function () {
// we will add a class to the document body to display original images
document.body.classList.add("no-curtains");
});
// get our plane element
var planeElement = document.getElementsByClassName("plane")[0];
// set our initial parameters (basic uniforms)
var params = {
vertexShaderID: "plane-vs", // our vertex shader ID
fragmentShaderID: "plane-fs", // our fragment shader ID
alwaysDraw: true,
//crossOrigin: "", // codepen specific
uniforms: {
time: {
name: "uTime", // uniform name that will be passed to our shaders
type: "1f", // this means our uniform is a float
value: 0,
},
}
}
// create our plane mesh
var plane = webGLCurtain.addPlane(planeElement, params);
// if our plane has been successfully created
// we use the onRender method of our plane fired at each requestAnimationFrame call
plane && plane.onRender(function () {
plane.uniforms.time.value++; // update our time uniform value
});
}
window.onload = function () {
loadAnimation();
}
</script>
<script id="plane-vs" type="x-shader/x-vertex">
#ifdef GL_ES
precision mediump float;
#endif
// those are the mandatory attributes that the lib sets
attribute vec3 aVertexPosition;
attribute vec2 aTextureCoord;
// those are mandatory uniforms that the lib sets and that contain our model view and projection matrix
uniform mat4 uMVMatrix;
uniform mat4 uPMatrix;
// our texture matrix uniform (this is the lib default name, but it could be changed)
uniform mat4 uTextureMatrix0;
// if you want to pass your vertex and texture coords to the fragment shader
varying vec3 vVertexPosition;
varying vec2 vTextureCoord;
void main() {
vec3 vertexPosition = aVertexPosition;
gl_Position = uPMatrix * uMVMatrix * vec4(vertexPosition, 1.0);
// set the varyings
// thanks to the texture matrix we will be able to calculate accurate texture coords
// so that our texture will always fit our plane without being distorted
vTextureCoord = (uTextureMatrix0 * vec4(aTextureCoord, 0.0, 1.0)).xy;
vVertexPosition = vertexPosition;
}
</script>
<script id="plane-fs" type="x-shader/x-fragment">
#ifdef GL_ES
precision mediump float;
#endif
// get our varyings
varying vec3 vVertexPosition;
varying vec2 vTextureCoord;
// the uniform we declared inside our javascript
uniform float uTime;
// our texture sampler (default name, to use a different name please refer to the documentation)
uniform sampler2D uSampler0;
void main() {
// get our texture coords
vec2 textureCoord = vTextureCoord;
// displace our pixels along both axis based on our time uniform and texture UVs
// this will create a kind of water surface effect
// try to comment a line or change the constants to see how it changes the effect
// reminder : textures coords are ranging from 0.0 to 1.0 on both axis
// const float PI = 3.141592;
const float PI = 2.0;
textureCoord.x += (
sin(textureCoord.x * 10.0 + ((uTime * (PI / 3.0)) * 0.031))
+ sin(textureCoord.y * 10.0 + ((uTime * (PI / 2.489)) * 0.017))
) * 0.0075;
textureCoord.y += (
sin(textureCoord.y * 20.0 + ((uTime * (PI / 2.023)) * 0.00))
+ sin(textureCoord.x * 20.0 + ((uTime * (PI / 3.1254)) * 0.0))
) * 0.0125;
gl_FragColor = texture2D(uSampler0, textureCoord);
}
</script>
<script src="https://www.curtainsjs.com/build/curtains.min.js" ></script>
