How to render images in WebGL from ArrayBuffer - javascript

I have an image that I read on the server side and push to the web browser via an AJAX call. I have a requirement to render it line by line using WebGL.
For example: the image is 640x480, where 640 is the width and 480 is the height, so the total number of pixels is 640*480 = 307200. I want to render the whole image in 640 steps (one per column of the total width) in a loop using WebGL.
As far as I know, WebGL has texture2D for this, but I have no idea where to start. I already have the ArrayBuffer; I just want to render it slowly, line by line, using texture2D.
I am open to any JS libraries, as long as they satisfy the requirements.
So, to draw an image line by line we can do something like this.
Vertex Shader
attribute vec2 a_position;
attribute vec2 a_texCoord;
void main() {
// ???
}
Fragment Shader
#ifdef GL_ES
precision mediump float;
#endif
uniform float time;
uniform vec2 mouse;
uniform vec2 resolution;
void main( void ) {
vec2 position = 1.0 - gl_FragCoord.xy / resolution;
vec3 color = vec3(1.0);
if (time > position.y * 10.0) {
color = texture2D(uImage0, uv);
}
gl_FragColor = vec4(color, 1.0);
}
JavaScript for rendering pixel by pixel
function createTextureFromArray(gl, dataArray, type, width, height) {
var data = new Uint8Array(dataArray);
var texture = gl.createTexture();
gl.bindTexture(gl.TEXTURE_2D, texture);
gl.texImage2D(gl.TEXTURE_2D, 0, type, width, height, 0, type, gl.UNSIGNED_BYTE, data);
return texture;
}
var arrayBuffer = new ArrayBuffer(640*480);
var pixels = new Uint8Array(arrayBuffer); // a typed-array view is needed; an ArrayBuffer cannot be indexed directly
for (var i = 0; i < 640; i++) {
for (var j = 0; j < 480; j++) {
pixels[j * 640 + i] = Math.floor(Math.random() * 255); // filling the buffer with random data between 0 and 255 which will later be copied into the texture
// NOTE: the above is just dummy data; I will get this data from the server pixel by pixel.
}
}
var gl = canvas.getContext('webgl');
// setup GLSL program
var program = createProgramFromScripts(gl, ["2d-vertex-shader", "2d-fragment-shader"]);
gl.useProgram(program);
//what should I add after this ?
Can anybody complete the code? I have no idea how to write the code to accomplish this.

OpenGL is not designed to draw images "line by line." However, you can achieve this effect in software by writing to an array, uploading it as a texture, and sampling from it in your shader when drawing a full screen polygon.
To go about this you should create an unsigned byte array. For each pixel in your image you can have some combination of red, green, blue, and alpha channels. The simplest case would be RGB, 3 unsigned bytes for each pixel. The final array should be sized according to the component size (3), times your width (640), times your height (480). You should initialize the values in your array according to what you want your background color to be, then upload it to the gpu using texImage2D.
To 'draw line by line', update 'width' pixels at a time for a given row. Each time you change the image data, re-upload the image to the GPU and then draw the fullscreen polygon.
The fullscreen polygon is simply two triangles that cover the entire clip space of the screen. The screen goes from -1 to 1 in the x and y dimensions, so make an array buffer accordingly, upload the two triangles into it, and call drawArrays each time you update the texture. The UVs for the polygon should go from 0 to 1, so in your vertex shader you should have a 'varying' output variable set to 0.5 * position + 0.5. This is used in the fragment shader to sample from the texture.
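For concreteness, here is a minimal sketch of that setup, assuming a 640x480 RGB image and a single canvas on the page; the shader source and the helper name drawRow are illustrative assumptions, not code from the question.
var canvas = document.querySelector("canvas");
var gl = canvas.getContext("webgl");
var vs = `
attribute vec2 position;        // clip-space quad corner, -1 to 1
varying vec2 vUV;
void main() {
  vUV = position * 0.5 + 0.5;   // 0 to 1, used to sample the texture
  gl_Position = vec4(position, 0.0, 1.0);
}`;
var fs = `
precision mediump float;
varying vec2 vUV;
uniform sampler2D uImage;
void main() {
  gl_FragColor = texture2D(uImage, vUV);
}`;
function compile(type, src) {
  var s = gl.createShader(type);
  gl.shaderSource(s, src);
  gl.compileShader(s);
  if (!gl.getShaderParameter(s, gl.COMPILE_STATUS)) throw new Error(gl.getShaderInfoLog(s));
  return s;
}
var program = gl.createProgram();
gl.attachShader(program, compile(gl.VERTEX_SHADER, vs));
gl.attachShader(program, compile(gl.FRAGMENT_SHADER, fs));
gl.linkProgram(program);
gl.useProgram(program);
// two triangles covering clip space (-1 to 1 in x and y)
gl.bindBuffer(gl.ARRAY_BUFFER, gl.createBuffer());
gl.bufferData(gl.ARRAY_BUFFER,
    new Float32Array([-1, -1, 1, -1, -1, 1, -1, 1, 1, -1, 1, 1]),
    gl.STATIC_DRAW);
var posLoc = gl.getAttribLocation(program, "position");
gl.enableVertexAttribArray(posLoc);
gl.vertexAttribPointer(posLoc, 2, gl.FLOAT, false, 0, 0);
// RGB backing store, initialized to zeros (black background)
var width = 640, height = 480;
var pixels = new Uint8Array(width * height * 3);
var tex = gl.createTexture();
gl.bindTexture(gl.TEXTURE_2D, tex);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE);   // needed for non-power-of-2 sizes
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.LINEAR);
gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGB, width, height, 0, gl.RGB, gl.UNSIGNED_BYTE, pixels);
// call this once per received row: write the row, re-upload, redraw
function drawRow(rowIndex, rowData /* Uint8Array of width * 3 bytes */) {
  pixels.set(rowData, rowIndex * width * 3);
  gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGB, width, height, 0, gl.RGB, gl.UNSIGNED_BYTE, pixels);
  gl.drawArrays(gl.TRIANGLES, 0, 6);
}
Since only one row changes per update, gl.texSubImage2D with a one-row region would avoid re-uploading the whole array, but the full re-upload above matches the simpler description given here.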
The official documentation is one of the best places to learn from. The official reference pages for OpenGL ES or OpenGL 3 contain relevant information, while the reference card https://www.khronos.org/files/webgl/webgl-reference-card-1_0.pdf shows the functions available in WebGL, which correspond roughly to the same API.

It's not clear what you're trying to accomplish or why you are using WebGL at all. Are you sending one line of data at a time and you want to render each individual line of data as it's received? Are you sending all the data and you just want to reveal it a line at a time horizontally?
If you have the entire image available then you can just render a larger and larger portion of it using canvas2d. The drawImage function takes optional source and destination rectangles.
// at init time
var x = 0;
// at render time
if (x < img.width) {
var srcX = x;
var srcY = 0;
var srcWidth = 1; // one pixel per frame
var srcHeight = img.height;
var dstX = x;
var dstY = 0;
var dstWidth = 1;
var dstHeight = img.height;
ctx.drawImage(img, srcX, srcY, srcWidth, srcHeight, dstX, dstY, dstWidth, dstHeight);
++x;
}
If you're sending them 1 line of data at a time you can use ImageData to make a 1xheight image and use putImageData to draw it.
// at init time or whenever you know the height
var imageData = ctx.createImageData(1, height);
var x = 0;
// on received next line of data
for (var ii = 0; ii < imageData.data.length; ++ii) {
imageData.data[ii] = receivedColumnOfPixels[ii];
}
ctx.putImageData(imageData, x, 0);
++x;
If you want to scale the ImageData put it in a second canvas and use that canvas as input to drawImage using the first technique.
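If it helps, here is a hedged sketch of that second-canvas trick; columnCanvas, columnCtx, and scale are made-up names, and it reuses ctx, imageData, height, and x from the snippet above.
var columnCanvas = document.createElement("canvas");
columnCanvas.width = 1;
columnCanvas.height = height;              // height of the incoming image
var columnCtx = columnCanvas.getContext("2d");
// on receiving the next column of data (after filling imageData as above)
columnCtx.putImageData(imageData, 0, 0);   // 1 x height column, unscaled
// draw the helper canvas scaled into the visible canvas
var scale = ctx.canvas.height / height;    // stretch to fill the canvas vertically
ctx.drawImage(columnCanvas, x * scale, 0, scale, ctx.canvas.height);
++x;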
You can do the same things in WebGL. If you have the entire texture in memory then each frame adjust your positions and texture coordinates to draw a different part of it. If you're receiving 1 column of data at a time then just use a texture that's 1 x height and draw that at the appropriate place. OR, copy that 1 x height data into the fullsize texture using gl.texSubImage2D and then adjust the positions and texture coordinates appropriately to draw the part of the texture you want to draw to the part of the canvas you want to draw it.
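For the gl.texSubImage2D route, a hedged sketch of the per-column upload might look like this; tex, x, height, and columnData are illustrative names, and it assumes a full width x height RGBA texture was already allocated with gl.texImage2D(..., null).
gl.bindTexture(gl.TEXTURE_2D, tex);
gl.texSubImage2D(
    gl.TEXTURE_2D,             // target
    0,                         // mip level
    x, 0,                      // xoffset, yoffset: where the column lands
    1, height,                 // width, height of the updated region
    gl.RGBA, gl.UNSIGNED_BYTE,
    columnData);               // Uint8Array of height * 4 bytes
// then draw the quad again, with the positions or texture coordinates
// limited to the columns received so far if only that part should show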
drawImage implemented in WebGL would look something like this. I'm using twgl.js because WebGL is too verbose.
var m4 = twgl.m4;
var gl = document.getElementById("c").getContext("webgl");
// compiles shader, links and looks up locations
var programInfo = twgl.createProgramInfo(gl, ["vs", "fs"]);
// a unit quad
var arrays = {
position: {
numComponents: 2,
data: [
0, 0,
1, 0,
0, 1,
0, 1,
1, 0,
1, 1,
],
},
};
// calls gl.createBuffer, gl.bindBuffer, gl.bufferData for each array
var bufferInfo = twgl.createBufferInfoFromArrays(gl, arrays);
// create a texture using a canvas so we don't have to download one
var ctx = document.createElement("canvas").getContext("2d");
ctx.fillStyle = "blue";
ctx.fillRect(0, 0, ctx.canvas.width, ctx.canvas.height);
ctx.lineWidth = 20;
["red", "orange", "yellow"].forEach(function(color, ndx, array) {
ctx.strokeStyle = color;
ctx.beginPath();
ctx.arc((ndx + 1) / (array.length + 1) * ctx.canvas.width, ctx.canvas.height / 2, ctx.canvas.height * 0.4, 0, Math.PI * 2, false);
ctx.stroke();
});
ctx.fillStyle = "white";
ctx.font = "40px sans-serif";
ctx.textAlign = "center";
ctx.textBaseline = "middle";
ctx.fillText("DrawImage", ctx.canvas.width / 2, ctx.canvas.height / 2);
// calls gl.createTexture, gl.bindTexture, gl.texImage2D, gl.texParameteri
var tex = twgl.createTexture(gl, { src: ctx.canvas });
var texWidth = ctx.canvas.width;
var texHeight = ctx.canvas.height;
// we pass in texWidth and texHeight because unlike images
// we can't look up the width and height of a texture
// we pass in targetWidth and targetHeight to tell it
// the size of the thing we're drawing too. We could look
// up the size of the canvas with gl.canvas.width and
// gl.canvas.height but maybe we want to draw to a framebuffer
// etc.. so might as well pass those in.
// srcX, srcY, srcWidth, srcHeight are in pixels
// computed from texWidth and texHeight
// dstX, dstY, dstWidth, dstHeight are in pixels
// computed from targetWidth and targetHeight
function drawImage(
tex, texWidth, texHeight,
srcX, srcY, srcWidth, srcHeight,
dstX, dstY, dstWidth, dstHeight,
targetWidth, targetHeight) {
var mat = m4.identity();
var tmat = m4.identity();
var uniforms = {
matrix: mat,
textureMatrix: tmat,
texture: tex,
};
// these adjust the unit quad to generate texture coordinates
// to select part of the src texture
// NOTE: no check is done that srcX + srcWidth go outside of the
// texture or are in range in any way. Same for srcY + srcHeight
m4.translate(tmat, [srcX / texWidth, srcY / texHeight, 0], tmat);
m4.scale(tmat, [srcWidth / texWidth, srcHeight / texHeight, 1], tmat);
// these convert from pixels to clip space
m4.translate(mat, [-1, 1, 0], mat);
m4.scale(mat, [2 / targetWidth, -2 / targetHeight, 1], mat);
// these move and scale the unit quad into the size we want
// in the target as pixels
m4.translate(mat, [dstX, dstY, 0], mat);
m4.scale(mat, [dstWidth, dstHeight, 1], mat);
gl.useProgram(programInfo.program);
// calls gl.bindBuffer, gl.enableVertexAttribArray, gl.vertexAttribPointer
twgl.setBuffersAndAttributes(gl, programInfo, bufferInfo);
// calls gl.uniformXXX, gl.activeTexture, gl.bindTexture
twgl.setUniforms(programInfo, uniforms);
// calls gl.drawArray or gl.drawElements
twgl.drawBufferInfo(gl, gl.TRIANGLES, bufferInfo);
}
function render(time) {
time *= 0.001;
var targetWidth = gl.canvas.width;
var targetHeight = gl.canvas.height;
// pick some various src rects and dst rects
var srcX = Math.abs(Math.sin(time * 1 )) * texWidth;
var srcY = Math.abs(Math.sin(time * 1.81)) * texHeight;
var srcWidth = (texWidth - srcX) * Math.abs(Math.sin(time * 2.12));
var srcHeight = (texHeight - srcY) * Math.abs(Math.sin(time * 1.53));
var dstX = Math.abs(Math.sin(time * 0.34)) * targetWidth;
var dstY = Math.abs(Math.sin(time * 2.75)) * targetHeight;
var dstWidth = (targetWidth - dstX) * Math.abs(Math.sin(time * 1.16));
var dstHeight = (targetHeight - dstY) * Math.abs(Math.sin(time * 1.17));
drawImage(
tex, texWidth, texHeight,
srcX, srcY, srcWidth, srcHeight,
dstX, dstY, dstWidth, dstHeight,
targetWidth, targetHeight);
requestAnimationFrame(render);
}
requestAnimationFrame(render);
canvas { border: 1px solid black; }
<script src="https://twgljs.org/dist/twgl-full.min.js"></script>
<script id="vs" type="not-js">
// we will always pass a 0 to 1 unit quad
// and then use matrices to manipulate it
attribute vec4 position;
uniform mat4 matrix;
uniform mat4 textureMatrix;
varying vec2 texcoord;
void main () {
gl_Position = matrix * position;
texcoord = (textureMatrix * position).xy;
}
</script>
<script id="fs" type="not-js">
precision mediump float;
varying vec2 texcoord;
uniform sampler2D texture;
void main() {
gl_FragColor = texture2D(texture, texcoord);
}
</script>
<canvas id="c"></canvas>
To understand the matrix math, see these articles and work your way backward or forward through them.

Related

How can I delete a specific object in Webgl (Without library)

I'm studying WebGL.
I scatter food at random locations on the canvas, and when the mouse pointer and a piece of food collide, I want to delete that food.
(Implementing the mouse-cursor collision is a separate issue and is not needed right now.)
However, no matter how much I searched, I could only find explanations of how to draw things, not how to erase a specific object. Is there a way to delete only certain objects from the canvas without a library?
The full text of my code is as follows.
var gl;
var points;
window.onload = function init()
{
var canvas = document.getElementById( "gl-canvas" );
gl = WebGLUtils.setupWebGL( canvas );
if ( !gl ) { alert( "WebGL isn't available" ); }
// Four Vertices
var vertices = [
vec2( -0.5, -0.5 ),
vec2( -0.5, 0.5 ),
vec2( 0.5, 0.5 ),
vec2( 0.5, -0.5)
];
//
// Configure WebGL
//
gl.viewport( 0, 0, canvas.width, canvas.height );
gl.clearColor( 0.0, 0.0, 0.0, 1.0 );
// Load shaders and initialize attribute buffers
var program = initShaders( gl, "vertex-shader", "fragment-shader" );
gl.useProgram( program );
// Load the data into the GPU
var bufferId = gl.createBuffer();
gl.bindBuffer( gl.ARRAY_BUFFER, bufferId );
//gl.bufferData( gl.ARRAY_BUFFER, flatten(vertices), gl.STATIC_DRAW );
// Associate out shader variables with our data buffer
var foodX, foodY;
var foodSize = 20;
var foodNumber = 50;
var vPosition = gl.getAttribLocation( program, "vPosition" );
// Tell the attribute how to get data out of positionBuffer (ARRAY_BUFFER)
var size = 2; // 2 components per iteration
var type = gl.FLOAT; // the data is 32bit floats
var normalize = false; // don't normalize the data
var stride = 0; // 0 = move forward size * sizeof(type) each iteration to get the next position
var offset = 0; // start at the beginning of the buffer
gl.vertexAttribPointer( vPosition, size, type, normalize, stride, offset);
gl.enableVertexAttribArray( vPosition );
// we added a uniform called vResolution.
var vResolution = gl.getUniformLocation(program, "vResolution");
var fColor = gl.getUniformLocation(program, "fColor");
// set the resolution
gl.uniform2f(vResolution, gl.canvas.width, gl.canvas.height);
// draw 50 random rectangles in random colors
while (foodNumber > 0) {
// Setup a random rectangle
// This will write to positionBuffer because
// its the last thing we bound on the ARRAY_BUFFER
// bind point
// food coordinates are kept within the canvas width and height, so the food does not go outside the canvas
foodX = randomInt(canvas.width - foodSize);
foodY = randomInt(canvas.height-foodSize);
setRectangle(gl, foodX, foodY, foodSize, foodSize);
foodNumber = foodNumber - 1;
// Set a random color.
gl.uniform4f(fColor, Math.random(), Math.random(), Math.random(), 1);
// Draw the rectangle.
var primitiveType = gl.TRIANGLES;
var offset = 0;
var count = 6;
gl.drawArrays(primitiveType, offset, count);
}
};
// Returns a random integer from 0 to range - 1.
function randomInt(range) {
return Math.floor(Math.random() * range);
}
// Fills the buffer with the values that define a rectangle.
function setRectangle(gl, x, y, width, height) {
var x1 = x;
var x2 = x + width;
var y1 = y;
var y2 = y + height;
// NOTE: gl.bufferData(gl.ARRAY_BUFFER, ...) will affect
// whatever buffer is bound to the `ARRAY_BUFFER` bind point
// but so far we only have one buffer. If we had more than one
// buffer we'd want to bind that buffer to `ARRAY_BUFFER` first.
gl.bufferData(gl.ARRAY_BUFFER, new Float32Array([
x1, y1,
x2, y1,
x1, y2,
x1, y2,
x2, y1,
x2, y2]), gl.STATIC_DRAW);
}
function pop(bufferName){
gl.deleteBuffer(bufferName)
}
<!DOCTYPE html>
<html>
<head>
<script id="vertex-shader" type="x-shader/x-vertex">
//attribute vec4 vPosition;
attribute vec2 vPosition;
uniform vec2 vResolution;
void
main()
{
// convert the position from pixels to 0.0 to 1.0
vec2 zeroToOne = vPosition / vResolution;
// convert from 0->1 to 0->2
vec2 zeroToTwo = zeroToOne * 2.0;
// convert from 0->2 to -1->+1 (clip space)
vec2 clipSpace = zeroToTwo - 1.0;
//gl_Position = vec4(clipSpace, 0.0, 1.0);
// To get it to be the more traditional top left corner used for 2d graphics APIs we can just flip the clip space y coordinate.
gl_Position = vec4(clipSpace * vec2(1, -1), 0, 1);
}
</script>
<script id="fragment-shader" type="x-shader/x-fragment">
precision mediump float;
uniform vec4 fColor;
void
main()
{
gl_FragColor = fColor;
}
</script>
<script type="text/javascript" src="../Common/webgl-utils.js"></script>
<script type="text/javascript" src="../Common/initShaders.js"></script>
<script type="text/javascript" src="../Common/MV.js"></script>
<script type="text/javascript" src="snakeGame.js"></script>
</head>
<body>
<canvas id="gl-canvas" width="1024" height="800">
Oops ... your browser doesn't support the HTML5 canvas element
</canvas>
<script>
var canvas =
</script>
</body>
</html>
There is no such thing as "deleting an object" in WebGL. WebGL is just an API that draws pixels into a canvas. "Objects" are a higher-level concept that your code deals with.
You generally keep your own list of things to draw (whether that is the same as your list of objects or not is up to you)
Every "frame" you clear the canvas and then draw all things you want to draw
render function:
clear the canvas
for each thing to draw
draw thing
So "deleting" an object is a matter of not drawing it.
See this and this and this
As an example
const thingsToDraw = [
{ color: [1, 0, 0, 1], center: [0.2, 0.3], },
{ color: [0, 1, 0, 1], center: [0.0, 0.1], },
{ color: [0, 0, 1, 1], center: [-0.5, -0.4], },
{ color: [1, 0.5, 0, 1], center: [-0.2, 0.3], },
{ color: [0, 1, 1, 1], center: [0.7, -0.1], },
{ color: [1, 0, 1, 1], center: [-0.5, 0.4], },
];
const gl = document.querySelector('canvas').getContext('webgl');
const prg = twgl.createProgram(gl, [`
uniform vec4 position;
void main() {
gl_PointSize = 20.0;
gl_Position = position;
}`,`
precision mediump float;
uniform vec4 color;
void main() {
gl_FragColor = color;
}
`]);
const positionLoc = gl.getUniformLocation(prg, "position");
const colorLoc = gl.getUniformLocation(prg, "color");
function drawThing(color, position) {
gl.useProgram(prg);
gl.uniform4f(positionLoc, ...position, 0, 1);
gl.uniform4fv(colorLoc, color);
gl.drawArrays(gl.POINTS, 0, 1); // draw 1 point
}
function render(time) {
time *= 0.001;
gl.clear(gl.COLOR_BUFFER_BIT);
thingsToDraw.forEach((thing, i) => {
const t = time * 10 + i;
const position = [
thing.center[0] + Math.cos(t) * 0.1,
thing.center[1] + Math.sin(t) * 0.1,
];
drawThing(thing.color, position);
});
requestAnimationFrame(render);
}
requestAnimationFrame(render);
document.querySelector('button').addEventListener('click', () => {
thingsToDraw.splice(0, 1);
});
canvas { border: 1px solid black; }
<canvas></canvas>
<button type="button">remove first thing</button>
<script src="https://twgljs.org/dist/4.x/twgl.min.js"></script>
How you decide to track and organize your "objects" or your "things to draw" is entirely up to you. Many 3D systems use a scene graph and then draw the entire graph every frame, so two ways of not drawing something are to either remove it from the graph or add a flag to each node indicating whether or not to draw it.
In other systems the scene graph is separate from the list of things to draw.
For small programs people might just use an array (like the example above)
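To make the per-node flag idea concrete, here is a hedged sketch reusing the shape of the example above; the visible flag and this loop are illustrative, not part of the original code.
const thingsToDraw = [
  { color: [1, 0, 0, 1], center: [0.2, 0.3], visible: true },
  { color: [0, 1, 0, 1], center: [0.0, 0.1], visible: true },
];
function render() {
  gl.clear(gl.COLOR_BUFFER_BIT);
  thingsToDraw.forEach((thing) => {
    if (!thing.visible) return;        // "deleted" things are simply skipped
    drawThing(thing.color, thing.center);
  });
  requestAnimationFrame(render);
}
// "deleting" the food the cursor touched is then just
//   thingsToDraw[i].visible = false;  // or thingsToDraw.splice(i, 1);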

How to add interpolation using webgl vertex and fragment shaders to the image

Hello everyone,
I am trying to render an image using WebGL shaders, and I have successfully done that using the WebGL samples, but the issue is that when I increase the size of the image, the image quality is not good. I want to upscale and interpolate the image using the vertex and fragment shaders. Here is my sample:
"use strict";
function main() {
var image = new Image();
requestCORSIfNotSameOrigin(image, "https://upload.wikimedia.org/wikipedia/commons/5/57/Pneumothorax_CT.jpg")
image.src = "https://upload.wikimedia.org/wikipedia/commons/5/57/Pneumothorax_CT.jpg";
image.width = 1000;
image.height = 1000;
image.onload = function() {
render(image);
}
}
function render(image) {
// Get A WebGL context
/** @type {HTMLCanvasElement} */
var canvas = document.getElementById("canvas");
var gl = canvas.getContext("webgl");
if (!gl) {
return;
}
// setup GLSL program
var program = webglUtils.createProgramFromScripts(gl, ["2d-vertex-shader", "2d-fragment-shader"]);
// look up where the vertex data needs to go.
var positionLocation = gl.getAttribLocation(program, "a_position");
var texcoordLocation = gl.getAttribLocation(program, "a_texCoord");
// Create a buffer to put three 2d clip space points in
var positionBuffer = gl.createBuffer();
// Bind it to ARRAY_BUFFER (think of it as ARRAY_BUFFER = positionBuffer)
gl.bindBuffer(gl.ARRAY_BUFFER, positionBuffer);
// Set a rectangle the same size as the image.
setRectangle(gl, 0, 0, image.width, image.height);
// provide texture coordinates for the rectangle.
var texcoordBuffer = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, texcoordBuffer);
gl.bufferData(gl.ARRAY_BUFFER, new Float32Array([
0.0, 0.0,
1.0, 0.0,
0.0, 1.0,
0.0, 1.0,
1.0, 0.0,
1.0, 1.0,
]), gl.STATIC_DRAW);
// Create a texture.
var texture = gl.createTexture();
gl.bindTexture(gl.TEXTURE_2D, texture);
// Set the parameters so we can render any size image.
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.NEAREST);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.NEAREST);
// Upload the image into the texture.
gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGBA, gl.RGBA, gl.UNSIGNED_BYTE, image);
// lookup uniforms
var resolutionLocation = gl.getUniformLocation(program, "u_resolution");
webglUtils.resizeCanvasToDisplaySize(gl.canvas);
// Tell WebGL how to convert from clip space to pixels
gl.viewport(0, 0, gl.canvas.width, gl.canvas.height);
// Clear the canvas
gl.clearColor(0, 0, 0, 0);
gl.clear(gl.COLOR_BUFFER_BIT);
// Tell it to use our program (pair of shaders)
gl.useProgram(program);
// Turn on the position attribute
gl.enableVertexAttribArray(positionLocation);
// Bind the position buffer.
gl.bindBuffer(gl.ARRAY_BUFFER, positionBuffer);
// Tell the position attribute how to get data out of positionBuffer (ARRAY_BUFFER)
var size = 2; // 2 components per iteration
var type = gl.FLOAT; // the data is 32bit floats
var normalize = false; // don't normalize the data
var stride = 0; // 0 = move forward size * sizeof(type) each iteration to get the next position
var offset = 0; // start at the beginning of the buffer
gl.vertexAttribPointer(
positionLocation, size, type, normalize, stride, offset)
// Turn on the texcoord attribute
gl.enableVertexAttribArray(texcoordLocation);
// Bind the position buffer.
gl.bindBuffer(gl.ARRAY_BUFFER, texcoordBuffer);
// Tell the position attribute how to get data out of positionBuffer (ARRAY_BUFFER)
var size = 2; // 2 components per iteration
var type = gl.FLOAT; // the data is 32bit floats
var normalize = false; // don't normalize the data
var stride = 0; // 0 = move forward size * sizeof(type) each iteration to get the next position
var offset = 0; // start at the beginning of the buffer
gl.vertexAttribPointer(
texcoordLocation, size, type, normalize, stride, offset)
// set the resolution
gl.uniform2f(resolutionLocation, gl.canvas.width, gl.canvas.height);
// Draw the rectangle.
var primitiveType = gl.TRIANGLES;
var offset = 0;
var count = 6;
gl.drawArrays(primitiveType, offset, count);
}
function setRectangle(gl, x, y, width, height) {
var x1 = x;
var x2 = x + width;
var y1 = y;
var y2 = y + height;
gl.bufferData(gl.ARRAY_BUFFER, new Float32Array([
x1, y1,
x2, y1,
x1, y2,
x1, y2,
x2, y1,
x2, y2,
]), gl.STATIC_DRAW);
}
main();
// This is needed if the images are not on the same domain
// NOTE: The server providing the images must give CORS permissions
// in order to be able to use the image with WebGL. Most sites
// do NOT give permission.
// See: http://webglfundamentals.org/webgl/lessons/webgl-cors-permission.html
function requestCORSIfNotSameOrigin(img, url) {
if ((new URL(url)).origin !== window.location.origin) {
img.crossOrigin = "";
}
}
@import url("https://webglfundamentals.org/webgl/resources/webgl-tutorials.css");
body {
margin: 0;
}
canvas {
width: 100vw;
height: 100vh;
display: block;
}
<div style="height:700px; width:700px; overflow:scroll;">
<canvas id="canvas"></canvas>
</div>
<!-- vertex shader -->
<script id="2d-vertex-shader" type="x-shader/x-vertex">
attribute vec2 a_position;
attribute vec2 a_texCoord;
uniform vec2 u_resolution;
varying vec2 v_texCoord; void main() {
// convert the rectangle from pixels to 0.0 to 1.0
vec2 zeroToOne = a_position / u_resolution;
// convert from 0->1 to 0->2
vec2 zeroToTwo = zeroToOne * 2.0;
// convert from 0->2 to -1->+1 (clipspace)
vec2 clipSpace = zeroToTwo - 1.0;
gl_Position = vec4(clipSpace * vec2(1, -1), 0, 1);
// pass the texCoord to the fragment shader
// The GPU will interpolate this value between points.
v_texCoord = a_texCoord;
}
</script>
<!-- fragment shader -->
<script id="2d-fragment-shader" type="x-shader/x-fragment">
precision mediump float;
// our texture
uniform sampler2D u_image;
// the texCoords passed in from the vertex shader.
varying vec2 v_texCoord;
void main() {
// Look up a color from the texture.
gl_FragColor = texture2D(u_image, v_texCoord);
}
</script>
<script src="https://webglfundamentals.org/webgl/resources/webgl-utils.js"></script>
I need interpolation when the image is zoomed or set to its maximum height, like the AMI example provided below. Check this sample.
It's not clear what you want to happen.
First off, you set gl.NEAREST as your filtering. WebGL has several kinds of filtering, covered here. Setting them to gl.LINEAR would be better, but only a little.
The problem is WebGL 1.0 doesn't support mips for images that are not power-of-2 dimensions (2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, etc.). That page describes what mips are used for (interpolation), but mips can only be used on images whose dimensions are powers of 2. The image you're trying to display is not power-of-2 dimensions; it's 954 × 687.
You have a few different options.
Download the image, edit it to be a power of 2 in both dimensions in a photo editing application, then call gl.generateMipmap to generate mips for interpolation as described on that page
Copy the image to a canvas that's a power of 2 in size, then upload the canvas as a texture
Create a texture that's the next largest power of 2 and then upload your image
function nearestGreaterOrEqualPowerOf2(v) {
return Math.pow(2, Math.ceil(Math.log2(v)));
}
const newWidth = nearestGreaterOrEqualPowerOf2(image.width);
const newHeight = nearestGreaterOrEqualPowerOf2(image.height);
// first make an empty texture of the new size
const level = 0;
const format = gl.RGBA;
const type = gl.UNSIGNED_BYTE;
const border = 0;
gl.texImage2D(gl.TEXTURE_2D, level, format, newWidth, newHeight, border,
format, type, null);
// then upload the image into the bottom left corner of the texture
const xoffset = 0;
const yoffset = 0;
gl.texSubImage2D(gl.TEXTURE_2D, level, xoffset, yoffset, format, type, image);
// now because the texture is a power of 2 in both dimensions you can
// generate mips and turn on maximum filtering
gl.generateMipmap(gl.TEXTURE_2D);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.LINEAR_MIPMAP_LINEAR);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.LINEAR);
In all of these cases, though, you have a new issue: the image now occupies only a portion of the texture. You'd have to adjust your texture coordinates, either using a texture matrix or by adjusting the texture coordinates directly.
// compute the texture coordinates that show only the portion of the
// texture the image occupies (image size over padded texture size)
var u = image.width / newWidth;
var v = image.height / newHeight;
// provide texture coordinates for the rectangle.
var texcoordBuffer = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, texcoordBuffer);
gl.bufferData(gl.ARRAY_BUFFER, new Float32Array([
0, 0,
u, 0,
0, v,
0, v,
u, 0,
u, v,
]), gl.STATIC_DRAW);
"use strict";
function main() {
var image = new Image();
requestCORSIfNotSameOrigin(image, "https://upload.wikimedia.org/wikipedia/commons/5/57/Pneumothorax_CT.jpg")
image.src = "https://upload.wikimedia.org/wikipedia/commons/5/57/Pneumothorax_CT.jpg";
image.onload = function() {
render(image);
}
}
function render(image) {
// Get A WebGL context
/** @type {HTMLCanvasElement} */
var canvas = document.getElementById("canvas");
var gl = canvas.getContext("webgl");
if (!gl) {
return;
}
// setup GLSL program
var program = webglUtils.createProgramFromScripts(gl, ["2d-vertex-shader", "2d-fragment-shader"]);
// look up where the vertex data needs to go.
var positionLocation = gl.getAttribLocation(program, "a_position");
var texcoordLocation = gl.getAttribLocation(program, "a_texCoord");
function nearestGreaterOrEqualPowerOf2(v) {
return Math.pow(2, Math.ceil(Math.log2(v)));
}
const newWidth = nearestGreaterOrEqualPowerOf2(image.width);
const newHeight = nearestGreaterOrEqualPowerOf2(image.height);
// Create a buffer to put three 2d clip space points in
var positionBuffer = gl.createBuffer();
// Bind it to ARRAY_BUFFER (think of it as ARRAY_BUFFER = positionBuffer)
gl.bindBuffer(gl.ARRAY_BUFFER, positionBuffer);
// Set a rectangle fit in the canvas at the same aspect as the image.
const drawWidth = canvas.clientWidth;
const drawHeight = canvas.clientWidth / image.width * image.height; // keep the image's aspect ratio
setRectangle(gl, 0, 0, drawWidth, drawHeight);
// compute the texture coordinates that show only the portion of the
// texture the image occupies (image size over padded texture size)
var u = image.width / newWidth;
var v = image.height / newHeight;
// provide texture coordinates for the rectangle.
var texcoordBuffer = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, texcoordBuffer);
gl.bufferData(gl.ARRAY_BUFFER, new Float32Array([
0, 0,
u, 0,
0, v,
0, v,
u, 0,
u, v,
]), gl.STATIC_DRAW);
// Create a texture.
var texture = gl.createTexture();
gl.bindTexture(gl.TEXTURE_2D, texture);
// first make an empty texture of the new size
{
const level = 0;
const format = gl.RGBA;
const type = gl.UNSIGNED_BYTE;
const border = 0;
gl.texImage2D(gl.TEXTURE_2D, level, format, newWidth, newHeight, border,
format, type, null);
// then upload the image into the bottom left corner of the texture
const xoffset = 0;
const yoffset = 0;
gl.texSubImage2D(gl.TEXTURE_2D, level, xoffset, yoffset, format, type, image);
}
// now because the texture is a power of 2 in both dimensions you can
// generate mips and turn on maximum filtering
gl.generateMipmap(gl.TEXTURE_2D);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.LINEAR_MIPMAP_LINEAR);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.LINEAR);
// lookup uniforms
var resolutionLocation = gl.getUniformLocation(program, "u_resolution");
webglUtils.resizeCanvasToDisplaySize(gl.canvas);
// Tell WebGL how to convert from clip space to pixels
gl.viewport(0, 0, gl.canvas.width, gl.canvas.height);
// Clear the canvas
gl.clearColor(0, 0, 0, 0);
gl.clear(gl.COLOR_BUFFER_BIT);
// Tell it to use our program (pair of shaders)
gl.useProgram(program);
// Turn on the position attribute
gl.enableVertexAttribArray(positionLocation);
// Bind the position buffer.
gl.bindBuffer(gl.ARRAY_BUFFER, positionBuffer);
// Tell the position attribute how to get data out of positionBuffer (ARRAY_BUFFER)
var size = 2; // 2 components per iteration
var type = gl.FLOAT; // the data is 32bit floats
var normalize = false; // don't normalize the data
var stride = 0; // 0 = move forward size * sizeof(type) each iteration to get the next position
var offset = 0; // start at the beginning of the buffer
gl.vertexAttribPointer(
positionLocation, size, type, normalize, stride, offset)
// Turn on the texcoord attribute
gl.enableVertexAttribArray(texcoordLocation);
// Bind the position buffer.
gl.bindBuffer(gl.ARRAY_BUFFER, texcoordBuffer);
// Tell the position attribute how to get data out of positionBuffer (ARRAY_BUFFER)
var size = 2; // 2 components per iteration
var type = gl.FLOAT; // the data is 32bit floats
var normalize = false; // don't normalize the data
var stride = 0; // 0 = move forward size * sizeof(type) each iteration to get the next position
var offset = 0; // start at the beginning of the buffer
gl.vertexAttribPointer(
texcoordLocation, size, type, normalize, stride, offset)
// set the resolution
gl.uniform2f(resolutionLocation, gl.canvas.width, gl.canvas.height);
// Draw the rectangle.
var primitiveType = gl.TRIANGLES;
var offset = 0;
var count = 6;
gl.drawArrays(primitiveType, offset, count);
}
function setRectangle(gl, x, y, width, height) {
var x1 = x;
var x2 = x + width;
var y1 = y;
var y2 = y + height;
gl.bufferData(gl.ARRAY_BUFFER, new Float32Array([
x1, y1,
x2, y1,
x1, y2,
x1, y2,
x2, y1,
x2, y2,
]), gl.STATIC_DRAW);
}
main();
// This is needed if the images are not on the same domain
// NOTE: The server providing the images must give CORS permissions
// in order to be able to use the image with WebGL. Most sites
// do NOT give permission.
// See: http://webglfundamentals.org/webgl/lessons/webgl-cors-permission.html
function requestCORSIfNotSameOrigin(img, url) {
if ((new URL(url)).origin !== window.location.origin) {
img.crossOrigin = "";
}
}
@import url("https://webglfundamentals.org/webgl/resources/webgl-tutorials.css");
body {
margin: 0;
}
canvas {
width: 100vw;
height: 100vh;
display: block;
}
<div style="height:700px; width:700px; overflow:scroll;">
<canvas id="canvas"></canvas>
</div>
<!-- vertex shader -->
<script id="2d-vertex-shader" type="x-shader/x-vertex">
attribute vec2 a_position;
attribute vec2 a_texCoord;
uniform vec2 u_resolution;
varying vec2 v_texCoord; void main() {
// convert the rectangle from pixels to 0.0 to 1.0
vec2 zeroToOne = a_position / u_resolution;
// convert from 0->1 to 0->2
vec2 zeroToTwo = zeroToOne * 2.0;
// convert from 0->2 to -1->+1 (clipspace)
vec2 clipSpace = zeroToTwo - 1.0;
gl_Position = vec4(clipSpace * vec2(1, -1), 0, 1);
// pass the texCoord to the fragment shader
// The GPU will interpolate this value between points.
v_texCoord = a_texCoord;
}
</script>
<!-- fragment shader -->
<script id="2d-fragment-shader" type="x-shader/x-fragment">
precision mediump float;
// our texture
uniform sampler2D u_image;
// the texCoords passed in from the vertex shader.
varying vec2 v_texCoord;
void main() {
// Look up a color from the texture.
gl_FragColor = texture2D(u_image, v_texCoord);
}
</script>
<script src="https://webglfundamentals.org/webgl/resources/webgl-utils.js"></script>
There are already really informative and good answers and comments here.
Also please take into account that the link you provided uses high-quality, high-resolution images with excellent quality, or at least no compression artifacts.
Unpacked, it is ~21 MB in the NIfTI data format.
Using ami.js to unpack it:
https://github.com/FNNDSC/ami
Using the example by gman with a good image resolution that matches your screen resolution should give you a decent result.
Yes, there are some algorithms to fix bad image quality and deal with image compression artifacts, but (and I don't want to repeat the comments here) generally speaking, once the information is lost, it is gone.

WebGL Drawing an image

I'm new to WebGL; I've worked with OpenGL in Java before. I've been trying to code a simple function that draws an image at a specific location with a specific size and rotation, but after searching the internet for a while, my code still isn't working.
Currently, I've succeeded in drawing an image, but that image is nowhere near the correct location, size, or rotation. I've lost track of what each piece of code does and needs, because I've combined code from a number of different tutorials since I couldn't find one tutorial that covered all my requirements.
I know that the image loading part works for sure. I just need help with making a function that
sets up the vertex and fragment shader (for drawing with a texture)
translates, resizes and rotates it into the correct location, size and rotation
and draws it
Could someone help me with that?
You should probably read up on WebGL especially about matrices.
In any case here's "drawImage" from the canvas 2d API re-written in WebGL with the full transform stack.
In other words in Canvas2D you could do this
ctx.save();
ctx.translate(x, y);
ctx.rotate(angle);
ctx.scale(w, h);
ctx.drawImage(img, x, y);
ctx.restore();
Below you can do this
save();
translate(x, y);
rotate(angle);
scale(w, h);
drawImage(targetWidth, targetHeight, tex, texWidth, texHeight, x, y);
restore();
var m4 = twgl.m4;
var gl = document.getElementById("c").getContext('webgl');
var programInfo = twgl.createProgramInfo(gl, ["vs", "fs"]);
// a unit quad
var arrays = {
position: {
numComponents: 2,
data: [
0, 0,
1, 0,
0, 1,
0, 1,
1, 0,
1, 1,
],
},
};
var bufferInfo = twgl.createBufferInfoFromArrays(gl, arrays);
// Let's use a 2d canvas for a texture just so we don't have to download anything
var ctx = document.createElement("canvas").getContext("2d");
var w = 128;
var h = 64;
ctx.canvas.width = w;
ctx.canvas.height = h;
ctx.fillStyle = "blue";
ctx.fillRect(0, 0, w, h);
ctx.fillStyle = "green";
ctx.fillRect(w / 8, h / 8, w / 8 * 6, h / 8 * 6);
ctx.fillStyle = "red";
ctx.fillRect(w / 4, h / 4, w / 2, h / 2);
ctx.textAlign = "center";
ctx.textBaseline = "middle";
ctx.font = "20px sans-serif";
ctx.fillStyle = "yellow";
ctx.fillText("texture", w / 2, h / 2);
var tex = twgl.createTexture(gl, { src: ctx.canvas });
// Implement a matrix stack like Canvas2d
var matrixStack = [ m4.identity() ];
function render(time) {
var t = time * 0.001;
var texWidth = w;
var texHeight = h;
var targetWidth = gl.canvas.width;
var targetHeight = gl.canvas.height;
save();
translate(
(Math.sin(t * 0.9) * 0.5 + 0.5) * targetWidth,
(Math.sin(t * 0.8) * 0.5 + 0.5) * targetHeight);
rotate(t * 0.7);
scale(
Math.sin(t * 0.7) * 0.5 + 1,
Math.sin(t * 0.6) * 0.5 + 1);
// scale and rotate from center of image
translate(texWidth * -0.5, texHeight * -0.5);
drawImage(
targetWidth, targetHeight,
tex, texWidth, texHeight,
0, 0);
restore();
requestAnimationFrame(render);
}
requestAnimationFrame(render);
function getCurrentMatrix() {
return matrixStack[matrixStack.length - 1];
}
function save() {
matrixStack.push(m4.copy(getCurrentMatrix()));
}
function restore() {
matrixStack.pop();
if (!matrixStack.length) {
matrixStack.push(m4.identity());
}
}
function translate(x, y) {
var m = getCurrentMatrix();
m4.translate(m, [x, y, 0], m);
}
function scale(x, y) {
var m = getCurrentMatrix();
m4.scale(m, [x, y, 1], m);
}
function rotate(radians) {
var m = getCurrentMatrix();
m4.rotateZ(m, radians, m);
}
// we pass in texWidth and texHeight because unlike images
// we can't look up the width and height of a texture
// we pass in targetWidth and targetHeight to tell it
// the size of the thing we're drawing too. We could look
// up the size of the canvas with gl.canvas.width and
// gl.canvas.height but maybe we want to draw to a framebuffer
// etc.. so might as well pass those in.
// srcX, srcY, srcWidth, srcHeight are in pixels
// computed from texWidth and texHeight
// dstX, dstY, dstWidth, dstHeight are in pixels
// computed from targetWidth and targetHeight
function drawImage(
targetWidth, targetHeight,
tex, texWidth, texHeight,
srcX, srcY, srcWidth, srcHeight,
dstX, dstY, dstWidth, dstHeight
) {
// handle case where only x, y are passed in
// as in ctx.drawImage(img, x, y);
if (srcWidth === undefined) {
srcWidth = texWidth;
srcHeight = texHeight;
}
// handle case where only x, y, width, height are passed in
// as in ctx.drawImage(img, x, y, width, height);
if (dstX === undefined) {
dstX = srcX;
dstY = srcY;
dstWidth = srcWidth;
dstHeight = srcHeight;
}
var mat = m4.identity();
var tmat = m4.identity();
var uniforms = {
matrix: mat,
textureMatrix: tmat,
texture: tex,
};
// these adjust the unit quad to generate texture coordinates
// to select part of the src texture
// NOTE: no check is done that srcX + srcWidth go outside of the
// texture or are in range in any way. Same for srcY + srcHeight
m4.translate(tmat, [srcX / texWidth, srcY / texHeight, 0], tmat);
m4.scale(tmat, [srcWidth / texWidth, srcHeight / texHeight, 1], tmat);
// these convert from pixels to clip space
m4.ortho(0, targetWidth, targetHeight, 0, -1, 1, mat);
// Add in global matrix
m4.multiply(mat, getCurrentMatrix(), mat);
// these move and scale the unit quad into the size we want
// in the target as pixels
m4.translate(mat, [dstX, dstY, 0], mat);
m4.scale(mat, [dstWidth, dstHeight, 1], mat);
gl.useProgram(programInfo.program);
twgl.setBuffersAndAttributes(gl, programInfo, bufferInfo);
twgl.setUniforms(programInfo, uniforms);
twgl.drawBufferInfo(gl, bufferInfo);
}
html, body, canvas {
margin: 0; width: 100%; height:100%; display: block;
}
<script src="https://twgljs.org/dist/4.x/twgl-full.js"></script>
<script id="vs" type="not-js">
// we will always pass a 0 to 1 unit quad
// and then use matrices to manipulate it
attribute vec4 position;
uniform mat4 matrix;
uniform mat4 textureMatrix;
varying vec2 texcoord;
void main () {
gl_Position = matrix * position;
texcoord = (textureMatrix * position).xy;
}
</script>
<script id="fs" type="not-js">
precision mediump float;
varying vec2 texcoord;
uniform sampler2D texture;
void main() {
gl_FragColor = texture2D(texture, texcoord);
}
</script>
<canvas id="c"></canvas>
and here's an article describing how it works

How to increase performance using WebGL?

I am rendering a texture using WebGL. However, the way I am rendering is that I render a few lines of data, then move those lines to the right, and then draw another set of lines.
For example: I have an image of 640*480, which contains 640*480 pixels of RGBA (640*480*4 bytes); however, I am only filling the alpha values, and it is a grayscale medical DICOM image.
Now, the issue I am facing is that the texture renders with jerks; the image rendering does not happen smoothly.
For example, this is what happens:
There are 640 lines of data to be rendered.
So, I took an ArrayBuffer of 640*480*4. Suppose the first line comes to the client via WebSocket from the server to be rendered; then I fill the indexes 3, 640+3, 640*2+3, 640*3+3 and so on until 640*480+3. Then, when the second line is received, I move the first line over, like 3->7, 640+3->640+7, ... 640*480+3->640*480+7, and the newly received line is written to 3, 640+3, 640*2+3, 640*3+3. This continues until the 640th line of image data.
Here's the code that I have done.
Code:
var renderLineData = function (imageAttr) {
var data = imageAttr.data;
var LINES_PER_CHUNK = imageAttr.lines;
var alpha = 4;
if(imageAttr.newImage) {
newBuffer = new ArrayBuffer(imageAttr.width * imageAttr.height * alpha);
dataTypedArray = new Uint8Array(newBuffer);
// provide texture coordinates for the rectangle.
provideTextureCoordsForRect();
setParams();
// Upload the image into the texture.
// look up uniform locations
uploadImageToTexture(gl.getUniformLocation(program, 'u_matrix'));
} else {
for (var z = imageAttr.index; z > 0; z--) {
for (i = 0 ; i < LINES_PER_CHUNK; i++) {
for (j = 0 ; j < imageAttr.height; j++) {
dataTypedArray[i * alpha + imageAttr.width*alpha * j + 3 + LINES_PER_CHUNK * alpha * z] = dataTypedArray[i * alpha + imageAttr.width*alpha * j + 3 + LINES_PER_CHUNK * alpha * (z-1)];
}
}
}
}
for (i = 0, k = imageAttr.height*LINES_PER_CHUNK; i < LINES_PER_CHUNK; i++) {
for (j = 0 ; j < imageAttr.height; j++) {
dataTypedArray[i * alpha + imageAttr.width*4 * j + 3] = data[k - imageAttr.height + j];
}
k = k - imageAttr.height;
}
imageAttrTemp = imageAttr;
renderImgSlowly(gl, imageAttr, dataTypedArray);
};
function renderImgSlowly (gl, image, dataTypedArray) {
gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT);
gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGBA, image.width, image.height, 0, gl.RGBA, gl.UNSIGNED_BYTE, dataTypedArray);
//Draw the rectangle.
gl.drawArrays(gl.TRIANGLES, 0, 6);
}
First off, nothing you are doing is likely to be the speed issue. A 640x480 image is not that large, and the amount of processing you're doing in JavaScript is unlikely to be the bottleneck.
On top of that WebGL will have no trouble drawing a single quad which is all you're drawing. Nor will it have a problem uploading a 640x480 texture.
The bottleneck is the network. Sending chunks over the network is slow.
On the other hand, if you want to optimize, why are you shifting the data around in JavaScript? Just put it in the correct place in the texture to start with using gl.texSubImage2D. If you only want to draw the part that has had data put in it, then adjust the texture coordinates to select that part of the texture.
Also, why are you using RGBA if you only need one channel? Use LUMINANCE.
if (imageAttr.newImage) {
destColumn = imageAttr.width;
gl.texImage2D(gl.TEXTURE_2D, 0, gl.LUMINANCE, imageAttr.width, imageAttr.height, 0,
gl.LUMINANCE, gl.UNSIGNED_BYTE, null);
}
destColumn -= imageAttr.lines;
// should check that destColumn does not go negative!
gl.texSubImage2D(gl.TEXTURE_2D, 0, destColumn, 0, imageAttr.lines, imageAttr.height,
gl.LUMINANCE, gl.UNSIGNED_BYTE, imageAttr.data);
var srcX = destColumn;
var srcY = 0;
var srcWidth = imageAttr.width - destColumn;
var srcHeight = imageAttr.height;
var dstX = destColumn * gl.canvas.width / imageAttr.width;
var dstY = 0;
var dstWidth = srcWidth * gl.canvas.width / imageAttr.width;
var dstHeight = srcHeight;
var texWidth = imageAttr.width;
var texHeight = imageAttr.height;
var targetWidth = gl.canvas.width;
var targetHeight = gl.canvas.height;
drawImageInWebGL(
tex, texWidth, texHeight,
srcX, srcY, srcWidth, srcHeight,
dstX, dstY, dstWidth, dstHeight,
targetWidth, targetHeight);
}
Here's an example
var m4 = twgl.m4;
var gl = document.getElementById("c").getContext("webgl");
// compiles shader, links and looks up locations
var programInfo = twgl.createProgramInfo(gl, ["vs", "fs"]);
// a unit quad
var arrays = {
position: {
numComponents: 2,
data: [
0, 0,
1, 0,
0, 1,
0, 1,
1, 0,
1, 1,
],
},
};
// calls gl.createBuffer, gl.bindBuffer, gl.bufferData for each array
var bufferInfo = twgl.createBufferInfoFromArrays(gl, arrays);
// we're only using 1 texture so just make and bind it now
var tex = gl.createTexture();
gl.bindTexture(gl.TEXTURE_2D, tex);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.LINEAR);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.NEAREST);
var destColumn = 0;
// We're using 1 byte wide texture pieces so we need to
// set UNPACK_ALIGNMENT to 1 as it defaults to 4
gl.pixelStorei(gl.UNPACK_ALIGNMENT, 1);
simulateSendingAnImageNColumnsAtATime(1, 1, addLinesToImageAndDraw);
function addLinesToImageAndDraw(imageAttr) {
if (imageAttr.newImage) {
destColumn = imageAttr.width;
gl.texImage2D(gl.TEXTURE_2D, 0, gl.LUMINANCE, imageAttr.width, imageAttr.height, 0,
gl.LUMINANCE, gl.UNSIGNED_BYTE, null);
}
destColumn -= imageAttr.lines;
// should check that destColumn does not go negative!
gl.texSubImage2D(gl.TEXTURE_2D, 0, destColumn, 0, imageAttr.lines, imageAttr.height,
gl.LUMINANCE, gl.UNSIGNED_BYTE, imageAttr.data);
var srcX = destColumn;
var srcY = 0;
var srcWidth = imageAttr.width - destColumn;
var srcHeight = imageAttr.height;
var dstX = destColumn * gl.canvas.width / imageAttr.width;
var dstY = 0;
var dstWidth = srcWidth * gl.canvas.width / imageAttr.width;
var dstHeight = gl.canvas.height;
var texWidth = imageAttr.width;
var texHeight = imageAttr.height;
var targetWidth = gl.canvas.width;
var targetHeight = gl.canvas.height;
drawImage(
tex, texWidth, texHeight,
srcX, srcY, srcWidth, srcHeight,
dstX, dstY, dstWidth, dstHeight,
targetWidth, targetHeight);
}
// we pass in texWidth and texHeight because unlike images
// we can't look up the width and height of a texture
// we pass in targetWidth and targetHeight to tell it
// the size of the thing we're drawing too. We could look
// up the size of the canvas with gl.canvas.width and
// gl.canvas.height but maybe we want to draw to a framebuffer
// etc.. so might as well pass those in.
// srcX, srcY, srcWidth, srcHeight are in pixels
// computed from texWidth and texHeight
// dstX, dstY, dstWidth, dstHeight are in pixels
// computed from targetWidth and targetHeight
function drawImage(
tex, texWidth, texHeight,
srcX, srcY, srcWidth, srcHeight,
dstX, dstY, dstWidth, dstHeight,
targetWidth, targetHeight) {
var mat = m4.identity();
var tmat = m4.identity();
var uniforms = {
matrix: mat,
textureMatrix: tmat,
texture: tex,
};
// these adjust the unit quad to generate texture coordinates
// to select part of the src texture
// NOTE: no check is done that srcX + srcWidth go outside of the
// texture or are in range in any way. Same for srcY + srcHeight
m4.translate(tmat, [srcX / texWidth, srcY / texHeight, 0], tmat);
m4.scale(tmat, [srcWidth / texWidth, srcHeight / texHeight, 1], tmat);
// these convert from pixels to clip space
m4.ortho(0, targetWidth, targetHeight, 0, -1, 1, mat)
// these move and scale the unit quad into the size we want
// in the target as pixels
m4.translate(mat, [dstX, dstY, 0], mat);
m4.scale(mat, [dstWidth, dstHeight, 1], mat);
gl.useProgram(programInfo.program);
// calls gl.bindBuffer, gl.enableVertexAttribArray, gl.vertexAttribPointer
twgl.setBuffersAndAttributes(gl, programInfo, bufferInfo);
// calls gl.uniformXXX, gl.activeTexture, gl.bindTexture
twgl.setUniforms(programInfo, uniforms);
// calls gl.drawArray or gl.drawElements
twgl.drawBufferInfo(gl, gl.TRIANGLES, bufferInfo);
}
// =====================================================================
// Everything below this line represents stuff from the server.
// so it's irrelevant to the answer
//
function simulateSendingAnImageNColumnsAtATime(minColumnsPerChunk, maxColumnsPerChunk, callback) {
var imageData = createImageToSend(640, 480);
// cut data into columns at start because this work would be done on
// the server
var columns = [];
var x = 0;
while (x < imageData.width) {
// how many columns are left?
var maxWidth = imageData.width - x;
// how many columns should we send
var columnWidth = Math.min(maxWidth, rand(minColumnsPerChunk, maxColumnsPerChunk + 1));
var data = createImageChunk(imageData, imageData.width - x - columnWidth, 0, columnWidth, imageData.height);
columns.push({
newImage: x === 0,
lines: columnWidth,
width: imageData.width,
height: imageData.height,
data: data,
});
x += columnWidth;
}
var columnNdx = 0;
sendNextColumn();
function sendNextColumn() {
if (columnNdx < columns.length) {
callback(columns[columnNdx++]);
if (columnNdx < columns.length) {
// should we make this random to simulate network speed
var timeToNextChunkMS = 17;
setTimeout(sendNextColumn, timeToNextChunkMS);
}
}
}
}
function createImageChunk(imageData, x, y, width, height) {
var data = new Uint8Array(width * height);
for (var yy = 0; yy < height; ++yy) {
for (var xx = 0; xx < width; ++xx) {
var srcOffset = ((yy + y) * imageData.width + xx + x) * 4;
var dstOffset = yy * width + xx;
// compute gray scale
var gray = Math.max(imageData.data[srcOffset], imageData.data[srcOffset + 1], imageData.data[srcOffset + 2]);
data[dstOffset] = gray;
}
}
return data;
}
function rand(min, max) {
return Math.floor(Math.random() * (max - min)) + min;
}
function createImageToSend(width, height) {
// create a texture using a canvas so we don't have to download one
var ctx = document.createElement("canvas").getContext("2d");
ctx.canvas.width = width;
ctx.canvas.height = height;
ctx.fillStyle = "#222";
ctx.fillRect(0, 0, ctx.canvas.width, ctx.canvas.height);
ctx.lineWidth = 20;
["#AAA", "#888", "#666"].forEach(function(color, ndx, array) {
ctx.strokeStyle = color;
ctx.beginPath();
ctx.arc((ndx + 1) / (array.length + 1) * ctx.canvas.width, ctx.canvas.height / 2,
ctx.canvas.height * 0.4, 0, Math.PI * 2, false);
ctx.stroke();
});
ctx.fillStyle = "white";
ctx.font = "40px sans-serif";
ctx.textAlign = "center";
ctx.textBaseline = "middle";
ctx.fillText("Some Image", ctx.canvas.width / 2, ctx.canvas.height / 2);
return ctx.getImageData(0, 0, ctx.canvas.width, ctx.canvas.height);
}
canvas { border: 1px solid black; }
<script src="https://twgljs.org/dist/twgl-full.min.js"></script>
<script id="vs" type="not-js">
// we will always pass a 0 to 1 unit quad
// and then use matrices to manipulate it
attribute vec4 position;
uniform mat4 matrix;
uniform mat4 textureMatrix;
varying vec2 texcoord;
void main () {
gl_Position = matrix * position;
texcoord = (textureMatrix * position).xy;
}
</script>
<script id="fs" type="not-js">
precision mediump float;
varying vec2 texcoord;
uniform sampler2D texture;
void main() {
gl_FragColor = texture2D(texture, texcoord);
}
</script>
<canvas id="c" width="640" height="480"></canvas>
NOTE: This will not be smooth because it is using setTimeout to simulate receiving network data but that's exactly what you're likely seeing.
Here's a sample that rotates the image independently of updating the texture. You can see it runs perfectly smoothly. The slowness is not WebGL; the slowness is the networking (as simulated by setTimeout).
var m4 = twgl.m4;
var gl = document.getElementById("c").getContext("webgl");
// compiles shader, links and looks up locations
var programInfo = twgl.createProgramInfo(gl, ["vs", "fs"]);
// a unit quad
var arrays = {
position: {
numComponents: 2,
data: [
0, 0,
1, 0,
0, 1,
0, 1,
1, 0,
1, 1,
],
},
};
// calls gl.createBuffer, gl.bindBuffer, gl.bufferData for each array
var bufferInfo = twgl.createBufferInfoFromArrays(gl, arrays);
// we're only using 1 texture so just make and bind it now
var tex = gl.createTexture();
gl.bindTexture(gl.TEXTURE_2D, tex);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.LINEAR);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.NEAREST);
var destColumn = 0;
var imageWidth;
var imageHeight;
// We're using 1 byte wide texture pieces so we need to
// set UNPACK_ALIGNMENT to 1 as it defaults to 4
gl.pixelStorei(gl.UNPACK_ALIGNMENT, 1);
simulateSendingAnImageNColumnsAtATime(1, 1, addLinesToImageAndDraw);
function addLinesToImageAndDraw(imageAttr) {
if (imageAttr.newImage) {
destColumn = imageAttr.width;
imageWidth = imageAttr.width;
imageHeight = imageAttr.height;
gl.texImage2D(gl.TEXTURE_2D, 0, gl.LUMINANCE, imageAttr.width, imageAttr.height, 0,
gl.LUMINANCE, gl.UNSIGNED_BYTE, null);
}
destColumn -= imageAttr.lines;
// should check that destColumn does not go negative!
gl.texSubImage2D(gl.TEXTURE_2D, 0, destColumn, 0, imageAttr.lines, imageAttr.height,
gl.LUMINANCE, gl.UNSIGNED_BYTE, imageAttr.data);
}
function render(time) {
if (imageWidth) {
var srcX = destColumn;
var srcY = 0;
var srcWidth = imageWidth - destColumn;
var srcHeight = imageHeight;
var dstX = destColumn * gl.canvas.width / imageWidth;
var dstY = 0;
var dstWidth = srcWidth * gl.canvas.width / imageWidth;
var dstHeight = gl.canvas.height;
var texWidth = imageWidth;
var texHeight = imageHeight;
var targetWidth = gl.canvas.width;
var targetHeight = gl.canvas.height;
drawImageWithRotation(
time * 0.001,
tex, texWidth, texHeight,
srcX, srcY, srcWidth, srcHeight,
dstX, dstY, dstWidth, dstHeight,
targetWidth, targetHeight);
}
requestAnimationFrame(render);
}
requestAnimationFrame(render);
// we pass in texWidth and texHeight because unlike images
// we can't look up the width and height of a texture
// we pass in targetWidth and targetHeight to tell it
// the size of the thing we're drawing too. We could look
// up the size of the canvas with gl.canvas.width and
// gl.canvas.height but maybe we want to draw to a framebuffer
// etc.. so might as well pass those in.
// srcX, srcY, srcWidth, srcHeight are in pixels
// computed from texWidth and texHeight
// dstX, dstY, dstWidth, dstHeight are in pixels
// computed from targetWidth and targetHeight
function drawImageWithRotation(
rotation,
tex, texWidth, texHeight,
srcX, srcY, srcWidth, srcHeight,
dstX, dstY, dstWidth, dstHeight,
targetWidth, targetHeight) {
var mat = m4.identity();
var tmat = m4.identity();
var uniforms = {
matrix: mat,
textureMatrix: tmat,
texture: tex,
};
// these adjust the unit quad to generate texture coordinates
// to select part of the src texture
// NOTE: no check is done that srcX + srcWidth go outside of the
// texture or are in range in any way. Same for srcY + srcHeight
m4.translate(tmat, [srcX / texWidth, srcY / texHeight, 0], tmat);
m4.scale(tmat, [srcWidth / texWidth, srcHeight / texHeight, 1], tmat);
// convert from pixels to clipspace
m4.ortho(0, targetWidth, targetHeight, 0, -1, 1, mat);
// rotate around center of canvas
m4.translate(mat, [targetWidth / 2, targetHeight / 2, 0], mat);
m4.rotateZ(mat, rotation, mat);
m4.translate(mat, [-targetWidth / 2, -targetHeight / 2, 0], mat);
// these move and scale the unit quad into the size we want
// in the target as pixels
m4.translate(mat, [dstX, dstY, 0], mat);
m4.scale(mat, [dstWidth, dstHeight, 1], mat);
gl.useProgram(programInfo.program);
// calls gl.bindBuffer, gl.enableVertexAttribArray, gl.vertexAttribPointer
twgl.setBuffersAndAttributes(gl, programInfo, bufferInfo);
// calls gl.uniformXXX, gl.activeTexture, gl.bindTexture
twgl.setUniforms(programInfo, uniforms);
// calls gl.drawArray or gl.drawElements
twgl.drawBufferInfo(gl, bufferInfo);
}
// =====================================================================
// Everything below this line represents stuff from the server.
// so it's irrelevant to the answer
//
function simulateSendingAnImageNColumnsAtATime(minColumnsPerChunk, maxColumnsPerChunk, callback) {
var imageData = createImageToSend(640, 480);
// cut data into columns at start because this work would be done on
// the server
var columns = [];
var x = 0;
while (x < imageData.width) {
// how many columns are left?
var maxWidth = imageData.width - x;
// how many columns should we send
var columnWidth = Math.min(maxWidth, rand(minColumnsPerChunk, maxColumnsPerChunk + 1));
var data = createImageChunk(imageData, imageData.width - x - columnWidth, 0, columnWidth, imageData.height);
columns.push({
newImage: x === 0,
lines: columnWidth,
width: imageData.width,
height: imageData.height,
data: data,
});
x += columnWidth;
}
var columnNdx = 0;
sendNextColumn();
function sendNextColumn() {
if (columnNdx < columns.length) {
callback(columns[columnNdx++]);
if (columnNdx < columns.length) {
// could make this random to simulate varying network speed
var timeToNextChunkMS = 17;
setTimeout(sendNextColumn, timeToNextChunkMS);
}
}
}
}
function createImageChunk(imageData, x, y, width, height) {
var data = new Uint8Array(width * height);
for (var yy = 0; yy < height; ++yy) {
for (var xx = 0; xx < width; ++xx) {
var srcOffset = ((yy + y) * imageData.width + xx + x) * 4;
var dstOffset = yy * width + xx;
// compute a gray value (brightest channel)
var gray = Math.max(imageData.data[srcOffset], imageData.data[srcOffset + 1], imageData.data[srcOffset + 2]);
data[dstOffset] = gray;
}
}
return data;
}
function rand(min, max) {
return Math.floor(Math.random() * (max - min)) + min;
}
function createImageToSend(width, height) {
// create a texture using a canvas so we don't have to download one
var ctx = document.createElement("canvas").getContext("2d");
ctx.canvas.width = width;
ctx.canvas.height = height;
ctx.fillStyle = "#222";
ctx.fillRect(0, 0, ctx.canvas.width, ctx.canvas.height);
ctx.lineWidth = 20;
["#AAA", "#888", "#666"].forEach(function(color, ndx, array) {
ctx.strokeStyle = color;
ctx.beginPath();
ctx.arc((ndx + 1) / (array.length + 1) * ctx.canvas.width, ctx.canvas.height / 2,
ctx.canvas.height * 0.4, 0, Math.PI * 2, false);
ctx.stroke();
});
ctx.fillStyle = "white";
ctx.font = "40px sans-serif";
ctx.textAlign = "center";
ctx.textBaseline = "middle";
ctx.fillText("Some Image", ctx.canvas.width / 2, ctx.canvas.height / 2);
return ctx.getImageData(0, 0, ctx.canvas.width, ctx.canvas.height);
}
canvas { border: 1px solid black; }
<script src="https://twgljs.org/dist/3.x/twgl-full.min.js"></script>
<script id="vs" type="not-js">
// we will always pass a 0 to 1 unit quad
// and then use matrices to manipulate it
attribute vec4 position;
uniform mat4 matrix;
uniform mat4 textureMatrix;
varying vec2 texcoord;
void main () {
gl_Position = matrix * position;
texcoord = (textureMatrix * position).xy;
}
</script>
<script id="fs" type="not-js">
precision mediump float;
varying vec2 texcoord;
uniform sampler2D texture;
void main() {
gl_FragColor = texture2D(texture, texcoord);
}
</script>
<canvas id="c" width="640" height="480"></canvas>
gl.texImage2D is slow and there is not much that can be done to improve on this. The reason is that texImage2D involves a state change and requires the GPU to halt all rendering and then fetch the data from CPU RAM. Depending on the hardware, the interface between the main board and the GPU can be very slow compared to RAM access speed.
You also add to the problem with the resolution of the image. All images on the GPU have sizes that are powers of 2 (32, 64, 128, 256, 512, 1024, ...), independently for width and height. An image that is 640 by 480 does not fit this rule, so to accommodate the awkward size the GPU allocates an image that is 1024 wide by 512 high and has to re-fit the image data to those internal dimensions (fast as GPUs are, this is not something they are good at). Depending on the hardware, this causes an additional slowdown on top of the already slow data transfer.
You may get a slight improvement if you make your data buffer match the power-of-two (POT) rule (1024 by 512).
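For illustration, here is a minimal sketch of that idea; it assumes a WebGL context gl and a Uint8Array imagePixels holding 640x480 LUMINANCE data (the variable names and sizes are mine, not from the question):
// allocate a power-of-two texture once, with no pixel data yet
var POT_WIDTH = 1024;  // next power of two >= 640
var POT_HEIGHT = 512;  // next power of two >= 480
var potTex = gl.createTexture();
gl.bindTexture(gl.TEXTURE_2D, potTex);
gl.texImage2D(gl.TEXTURE_2D, 0, gl.LUMINANCE, POT_WIDTH, POT_HEIGHT, 0,
              gl.LUMINANCE, gl.UNSIGNED_BYTE, null);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.LINEAR);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE);
// later, copy the 640x480 image into the top-left corner
gl.texSubImage2D(gl.TEXTURE_2D, 0, 0, 0, 640, 480,
                 gl.LUMINANCE, gl.UNSIGNED_BYTE, imagePixels);
// when sampling, only use texture coordinates up to 640/1024 in u and 480/512 in v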
Your best option is to avoid the transfer until the entire image has loaded, then do it just once.
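A rough sketch of that single-upload approach, assuming the chunks arrive as consecutive rows and that a texture of the right size (such as potTex above) already exists; onChunk and fullImage are made-up names:
// accumulate incoming chunks on the CPU, upload to the GPU only once at the end
var fullImage = new Uint8Array(640 * 480);
var received = 0;
function onChunk(chunk) {   // chunk: Uint8Array of LUMINANCE bytes from the server
  fullImage.set(chunk, received);
  received += chunk.length;
  if (received === fullImage.length) {
    gl.bindTexture(gl.TEXTURE_2D, potTex);
    gl.texSubImage2D(gl.TEXTURE_2D, 0, 0, 0, 640, 480,
                     gl.LUMINANCE, gl.UNSIGNED_BYTE, fullImage);
  }
}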
If you really need it live, then I suggest you divide the image into smaller images and send those separately. For example, the POT image size of 1024 by 512 could be divided into 128 by 64 images, or into 1024 by 8 strips, resulting in 64 smaller images. Send each small image only as it becomes available, and on the GPU reassemble them into one image during rendering. This will give you almost a 64 times improvement in the time each transfer to the GPU takes.
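Sketching that incremental approach with 1024 by 8 strips (again the names are illustrative and the power-of-two texture from above is assumed):
var STRIP_WIDTH = 1024;
var STRIP_HEIGHT = 8;   // 512 / 8 = 64 strips in total
var nextRow = 0;
function onStripReceived(stripPixels) {  // Uint8Array of STRIP_WIDTH * STRIP_HEIGHT bytes
  gl.bindTexture(gl.TEXTURE_2D, potTex);
  gl.texSubImage2D(gl.TEXTURE_2D, 0,
                   0, nextRow,                 // x, y offset inside the texture
                   STRIP_WIDTH, STRIP_HEIGHT,  // size of this strip
                   gl.LUMINANCE, gl.UNSIGNED_BYTE, stripPixels);
  nextRow += STRIP_HEIGHT;
  // redraw the fullscreen quad (or wait for the next animation frame)
  // so the image appears to fill in strip by strip
}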
Apart from that there is not much else that can be done. GPUs are good at rendering, but they are bad at main-board IO; avoid it at all costs (during rendering) to get the most out of the graphics hardware.

The Fastest Way to Batch Calls in WebGL

I'm trying to rewrite my canvas-based rendering for my 2d game engine. I've made good progress and can render textures to the webgl context fine, complete with scaling, rotation and blending. But my performance sucks. On my test laptop, I can get 30 fps in vanilla 2d canvas with 1,000 entities on screen at once; in WebGL, I get 30 fps with 500 entities on screen. I'd expect the situation to be reversed!
I have a sneaking suspicion that the culprit is all this Float32Array buffer garbage I'm tossing around. Here's my render code:
// boilerplate code and obj coordinates
// grab gl context
var canvas = sys.canvas;
var gl = sys.webgl;
var program = sys.glProgram;
// width and height
var scale = sys.scale;
var tileWidthScaled = Math.floor(tileWidth * scale);
var tileHeightScaled = Math.floor(tileHeight * scale);
var normalizedWidth = tileWidthScaled / this.width;
var normalizedHeight = tileHeightScaled / this.height;
var worldX = targetX * scale;
var worldY = targetY * scale;
this.bindGLBuffer(gl, this.vertexBuffer, sys.glWorldLocation);
this.bufferGLRectangle(gl, worldX, worldY, tileWidthScaled, tileHeightScaled);
gl.activeTexture(gl.TEXTURE0);
gl.bindTexture(gl.TEXTURE_2D, this.texture);
var frameX = (Math.floor(tile * tileWidth) % this.width) * scale;
var frameY = (Math.floor(tile * tileWidth / this.width) * tileHeight) * scale;
// fragment (texture) shader
this.bindGLBuffer(gl, this.textureBuffer, sys.glTextureLocation);
this.bufferGLRectangle(gl, frameX, frameY, normalizedWidth, normalizedHeight);
gl.drawArrays(gl.TRIANGLES, 0, 6);
bufferGLRectangle: function (gl, x, y, width, height) {
var left = x;
var right = left + width;
var top = y;
var bottom = top + height;
gl.bufferData(gl.ARRAY_BUFFER, new Float32Array([
left, top,
right, top,
left, bottom,
left, bottom,
right, top,
right, bottom
]), gl.STATIC_DRAW);
},
bindGLBuffer: function (gl, buffer, location) {
gl.bindBuffer(gl.ARRAY_BUFFER, buffer);
gl.vertexAttribPointer(location, 2, gl.FLOAT, false, 0, 0);
},
And here are my simple test shaders (these are missing blending, scaling & rotation):
// fragment (texture) shader
precision mediump float;
uniform sampler2D image;
varying vec2 texturePosition;
void main() {
gl_FragColor = texture2D(image, texturePosition);
}
// vertex shader
attribute vec2 worldPosition;
attribute vec2 vertexPosition;
uniform vec2 canvasResolution;
varying vec2 texturePosition;
void main() {
vec2 zeroToOne = worldPosition / canvasResolution;
vec2 zeroToTwo = zeroToOne * 2.0;
vec2 clipSpace = zeroToTwo - 1.0;
gl_Position = vec4(clipSpace * vec2(1, -1), 0, 1);
texturePosition = vertexPosition;
}
Any ideas on how to get better performance? Is there a way to batch my drawArrays? Is there a way to cut down on the buffer garbage?
Thanks!
There are two big issues I can see here that will adversely affect your performance.
You're creating a lot of temporary Float32Arrays, which are currently expensive to construct (that should get better in the future). It would be far better in this case to create a single array and set the vertices each time, like so:
verts[0] = left; verts[1] = top;
verts[2] = right; verts[3] = top;
// etc...
gl.bufferData(gl.ARRAY_BUFFER, verts, gl.STATIC_DRAW);
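To make that first point concrete, here is a sketch that reuses one module-level scratch array for every quad, plugged into the question's variables (quadVerts and fillQuadVerts are made-up names, not part of the question's code):
var quadVerts = new Float32Array(12); // 6 vertices * 2 floats, allocated once
function fillQuadVerts(left, top, right, bottom) {
  quadVerts[0]  = left;  quadVerts[1]  = top;
  quadVerts[2]  = right; quadVerts[3]  = top;
  quadVerts[4]  = left;  quadVerts[5]  = bottom;
  quadVerts[6]  = left;  quadVerts[7]  = bottom;
  quadVerts[8]  = right; quadVerts[9]  = top;
  quadVerts[10] = right; quadVerts[11] = bottom;
}
// in place of bufferGLRectangle:
fillQuadVerts(worldX, worldY, worldX + tileWidthScaled, worldY + tileHeightScaled);
gl.bufferData(gl.ARRAY_BUFFER, quadVerts, gl.STATIC_DRAW); // no new Float32Array per draw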
The bigger issue by far, however, is that you're only drawing a single quad at a time. 3D APIs simply aren't designed to do this efficiently. What you want to do is try to squeeze as many triangles as possible into each drawArrays/drawElements call you make.
There are several ways to do that, the most straightforward being to fill up a buffer with as many quads as you can that share the same texture, then draw them all in one go. In pseudocode:
var MAX_QUADS_PER_BATCH = 100;
var VERTS_PER_QUAD = 6;
var FLOATS_PER_VERT = 2;
var verts = new Float32Array(MAX_QUADS_PER_BATCH * VERTS_PER_QUAD * FLOATS_PER_VERT);
var quadCount = 0;
function addQuad(left, top, right, bottom) {
var offset = quadCount * VERTS_PER_QUAD * FLOATS_PER_VERT;
verts[offset] = left; verts[offset+1] = top;
verts[offset+2] = right; verts[offset+3] = top;
// etc...
quadCount++;
if(quadCount == MAX_QUADS_PER_BATCH) {
flushQuads();
}
}
function flushQuads() {
gl.bindBuffer(gl.ARRAY_BUFFER, vertsBuffer);
gl.bufferData(gl.ARRAY_BUFFER, verts, gl.STATIC_DRAW); // Copy the buffer we've been building to the GPU.
// Make sure vertexAttribPointers are set, etc...
gl.drawArrays(gl.TRIANGLES, 0, quadCount * VERTS_PER_QUAD); // vertex count, not quad count
quadCount = 0; // start the next batch fresh
}
// In your render loop
for(sprite in spriteTypes) {
gl.bindTexture(gl.TEXTURE_2D, sprite.texture);
for(instance in sprite.instances) {
addQuad(instance.left, instance.top, instance.right, instance.bottom);
}
flushQuads();
}
That's an oversimplification, and there are ways to batch even more, but hopefully it gives you an idea of how to start batching your calls for better performance.
If you use WebGL Inspector you'll see in the trace whether you issue any unnecessary GL instructions (they're marked with a bright yellow background). That might give you an idea of how to optimize your rendering.
Generally speaking, sort your draw calls so that everything sharing the same program, then the same attributes, then the same textures, and then the same uniforms is drawn consecutively. That way you issue as few GL (and JS) instructions as possible.
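As a rough illustration of that ordering, here is a sketch that groups sprites by texture and reuses the addQuad/flushQuads batching from the answer above (the sprite object shape is invented for the example):
// group sprites by texture so each texture is bound only once per frame
var byTexture = new Map();
sprites.forEach(function(s) {   // s = { texture, left, top, right, bottom }
  var list = byTexture.get(s.texture) || [];
  list.push(s);
  byTexture.set(s.texture, list);
});
gl.useProgram(program);         // one program for everything
// set shared uniforms and attribute pointers once here
byTexture.forEach(function(list, texture) {
  gl.bindTexture(gl.TEXTURE_2D, texture);
  list.forEach(function(s) {
    addQuad(s.left, s.top, s.right, s.bottom);
  });
  flushQuads();                 // at most a few draw calls per texture
});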
