To prevent XY-problems, I'll describe the problem, followed by my current idea for a solution. Given this has to have been solved a thousand times by other people, I need a sanity check, whether I am running down the completely wrong path, or not.
In 2D pixel graphics (GL_NEAREST), when translating by half of a screen pixel, rounding behavior determines the color of a fragment. This means frequently choosing the wrong texel.
From my understanding, in the following image:
On the left side, fragments' UV-coordinates will be exactly between texels (by construction, UVs selecting size of the quad pixels from the texture).
The rasterizer won't include some fragments (e.g. left side, red stripes, not shown on the right), due to conflict rules for seamless connections between triangles.
Default rounding will only be correct, if UV-space up is also screen-space up. This fails immediately when rotating, see the right side.
E.g. for a y-flipped texture, the selected texels are one row off. Clamping to prevent texture bleeds would result in one row being used twice, and one missing¹. When freely rotating sprites, these errors may occur for certain rows or columns.
I'm aware of two common approaches, which both seem to fail:
Rounding vertices to pixel-corners, which I don't see working when sprites can rotate.
Having transparent border pixels in the texture-atlas. This is a somewhat dirty patch, and has costs. It destroys power-of-two sized textures (unless increasing the size by four times), and required border-size depends on mipmap-level.
For 2D pixel-graphics, almost everything is a textured quad. Given this, my current idea is to render each quad slightly larger (so that both sides' fragments are included in the problem-case). I'd then manually round to the middle of texels in the fragment-shader, and drop fragments which would fall outside of the intended texture.
Is this a bad idea? How do other people solve this?
(1): Example webgl code always needs a lot of boilerplate. The problem imho does not require a demonstration, but people do love clicking things which display something.
The following has y-flipped UVs, and clamps in the fragment-shader. To be able to see texels selected, a colorful texture is generated:
The entire thing is on 8x8 pixels, using a quad constructed to be 5x5, upscaled for ease-of-view.
// Throws an Error carrying `message` whenever `condition` is falsy.
const assert = (condition, message) => {
  if (!condition) {
    throw new Error(message);
  }
};
// Grab the WebGL2 context with antialiasing disabled, so the tiny 8x8
// framebuffer shows raw, unfiltered pixels.
const canvas = document.getElementById('canvas');
const gl = canvas.getContext('webgl2', { antialias: false });
assert(gl !== null, `WebGL2 was unexpectedly not supported.`);
// --- Procedural test texture -------------------------------------------
// Bell-curve falloff: E^(-(1.6 x)^2), peaking at 1 for x = 0.
function distribution(x) {
  const scaled = 1.6 * x;
  return Math.E ** -(scaled ** 2);
}

// Color cycle length: texel colors repeat every `rollover` texels.
const rollover = 11;

// RGB triplet for texel index `i`: each channel samples the falloff at a
// phase-shifted position, producing a smoothly cycling rainbow.
function getColors(i) {
  const phase = (i % rollover) / rollover * 3;
  return [0, 1, 2].map((channel) => {
    const x = ((channel + 0.5 + phase) % 3) - 1.5;
    return Math.round(0xff * distribution(x));
  });
}

// Texture edge length in texels (the canvas is the same size in pixels).
const textureSize = 8;

// RGBA8 texel data: one rainbow color per texel, fully opaque.
const texelBytes = [];
for (let i = 0; i < textureSize * textureSize; i += 1) {
  texelBytes.push(...getColors(i), 0xff);
}
const testImage = new Uint8Array(texelBytes);

// --- Quad geometry ------------------------------------------------------
const positionPerPixel = 2 / 8; // clip-space units per canvas pixel
const selectionInPixels = 5;    // the quad covers a 5x5 pixel region
const offsetY = -0.5;           // half-pixel vertical shift: the problem case

// Clip-space corner positions of the quad (counter-clockwise).
const vertices = [
  [0, 0],
  [selectionInPixels, 0],
  [selectionInPixels, selectionInPixels],
  [0, selectionInPixels],
].map(([x, y]) => [x, y + offsetY].map((coord) => (coord - 2) * positionPerPixel));

const pixelOffset = 2; // the selection starts at texel (2, 2)
const normalizedCoordPerPixel = 1 / textureSize;

// UV corners; deliberately y-flipped relative to `vertices` above.
const textureCoords = [
  [0, selectionInPixels],
  [selectionInPixels, selectionInPixels],
  [selectionInPixels, 0],
  [0, 0],
].map((pair) => pair.map((coord) => (coord + pixelOffset) * normalizedCoordPerPixel));

// Interleave [x, y, u, v] per vertex (4 floats each).
const interleaved = [];
vertices.forEach((position, i) => interleaved.push(...position, ...textureCoords[i]));
const vData = new Float32Array(interleaved);

// Two triangles sharing the 0-2 diagonal.
const indices = new Uint16Array([0, 1, 2, 0, 2, 3]);
// Upload the generated rainbow texture with NEAREST filtering in both
// directions so exactly one texel is chosen per fragment.
const texture = gl.createTexture();
gl.bindTexture(gl.TEXTURE_2D, texture);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.NEAREST);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.NEAREST);
// Consistency fix: use `textureSize` instead of a hard-coded 8 so the
// upload dimensions stay in sync with the generated `testImage` data.
gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGBA, textureSize, textureSize, 0, gl.RGBA, gl.UNSIGNED_BYTE, testImage);
const vao = gl.createVertexArray();
gl.bindVertexArray(vao);
gl.enableVertexAttribArray(0);
gl.enableVertexAttribArray(1);
const vBuffer = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, vBuffer);
// Stride 16 bytes = 4 floats per vertex: vec2 position + vec2 UV
// (UV starts at byte offset 8).
gl.vertexAttribPointer(0, 2, gl.FLOAT, false, 16, 0);
gl.vertexAttribPointer(1, 2, gl.FLOAT, false, 16, 8);
gl.bufferData(gl.ARRAY_BUFFER, vData, gl.STATIC_DRAW);
const iBuffer = gl.createBuffer();
gl.bindBuffer(gl.ELEMENT_ARRAY_BUFFER, iBuffer);
gl.bufferData(gl.ELEMENT_ARRAY_BUFFER, indices, gl.STATIC_DRAW);
// Pass-through vertex shader: position straight to clip space, UV
// forwarded to the fragment stage.
const vertexShaderSrc = `#version 300 es
precision highp float;
layout(location = 0) in vec2 aPosition;
layout(location = 1) in vec2 aTextureCoord;
out vec2 vTextureCoord;
void main(void) {
gl_Position = vec4(aPosition, 0.0, 1.0);
vTextureCoord = aTextureCoord;
}`;
// UV bounds of the selected texture region, baked into the shader below.
const maxTextureCoord = Math.max(...textureCoords.flat());
const minTextureCoord = Math.min(...textureCoords.flat());
// Fragment shader: clamp UVs slightly inside the selection (emulating a
// per-sprite CLAMP_TO_EDGE), then sample.
// NOTE(review): the bounds are injected via JS number-to-string
// conversion; a whole-number bound would serialize without a decimal
// point and appear as a GLSL int literal in a float context — fine for
// the current fractional values (0.25 / 0.875), but fragile; confirm
// bounds always stay fractional or format them explicitly.
const fragmentShaderSrc = `#version 300 es
precision highp float;
uniform sampler2D sampler;
in vec2 vTextureCoord;
out vec4 fColor;
void main(void){
vec2 clamped = vec2(
clamp(vTextureCoord.x, ${minTextureCoord} + 0.01, ${maxTextureCoord} - 0.01),
clamp(vTextureCoord.y, ${minTextureCoord} + 0.01, ${maxTextureCoord} - 0.01)
);
fColor = texture(sampler, clamped);
}`;
// Compile both shaders, link the program, and fail fast with the info
// log on any error; then bind the sampler to unit 0 and draw the quad.
const program = gl.createProgram();
assert(program !== null, `Program was unexpectedly \`null\`.`);
const vertexShader = gl.createShader(gl.VERTEX_SHADER);
assert(vertexShader !== null, `Vertex-shader was unexpectedly \`null\`.`);
gl.shaderSource(vertexShader, vertexShaderSrc);
gl.compileShader(vertexShader);
assert(gl.getShaderParameter(vertexShader, gl.COMPILE_STATUS), `Vertex-shader failed to compile:\n${gl.getShaderInfoLog(vertexShader)}`);
const fragmentShader = gl.createShader(gl.FRAGMENT_SHADER);
// Bug fix: this message previously said "Vertex-shader" (copy-paste).
assert(fragmentShader !== null, `Fragment-shader was unexpectedly \`null\`.`);
gl.shaderSource(fragmentShader, fragmentShaderSrc);
gl.compileShader(fragmentShader);
assert(gl.getShaderParameter(fragmentShader, gl.COMPILE_STATUS), `Fragment-shader failed to compile:\n${gl.getShaderInfoLog(fragmentShader)}`);
gl.attachShader(program, vertexShader);
gl.attachShader(program, fragmentShader);
gl.linkProgram(program);
assert(gl.getProgramParameter(program, gl.LINK_STATUS), `Program linking failed:\n${gl.getProgramInfoLog(program)}`);
gl.useProgram(program);
const uniformLocationSampler = gl.getUniformLocation(program, 'sampler');
gl.uniform1i(uniformLocationSampler, 0);
gl.clearColor(0, 0, 0, 1.0);
gl.clear(gl.COLOR_BUFFER_BIT);
gl.drawElements(gl.TRIANGLES, 6, gl.UNSIGNED_SHORT, 0);
#canvas {
width: 256px;
height: 256px;
image-rendering: pixelated;
}
<canvas id='canvas' width='8' height='8'></canvas>
The code is a slightly modified copy&paste from a related problem (limited subtexel precision leading to UVs snapping to exactly half-way between texels). Therefore, some oddities may still be left in there.
Related
I am getting what looks like a precision problem at 16 bit for texture uv. This might be common knowledge, and texture casting to normalized integer, but I could not find much¹ about that, nor would it make much sense to me (why degrade the coordinates at the last step?).
In real code (which is too long to show), this results in a texture bleed when zooming in too far, way before the expected issues for float32. The following demo has a lot of boilerplate, so here are the parts I think are important:
The test texture is a 512x512 checkerboard of 8x8 tiles, green and blue.
A square is rendered, with uv set to an 8x8 blue tile above the middle, sitting so it covers half the canvas.
const offset = -1 / (1 << bitsRequired); slightly shifts these uv on the y-axis.
const zoom = 2 ** (bitsRequired - 14); zooms in on the edge.
By construction, the uv offset is then set to a step that requires more than 16 bit. The zoom is chosen so exactly one green pixel should be rendered, without precision problems. However, at exactly 17 bit required precision, the pixel vanishes (at least for me, but it could be dependent on hardware/driver/whatever - if someone cannot reproduce, and it hasn't been mentioned already, please do).
At first, I thought I made a mistake. However, adding the rounding manually before the texture call, uncommenting the following, makes the green pixel line reappear:
vec2 texCoordRounded = vec2(
(floor(vTextureCoord.x * 512.) + 0.5) / 512.,
(floor(vTextureCoord.y * 512.) + 0.5) / 512.
);
Now I am confused. Did I miss something, or make a mistake? Does texture cast to some normalized integer? Why does this look like my precision runs out at 16 bit?
The following is copy&paste of the same code, with changed parameters (too cumbersome to parameterize this demo code):
With an offset requiring less than 16 bit, the green pixel line appears:
// Throws an Error carrying `message` whenever `condition` is falsy.
const assert = (condition, message) => {
  if (!condition) {
    throw new Error(message);
  }
};
// Acquire the WebGL2 rendering context; bail out loudly if unavailable.
const canvas = document.getElementById('canvas');
const gl = canvas.getContext('webgl2');
assert(gl !== null, `WebGL2 was unexpectedly not supported.`);
// 512x512 RGBA checkerboard of 8x8-texel tiles: a texel is green when
// exactly one of (column, row) falls in the second half of its
// 16-texel period, blue otherwise.
const GREEN = [0, 0xff, 0, 0xff];
const BLUE = [0, 0, 0xff, 0xff];
const checkerBytes = [];
for (let i = 0; i < 512 * 512; i += 1) {
  const inRightHalf = i % 16 > 7;
  const inTopHalf = Math.floor(i / 512) % 16 > 7;
  checkerBytes.push(...(inRightHalf !== inTopHalf ? GREEN : BLUE));
}
const testImage = new Uint8Array(checkerBytes);
// UV offset small enough to need `bitsRequired` bits of subtexel
// precision to represent.
const bitsRequired = 16;
const offset = -1 / (1 << bitsRequired);
// Interleaved [x, y, u, v]: the quad's UVs select one 8x8 tile
// (8 / 512 = 0.015625) just above the texture's vertical midpoint,
// shifted down by `offset`.
const tileSpan = 0.015625;
const vData = new Float32Array([
  -1, 0, 0, 0.5 + offset,
  1, 0, tileSpan, 0.5 + offset,
  1, 2, tileSpan, 0.5 + tileSpan + offset,
  -1, 2, 0, 0.5 + tileSpan + offset,
]);
// Scale-only projection zooming in on the tile edge.
const zoom = 2 ** (bitsRequired - 14);
const projection = new Float32Array([
  zoom, 0, 0, 0,
  0, zoom, 0, 0,
  0, 0, -zoom, 0,
  0, 0, 0, 1,
]);
// Two triangles sharing the 0-2 diagonal.
const indices = new Uint16Array([0, 1, 2, 0, 2, 3]);
// NEAREST-filtered 512x512 texture upload plus interleaved vertex state:
// stride 16 bytes = vec2 position + vec2 UV (UV at byte offset 8).
const texture = gl.createTexture();
gl.bindTexture(gl.TEXTURE_2D, texture);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.NEAREST);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.NEAREST);
gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGBA, 512, 512, 0, gl.RGBA, gl.UNSIGNED_BYTE, testImage);
const vao = gl.createVertexArray();
gl.bindVertexArray(vao);
gl.enableVertexAttribArray(0);
gl.enableVertexAttribArray(1);
const vBuffer = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, vBuffer);
gl.vertexAttribPointer(0, 2, gl.FLOAT, false, 16, 0);
gl.vertexAttribPointer(1, 2, gl.FLOAT, false, 16, 8);
gl.bufferData(gl.ARRAY_BUFFER, vData, gl.STATIC_DRAW);
const iBuffer = gl.createBuffer();
gl.bindBuffer(gl.ELEMENT_ARRAY_BUFFER, iBuffer);
gl.bufferData(gl.ELEMENT_ARRAY_BUFFER, indices, gl.STATIC_DRAW);
// Vertex shader: apply the zooming projection, forward the UV unchanged.
const vertexShaderSrc = `#version 300 es
precision highp float;
uniform mat4 projection;
layout(location = 0) in vec2 aPosition;
layout(location = 1) in vec2 aTextureCoord;
out vec2 vTextureCoord;
void main(void) {
gl_Position = projection * vec4(aPosition, 0.0, 1.0);
vTextureCoord = aTextureCoord;
}`;
// Fragment shader: sample with the raw interpolated UV. The commented-out
// lines inside the GLSL are the manual snap-to-texel-center workaround
// under discussion; here they stay disabled to show default behavior.
const fragmentShaderSrc = `#version 300 es
precision highp float;
uniform sampler2D sampler;
in vec2 vTextureCoord;
out vec4 fColor;
void main(void){
// vec2 texCoordRounded = vec2(
// (floor(vTextureCoord.x * 512.) + 0.5) / 512.,
// (floor(vTextureCoord.y * 512.) + 0.5) / 512.
// );
// vec4 color = texture(sampler, texCoordRounded);
vec4 color = texture(sampler, vTextureCoord);
fColor = color;
}`;
// Compile both shaders, link, set sampler + projection uniforms, draw.
const program = gl.createProgram();
assert(program !== null, `Program was unexpectedly \`null\`.`);
const vertexShader = gl.createShader(gl.VERTEX_SHADER);
assert(vertexShader !== null, `Vertex-shader was unexpectedly \`null\`.`);
gl.shaderSource(vertexShader, vertexShaderSrc);
gl.compileShader(vertexShader);
assert(gl.getShaderParameter(vertexShader, gl.COMPILE_STATUS), `Vertex-shader failed to compile:\n${gl.getShaderInfoLog(vertexShader)}`);
const fragmentShader = gl.createShader(gl.FRAGMENT_SHADER);
// Bug fix: this message previously said "Vertex-shader" (copy-paste).
assert(fragmentShader !== null, `Fragment-shader was unexpectedly \`null\`.`);
gl.shaderSource(fragmentShader, fragmentShaderSrc);
gl.compileShader(fragmentShader);
assert(gl.getShaderParameter(fragmentShader, gl.COMPILE_STATUS), `Fragment-shader failed to compile:\n${gl.getShaderInfoLog(fragmentShader)}`);
gl.attachShader(program, vertexShader);
gl.attachShader(program, fragmentShader);
gl.linkProgram(program);
assert(gl.getProgramParameter(program, gl.LINK_STATUS), `Program linking failed:\n${gl.getProgramInfoLog(program)}`);
gl.useProgram(program);
const uniformLocationSampler = gl.getUniformLocation(program, 'sampler');
gl.uniform1i(uniformLocationSampler, 0);
const uniformLocationProjection = gl.getUniformLocation(program, 'projection');
gl.uniformMatrix4fv(uniformLocationProjection, false, projection);
gl.clearColor(0, 0, 0, 1.0);
gl.clear(gl.COLOR_BUFFER_BIT);
gl.drawElements(gl.TRIANGLES, 6, gl.UNSIGNED_SHORT, 0);
<canvas id='canvas' width='256' height='256'></canvas>
Changing bitsRequired to 17 (only change) causes the problem, the green pixel line disappears:
// Assertion helper: throws on a falsy condition.
const assert = (condition, message) => {
if (!condition) throw new Error(message);
};
// Acquire the WebGL2 rendering context.
const canvas = document.getElementById('canvas');
const gl = canvas.getContext('webgl2');
assert(gl !== null, `WebGL2 was unexpectedly not supported.`);
// 512x512 RGBA checkerboard of 8x8-texel tiles (green vs. blue).
const testImage = new Uint8Array(Array.from(
{ length: 512 * 512 },
(_, i) => (i % 16 > 7) !== (Math.floor(i / 512) % 16 > 7)
? [0, 0xff, 0, 0xff]
: [0, 0, 0xff, 0xff],
).flat());
// 17 bits of required subtexel precision — the first value at which the
// green pixel line disappears (the demonstrated problem case).
const bitsRequired = 17;
const offset = -1 / (1 << bitsRequired);
// Interleaved [x, y, u, v]: UVs select one 8x8 tile (8 / 512 = 0.015625)
// just above the texture's vertical midpoint, shifted down by `offset`.
const vData = new Float32Array([
-1, 0, 0, 0.5 + offset, 1, 0, 0.015625, 0.5 + offset,
1, 2, 0.015625, 0.515625 + offset, -1, 2, 0, 0.515625 + offset,
]);
// Scale-only projection zooming in on the tile edge.
const zoom = 2 ** (bitsRequired - 14);
const projection = new Float32Array([
zoom, 0, 0, 0,
0, zoom, 0, 0,
0, 0, -zoom, 0,
0, 0, 0, 1,
]);
// Two triangles forming the quad.
const indices = new Uint16Array([0, 1, 2, 0, 2, 3]);
// NEAREST-filtered 512x512 texture upload plus interleaved vertex state:
// stride 16 bytes = vec2 position + vec2 UV (UV at byte offset 8).
const texture = gl.createTexture();
gl.bindTexture(gl.TEXTURE_2D, texture);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.NEAREST);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.NEAREST);
gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGBA, 512, 512, 0, gl.RGBA, gl.UNSIGNED_BYTE, testImage);
const vao = gl.createVertexArray();
gl.bindVertexArray(vao);
gl.enableVertexAttribArray(0);
gl.enableVertexAttribArray(1);
const vBuffer = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, vBuffer);
gl.vertexAttribPointer(0, 2, gl.FLOAT, false, 16, 0);
gl.vertexAttribPointer(1, 2, gl.FLOAT, false, 16, 8);
gl.bufferData(gl.ARRAY_BUFFER, vData, gl.STATIC_DRAW);
const iBuffer = gl.createBuffer();
gl.bindBuffer(gl.ELEMENT_ARRAY_BUFFER, iBuffer);
gl.bufferData(gl.ELEMENT_ARRAY_BUFFER, indices, gl.STATIC_DRAW);
// Vertex shader: apply the zooming projection, forward the UV unchanged.
const vertexShaderSrc = `#version 300 es
precision highp float;
uniform mat4 projection;
layout(location = 0) in vec2 aPosition;
layout(location = 1) in vec2 aTextureCoord;
out vec2 vTextureCoord;
void main(void) {
gl_Position = projection * vec4(aPosition, 0.0, 1.0);
vTextureCoord = aTextureCoord;
}`;
// Fragment shader: sample with the raw interpolated UV; the manual
// snap-to-texel-center workaround remains commented out, so this variant
// demonstrates the failing 17-bit case.
const fragmentShaderSrc = `#version 300 es
precision highp float;
uniform sampler2D sampler;
in vec2 vTextureCoord;
out vec4 fColor;
void main(void){
// vec2 texCoordRounded = vec2(
// (floor(vTextureCoord.x * 512.) + 0.5) / 512.,
// (floor(vTextureCoord.y * 512.) + 0.5) / 512.
// );
// vec4 color = texture(sampler, texCoordRounded);
vec4 color = texture(sampler, vTextureCoord);
fColor = color;
}`;
// Compile both shaders, link, set sampler + projection uniforms, draw.
const program = gl.createProgram();
assert(program !== null, `Program was unexpectedly \`null\`.`);
const vertexShader = gl.createShader(gl.VERTEX_SHADER);
assert(vertexShader !== null, `Vertex-shader was unexpectedly \`null\`.`);
gl.shaderSource(vertexShader, vertexShaderSrc);
gl.compileShader(vertexShader);
assert(gl.getShaderParameter(vertexShader, gl.COMPILE_STATUS), `Vertex-shader failed to compile:\n${gl.getShaderInfoLog(vertexShader)}`);
const fragmentShader = gl.createShader(gl.FRAGMENT_SHADER);
// Bug fix: this message previously said "Vertex-shader" (copy-paste).
assert(fragmentShader !== null, `Fragment-shader was unexpectedly \`null\`.`);
gl.shaderSource(fragmentShader, fragmentShaderSrc);
gl.compileShader(fragmentShader);
assert(gl.getShaderParameter(fragmentShader, gl.COMPILE_STATUS), `Fragment-shader failed to compile:\n${gl.getShaderInfoLog(fragmentShader)}`);
gl.attachShader(program, vertexShader);
gl.attachShader(program, fragmentShader);
gl.linkProgram(program);
assert(gl.getProgramParameter(program, gl.LINK_STATUS), `Program linking failed:\n${gl.getProgramInfoLog(program)}`);
gl.useProgram(program);
const uniformLocationSampler = gl.getUniformLocation(program, 'sampler');
gl.uniform1i(uniformLocationSampler, 0);
const uniformLocationProjection = gl.getUniformLocation(program, 'projection');
gl.uniformMatrix4fv(uniformLocationProjection, false, projection);
gl.clearColor(0, 0, 0, 1.0);
gl.clear(gl.COLOR_BUFFER_BIT);
gl.drawElements(gl.TRIANGLES, 6, gl.UNSIGNED_SHORT, 0);
<canvas id='canvas' width='256' height='256'></canvas>
Activating the previously commented manual rounding before the texture call (only change) on what's still float32 causes the green pixel line to re-appear, fixing the problem, but why?
// Assertion helper: throws on a falsy condition.
const assert = (condition, message) => {
if (!condition) throw new Error(message);
};
// Acquire the WebGL2 rendering context.
const canvas = document.getElementById('canvas');
const gl = canvas.getContext('webgl2');
assert(gl !== null, `WebGL2 was unexpectedly not supported.`);
// 512x512 RGBA checkerboard of 8x8-texel tiles (green vs. blue).
const testImage = new Uint8Array(Array.from(
{ length: 512 * 512 },
(_, i) => (i % 16 > 7) !== (Math.floor(i / 512) % 16 > 7)
? [0, 0xff, 0, 0xff]
: [0, 0, 0xff, 0xff],
).flat());
// Same 17-bit problem case as before; this variant enables the manual
// UV rounding in the fragment shader to show it works around the issue.
const bitsRequired = 17;
const offset = -1 / (1 << bitsRequired);
// Interleaved [x, y, u, v]: UVs select one 8x8 tile (8 / 512 = 0.015625)
// just above the texture's vertical midpoint, shifted down by `offset`.
const vData = new Float32Array([
-1, 0, 0, 0.5 + offset, 1, 0, 0.015625, 0.5 + offset,
1, 2, 0.015625, 0.515625 + offset, -1, 2, 0, 0.515625 + offset,
]);
// Scale-only projection zooming in on the tile edge.
const zoom = 2 ** (bitsRequired - 14);
const projection = new Float32Array([
zoom, 0, 0, 0,
0, zoom, 0, 0,
0, 0, -zoom, 0,
0, 0, 0, 1,
]);
// Two triangles forming the quad.
const indices = new Uint16Array([0, 1, 2, 0, 2, 3]);
// NEAREST-filtered 512x512 texture upload plus interleaved vertex state:
// stride 16 bytes = vec2 position + vec2 UV (UV at byte offset 8).
const texture = gl.createTexture();
gl.bindTexture(gl.TEXTURE_2D, texture);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.NEAREST);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.NEAREST);
gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGBA, 512, 512, 0, gl.RGBA, gl.UNSIGNED_BYTE, testImage);
const vao = gl.createVertexArray();
gl.bindVertexArray(vao);
gl.enableVertexAttribArray(0);
gl.enableVertexAttribArray(1);
const vBuffer = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, vBuffer);
gl.vertexAttribPointer(0, 2, gl.FLOAT, false, 16, 0);
gl.vertexAttribPointer(1, 2, gl.FLOAT, false, 16, 8);
gl.bufferData(gl.ARRAY_BUFFER, vData, gl.STATIC_DRAW);
const iBuffer = gl.createBuffer();
gl.bindBuffer(gl.ELEMENT_ARRAY_BUFFER, iBuffer);
gl.bufferData(gl.ELEMENT_ARRAY_BUFFER, indices, gl.STATIC_DRAW);
// Vertex shader: apply the zooming projection, forward the UV unchanged.
const vertexShaderSrc = `#version 300 es
precision highp float;
uniform mat4 projection;
layout(location = 0) in vec2 aPosition;
layout(location = 1) in vec2 aTextureCoord;
out vec2 vTextureCoord;
void main(void) {
gl_Position = projection * vec4(aPosition, 0.0, 1.0);
vTextureCoord = aTextureCoord;
}`;
// Fragment shader: here the manual snap-to-texel-center rounding is
// ACTIVE (the direct-sampling line is commented out), which restores the
// green pixel line despite the 17-bit offset.
const fragmentShaderSrc = `#version 300 es
precision highp float;
uniform sampler2D sampler;
in vec2 vTextureCoord;
out vec4 fColor;
void main(void){
vec2 texCoordRounded = vec2(
(floor(vTextureCoord.x * 512.) + 0.5) / 512.,
(floor(vTextureCoord.y * 512.) + 0.5) / 512.
);
vec4 color = texture(sampler, texCoordRounded);
// vec4 color = texture(sampler, vTextureCoord);
fColor = color;
}`;
// Compile both shaders, link, set sampler + projection uniforms, draw.
const program = gl.createProgram();
assert(program !== null, `Program was unexpectedly \`null\`.`);
const vertexShader = gl.createShader(gl.VERTEX_SHADER);
assert(vertexShader !== null, `Vertex-shader was unexpectedly \`null\`.`);
gl.shaderSource(vertexShader, vertexShaderSrc);
gl.compileShader(vertexShader);
assert(gl.getShaderParameter(vertexShader, gl.COMPILE_STATUS), `Vertex-shader failed to compile:\n${gl.getShaderInfoLog(vertexShader)}`);
const fragmentShader = gl.createShader(gl.FRAGMENT_SHADER);
// Bug fix: this message previously said "Vertex-shader" (copy-paste).
assert(fragmentShader !== null, `Fragment-shader was unexpectedly \`null\`.`);
gl.shaderSource(fragmentShader, fragmentShaderSrc);
gl.compileShader(fragmentShader);
assert(gl.getShaderParameter(fragmentShader, gl.COMPILE_STATUS), `Fragment-shader failed to compile:\n${gl.getShaderInfoLog(fragmentShader)}`);
gl.attachShader(program, vertexShader);
gl.attachShader(program, fragmentShader);
gl.linkProgram(program);
assert(gl.getProgramParameter(program, gl.LINK_STATUS), `Program linking failed:\n${gl.getProgramInfoLog(program)}`);
gl.useProgram(program);
const uniformLocationSampler = gl.getUniformLocation(program, 'sampler');
gl.uniform1i(uniformLocationSampler, 0);
const uniformLocationProjection = gl.getUniformLocation(program, 'projection');
gl.uniformMatrix4fv(uniformLocationProjection, false, projection);
gl.clearColor(0, 0, 0, 1.0);
gl.clear(gl.COLOR_BUFFER_BIT);
gl.drawElements(gl.TRIANGLES, 6, gl.UNSIGNED_SHORT, 0);
<canvas id='canvas' width='256' height='256'></canvas>
[1]: Edit: I found a comment in an otherwise unrelated question which supports my guess of sampling using normalized integers, but still no official documentation:
Many modern GPUs use some fixed-point representation for the texcoords, so there is only a limited set of positions which can be sampled between two texels (typically 256).
Note that the comment is from 2014 (9 years old), and my guess would be the default is 16 bit normalized integer instead of 8 bit by now.
Edit2: I now also found this in the directx d3d spec (thanks to a blog post)
Texture coordinates for sampling operations are snapped to fixed point (after being scaled by texture size), to uniformly distribute precision across texture space, in choosing filter tap locations/weights. Weight values are converted back to floating point before actual filtering arithmetic is performed.
I still can't find any authoritative documentation for opengl/webgl though. It's getting more and more clear, that what's happening is exactly my guess, but where is the documentation, and is "uniform distribution" enough reason to cut off 8 bit of precision?
why degrade the coordinates at the last step?
... because GPUs can do a few billion texture filtering operations per second, and you really don't want to waste power and silicon area doing calculations at fp32 precision if all practical use cases need 8-bit fixed point.
Note this is 8-bits of sub-texel accuracy (i.e. granularity for GL_LINEAR filtering between two adjacent texels). Selecting texels is done at whatever higher precision is needed (most modern GPUs can uniquely address 16K textures, with 8-bits of subtexel accuracy).
The question may not be clear, but I am gonna clear it here. Let's consider an array which have th co-ordinates of a triangle in clip space :
var coor = [
-0.4, 0.6,
0.0, -0.5,
-0.5, 0.0,
0.5, -0.5,
0.0
]
So, the coor array will help us to draw a triangle using WebGL. But instead of it I want to go something like this :
var coor2 = [ 100, 100, 100 ]
In coor2 I gave the lengths of the sides to draw a triangle. It would be an equilateral triangle. So, can I do something such that I enter the three side lengths of a triangle and a script converts them to co-ordinates which can be clipped in clip space and can be read by WebGL?
Thanks In Advance
WebGL only cares that you set gl_Position to clip space values. It doesn't care how you do it. It's up to you to decide how to do that. Inside the vertex shader, at least in WebGL1, things are stateless. Meaning when you draw a triangle your vertex shader will be called 3 times. It needs to set gl_Position to 3 different values in clipspace with no state between calls. So given 100 three times, how would you compute a different value each time your vertex shader is called?
Just imagine it in JavaScript
const gl_Position1 = vertexShader(100);
const gl_Position2 = vertexShader(100);
const gl_Position3 = vertexShader(100);
How is the function vertexShader supposed to produce 3 different values with no other input? Unless there is some other state it can't produce a different value. vertex shaders in WebGL1 don't have any other state.
You need to pass in some data that changes every iteration.
Of course you if the values changed then you could produce a triangle. Example
// JS analogy of the GLSL vertex shader: maps a vertex id (0, 1, 2) to a
// point on the unit circle, 120 degrees apart, as clip-space [x, y, z, w].
const vertexShader = (v) => {
  const angle = v / 3 * Math.PI * 2;
  return [Math.cos(angle), Math.sin(angle), 0, 1];
};
// Three distinct positions emerge only because the input differs each call.
const gl_Position1 = vertexShader(0);
const gl_Position2 = vertexShader(1);
const gl_Position3 = vertexShader(2);
Would produce a triangle. Converting that to GLSL and WebGL
// Minimal WebGL1 demo: positions are synthesized inside the vertex
// shader from a single per-vertex id attribute.
const gl = document.querySelector('canvas').getContext('webgl');
// Vertex shader: maps id v in {0, 1, 2} to a point on the unit circle.
const vs = `
attribute float v;
#define PI radians(180.0)
void main() {
float angle = v / 3.0 * PI * 2.0;
gl_Position = vec4(cos(angle), sin(angle), 0, 1);
}
`;
// Fragment shader: solid red.
const fs = `
precision mediump float;
void main() {
gl_FragColor = vec4(1, 0, 0, 1);
}
`;
// twgl compiles and links the two shader sources into a program.
const program = twgl.createProgram(gl, [vs, fs]);
const vLocation = gl.getAttribLocation(program, 'v');
// Upload the three ids [0, 1, 2] as the only vertex attribute.
const buf = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, buf);
gl.bufferData(gl.ARRAY_BUFFER, new Float32Array([0, 1, 2]), gl.STATIC_DRAW);
gl.enableVertexAttribArray(vLocation);
gl.vertexAttribPointer(vLocation, 1, gl.FLOAT, false, 0, 0);
gl.useProgram(program);
gl.drawArrays(gl.TRIANGLES, 0, 3);
canvas { border: 1px solid black; }
<script src="https://twgljs.org/dist/4.x/twgl.min.js"></script>
<canvas></canvas>
Of course, given we only passed in [0, 1, 2], we can't easily specify a position, but the point is that now that we have a value that changes, we can produce a triangle — which is just to re-iterate: WebGL only cares that you set gl_Position to clip space values. It doesn't care how you do it.
See this article for more
I have the following code in glsl:
// snippet 1, works well
uniform vec4 uDiffuse;
uniform sampler2D uDiffuseMap;
uniform vec4 uSpecular;
uniform sampler2D uSpecularMap;
uniform float uShininess;
uniform samplerCube uEnvironmentMap;
// snippet 2, not work
struct PhongMaterial {
vec4 diffuse;
sampler2D diffuseMap;
vec4 specular;
sampler2D specularMap;
float shininess;
samplerCube environmentMap; // works well if I remove this line.
};
But it throw the following error:
[.WebGL-0x7fabfe002e00]RENDER WARNING: there is no texture bound to the unit 0
[.WebGL-0x7fabfe002e00]GL ERROR :GL_INVALID_OPERATION : GetShaderiv: <- error from previous GL command
[.WebGL-0x7fabfe002e00]GL ERROR :GL_INVALID_OPERATION : GLES2DecoderImpl::DoBindTexImage2DCHROMIUM: <- error from previous GL command
[.WebGL-0x7fabfe002e00]GL ERROR :GL_INVALID_OPERATION : glFramebufferTexture2D: <- error from previous GL command
[.WebGL-0x7fabfe002e00]GL ERROR :GL_INVALID_OPERATION : GLES2DecoderImpl::DoBindTexImage2DCHROMIUM: <- error from previous GL command
WebGL: too many errors, no more errors will be reported to the console for this context.
Here is an example:
https://codepen.io/scarletsky/pen/KEgBzx?editors=1010
What I want to do is to implement a shader which can receive both sampler2D and samplerCube uniforms. When no samplerCube is passed to the shader, it throws the errors above.
I have no idea what to do next. Can anyone help me ?
Your real error is likely some where else and related to you not binding your textures correctly or looking up the wrong locations or something else
RENDER WARNING: there is no texture bound to the unit 0
Here's a working example with your uniform structure
// Fragment shader using a uniform struct that mixes sampler2D and
// samplerCube members; all three samplers are sampled so none can be
// treated as unused.
const fs = `
precision mediump float;
struct PhongMaterial {
vec4 diffuse;
sampler2D diffuseMap;
vec4 specular;
sampler2D specularMap;
float shininess;
samplerCube environmentMap;
};
uniform PhongMaterial material;
void main() {
vec4 diffuse = texture2D(material.diffuseMap, gl_PointCoord.xy);
vec4 specular = texture2D(material.specularMap, gl_PointCoord.xy);
vec4 cube = textureCube(
material.environmentMap,
vec3(gl_PointCoord.xy, gl_PointCoord.x * gl_PointCoord.y) * 2. - 1.);
// use all 3 textures so we can see they were set
vec4 diffuseOrSpecular = mix(diffuse, specular, step(0.25, gl_PointCoord.y));
gl_FragColor = mix(diffuseOrSpecular, cube, step(0.5, gl_PointCoord.y));
}
`
// Vertex shader: a single 128x128-pixel point centered on the canvas.
const vs = `
void main() {
gl_Position = vec4(0, 0, 0, 1);
gl_PointSize = 128.0;
}
`;
// WebGL1 context and program built from the shader sources above.
const gl = document.querySelector('canvas').getContext('webgl');
const prg = twgl.createProgram(gl, [vs, fs]);
// Uniform locations for the three samplers; struct members are looked up
// with dotted `material.*` names.
const diffuseLocation = gl.getUniformLocation(prg, 'material.diffuseMap');
const specularLocation = gl.getUniformLocation(prg, 'material.specularMap');
const envmapLocation = gl.getUniformLocation(prg, 'material.environmentMap');
// 1x1 yellow diffuse texture.
const texDiffuse = gl.createTexture();
gl.bindTexture(gl.TEXTURE_2D, texDiffuse);
{
const level = 0;
const format = gl.RGBA;
const width = 1;
const height = 1;
const type = gl.UNSIGNED_BYTE;
const pixel = new Uint8Array([255, 255, 0, 255]); // yellow
gl.texImage2D(gl.TEXTURE_2D, level, format, width, height, 0, format, type, pixel);
}
// 1x1 blue specular texture.
const texSpecular = gl.createTexture();
gl.bindTexture(gl.TEXTURE_2D, texSpecular);
{
const level = 0;
const format = gl.RGBA;
const width = 1;
const height = 1;
const type = gl.UNSIGNED_BYTE;
const pixel = new Uint8Array([0, 0, 255, 255]); // blue
gl.texImage2D(gl.TEXTURE_2D, level, format, width, height, 0, format, type, pixel);
}
// Cube map with a distinct 1x1 color per face.
// NOTE(review): (i & 2) * 255 and (i & 4) * 255 can reach 510/1020, which
// a Uint8Array stores modulo 256 (254/252) — the faces still differ, but
// ((i >> 1) & 1) * 255 style masks were presumably intended; confirm.
const texCube = gl.createTexture();
gl.bindTexture(gl.TEXTURE_CUBE_MAP, texCube);
for (let i = 0; i < 6; ++i) {
const level = 0;
const format = gl.RGBA;
const width = 1;
const height = 1;
const type = gl.UNSIGNED_BYTE;
const pixel = new Uint8Array([(i & 1) * 255, (i & 2) * 255, (i & 4) * 255, 255]);
gl.texImage2D(gl.TEXTURE_CUBE_MAP_POSITIVE_X + i, level, format, width, height, 0, format, type, pixel);
}
gl.useProgram(prg);
// put the yellow diffuse texture on texture unit 0
gl.activeTexture(gl.TEXTURE0 + 0);
gl.bindTexture(gl.TEXTURE_2D, texDiffuse);
// use texture on texture unit 0
gl.uniform1i(diffuseLocation, 0);
// put the blue specular texture on texture unit 1
gl.activeTexture(gl.TEXTURE0 + 1);
gl.bindTexture(gl.TEXTURE_2D, texSpecular);
// tell the specular sampler to use texture unit 1
gl.uniform1i(specularLocation, 1);
// put the cubemap on texture unit 2
gl.activeTexture(gl.TEXTURE0 + 2);
gl.bindTexture(gl.TEXTURE_CUBE_MAP, texCube);
// tell the cubemap sampler to use texture unit 2
gl.uniform1i(envmapLocation, 2);
// draw one 128x128 pixel point
gl.drawArrays(gl.POINTS, 0, 1);
<script src="https://twgljs.org/dist/4.x/twgl.min.js"></script>
<canvas></canvas>
NOTE: you are required to supply a valid texture for every sampler uniform your shader claims is being used regardless of whether or not it is actually being used.
To find out if the shader claims it's being used call
gl.getUniformLocation(program, nameOfSamplerUniform);
If it returns non-null then AFAIK as WebGL is concerned you must supply a valid texture for that sampler.
If you don't actually need one (because of conditionals or something) then keep around a 1-pixel texture for 2D samplers, or a 6-pixel texture (1 pixel per face) for cube maps, and attach that texture when you don't need a specific texture.
For these cases I usually keep a white and/or black texture around. For example let's say I had math like
color = diffuseMapColor * diffuseColor + envMapColor;
If I only want diffuseColor then I can set diffuseMapColor to white and envMapColor to black which is effectively
color = 1 * diffuseColor + 0;
Similarly of I only want diffuseMapColor I can set diffuseColor to white and envMapColor to black and get
color = diffuseMapColor * 1 + 0;
and if I only want envMapColor then setting diffuseColor to 0 will work
color = diffuseMapColor * 0 + envMapColor;
is the same as
color = 0 + envMapColor;
On the other hand, most 3D engines would generate a different shaders for these cases. If no environment map is used they'd generate a shader that doesn't include an environment map. This is because generally doing less work in a shader is faster than doing more so a good 3D engine generates shaders for each case of what is needed.
Hello everyone,
I am trying to render an image using WebGL shaders, and I have successfully done that using WebGL samples, but the issue is that when I increase the size of the image, the quality is not good. I want to upscale and interpolate the image using the vertex and fragment shaders. Here is my sample:
"use strict";

// Loads the demo image (with CORS handling) and kicks off rendering
// once the download completes.
function main() {
  const url = "https://upload.wikimedia.org/wikipedia/commons/5/57/Pneumothorax_CT.jpg";
  const image = new Image();
  requestCORSIfNotSameOrigin(image, url);
  image.src = url;
  // Requested display size; independent of the source bitmap's size.
  image.width = 1000;
  image.height = 1000;
  image.onload = () => {
    render(image);
  };
}
function render(image) {
  // Draw `image` into the #canvas element with WebGL, mapping the full
  // texture onto a rectangle the same pixel size as the image.
  /** @type {HTMLCanvasElement} */
  var canvas = document.getElementById("canvas");
  var gl = canvas.getContext("webgl");
  if (!gl) {
    return; // WebGL not available
  }

  // setup GLSL program from the shader <script> tags in the page
  var program = webglUtils.createProgramFromScripts(gl, ["2d-vertex-shader", "2d-fragment-shader"]);

  // look up where the vertex data needs to go.
  var positionLocation = gl.getAttribLocation(program, "a_position");
  var texcoordLocation = gl.getAttribLocation(program, "a_texCoord");

  // Create a buffer holding a rectangle the same size as the image (pixels).
  var positionBuffer = gl.createBuffer();
  gl.bindBuffer(gl.ARRAY_BUFFER, positionBuffer);
  setRectangle(gl, 0, 0, image.width, image.height);

  // Provide texture coordinates for the rectangle (two triangles covering
  // the whole [0,1]x[0,1] texture).
  var texcoordBuffer = gl.createBuffer();
  gl.bindBuffer(gl.ARRAY_BUFFER, texcoordBuffer);
  gl.bufferData(gl.ARRAY_BUFFER, new Float32Array([
    0.0, 0.0,
    1.0, 0.0,
    0.0, 1.0,
    0.0, 1.0,
    1.0, 0.0,
    1.0, 1.0,
  ]), gl.STATIC_DRAW);

  // Create a texture, with parameters that allow rendering any size image
  // (no mips, clamp to edge — required for non-power-of-2 sizes in WebGL 1).
  var texture = gl.createTexture();
  gl.bindTexture(gl.TEXTURE_2D, texture);
  gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE);
  gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE);
  gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.NEAREST);
  gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.NEAREST);

  // Upload the image into the texture.
  gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGBA, gl.RGBA, gl.UNSIGNED_BYTE, image);

  // lookup uniforms
  var resolutionLocation = gl.getUniformLocation(program, "u_resolution");

  webglUtils.resizeCanvasToDisplaySize(gl.canvas);

  // Tell WebGL how to convert from clip space to pixels
  gl.viewport(0, 0, gl.canvas.width, gl.canvas.height);

  // Clear the canvas
  gl.clearColor(0, 0, 0, 0);
  gl.clear(gl.COLOR_BUFFER_BIT);

  // Tell it to use our program (pair of shaders)
  gl.useProgram(program);

  // Bind `buffer` to attribute `location` as 2 floats per vertex.
  // (Replaces two copy-pasted stanzas that redeclared the same vars.)
  function bindAttribute(location, buffer) {
    gl.enableVertexAttribArray(location);
    gl.bindBuffer(gl.ARRAY_BUFFER, buffer);
    var size = 2;            // 2 components per iteration
    var type = gl.FLOAT;     // the data is 32bit floats
    var normalize = false;   // don't normalize the data
    var stride = 0;          // 0 = advance size * sizeof(type) per iteration
    var offset = 0;          // start at the beginning of the buffer
    gl.vertexAttribPointer(location, size, type, normalize, stride, offset);
  }
  bindAttribute(positionLocation, positionBuffer);
  bindAttribute(texcoordLocation, texcoordBuffer);

  // set the resolution so the vertex shader can convert pixels to clip space
  gl.uniform2f(resolutionLocation, gl.canvas.width, gl.canvas.height);

  // Draw the rectangle (2 triangles = 6 vertices).
  gl.drawArrays(gl.TRIANGLES, 0, 6);
}
function setRectangle(gl, x, y, width, height) {
  // Upload two triangles covering the axis-aligned rectangle
  // [x, x+width] x [y, y+height] into the currently bound ARRAY_BUFFER.
  const left = x;
  const right = x + width;
  const top = y;
  const bottom = y + height;
  const vertices = new Float32Array([
    left,  top,
    right, top,
    left,  bottom,
    left,  bottom,
    right, top,
    right, bottom,
  ]);
  gl.bufferData(gl.ARRAY_BUFFER, vertices, gl.STATIC_DRAW);
}
main();
// This is needed if the images are not on the same domain
// NOTE: The server providing the images must give CORS permissions
// in order to be able to use the image with WebGL. Most sites
// do NOT give permission.
// See: http://webglfundamentals.org/webgl/lessons/webgl-cors-permission.html
function requestCORSIfNotSameOrigin(img, url) {
  // Request anonymous CORS access when the image is hosted on a different
  // origin; same-origin images need no crossOrigin attribute.
  const sameOrigin = new URL(url).origin === window.location.origin;
  if (!sameOrigin) {
    img.crossOrigin = "";
  }
}
@import url("https://webglfundamentals.org/webgl/resources/webgl-tutorials.css");
body {
margin: 0;
}
canvas {
width: 100vw;
height: 100vh;
display: block;
}
<div style="height:700px; width:700px; overflow:scroll;">
<canvas id="canvas"></canvas>
</div>
<!-- vertex shader -->
<script id="2d-vertex-shader" type="x-shader/x-vertex">
// Converts a_position from pixel coordinates to WebGL clip space and
// passes the texture coordinate through for interpolation.
attribute vec2 a_position;
attribute vec2 a_texCoord;
uniform vec2 u_resolution;
varying vec2 v_texCoord; void main() {
// convert the rectangle from pixels to 0.0 to 1.0
vec2 zeroToOne = a_position / u_resolution;
// convert from 0->1 to 0->2
vec2 zeroToTwo = zeroToOne * 2.0;
// convert from 0->2 to -1->+1 (clipspace)
vec2 clipSpace = zeroToTwo - 1.0;
// flip Y so pixel coordinates read top-to-bottom like the 2D canvas
gl_Position = vec4(clipSpace * vec2(1, -1), 0, 1);
// pass the texCoord to the fragment shader
// The GPU will interpolate this value between points.
v_texCoord = a_texCoord;
}
</script>
<!-- fragment shader -->
<script id="2d-fragment-shader" type="x-shader/x-fragment">
precision mediump float;
// our texture
uniform sampler2D u_image;
// the texCoords passed in from the vertex shader.
varying vec2 v_texCoord;
void main() {
// Look up a color from the texture at the interpolated coordinate.
gl_FragColor = texture2D(u_image, v_texCoord);
}
</script>
<script src="https://webglfundamentals.org/webgl/resources/webgl-utils.js"></script>
I need interpolation when the image is zoomed, or when it is sized to a maximum height like the AMI example provided below. Check this sample
It's not clear what you want to happen.
First off you set gl.NEAREST as your filtering. WebGL has several kinds of filtering, covered here. Setting them to gl.LINEAR would be better, but only
a little
The problem is WebGL 1.0 doesn't support mips for images that are not power of 2 dimensions (2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, etc...). That page describes what mips are used for (interpolation) but mips can only be used on images that are power of 2 dimensions. The image you're trying to display is not power of 2 dimensions, it's 954 × 687.
You have a few different options.
Download the image, edit to be power of 2 in both dimensions in a photo editing application. Then call gl.generateMipmap to generate mips for interpolation as described in that page
Copy the image to canvas that's a power of 2 in size then upload the canvas as a texture
Create a texture that's the next largest power of 2 then upload your image
// Smallest power of two that is greater than or equal to v.
function nearestGreaterOrEqualPowerOf2(v) {
  return 2 ** Math.ceil(Math.log2(v));
}
// Pad each dimension up to the next power of 2 so mips can be generated.
const newWidth = nearestGreaterOrEqualPowerOf2(image.width);
const newHeight = nearestGreaterOrEqualPowerOf2(image.height);
// first make an empty texture of the new size
// (passing null as the data allocates the texels without initializing them
// from an image)
const level = 0;
const format = gl.RGBA;
const type = gl.UNSIGNED_BYTE;
const border = 0;
gl.texImage2D(gl.TEXTURE_2D, level, format, newWidth, newHeight, border,
format, type, null);
// then upload the image into the bottom left corner of the texture
const xoffset = 0;
const yoffset = 0;
gl.texSubImage2D(gl.TEXTURE_2D, level, xoffset, yoffset, format, type, image);
// now because the texture is a power of 2 in both dimensions you can
// generate mips and turn on maximum filtering
gl.generateMipmap(gl.TEXTURE_2D);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.LINEAR_MIPMAP_LINEAR);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.LINEAR);
You have a new issue though in all these cases which is that the image is now just using a portion of the texture. You'd have to adjust your texture coordinates either using a texture matrix or by adjusting your texture coordinates directly.
// compute the texture coordinates that show only the portion of the
// padded power-of-2 texture that the image occupies.
// BUG FIX: the ratio must be image size / texture size (<= 1); the
// original had it inverted (newWidth / image.width > 1), which would
// sample past the image into the empty padding.
var u = image.width / newWidth;
var v = image.height / newHeight;
// provide texture coordinates for the rectangle (two triangles).
var texcoordBuffer = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, texcoordBuffer);
gl.bufferData(gl.ARRAY_BUFFER, new Float32Array([
  0, 0,
  u, 0,
  0, v,
  0, v,
  u, 0,
  u, v,
]), gl.STATIC_DRAW);
"use strict";
function main() {
  // Load the demo image (requesting CORS access first, since it is
  // cross-origin), then hand it to render() once it has arrived.
  const url = "https://upload.wikimedia.org/wikipedia/commons/5/57/Pneumothorax_CT.jpg";
  const image = new Image();
  requestCORSIfNotSameOrigin(image, url);
  image.src = url;
  image.onload = () => render(image);
}
function render(image) {
  // Render `image` into the #canvas element. Because WebGL 1 only supports
  // mips on power-of-2 textures, the image is uploaded into the corner of a
  // larger power-of-2 texture, and the texture coordinates are shrunk so
  // only the image portion is sampled.
  /** @type {HTMLCanvasElement} */
  var canvas = document.getElementById("canvas");
  var gl = canvas.getContext("webgl");
  if (!gl) {
    return; // WebGL not available
  }

  // setup GLSL program from the shader <script> tags in the page
  var program = webglUtils.createProgramFromScripts(gl, ["2d-vertex-shader", "2d-fragment-shader"]);

  // look up where the vertex data needs to go.
  var positionLocation = gl.getAttribLocation(program, "a_position");
  var texcoordLocation = gl.getAttribLocation(program, "a_texCoord");

  // Smallest power of two that is >= v.
  function nearestGreaterOrEqualPowerOf2(v) {
    return Math.pow(2, Math.ceil(Math.log2(v)));
  }
  const newWidth = nearestGreaterOrEqualPowerOf2(image.width);
  const newHeight = nearestGreaterOrEqualPowerOf2(image.height);

  // Create a buffer to put three 2d clip space points in
  var positionBuffer = gl.createBuffer();
  gl.bindBuffer(gl.ARRAY_BUFFER, positionBuffer);

  // Set a rectangle fitting the canvas width at the same aspect as the image.
  // BUG FIX: the original computed clientWidth / drawWidth * image.height,
  // which always equals image.height and ignores the aspect ratio.
  const drawWidth = canvas.clientWidth;
  const drawHeight = drawWidth / image.width * image.height;
  setRectangle(gl, 0, 0, drawWidth, drawHeight);

  // compute the texture coordinates that show only the portion of the
  // power-of-2 texture the image occupies.
  // BUG FIX: the ratio must be image size / texture size (<= 1); the
  // original had it inverted (newWidth / image.width > 1).
  const u = image.width / newWidth;
  const v = image.height / newHeight;

  // provide texture coordinates for the rectangle (two triangles).
  var texcoordBuffer = gl.createBuffer();
  gl.bindBuffer(gl.ARRAY_BUFFER, texcoordBuffer);
  gl.bufferData(gl.ARRAY_BUFFER, new Float32Array([
    0, 0,
    u, 0,
    0, v,
    0, v,
    u, 0,
    u, v,
  ]), gl.STATIC_DRAW);

  // Create a texture.
  var texture = gl.createTexture();
  gl.bindTexture(gl.TEXTURE_2D, texture);

  // First allocate an empty power-of-2 texture of the padded size...
  {
    const level = 0;
    const format = gl.RGBA;
    const type = gl.UNSIGNED_BYTE;
    const border = 0;
    gl.texImage2D(gl.TEXTURE_2D, level, format, newWidth, newHeight, border,
                  format, type, null);
    // ...then upload the image into its bottom left corner.
    const xoffset = 0;
    const yoffset = 0;
    gl.texSubImage2D(gl.TEXTURE_2D, level, xoffset, yoffset, format, type, image);
  }

  // now because the texture is a power of 2 in both dimensions you can
  // generate mips and turn on maximum filtering
  gl.generateMipmap(gl.TEXTURE_2D);
  gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE);
  gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE);
  gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.LINEAR_MIPMAP_LINEAR);
  gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.LINEAR);

  // lookup uniforms
  var resolutionLocation = gl.getUniformLocation(program, "u_resolution");

  webglUtils.resizeCanvasToDisplaySize(gl.canvas);

  // Tell WebGL how to convert from clip space to pixels
  gl.viewport(0, 0, gl.canvas.width, gl.canvas.height);

  // Clear the canvas
  gl.clearColor(0, 0, 0, 0);
  gl.clear(gl.COLOR_BUFFER_BIT);

  // Tell it to use our program (pair of shaders)
  gl.useProgram(program);

  // Bind `buffer` to attribute `location` as 2 floats per vertex.
  // (Replaces two copy-pasted stanzas that redeclared the same vars.)
  function bindAttribute(location, buffer) {
    gl.enableVertexAttribArray(location);
    gl.bindBuffer(gl.ARRAY_BUFFER, buffer);
    const size = 2;          // 2 components per iteration
    const type = gl.FLOAT;   // the data is 32bit floats
    const normalize = false; // don't normalize the data
    const stride = 0;        // 0 = advance size * sizeof(type) per iteration
    const offset = 0;        // start at the beginning of the buffer
    gl.vertexAttribPointer(location, size, type, normalize, stride, offset);
  }
  bindAttribute(positionLocation, positionBuffer);
  bindAttribute(texcoordLocation, texcoordBuffer);

  // set the resolution so the vertex shader can convert pixels to clip space
  gl.uniform2f(resolutionLocation, gl.canvas.width, gl.canvas.height);

  // Draw the rectangle (2 triangles = 6 vertices).
  gl.drawArrays(gl.TRIANGLES, 0, 6);
}
function setRectangle(gl, x, y, width, height) {
  // Upload two triangles covering the axis-aligned rectangle
  // [x, x+width] x [y, y+height] into the currently bound ARRAY_BUFFER.
  const left = x;
  const right = x + width;
  const top = y;
  const bottom = y + height;
  const vertices = new Float32Array([
    left,  top,
    right, top,
    left,  bottom,
    left,  bottom,
    right, top,
    right, bottom,
  ]);
  gl.bufferData(gl.ARRAY_BUFFER, vertices, gl.STATIC_DRAW);
}
main();
// This is needed if the images are not on the same domain
// NOTE: The server providing the images must give CORS permissions
// in order to be able to use the image with WebGL. Most sites
// do NOT give permission.
// See: http://webglfundamentals.org/webgl/lessons/webgl-cors-permission.html
function requestCORSIfNotSameOrigin(img, url) {
  // Request anonymous CORS access when the image is hosted on a different
  // origin; same-origin images need no crossOrigin attribute.
  const sameOrigin = new URL(url).origin === window.location.origin;
  if (!sameOrigin) {
    img.crossOrigin = "";
  }
}
@import url("https://webglfundamentals.org/webgl/resources/webgl-tutorials.css");
body {
margin: 0;
}
canvas {
width: 100vw;
height: 100vh;
display: block;
}
<div style="height:700px; width:700px; overflow:scroll;">
<canvas id="canvas"></canvas>
</div>
<!-- vertex shader -->
<script id="2d-vertex-shader" type="x-shader/x-vertex">
// Converts a_position from pixel coordinates to WebGL clip space and
// passes the texture coordinate through for interpolation.
attribute vec2 a_position;
attribute vec2 a_texCoord;
uniform vec2 u_resolution;
varying vec2 v_texCoord; void main() {
// convert the rectangle from pixels to 0.0 to 1.0
vec2 zeroToOne = a_position / u_resolution;
// convert from 0->1 to 0->2
vec2 zeroToTwo = zeroToOne * 2.0;
// convert from 0->2 to -1->+1 (clipspace)
vec2 clipSpace = zeroToTwo - 1.0;
// flip Y so pixel coordinates read top-to-bottom like the 2D canvas
gl_Position = vec4(clipSpace * vec2(1, -1), 0, 1);
// pass the texCoord to the fragment shader
// The GPU will interpolate this value between points.
v_texCoord = a_texCoord;
}
</script>
<!-- fragment shader -->
<script id="2d-fragment-shader" type="x-shader/x-fragment">
precision mediump float;
// our texture
uniform sampler2D u_image;
// the texCoords passed in from the vertex shader.
varying vec2 v_texCoord;
void main() {
// Look up a color from the texture at the interpolated coordinate.
gl_FragColor = texture2D(u_image, v_texCoord);
}
</script>
<script src="https://webglfundamentals.org/webgl/resources/webgl-utils.js"></script>
Already really informative and good answers and comments here.
Also please take into account that the link you provided uses high-resolution images of excellent quality, at least with no compression artifacts.
Unpacked ~21mb in NIFTI Data Format.
using ami.js to unpack it:
https://github.com/FNNDSC/ami
Using the example by gman with a good image resolution that fits your screen resolution should give you a decent result.
Yes, there are some algorithms to fix bad image quality and deal with image compression artifacts, but (and I don't want to repeat the comments here) generally speaking, once the information is lost, it is gone.
I am using PNaCl ffmpeg to open, read and decode RTSP stream. I am now having raw video frames which I need to transfer to WebGl to render on the canvas.
How can I render binary data on the canvas?
I am running the following code: I presume that I should get a grey canvas after running this code, because I am passing RGBA values of (120,120,120,1) to the synthetic data.
var canvas = document.getElementById('canvas');
var gl = initWebGL(canvas); //function initializes webgl
initViewport(gl, canvas); //initializes view port
console.log('viewport initialized');
// Build the raw RGBA data for a 16x16 texture (256 pixels, 4 values each).
var data = [];
for (var i = 0 ; i < 256; i++){
// NOTE(review): for UNSIGNED_BYTE textures each channel ranges 0-255,
// so an alpha of 1.0 here means 1/255 (almost fully transparent);
// 255 is presumably intended.
data.push(120,120,120,1.0);
}
console.log(data);
var pixels = new Uint8Array(data); // 16x16 RGBA image
var texture = gl.createTexture();
gl.bindTexture(gl.TEXTURE_2D, texture);
// Upload the pixel data as mip level 0 of the bound texture.
gl.texImage2D(
gl.TEXTURE_2D, // target
0, // mip level
gl.RGBA, // internal format
16, 16, // width and height
0, // border
gl.RGBA, //format
gl.UNSIGNED_BYTE, // type
pixels // texture data
);
console.log('pixels');
console.log(pixels);
<canvas id="canvas"></canvas>
I should get a grey 16x16 box being represented on the canvas, but I am not getting that. What additional steps do I need to take to correctly render the 2D bitmap on the canvas?
PS. I am taking help from this article.
Console output:
As pointed out in the comments, alpha in WebGL, for the type of texture you're creating, ranges from 0 to 255. You're putting in 1.0, which = 1/255, or an alpha of 0.004.
But on top of that you say
I am running the following code: I presume that I should get a grey canvas after running this code
That code is not enough for WebGL. WebGL requires you to supply a vertex shader and fragment shader, vertex data for vertices and then call either gl.drawArrays or gl.drawElements to render something. The code you provided doesn't do those things and without those things we can't tell what else you're doing.
You're also only supplying mip level 0. You either need to supply mips or set texture filtering so only the first level is used otherwise the texture is unrenderable (you'll get a warning about it in the JavaScript console of most browsers).
Here's a working example
var canvas = document.getElementById('canvas');
var gl = canvas.getContext("webgl");
// Build a 16x16 RGBA texture filled with opaque grey (alpha 255, not 1.0).
var data = [];
for (var i = 0 ; i < 256; i++){
data.push(120,120,120,255);
}
var pixels = new Uint8Array(data); // 16x16 RGBA image
var texture = gl.createTexture();
gl.bindTexture(gl.TEXTURE_2D, texture);
gl.texImage2D(
gl.TEXTURE_2D, // target
0, // mip level
gl.RGBA, // internal format
16, 16, // width and height
0, // border
gl.RGBA, //format
gl.UNSIGNED_BYTE, // type
pixels // texture data
);
gl.generateMipmap(gl.TEXTURE_2D); // you need to do this or set filtering
// compiles and links the shaders and looks up uniform and attribute locations
var programInfo = twgl.createProgramInfo(gl, ["vs", "fs"]);
// Two triangles covering the whole canvas in clip space.
var arrays = {
position: [
-1, -1, 0,
1, -1, 0,
-1, 1, 0,
-1, 1, 0,
1, -1, 0,
1, 1, 0,
],
};
// calls gl.createBuffer, gl.bindBuffer, gl.bufferData for each array
var bufferInfo = twgl.createBufferInfoFromArrays(gl, arrays);
var uniforms = {
u_texture: texture,
};
gl.useProgram(programInfo.program);
// calls gl.bindBuffer, gl.enableVertexAttribArray, gl.vertexAttribPointer
twgl.setBuffersAndAttributes(gl, programInfo, bufferInfo);
// calls gl.activeTexture, gl.bindTexture, gl.uniformXXX
twgl.setUniforms(programInfo, uniforms);
// calls gl.drawArrays or gl.drawElements
twgl.drawBufferInfo(gl, gl.TRIANGLES, bufferInfo);
canvas { border: 1px solid black; }
<script id="vs" type="notjs">
// Pass-through vertex shader: positions are already in clip space.
attribute vec4 position;
varying vec2 v_texcoord;
void main() {
gl_Position = position;
// Since we know we'll be passing in -1 to +1 for position
// map clip space [-1, 1] to texture space [0, 1].
v_texcoord = position.xy * 0.5 + 0.5;
}
</script>
<script id="fs" type="notjs">
precision mediump float;
uniform sampler2D u_texture;
varying vec2 v_texcoord;
void main() {
// Sample the grey test texture at the interpolated coordinate.
gl_FragColor = texture2D(u_texture, v_texcoord);
}
</script>
<script src="https://twgljs.org/dist/twgl.min.js"></script>
<canvas id="canvas"></canvas>