I wonder: how can I obtain a WebGL program instance (WebGLProgram) from any desired WebGL context?
Fetching the WebGL context is NOT a problem. You search the DOM of the current page for the canvas element using document.getElementsByTagName(), or document.getElementById() if you know the exact canvas id:
let canvas = document.getElementById( "canvasId" );
let context = canvas.getContext( "webgl" );
Here, I suppose, we fetch the current context. But if I want to get some shader parameters, or read a certain value from an already running vertex/fragment shader, I need the WebGL program that is associated with the current WebGL rendering context.
But I can't find any method in the WebGL API like context.getAttachedProgram() or context.getActiveProgram().
So what is the way to get the active WebGL program that is used for the rendering process?
Maybe there is some special WebGL parameter?
There is no way to get all the programs or any other resources from a WebGL context. If the context already exists, the best you can do is look at the currently bound resources with things like gl.getParameter(gl.CURRENT_PROGRAM), etc.
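For example, at a moment when the program you care about is bound, you can introspect it (a small sketch; gl is assumed to be the existing context, and CURRENT_PROGRAM is only non-null while a program is actually in use):

var prg = gl.getParameter(gl.CURRENT_PROGRAM);
if (prg) {
  // enumerate the active uniforms of the currently bound program
  var numUniforms = gl.getProgramParameter(prg, gl.ACTIVE_UNIFORMS);
  for (var i = 0; i < numUniforms; ++i) {
    var info = gl.getActiveUniform(prg, i); // WebGLActiveInfo: {name, type, size}
    console.log(info.name, info.type, info.size);
  }
}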
What you can do instead is wrap the WebGL context:

var allPrograms = [];

someContext.createProgram = (function(oldFunc) {
  return function() {
    // call the real createProgram
    var prg = oldFunc.apply(this, arguments);
    // if a program was created, save it
    if (prg) {
      allPrograms.push(prg);
    }
    return prg;
  };
}(someContext.createProgram));
Of course you'd need to wrap gl.deleteProgram as well to remove things from the array of all programs.
someContext.deleteProgram = (function(oldFunc) {
  return function(prg) {
    // call the real deleteProgram
    oldFunc.apply(this, arguments);
    // remove the program from allPrograms
    var ndx = allPrograms.indexOf(prg);
    if (ndx >= 0) {
      allPrograms.splice(ndx, 1);
    }
  };
}(someContext.deleteProgram));
These are the techniques used by things like the WebGL Inspector and the WebGL Shader Editor Extension.
If you want to wrap all contexts you can use a similar technique to wrap getContext.
HTMLCanvasElement.prototype.getContext = (function(oldFunc) {
  return function(type) {
    var ctx = oldFunc.apply(this, arguments);
    if (ctx && (type === "webgl" || type === "experimental-webgl")) {
      ctx = wrapTheContext(ctx);
    }
    return ctx;
  };
}(HTMLCanvasElement.prototype.getContext));
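Note that wrapTheContext is not defined above. A minimal sketch of it could reuse the same createProgram/deleteProgram wrapping shown earlier on each new context:

function wrapTheContext(gl) {
  gl.createProgram = (function(oldFunc) {
    return function() {
      var prg = oldFunc.apply(this, arguments);
      if (prg) {
        allPrograms.push(prg);
      }
      return prg;
    };
  }(gl.createProgram));
  // wrap gl.deleteProgram the same way as shown above
  return gl;
}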
Use gl.getParameter(gl.CURRENT_PROGRAM). Check out https://www.khronos.org/files/webgl/webgl-reference-card-1_0.pdf, page 2, on the right.
Pretty new to ML and TensorFlow!
I made an object detection model with http://cloud.annotations.ai, which lets you train a model and convert it to different formats, including tfjs (model_web).
That website also provides boilerplate for running the model within a browser (a React app)... just like you do - it is probably the same code, I didn't spend enough time on it.
So I have this model running inside a browser, giving predictions about objects in a photo with pretty good results, considering the number of examples I gave and the prediction score (0.89). The given bounding box is good too.
But, unfortunately, I didn't have "just one video" to analyze frame by frame inside a browser; I've got plenty of them. So I decided to switch to Node.js, porting the code as is.
Guess what? TF.js relies on DOM and browser components, and almost no examples that work with Node exist. So, no big deal, I just spent a morning figuring out all the missing parts.
Finally I'm able to run my model over videos that are split into frames, at a decent speed - although I still get the "Hello there, use tfjs-node to gain speed" banner when I'm already using tfjs-node - but the results seem odd.
Comparing the same picture with the same model_web folder gives the same prediction but with a lower score (0.80 instead of 0.89) and a different bounding box, with the object not centered at all.
(TL;DR)
Do the two libraries (tfjs and tfjs-node) have different implementations that make different use of the same model? I don't think it can be a problem of input because, after a long search and fight, I figured out two ways to give the image to tf.browser.fromPixels in Node (and I'm still wondering why I have to use a "browser" method inside tfjs-node). Has anyone made comparisons?
So... that's the code I used, for your reference:
model_web is being loaded with tf.loadGraphModel("file://path/to/model_web/model.json");
Two different ways to convert a JPG and make it work with tf.browser.fromPixels():
const fs = require('fs').promises; // needed for the promise-based readFile below
const inkjet = require('inkjet');
const {createCanvas, loadImage} = require('canvas');

const decodeJPGInkjet = (file) => {
  return new Promise((rs, rj) => {
    fs.readFile(file).then((buffer) => {
      inkjet.decode(buffer, (err, decoded) => {
        if (err) {
          rj(err);
        } else {
          rs(decoded);
        }
      });
    });
  });
};
const decodeJPGCanvas = (file) => {
  return loadImage(file).then((image) => {
    const canvas = createCanvas(image.width, image.height);
    const ctx = canvas.getContext('2d');
    ctx.drawImage(image, 0, 0, image.width, image.height);
    const data = ctx.getImageData(0, 0, image.width, image.height);
    return {data: new Uint8Array(data.data), width: data.width, height: data.height};
  });
};
And that's the code that uses the loaded model to give predictions - the same code for Node and browser, found at https://github.com/cloud-annotations/javascript-sdk/blob/master/src/index.js - it doesn't work on Node as is, so I changed require("@tensorflow/tfjs") to require("@tensorflow/tfjs-node") and replaced fetch with fs.read:
const runObjectDetectionPrediction = async (graph, labels, input) => {
  const batched = tf.tidy(() => {
    const img = tf.browser.fromPixels(input);
    // Reshape to a single-element batch so we can pass it to executeAsync.
    return img.expandDims(0);
  });
  const height = batched.shape[1];
  const width = batched.shape[2];

  const result = await graph.executeAsync(batched);

  const scores = result[0].dataSync();
  const boxes = result[1].dataSync();

  // clean the webgl tensors
  batched.dispose();
  tf.dispose(result);

  const [maxScores, classes] = calculateMaxScores(
    scores,
    result[0].shape[1],
    result[0].shape[2]
  );

  const prevBackend = tf.getBackend();
  // run post process in cpu
  tf.setBackend("cpu");
  const indexTensor = tf.tidy(() => {
    const boxes2 = tf.tensor2d(boxes, [result[1].shape[1], result[1].shape[3]]);
    return tf.image.nonMaxSuppression(
      boxes2,
      maxScores,
      20,  // maxNumBoxes
      0.5, // iou_threshold
      0.5  // score_threshold
    );
  });
  const indexes = indexTensor.dataSync();
  indexTensor.dispose();
  // restore previous backend
  tf.setBackend(prevBackend);

  return buildDetectedObjects(
    width,
    height,
    boxes,
    maxScores,
    indexes,
    classes,
    labels
  );
};
Do different implementations of the libraries (tfjs and tfjs-node) make different use of the same model?
If the same model is deployed both in the browser and in Node.js, the prediction will be the same.
If the predicted values are different, it might be related to the tensor used for the prediction. The processing from the image to the tensor might differ, resulting in different tensors being used for the prediction and thus causing the output to be different.
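One way to check this (a debugging sketch, not part of the SDK) is to log simple statistics of the input tensor in both environments, right before graph.executeAsync, and compare them:

const img = tf.browser.fromPixels(input); // input: whatever each environment passes in
console.log('shape:', img.shape);         // should match between browser and Node
img.mean().print();                       // average pixel value
img.min().print();
img.max().print();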
I figured out two ways to give the image to tf.browser.fromPixels in Node (and I'm still wondering why I have to use a "browser" method inside tfjs-node)
The canvas package uses the system graphics to create a browser-like canvas environment that can be used by Node.js. This makes it possible to use the tf.browser namespace, especially when dealing with image conversion. However, it is also possible to use a Node.js buffer directly to create a tensor.
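For example (a minimal sketch of the pure-Node path; the file name is illustrative), tf.node.decodeImage in tfjs-node turns a JPEG/PNG buffer into a tensor without any canvas or tf.browser call:

const tf = require('@tensorflow/tfjs-node');
const fs = require('fs');

const buffer = fs.readFileSync('frame.jpg');      // raw JPEG bytes
const imgTensor = tf.node.decodeImage(buffer, 3); // shape [height, width, 3]
const batched = imgTensor.expandDims(0);          // same result as tf.browser.fromPixels(...).expandDims(0)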
I can't figure out what to do in case of WebGL context loss in my application (written with Electron) with three.js. We have these two functions:
// renderer is THREE.WebGLRenderer
renderer.context.canvas.addEventListener("webglcontextlost", contextLostFunction);
renderer.context.canvas.addEventListener("webglcontextrestored", contextRestoredFunction);
When I simulate context loss using something like this
var canvas = document.getElementById("playground").childNodes[0].childNodes[0];
var gl = canvas.getContext("webgl");
var WEBGL_lose_context = gl.getExtension('WEBGL_lose_context');
WEBGL_lose_context.loseContext();
Then the webglcontextrestored event fires and everything restores as it should.
When WebGL is killed for real, or using something like this:
renderer.context.getExtension( 'WEBGL_lose_context' ).loseContext();
then the webglcontextrestored event is never fired.
What is going on? What can I do to detect that the context has been lost?
Thanks for any ideas.
You should use the same extension reference you used to lose the context to restore it, via the restoreContext() method of the object:
var canvas = document.getElementById("playground").childNodes[0].childNodes[0];
var gl = canvas.getContext("webgl");
var WEBGL_lose_context = gl.getExtension('WEBGL_lose_context');
WEBGL_lose_context.loseContext();
window.setTimeout(() => {
  WEBGL_lose_context.restoreContext();
}, 2000);
You can also do it from the inspector console to simulate it interactively...
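One thing worth double-checking, although it is not shown in the snippets above: per the WebGL spec, the browser only attempts to restore a lost context (and fire webglcontextrestored) if the webglcontextlost handler calls event.preventDefault(). A sketch:

canvas.addEventListener("webglcontextlost", function(event) {
  event.preventDefault(); // without this, webglcontextrestored may never fire
  contextLostFunction(event);
});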
So, in Sketch, you can mark a layer/group as exportable.
And then the layer/group can be exported as .png/.svg/.pdf etc. I was trying to make a Sketch plugin recently, where I need to mark a layer/group as exportable from code. A layer in code is represented by MSLayer and a group by MSLayerGroup.
The Sketch documentation is not mature enough yet, so I used ClassDump to extract all the headers used in the app. I have been looking for a method that might do the job, but it has been days and I am still out of luck. Can anybody please help me out in this regard?
Sketch supports slices and exporting to images. You can use the - (void)saveArtboardOrSlice:(id)arg1 toFile:(id)arg2; method of MSDocument.
This is almost how to do it.
var loopLayerChildren = [[layerToExport children] objectEnumerator],
    rect = [MSSliceTrimming trimmedRectForSlice:layerToExport],
    useSliceLayer = false,
    exportFilePath,
    slice,
    layerChild;

// Check for MSSliceLayer and overwrite the rect if present
while (layerChild = [loopLayerChildren nextObject]) {
  if ([layerChild class] == 'MSSliceLayer') {
    rect = [MSSliceTrimming trimmedRectForSlice:layerChild];
    useSliceLayer = true;
  }
}

slice = [MSExportRequest requestWithRect:rect scale:1];
if (!useSliceLayer) {
  slice.shouldTrim = true;
}

// export to image file
[(this.document) saveArtboardOrSlice:slice toFile:exportFilePath];
Reference from @GeertWill's sketch-to-xcode-assets-catalog plugin.
With the Web Audio API, is there a way to discover a node's connections?
For example, given
ctx = new AudioContext();
g1 = ctx.createGain();
g2 = ctx.createGain();
g1.connect(g2);
is there a method I can call on g1 that will return [g2]?
I'm interested in writing a javascript library to visualize the current audio graph, similar to the Firefox Web Audio Editor.
You could potentially do something like this:
var connect = AudioNode.prototype.connect;
var disconnect = AudioNode.prototype.disconnect;

AudioNode.prototype.connect = function( dest ) {
  this._connections || ( this._connections = [] );
  if ( this._connections.indexOf( dest ) === -1 ) {
    this._connections.push( dest );
  }
  return connect.apply( this, arguments );
};

AudioNode.prototype.disconnect = function() {
  this._connections = [];
  return disconnect.apply( this, arguments );
};
This is a quick example, and it doesn't account for disconnect arguments. But something along those lines could work, I think.
There are good reasons not to do something like this. But it would allow you to keep the application code generic, which is really what you need if you want to be able to visualize arbitrary audio graphs.
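With the wrappers above in place, a visualizer could walk the graph from any starting node (a sketch; _connections is the array the wrapped connect maintains):

function collectGraph( node, visited ) {
  visited = visited || new Set();
  if ( visited.has( node ) ) return visited;
  visited.add( node );
  ( node._connections || [] ).forEach(function( dest ) {
    collectGraph( dest, visited );
  });
  return visited;
}

// with the question's example, collectGraph( g1 ) visits g1 and g2,
// and g1._connections is [g2]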
The short answer is no - there is no such method. You'll have to keep track of your connections yourself.
Requirement:
Now: Draw on a Canvas, and hit Save (store Canvas state/drawing offline - but NOT as image).
Later: Open up the Canvas with previously saved drawing showing, and continue to draw again.
For drawing we normally use code as follows:
canvas = document.getElementById('can');
ctx = canvas.getContext("2d");
...
ctx.beginPath();
ctx.moveTo(prevX, prevY);
ctx.lineTo(currX, currY);
....
In order to restore the Canvas state later, exporting to an image does not help.
I want to restore the Canvas to its original state to continue editing the drawing at a later date.
I guess the Canvas context has to be exported and stored offline - how?
Your best shot here is to use a Proxy that will both store the draw commands and perform the drawings.
Since browser support for Proxy is very bad (only FF as of today), you'll have to build the proxy yourself, either by using __noSuchMethod__, or by building a brand new WatchedContext class out of the Context2D prototype.
I took the latter solution (the WatchedContext class) for this short demo:
function WatchedContext(hostedCtx) {
  this.commands = [];
  // keep a reference to the real 2D context prototype
  var Context2dPrototype = CanvasRenderingContext2D.prototype;
  for (var p in Context2dPrototype) {
    this[p] = (function(methodName) {
      return function() {
        // record the call, then forward it to the hosted context
        this.commands.push(methodName, arguments);
        return Context2dPrototype[methodName].apply(hostedCtx, arguments);
      };
    })(p);
  }
  this.replay = function() {
    for (var i = 0; i < this.commands.length; i += 2) {
      var com = this.commands[i];
      var args = this.commands[i + 1];
      Context2dPrototype[com].apply(hostedCtx, args);
    }
  };
}
Obviously you might need some other methods (start/stop recording, clear, ...).
Just a small example of use :
var cv = document.getElementById('cv');
var ctx = cv.getContext('2d');
var watchedContext = new WatchedContext(ctx);

// do some drawings on the watched context
// --> they are performed also on the real context
watchedContext.beginPath();
watchedContext.moveTo(10, 10);
watchedContext.lineTo(100, 100);
watchedContext.stroke();

// clear context (not using the watched context to avoid recording)
ctx.clearRect(0, 0, 100, 1000);

// replay what was recorded
watchedContext.replay();
You can see here: http://jsbin.com/gehixavebe/2/edit?js,output that the replay does work, and the line is re-drawn as a result of replaying the stored commands.
For storing offline you can either store the commands locally using localStorage, or store them remotely on a server and use AJAX calls or similar.
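A sketch of the localStorage route, assuming the recorded arguments are JSON-serializable values such as numbers and strings (the arguments objects stored by WatchedContext need converting to plain arrays first):

function saveDrawing(watchedContext) {
  var serializable = [];
  for (var i = 0; i < watchedContext.commands.length; i += 2) {
    serializable.push([
      watchedContext.commands[i],
      Array.prototype.slice.call(watchedContext.commands[i + 1])
    ]);
  }
  localStorage.setItem('drawing', JSON.stringify(serializable));
}

function loadDrawing(watchedContext) {
  var saved = JSON.parse(localStorage.getItem('drawing') || '[]');
  saved.forEach(function(pair) {
    watchedContext.commands.push(pair[0], pair[1]);
  });
  watchedContext.replay();
}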