I want to build an audio input visualizer with Phaser 3. I'm trying to get the mic input into a shader, but I can't find a way to make it work.
I have a basic understanding of shaders and I can work with textures that are images, but I really don't understand how to provide a sound. I checked a working example made in three.js (three.js webaudio - visualizer) and I already managed to get the sound input from the microphone as a Uint8Array of 1024 numbers.
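For reference, a minimal sketch of how this kind of mic capture can be done (not my exact audiostream module; note that an AnalyserNode with fftSize 2048 exposes exactly 1024 frequency bins):
// hypothetical sketch of the mic capture; the shape of setupAudioContext is assumed
function setupAudioContext(onData) {
  navigator.mediaDevices.getUserMedia({ audio: true }).then((stream) => {
    const audioContext = new (window.AudioContext || window.webkitAudioContext)();
    const analyser = audioContext.createAnalyser();
    analyser.fftSize = 2048;
    audioContext.createMediaStreamSource(stream).connect(analyser);
    const data = new Uint8Array(analyser.frequencyBinCount); // 1024 values
    const poll = () => {
      analyser.getByteFrequencyData(data);
      onData(data);
      requestAnimationFrame(poll);
    };
    poll();
  });
}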
Here’s the shader I’m using:
// simplesound.glsl.js
#ifdef GL_ES
precision highp float;
#endif
precision mediump float;
uniform vec2 resolution;
uniform sampler2D iChannel0;
varying vec2 fragCoord;
void main() {
vec2 uv = fragCoord.xy / resolution.xy;
vec2 mu = texture2D(iChannel0, uv).rg;
float y = uv.y - mu.x;
y = smoothstep(0., 0.02, abs(y - 0.1));
gl_FragColor = vec4(y);
}
And here's my scene code, trying to make it work:
import Phaser from 'phaser';
// This provides the array mentioned above, using code based on `navigator.getUserMedia`.
import { setupAudioContext } from '../audiostream';
export default class MainScene2 extends Phaser.Scene {
constructor() {
super({ key: 'MainScene2' });
}
preload() {
this.load.glsl('simplesound', '/static/simplesound.glsl.js');
}
create() {
this.shader = this.add.shader('simplesound', 400, 300, 800, 600);
// When the user presses the 'g' key we will start listening for mic input
const GKey = this.input.keyboard.addKey('G');
GKey.on('down', () => {
setupAudioContext((array) => {
// this array is the array mentioned above; in the three.js example they do something like creating
// a texture from this input and providing that texture to the shader uniform. I tried different
// things, but nothing worked :(
//
// I also tried this.shader.setChannel0 and this.shader.setUniform, but nothing seemed to work either.
});
});
}
}
I've been trying to make this work for a while, but couldn't get anything :(
A possible solution without shaders, using only Phaser and JavaScript, could look like this (really no shader, but I'm also interested in what a shader version would look like).
In this demo I'm using data from an audio file. To make it work for your use case, you would just have to plug the microphone data into the data variable (see the sketch after the demo).
Demo:
(comments in the code highlight the main idea)
Click and wait a few seconds. By the way: I added some screen shake to give the demo more juice.
document.body.style = 'margin:0;';
var data = [];
var playing = -1;
var audioContext = new (window.AudioContext || window.webkitAudioContext)();
var analyser = audioContext.createAnalyser();
var buffer;
var source;
var url = 'https://labs.phaser.io/assets/audio/Rossini - William Tell Overture (8 Bits Version)/left.ogg'
// START Audio part for Demo
function loadAudio() {
var request = new XMLHttpRequest();
request.open('GET', url, true);
request.responseType = 'arraybuffer';
request.onload = function() {
audioContext.decodeAudioData(request.response, function(buf) {
buffer = buf;
playAudio();
});
};
request.send();
}
function playAudio() {
source = audioContext.createBufferSource();
source.buffer = buffer;
source.connect(audioContext.destination);
source.connect(analyser);
source.start(0);
}
// END Audio part for Demo
var config = {
type: Phaser.AUTO,
width: 536,
height: 183,
scene: {
create,
update
},
banner: false
};
var game = new Phaser.Game(config);
// this would be the variable that should be updated from the audio source
var markers;
function create () {
// Start create Marker texture
// this could be removed if you want to load an actual image
let g = this.make.graphics({x: 0, y: 0, add: false});
g.lineStyle(10, 0xffffff);
g.beginPath();
g.moveTo(0, 0);
g.lineTo(50, 0);
g.strokePath();
g.generateTexture('marker', 30, 10);
// End create Marker texture
// Create the markers
// the repeat property sets how many markers you want to display; if you want all 1024, that would be your value
markers = this.add.group({ key: 'marker', repeat: 50,
setXY: { x: 10, y: 10, stepX: 35 }, setOrigin: { x: 0, y: 0}});
this.add.rectangle(10, 10, 180, 20, 0).setOrigin(0);
let label = this.add.text( 10, 10, 'Click to start music', {color: 'red', fontSize:'20px', fontStyle:'bold'} )
// start and stop the playback of music
this.input.on('pointerdown', function () {
switch (playing) {
case -1:
loadAudio();
playing = 1;
label.setText('Click to stop music');
break;
case 0:
playAudio();
playing = 1;
label.setText('Click to stop music');
break;
case 1:
source.stop();
playing = 0;
label.setText('Click to start music');
break;
}
});
}
function update(){
if (markers){
// here we update the y-position of each marker depending on the value of the data
// (min y = 10 and max y ~ 173, given the canvas height of 183)
markers.children.iterate(function (child, idx) {
child.y = 10 + (config.height - 20) / 255 * (data[idx] || 0);
// you could even add some camera shake, for more effect
if(idx < 3 && data[idx] > 253){
this.cameras.main.shake(30);
}
}, this);
// if the analyser is valid, update the data variable
// this part could live somewhere else; I just wanted to keep the code concise
if(analyser){
var spectrums = new Uint8Array(analyser.frequencyBinCount);
analyser.getByteFrequencyData(spectrums);
// convert the data to a plain array and update the data variable
data = [].slice.call(spectrums);
}
}
}
<script src="https://cdn.jsdelivr.net/npm/phaser#3.55.2/dist/phaser.js"></script>
Basically this application "only" alters the Y position of each marker, based on the values returned in the audio array, which is loaded from the audio file.
Disclaimer: this is rough demo code and could use some cleanup/improvement if it were to be used in production.
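To use the microphone instead of the audio file, a minimal sketch could look like this (untested here; it assumes navigator.mediaDevices.getUserMedia is available and reuses the analyser from the demo):
// hypothetical mic setup: route the microphone stream into the same analyser,
// so the existing update() loop keeps filling the data variable
function setupMicrophone() {
    navigator.mediaDevices.getUserMedia({ audio: true })
        .then(function (stream) {
            var micSource = audioContext.createMediaStreamSource(stream);
            micSource.connect(analyser);
            // do NOT connect the mic to audioContext.destination, or you will get feedback
        });
}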
This is a working solution, using the mentioned shader, through Phaser.
After a short deep dive into GLSL, and after starting to understand shaders and the surrounding concepts, I found out how shaders work with Phaser (at least for this task). It is actually pretty elegant and straightforward how it is implemented.
Basically you "just" need to:
load the shader in preload
this.load.glsl('simplesound', 'simplesound.glsl.js');
add the shader to the scene, in the create function for example
this.shader = this.add.shader('simplesound', x, y, width, height);
create two textures to pass to the shader (so that you can swap them), in the create function
(be careful to select a texture size that respects the power-of-two rule)
this.g = this.make.graphics({ x: 0, y: 0, add: false });
this.g.generateTexture('tex0', 64, 64);
this.g.generateTexture('tex1', 64, 64);
In the update function, create a texture that represents the audio data
...
var spectrums = new Uint8Array(analyser.frequencyBinCount);
analyser.getByteFrequencyData(spectrums);
data = [].slice.call(spectrums);
...
let g = this.make.graphics({add:false});
data.forEach( (value, idx ) => {
g.fillStyle(value <<16 );
g.fillRect(idx * 10, 0, 10, value);
});
and finally, update the next texture and pass the key of the newly recreated texture to the shader
if(this.shader.getUniform('iChannel0').textureKey == 'tex0'){
this.textures.remove('tex1');
g.generateTexture('tex1', 64, 64);
this.shader.setChannel0('tex1');
} else {
this.textures.remove('tex0');
g.generateTexture('tex0', 64, 64);
this.shader.setChannel0('tex0');
}
Of course this code could be optimized with functions and better variable and texture-key naming, but that is left to the reader. For example, the swap logic could be wrapped in a small helper, as sketched below.
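An untested sketch of such a helper (scene, shader, and g are the objects from the snippets above):
// regenerate the audio texture under the currently unused key
// and point the shader's iChannel0 at it
function updateAudioTexture(scene, shader, g, size) {
    var next = shader.getUniform('iChannel0').textureKey === 'tex0' ? 'tex1' : 'tex0';
    if (scene.textures.exists(next)) {
        scene.textures.remove(next);
    }
    g.generateTexture(next, size, size);
    shader.setChannel0(next);
}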
Small working demo:
(uses an audio file instead of the mic, and is based on the code of my previous answer)
document.body.style = 'margin:0;';
const TEXTURE_SIZE = 128;
const glslScript = `#ifdef GL_ES
precision highp float;
#endif
precision mediump float;
uniform vec2 resolution;
uniform sampler2D iChannel0;
varying vec2 fragCoord;
void main() {
vec2 uv = fragCoord.xy / resolution.xy;
vec2 mu = texture2D(iChannel0, uv).rg;
float y = uv.y - mu.x;
y = smoothstep(0., 0.02, abs(y - 0.1));
gl_FragColor = vec4(y);
}`;
var playing = -1;
var audioContext = new (window.AudioContext || window.webkitAudioContext)();
var analyser = audioContext.createAnalyser();
var source;
var url = 'https://labs.phaser.io/assets/audio/Rossini - William Tell Overture (8 Bits Version)/left.ogg'
// START Audio part for Demo
function loadAudio() {
var request = new XMLHttpRequest();
request.open('GET', url, true);
request.responseType = 'arraybuffer';
request.onload = function () {
audioContext.decodeAudioData(request.response, function (buf) {
playAudio(buf);
});
};
request.send();
}
function playAudio(buffer) {
source = audioContext.createBufferSource();
source.buffer = buffer;
source.connect(audioContext.destination);
source.connect(analyser);
source.start(0);
}
// END Audio part for Demo
var config = {
type: Phaser.WEBGL,
width: 536,
height: 183,
scene: {
create,
update
},
banner: false
};
var game = new Phaser.Game(config);
function create() {
this.add.text(10, 10, 'Click to start and stop the music')
.setColor('#000000')
.setOrigin(0)
.setDepth(1000)
.setFontFamily('Arial');
this.add.rectangle(0, 0, config.width, 40, 0xffffff)
.setDepth(999)
.setOrigin(0);
var baseShader = new Phaser.Display.BaseShader('shader', glslScript);
this.shader = this.add.shader(baseShader, 0, 10, config.width, config.height - 10)
.setOrigin(0);
// start and stop the playback of music
this.input.on('pointerdown', function () {
switch (playing) {
case -1:
loadAudio();
playing = 1;
break;
case 0:
playAudio();
playing = 1;
break;
case 1:
source.stop();
playing = 0;
break;
}
});
}
function update() {
if (analyser) {
var spectrums = new Uint8Array(analyser.frequencyBinCount);
analyser.getByteFrequencyData(spectrums);
let data = [].slice.call(spectrums);
let g = this.make.graphics({ add: false }); // note: ideally reuse one graphics object instead of creating a new one each frame
data.forEach((value, idx) => {
g.fillStyle(value << 16);
g.fillRect(idx * 10, 0, 10, value);
});
let textureName = this.shader.getUniform('iChannel0').textureKey == 'tex0' ? 'tex1' : 'tex0';
if(this.textures.exists(textureName)){
this.textures.remove(textureName);
}
g.generateTexture(textureName, TEXTURE_SIZE, TEXTURE_SIZE);
this.shader.setChannel0(textureName);
}
}
<script src="https://cdn.jsdelivr.net/npm/phaser#3.55.2/dist/phaser.js"></script>
Related
I'm currently using Phaser 3, although my question isn't technically restricted to that framework, as it's more of a general JS/canvas/maths question, but:
I have a line drawn with graphics(). It’s anchored at one end, and the other end is draggable. I made a quick demo and so far, so good - you can see what I have already on CodePen.
Dragging the marker around and redrawing the line is no problem, but what I'd like is for the line to have a maximum length of 100, so that even if you keep dragging beyond that point, the line still follows the mouse but never gets longer than 100. Dragging inside that maximum radius, the line shrinks as normal.
I’ve put together a visual that hopefully explains it:
The issue is that I suspect this is VERY MATHS and I am very, very weak with maths. Could anyone explain like I’m five what I need to do to my code to achieve this?
Edit: Adding code in a snippet here, as requested:
var config = {
type: Phaser.AUTO,
width: 800,
height: 400,
backgroundColor: '#2d2d2d',
parent: 'phaser-example',
scene: {
preload: preload,
create: create,
update: update
}
};
var path;
var curve;
var graphics;
var game = new Phaser.Game(config);
function preload() {
this.load.spritesheet('dragcircle', 'https://labs.phaser.io/assets/sprites/dragcircle.png', { frameWidth: 16 });
}
function create() {
graphics = this.add.graphics();
path = { t: 0, vec: new Phaser.Math.Vector2() };
curve = new Phaser.Curves.Line([ 400, 390, 300, 230 ]);
var point0 = this.add.image(curve.p0.x, curve.p0.y, 'dragcircle', 0);
var point1 = this.add.image(curve.p1.x, curve.p1.y, 'dragcircle', 0).setInteractive();
point1.setData('vector', curve.p1);
this.input.setDraggable(point1);
this.input.on('drag', function (pointer, gameObject, dragX, dragY) {
gameObject.x = dragX;
gameObject.y = dragY;
gameObject.data.get('vector').set(dragX, dragY);
});
this.input.on('dragend', function (pointer, gameObject) {
let distance = Phaser.Math.Distance.Between(curve.p0.x, curve.p0.y, curve.p1.x, curve.p1.y);
console.log(distance);
});
}
function update() {
graphics.clear();
graphics.lineStyle(2, 0xffffff, 1);
curve.draw(graphics);
curve.getPoint(path.t, path.vec);
}
<script src="https://cdnjs.cloudflare.com/ajax/libs/phaser/3.55.2/phaser.min.js"></script>
You are right, you would need some math, but Phaser has many helper functions that will do the heavy lifting.
The main idea of this solution is:
define a maxLength
get the new point on drag, and create a real Phaser Vector2
here some math is needed: to create the vector, just calculate the destination point minus the origin point
new Phaser.Math.Vector2(pointer.x - point0.x, pointer.y - point0.y) (the origin point being the starting point of the desired vector, and the destination point being the mouse pointer)
calculate the length of the created vector and compare it with the maxLength
if it is too long, adjust the vector with the handy function setLength (link to the documentation; this is where you would have needed math, but thankfully Phaser does it for us - see the sketch after this list)
set the new coordinates for point1 and the curve endpoint
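If you are curious, setLength hides exactly the math you were dreading; the clamping boils down to something like this plain-JavaScript sketch:
// clamp the point (px, py) to a circle of radius maxLength around (ox, oy)
function clampToRadius(ox, oy, px, py, maxLength) {
    var dx = px - ox, dy = py - oy;
    var distance = Math.sqrt(dx * dx + dy * dy); // length of the vector
    if (distance <= maxLength) return { x: px, y: py };
    var scale = maxLength / distance;            // shrink factor
    return { x: ox + dx * scale, y: oy + dy * scale };
}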
Here is a quick demo (based on your code):
var config = {
type: Phaser.AUTO,
width: 500,
height: 170,
scene: {
preload: preload,
create: create,
update: update
}
};
var curve;
var graphics;
var game = new Phaser.Game(config);
function preload() {
this.load.spritesheet('dragcircle', 'https://labs.phaser.io/assets/sprites/dragcircle.png', { frameWidth: 16 });
}
function create() {
graphics = this.add.graphics();
curve = new Phaser.Curves.Line([ config.width/2, config.height - 20, config.width/2, 10 ]);
// define a length, could be a global constant
let maxLength = curve.p0.y - curve.p1.y;
var point0 = this.add.image(curve.p0.x, curve.p0.y, 'dragcircle', 0);
var point1 = this.add.image(curve.p1.x, curve.p1.y, 'dragcircle', 0).setInteractive();
this.input.setDraggable(point1);
// Just add for Debug Info
this.add.circle(curve.p0.x, curve.p0.y, maxLength)
.setStrokeStyle(1, 0xffffff, .5)
this.input.on('drag', function (pointer) {
let vector = new Phaser.Math.Vector2(pointer.x - point0.x, pointer.y - point0.y);
let distance = Phaser.Math.Distance.Between( point0.x, point0.y, pointer.x, pointer.y);
if(distance > maxLength){
vector.setLength(maxLength);
}
point1.x = point0.x + vector.x;
point1.y = point0.y + vector.y;
curve.p1.x = point1.x;
curve.p1.y = point1.y;
});
// NOT REALLY NEEDED
/*this.input.on('dragend', function (pointer, gameObject) {
let distance = Phaser.Math.Distance.Between(curve.p0.x, curve.p0.y, curve.p1.x, curve.p1.y);
console.log(distance);
});*/
}
function update() {
graphics.clear();
graphics.lineStyle(2, 0xffffff, 1);
curve.draw(graphics);
}
<script src="https://cdn.jsdelivr.net/npm/phaser#3.55.2/dist/phaser.js"></script>
Optional - code version using Phaser.GameObjects.Line:
This uses less code, and thanks to the Line GameObject (link to the documentation), you can use the vector directly to update the line; you also don't need the update function, the graphics object, and so on.
const config = {
type: Phaser.CANVAS,
width: 500,
height: 160,
scene: {
create
}
};
const game = new Phaser.Game(config);
const MAX_LINE_LENGTH = 100;
function create() {
let points = [ {x: config.width/2, y: config.height - 20}, {x: config.width/2, y: config.height - 120} ];
let point0 = this.add.circle(points[0].x, points[0].y, 6)
.setStrokeStyle(4, 0xff0000);
let point1 = this.add.circle(points[1].x, points[1].y, 6)
.setStrokeStyle(4, 0xff0000)
.setInteractive();
this.input.setDraggable(point1);
// Just add for Debug Info
this.add.circle(point0.x, point0.y, MAX_LINE_LENGTH)
.setStrokeStyle(1, 0xffffff, .5);
let line = this.add.line(points[0].x, points[0].y, 0, 0, 0, -100, 0x00ff00)
.setOrigin(0);
this.input.on('drag', function (pointer) {
let vector = new Phaser.Math.Vector2(pointer.x - point0.x, pointer.y - point0.y);
let distance = Phaser.Math.Distance.Between( point0.x, point0.y, pointer.x, pointer.y);
if(distance > MAX_LINE_LENGTH){
vector.setLength(MAX_LINE_LENGTH);
}
point1.x = point0.x + vector.x;
point1.y = point0.y + vector.y;
line.setTo(0, 0, vector.x, vector.y);
});
}
<script src="//cdn.jsdelivr.net/npm/phaser#3.55.2/dist/phaser.js"></script>
What I want to do
I'm trying to port my own WebGL-based engine to WeChat MiniGame environment, and currently trying just to have WebGL context that will be cleared with pink color:
What's the issue
I've followed the examples that Tencent provides, as well as the three.js example, on how to set up a game project. It works great within the WeChat Developer Tool (as seen in the above image); however, when I try to open it on my device (an Android phone), it is stuck at the 100% loading screen:
It stays like this for about a minute, and then shows a black screen.
My code
There's no resource loading in my code.
Here is what is in my main.js:
var ctx = canvas.getContext('webgl', {
antialias: true,
depth: true,
preserveDrawingBuffer: true
});
ctx.viewport(0,0,ctx.canvas.width,ctx.canvas.height)
ctx.colorMask(true,true,true,true)
ctx.depthMask(true)
ctx.enable(ctx.BLEND)
ctx.blendFunc(ctx.SRC_ALPHA, ctx.ONE_MINUS_SRC_ALPHA)
ctx.clearColor(1.0,0.0,1.0,1.0)
export default class Main {
constructor() {
window.requestAnimationFrame(this.loop.bind(this), canvas)
}
render() {
ctx.clear(ctx.COLOR_BUFFER_BIT | ctx.DEPTH_BUFFER_BIT)
}
update() {
}
loop() {
this.update()
this.render()
window.requestAnimationFrame(this.loop.bind(this), canvas)
}
}
My game.js is also simple:
import './weapp-adapter/index.js'
import './symbol'
import Main from './js/main'
new Main()
My game.json only contains following:
{
"deviceOrientation": "portrait"
}
Additional info
I've also noticed that when I take the three.js example (which works on the device) and comment out lines in its render function, it then behaves the same way (gets stuck at 100% loading).
Solution
I've finally figured out how to solve it:
When I placed the WebGL context initialization in the very first animation frame callback, while the actual rendering is done in all subsequent calls, it worked as expected on my Android device. Here is the main.js I changed:
export default class Main {
constructor() {
this.render = this.render_first
requestAnimationFrame(() => this.animate())
}
showmsg(t,c) {
wx.showModal({
title: ""+t,
content: ""+c,
showCancel: false,
confirmText:'OK',
success: function(res){}
});
}
animate() {
this.render();
requestAnimationFrame(() => this.animate())
}
render_first() {
this.render = this.render_normal
var _this = this
this.domElement = canvas
var contextAttributes = {
alpha: false,
depth: true,
stencil: false,
antialias: false
}
this.domElement.addEventListener("webglcontextlost", function(e){
_this.showmsg("WebGL","Context lost");
}, false)
this.domElement.addEventListener("webglcontextrestored", function(e){
_this.showmsg("WebGL","Context restored");
}, false)
this._gl = this.domElement.getContext( 'webgl', contextAttributes ) || this.domElement.getContext( 'experimental-webgl', contextAttributes )
var _gl = this._gl
var vsrc = ""
vsrc += "uniform mat4 uModelView;"
vsrc += "uniform mat4 uProjView;"
vsrc += "attribute highp vec4 aPosition;"
vsrc += "void main(void) {"
vsrc += " gl_Position = ( uProjView * uModelView ) * aPosition;"
vsrc += "}"
var vid = _gl.createShader(_gl.VERTEX_SHADER)
_gl.shaderSource(vid,vsrc)
_gl.compileShader(vid)
if (!_gl.getShaderParameter(vid, _gl.COMPILE_STATUS)) {
console.error("Vertex shader failed: ", _gl.getShaderInfoLog(vid))
}
this._vid = vid
var fsrc = ""
fsrc += "void main(void) {"
fsrc += " gl_FragColor = vec4(1.0,1.0,0.0,1.0);"
fsrc += "}"
var fid = _gl.createShader(_gl.FRAGMENT_SHADER)
_gl.shaderSource(fid,fsrc)
_gl.compileShader(fid)
if (!_gl.getShaderParameter(fid, _gl.COMPILE_STATUS)) {
console.error("Fragment shader failed: ", _gl.getShaderInfoLog(fid))
}
this._fid = fid
var pid = _gl.createProgram()
_gl.attachShader(pid,vid)
_gl.attachShader(pid,fid)
_gl.linkProgram(pid)
if (!_gl.getProgramParameter(pid, _gl.LINK_STATUS)) {
let info = _gl.getProgramInfoLog(pid)
console.error("Program link failed:", info )
}
_gl.useProgram(pid)
this._pid = pid
var aPosition = _gl.getAttribLocation(pid,"aPosition")
var uModelView = _gl.getUniformLocation(pid,"uModelView")
var uProjView = _gl.getUniformLocation(pid,"uProjView")
_gl.uniformMatrix4fv( uModelView, false, [1,0,0,0,0,1,0,0,0,0,1,0,0,0,0,1] )
_gl.uniformMatrix4fv( uProjView, false, [1,0,0,0,0,1,0,0,0,0,1,0,0,0,0,1] )
this.uni = [uModelView, uProjView]
this.phase = 0.0
var data = [0,0,0,1,0,0,0,1,0]
var idata = [0,1,2]
var vbID = _gl.createBuffer()
_gl.bindBuffer(_gl.ARRAY_BUFFER,vbID)
_gl.bufferData(_gl.ARRAY_BUFFER, new Float32Array(data), _gl.STATIC_DRAW)
var vbiID = _gl.createBuffer();
_gl.bindBuffer(_gl.ELEMENT_ARRAY_BUFFER, vbiID)
_gl.bufferData(_gl.ELEMENT_ARRAY_BUFFER, new Uint16Array(idata), _gl.STATIC_DRAW)
_gl.vertexAttribPointer( aPosition, 3, _gl.FLOAT, false, 0, 0 )
_gl.enableVertexAttribArray( aPosition )
_gl.bindBuffer(_gl.ELEMENT_ARRAY_BUFFER, vbiID)
this.vb = [vbID,vbiID]
_gl.clearColor(1.0,0.0,1.0,1.0)
}
render_normal() {
var _gl = this._gl
var et = 60.0 / 1000.0
this.phase += 180.0 * 60.0 / 1000.0
var py = Math.sin(this.phase * Math.PI/180.0) * 0.5
_gl.uniformMatrix4fv( this.uni[0], false, [1,0,0,0,0,1,0,0,0,0,1,0,py,0,0,1] )
_gl.clear( _gl.COLOR_BUFFER_BIT )
_gl.drawElements(_gl.TRIANGLES, 3, _gl.UNSIGNED_SHORT, 0)
}
}
What was the problem
It seems that on an actual device, the WeChat minigame environment runs the animation loop in a separate thread from the main JavaScript execution. Since a WebGL (OpenGL) context is accessible only from the thread it was created on (except in native apps, where multiple threads can share a context), it would crash on the device: the render function would attempt to access a GL context that was initialized on a different thread.
This wasn't visible in the WeChat Developer Tools, since that tool does not simulate exactly how the device architecture works; there, the animation frame callbacks and the JavaScript execution seem to happen in the same thread.
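Distilled, the workaround is just lazy context creation inside the animation loop; a minimal sketch of the pattern (assuming the global canvas that weapp-adapter provides):
export default class Main {
  constructor() {
    // first frame: create the GL context on the rAF thread,
    // then swap in the real render function for all later frames
    this.render = () => {
      this._gl = canvas.getContext('webgl')
      this._gl.clearColor(1.0, 0.0, 1.0, 1.0)
      this.render = () => this._gl.clear(this._gl.COLOR_BUFFER_BIT)
    }
    const loop = () => { this.render(); requestAnimationFrame(loop) }
    requestAnimationFrame(loop)
  }
}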
I have a canvas for the game world and a canvas for the display screen. I also have a polygon with nodes V(x,y) to serve as a viewport that follows the player and his rotation. I would like to know how to clip from the game world along the polygon, rotate, and draw to the smaller canvas.
//main looping function
var requestAnimFrame = (function(){
return window.requestAnimationFrame ||
window.webkitRequestAnimationFrame ||
window.mozRequestAnimationFrame ||
window.oRequestAnimationFrame ||
window.msRequestAnimationFrame ||
function(callback){
window.setTimeout(callback, 1000 / 60);
};
})();
//joystick setup
var leftManager = null;
var rightManager = null;
//precalculated math
var twoPi = Math.PI*2;
var halfPi = Math.PI/2;
var thirdOfCircleInRadians = twoPi/3;
//game canvas setup
var gameCvs = document.getElementById('gameCanvas');
gameCvs.width = 480;
gameCvs.height = 320;
//gameCvs.width - 960;
//gameCvs.height = 640;
var gameCtx = gameCvs.getContext("2d");
//game loop
var lastTime = 0;
function main() {
var now = Date.now();
var dt = lastTime==0? 0.016 : (now - lastTime) / 1000.0;
update(dt);
render(dt);
lastTime = now;
requestAnimFrame(main);
}
//collision class shorthand
var V = SAT.Vector;
var C = SAT.Circle;
var P = SAT.Polygon;
var R = new SAT.Response();
P.prototype.draw = function (ctx,type) {
ctx.save();
switch(type){
case 'van': ctx.fillStyle = "rgba(66, 66, 66, 0.5)"; break;
case 'col': ctx.fillStyle = "rgba(0, 0, 0, 1.0)"; break;
default: ctx.fillStyle = "rgba(0, 0, 0, 1.0)"; break;
}
ctx.translate(this.pos.x, this.pos.y);
ctx.beginPath();
var points = this.calcPoints;
ctx.moveTo(points[0].x, points[0].y);
var i = points.length;
while (i--) ctx.lineTo(points[i].x, points[i].y);
ctx.closePath();
//stroke to see through camera, when camera is not drawn use fill
ctx.stroke();
//ctx.fill();
ctx.restore();
};
//first for collisions, second for vanity. first is black, second is grey
var O = function(colPolygon,vanPolygon){
this.colPolygon = colPolygon;
this.vanPolygon = vanPolygon;
this.visible = false;
};
var objectVendor = function(type,position){
switch(type){
case 'tree':
return new O(new P(position,[
new V(10.5,19.5),
new V(20.5,9.5),
new V(23,-4),
new V(15,-16.5),
new V(-4,-19.5),
new V(-18,-14.5),
new V(-23,-0.5),
new V(-18.5,14.5),
new V(-8,20)
]),new P(position,[
new V(21,39),
new V(41,19),
new V(46,-8),
new V(30,-33),
new V(-8,-39),
new V(-36,-29),
new V(-46,-1),
new V(-37,29),
new V(-16,40)]));
break;
default: return false; break;
}
return false;
}
//Camera and Player Polygons
var cameraPoly = new P(new V(0,0),[
new V(-240,-160),
new V(240,-160),
new V(240,160),
new V(-240,160)
]);
var player = new P(new V(0,0),[
new V(5,2.5),
new V(7.5,2),
new V(7.5,-2),
new V(5,-2.5),
new V(-5,-2.5),
new V(-7.5,-2),
new V(-7.5,2),
new V(-5,2.5)
]);
//players start position on the screen, and starting angle, init velocity
player.pos = new V(240,160);
player.setAngle(1);
//players velocity for movement
player.vel = new V(0,0);
var world = {
objects: [],
visibleObjects: [],
worldCvs: null,
worldCtx: null,
init: function(){
//set up world canvas
this.worldCvs = document.createElement('canvas');
this.worldCvs.width = 480;
this.worldCvs.height = 480;
this.worldCtx = this.worldCvs.getContext("2d");
//populate world with stuff
this.objects.push(objectVendor('tree',new V(100,100)));
this.objects.push(objectVendor('tree',new V(150,200)));
this.objects.push(objectVendor('tree',new V(75,300)));
},
update: function(dt){
this.visibleObjects = [];
cameraPoly.setAngle(player.angle);
//cameraPoly.pos = player.pos;
cameraPoly.pos = new V(player.pos.x+(110*Math.cos(player.angle+halfPi)),player.pos.y+(110*Math.sin(player.angle+halfPi)));
//update objects to mark if they are in view
var i = this.objects.length;
while(i--){
if(SAT.testPolygonPolygon(this.objects[i].vanPolygon, cameraPoly, R)){
this.visibleObjects.push(this.objects[i]);
}
}
//}
},
draw: function(dt){
this.worldCtx.setTransform(1,0,0,1,0,0);
this.worldCtx.clearRect(0,0,this.worldCvs.width,this.worldCvs.height);
player.draw(this.worldCtx);
var i = this.visibleObjects.length;
while(i--){
this.visibleObjects[i].colPolygon.draw(this.worldCtx,'col');
this.visibleObjects[i].vanPolygon.draw(this.worldCtx,'van');
}
//for testing
cameraPoly.draw(this.worldCtx);
/*
this.worldCtx.save();
this.worldCtx.beginPath();
var i = cameraPoly.calcPoints.length;
this.worldCtx.moveTo(cameraPoly.calcPoints[0].x,cameraPoly.calcPoints[0].y);
while(i--){
this.worldCtx.lineTo(cameraPoly.calcPoints[i].x,cameraPoly.calcPoints[i].y);
}
this.worldCtx.clip();
this.worldCtx.restore();
*/
}
}
function render(dt){
gameCtx.setTransform(1,0,0,1,0,0);
gameCtx.clearRect(0,0,gameCvs.width,gameCvs.height);
world.draw();
//gameCtx.save();
//gameCtx.translate(cameraPoly.pos.x,cameraPoly.pos.y);
//gameCtx.translate(gameCtx.width/2,gameCtx.height/2);
//gameCtx.rotate(-player.angle+halfPi);
//gameCtx.translate(-world.worldCvs.width/2,-world.worldCvs.height/2);
gameCtx.drawImage(world.worldCvs,0,0);
//gameCtx.restore();
}
function update(dt){
world.update();
}
function init(){
//joystick setup
leftManager = nipplejs.create({
zone:document.getElementById("leftJoystick"),
color:"black",
size:75,
threshold:1.0,
position:{
top:"50%",
left:"50%"
},
mode:"static",
restOpacity:0.75,
});
rightManager = nipplejs.create({
zone:document.getElementById("rightJoystick"),
color:"black",
size:75,
threshold:1.0,
position:{
top:"50%",
right:"50%"
},
mode:"static",
restOpacity:0.75,
});
//joystick event setup
leftManager.get().on('move end', function(evt,data){
//console.log(evt);
//console.log(data);
});
rightManager.get().on('move end', function(evt,data){
//console.log(evt);
//console.log(data);
});
world.init();
main();
}
init();
I'm currently using the SAT.js and nipplejs libraries.
Typically this is done a little differently than you seem to be thinking of it. Instead of thinking about the viewport existing somewhere in the world, you should think about the viewport being fixed and the world being transformed behind it; you don't copy part of the world to the viewport, you draw the world offset and rotated by a certain amount, and only draw the parts that are inside the viewport. Matrices are an easy and common way to represent this transformation. You may want to read more about them here.
In practice, this just amounts to changing your existing call to worldCtx.setTransform() at the beginning of each draw frame. That link has information about how to calculate a good transform matrix, and you can find similar resources all over the place, since it's pretty standard math.
In particular, you'll want to multiply a rotation and a translation matrix. Translation matrices are only possible if you use a matrix of higher order than your coordinate space: for 2D a 3x3 matrix, and for 3D a 4x4 matrix. You could instead choose to just add an offset to your coordinates as you draw them, but worldCtx.setTransform already takes a matrix with a third column for flat offsets.
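Concretely, a sketch of such a per-frame transform using your variable names (hedged: the exact angle offset depends on your angle convention):
// draw the world rotated and offset behind a fixed viewport
gameCtx.setTransform(1, 0, 0, 1, 0, 0);                   // reset any old transform
gameCtx.translate(gameCvs.width / 2, gameCvs.height / 2); // pivot at the viewport centre
gameCtx.rotate(-player.angle);                            // undo the player's rotation
gameCtx.translate(-cameraPoly.pos.x, -cameraPoly.pos.y);  // bring the camera to the origin
gameCtx.drawImage(world.worldCvs, 0, 0);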
Changing the render function to the following solves the problem; I was just rushing and didn't think things through very well.
function render(dt){
gameCtx.setTransform(1,0,0,1,0,0);
gameCtx.clearRect(0,0,gameCvs.width,gameCvs.height);
world.draw();
gameCtx.translate(gameCvs.width/2,gameCvs.height/2);
gameCtx.rotate(-player.angle+Math.PI);
gameCtx.translate(-cameraPoly.pos.x,-cameraPoly.pos.y);
gameCtx.drawImage(world.worldCvs,0,0);
}
What this does is: reset any transformations on the context, clear it for a new redraw, redraw the world canvas, translate to the display center, rotate by the proper amount around that reference point, and translate by the negative of the camera's center point, so that the game canvas is shifted by the proper amount and drawing at 0,0 lands in the correct location. Thank you for the reference material!
Edit:
Working CodePen (you need to provide a video file to avoid cross-origin policy issues):
https://codepen.io/bw1984/pen/pezOXm
I am attempting to modify the excellent Rutt-Etra example at https://airtightinteractive.com/demos/js/ruttetra/ to work for video (still using three.js) and am encountering strange performance issues.
My code currently works as expected, and actually runs quite smoothly in Chrome on my MacBook Pro, but seems to cause some sort of slow memory leak, which I assume has to do with all the heavy lifting being done by getImageData. Strangely enough, it's only noticeable once I attempt to refresh the tab, so it looks like it may be related to garbage collection in Chrome. Is there any way to shunt the grunt work onto the GPU instead of killing the CPU?
I just wondered if I am missing anything obvious in terms of code optimisation, or if the performance issues I am facing are to be expected given the nature of what I am trying to do.
I am only interested in WebGL / Chrome functionality, so I don't really need to worry about browser compatibility of any kind.
<script>
var container, camera, scene, renderer, controls;
// PI
var PI = Math.PI;
var TWO_PI = PI*2;
// size
var SCREEN_WIDTH = window.innerWidth;
var SCREEN_HEIGHT = window.innerHeight;
var SCREEN_PIXEL_RATIO = window.devicePixelRatio;
// camera
var VIEW_ANGLE = 45;
var ASPECT = SCREEN_WIDTH / SCREEN_HEIGHT;
var NEAR = 0.1;
var FAR = 20000000;
// video raster
var video;
var videoImage;
var videoImageContext;
var _imageHeight;
var _imageWidth;
// lines
var _lineGroup;
// gui
var _guiOptions = {
stageSize: 1,
scale: 1.0,
scanStep: 5,
lineThickness: 10.0,
opacity: 1.0,
depth: 50,
autoRotate: false
};
// triggered from audio.php getMediaStream
function runme()
{
console.log('runme running');
init();
animate();
}
runme();
function init()
{
container = document.createElement('div');
document.body.appendChild(container);
//----------
// scene
//----------
scene = new THREE.Scene();
//----------
// camera
//----------
camera = new THREE.PerspectiveCamera(VIEW_ANGLE, ASPECT, NEAR, FAR);
//camera.position.set(0,0,450);
camera.position.set(0,150,300);
//----------
// objects
//----------
// create the video element
video = document.createElement('video');
// video.id = 'video';
// video.type = ' video/ogg; codecs="theora, vorbis" ';
video.src = 'data/sintel.ogv';
//video.src = 'data/az.mp4';
video.load(); // must call after setting/changing source
video.play();
videoImage = document.createElement('canvas');
//videoImage.width = 480;
//videoImage.height = 204;
videoImageContext = videoImage.getContext('2d');
_imageWidth = videoImage.width;
_imageHeight = videoImage.height;
//videoImageContext.fillStyle = '#ffffff';
//videoImageContext.fillRect(0, 0, videoImage.width, videoImage.height);
//----------
// controls
//----------
controls = new THREE.OrbitControls(camera);
//----------
// events
//----------
window.addEventListener('resize', onWindowResize, false);
//----------
// render
//----------
var args = {
//antialias: true // too slow
}
renderer = new THREE.WebGLRenderer(args);
renderer.setClearColor(0x000000, 1);
renderer.setPixelRatio(SCREEN_PIXEL_RATIO); //Set pixel aspect ratio
renderer.setSize(SCREEN_WIDTH, SCREEN_HEIGHT);
// attach to dom
container.appendChild(renderer.domElement);
//render();
}
function render()
{
if(video.readyState === video.HAVE_ENOUGH_DATA && !video.paused && !video.ended) // and video.currentTime > 0
{
//_imageWidth = videoImage.width;
//_imageHeight = videoImage.height;
videoImageContext.drawImage(video,0,0,_imageWidth,_imageHeight);
// Grab the pixel data from the backing canvas
var _data = videoImageContext.getImageData(0,0,videoImage.width,videoImage.height).data;
//log(data);
//_pixels = data;
var x = 0, y = 0;
if(_lineGroup)
{
scene.remove(_lineGroup);
//_lineGroup = null;
}
_lineGroup = new THREE.Object3D();
var _material = new THREE.LineBasicMaterial({
color: 0xffffff,
linewidth: _guiOptions.lineThickness
});
// loop through the image pixels
for(y = 0; y < _imageHeight; y+= _guiOptions.scanStep)
{
var _geometry = new THREE.Geometry();
for(x=0; x<_imageWidth; x+=_guiOptions.scanStep)
{
var color = new THREE.Color(getColor(x, y, _data));
var brightness = getBrightness(color);
var posn = new THREE.Vector3(x -_imageWidth/2,y - _imageHeight/2, -brightness * _guiOptions.depth + _guiOptions.depth/2);
//_geometry.vertices.push(new THREE.Vertex(posn));
_geometry.vertices.push(posn);
_geometry.colors.push(color);
color = null;
brightness = null;
posn = null;
}
// add a line
var _line = new THREE.Line(_geometry, _material);
//log(line);
_lineGroup.add(_line);
// gc
_geometry = null;
}
scene.add(_lineGroup);
_data = null;
_line = null;
}
renderer.render(scene,camera);
}
function animate(){
requestAnimationFrame(animate);
stats.update(); // assumes a stats.js instance created elsewhere on the page
render();
}
function onWindowResize(){
camera.aspect = window.innerWidth / window.innerHeight;
camera.updateProjectionMatrix();
renderer.setSize(window.innerWidth, window.innerHeight);
render();
}
// Returns a hexadecimal color for a given pixel in the pixel array.
function getColor(x, y, _pixels)
{
var base = (Math.floor(y) * _imageWidth + Math.floor(x)) * 4;
var c = {
r: _pixels[base + 0],
g: _pixels[base + 1],
b: _pixels[base + 2],
a: _pixels[base + 3]
};
return (c.r << 16) + (c.g << 8) + c.b;
}
// return pixel brightness between 0 and 1 based on human perceptual bias
function getBrightness(c)
{
return ( 0.34 * c.r + 0.5 * c.g + 0.16 * c.b );
}
</script>
Any help anyone could provide would be much appreciated, even if it's just pointing me in the right direction, as I am only just beginning to experiment with this stuff and have almost given myself an aneurysm trying to wrap my tiny mind around it.
The slow memory leak is most likely due to:
// add a line
var _line = new THREE.Line(_geometry, _material);
//log(line);
_lineGroup.add(_line);
THREE.Line is an object containing other objects and lots of data. Every time you instantiate it, it creates .matrix, .matrixWorld, .modelViewMatrix, and .normalMatrix, which are all arrays with a bunch of numbers. .position, .quaternion, .scale, .rotation, and probably .up are vectors, quaternions, etc.; they are slightly smaller, but also arrays with special constructors.
Allocating all of this every 16 milliseconds, only for it to be released the next frame, is probably the cause of your "leak".
You should create a pool of THREE.Line objects and draw that every frame instead. You can control the number of drawn objects with .visible, and mutate their transformation properties.
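A sketch of such a pool (assuming the legacy THREE.Geometry API and the _lineGroup from your code; getZ is a hypothetical callback mapping a grid cell to a depth value):
// build the pool once, at init time
var poolMaterial = new THREE.LineBasicMaterial({ color: 0xffffff });
var linePool = [];
function initPool(rows, cols) {
    for (var i = 0; i < rows; i++) {
        var geometry = new THREE.Geometry();
        for (var j = 0; j < cols; j++) {
            geometry.vertices.push(new THREE.Vector3());
        }
        var line = new THREE.Line(geometry, poolMaterial);
        linePool.push(line);
        _lineGroup.add(line);
    }
}
// every frame: mutate the existing vertices instead of re-instantiating
function updatePool(getZ) {
    linePool.forEach(function (line, i) {
        line.geometry.vertices.forEach(function (v, j) {
            v.set(j, i, getZ(j, i)); // placeholder coords; use your pixel positions
        });
        line.geometry.verticesNeedUpdate = true; // tell three.js to re-upload
    });
}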
@pailhead I took your advice about creating the lines and lineGroup in advance and then updating the vertices on each animation frame instead, and now it's purring like a kitten. I also needed to insert the following line to make sure the updated coords are picked up:
e.geometry.verticesNeedUpdate = true;
I can't figure out how to get a hosted video to work on CodePen (cross-origin policy violation issues), but I have put a version up anyway to show the working code.
https://codepen.io/bw1984/pen/pezOXm
I will try to get a self-hosted (working) version up as soon as i can
I've been trying in vain to get colour working, but that will have to be an exercise for another day.
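For reference, the usual route to colour with this legacy Geometry API would be vertex colours; a hedged, untested sketch:
// let each vertex carry its own colour instead of one colour for the material
var colouredMaterial = new THREE.LineBasicMaterial({ vertexColors: THREE.VertexColors });
// per frame, alongside the vertex update (using getColor from the question):
// line.geometry.colors[j].setHex(getColor(x, y, _data));
// line.geometry.colorsNeedUpdate = true;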
I'm trying to make an app that will simulate long exposure photography. The idea is that I grab the current frame from the webcam and composite it onto a canvas. Over time, the photo will 'expose', getting brighter and brighter. (see http://www.chromeexperiments.com/detail/light-paint-live-mercury/?f=)
I have a shader that works perfectly. It's just like the 'add' blend mode in Photoshop. The problem is that I can't get it to recycle the previous frame.
I thought that it would be something simple like renderer.autoClear = false; but that option seems to do nothing in this context.
Here's the code that uses THREE.EffectComposer to apply the shader.
onWebcamInit: function () {
var $stream = $("#user-stream"),
width = $stream.width(),
height = $stream.height(),
near = .1,
far = 10000;
this.renderer = new THREE.WebGLRenderer();
this.renderer.setSize(width, height);
this.renderer.autoClear = false;
this.scene = new THREE.Scene();
this.camera = new THREE.OrthographicCamera(width / -2, width / 2, height / 2, height / -2, near, far);
this.scene.add(this.camera);
this.$el.append(this.renderer.domElement);
this.frameTexture = new THREE.Texture(document.querySelector("#webcam"));
this.compositeTexture = new THREE.Texture(this.renderer.domElement);
this.composer = new THREE.EffectComposer(this.renderer);
// same effect with or without this line
// this.composer.addPass(new THREE.RenderPass(this.scene, this.camera));
var addEffect = new THREE.ShaderPass(addShader);
addEffect.uniforms[ 'exposure' ].value = .5;
addEffect.uniforms[ 'frameTexture' ].value = this.frameTexture;
addEffect.renderToScreen = true;
this.composer.addPass(addEffect);
this.plane = new THREE.Mesh(new THREE.PlaneGeometry(width, height, 1, 1), new THREE.MeshBasicMaterial({map: this.compositeTexture}));
this.scene.add(this.plane);
this.frameTexture.needsUpdate = true;
this.compositeTexture.needsUpdate = true;
new FrameImpulse(this.renderFrame);
},
renderFrame: function () {
this.frameTexture.needsUpdate = true;
this.compositeTexture.needsUpdate = true;
this.composer.render();
}
Here is the shader. Nothing fancy.
uniforms: {
"tDiffuse": { type: "t", value: null },
"frameTexture": { type: "t", value: null },
"exposure": { type: "f", value: 1.0 }
},
vertexShader: [
"varying vec2 vUv;",
"void main() {",
"vUv = uv;",
"gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );",
"}"
].join("\n"),
fragmentShader: [
"uniform sampler2D frameTexture;",
"uniform sampler2D tDiffuse;",
"uniform float exposure;",
"varying vec2 vUv;",
"void main() {",
"vec4 n = texture2D(frameTexture, vUv);",
"vec4 o = texture2D(tDiffuse, vUv);",
"vec3 sum = n.rgb + o.rgb;",
"gl_FragColor = vec4(mix(o.rgb, sum.rgb, exposure), 1.0);",
"}"
].join("\n")
This is in essence equivalent to posit labs' answer, but I've had success with a more streamlined solution: I create an EffectComposer with only the ShaderPass I want recycled, then swap render targets for that composer with each render.
Initialization:
THREE.EffectComposer.prototype.swapTargets = function() {
var tmp = this.renderTarget2;
this.renderTarget2 = this.renderTarget1;
this.renderTarget1 = tmp;
};
...
composer = new THREE.EffectComposer(renderer,
new THREE.WebGLRenderTarget(512, 512, { minFilter: THREE.LinearFilter, magFilter: THREE.NearestFilter, format: THREE.RGBFormat })
);
var addEffect = new THREE.ShaderPass(addShader, 'frameTexture');
addEffect.renderToScreen = true;
composer.addPass(addEffect);
render:
composer.render();
composer.swapTargets();
A secondary EffectComposer can then take one of the two render targets and push it to the screen or transform it further.
Also note that I declare "frameTexture" as the textureID when initializing the ShaderPass. This lets the ShaderPass know to update the frameTexture uniform with the result of the previous pass.
To achieve this kind of feedback effect, you have to alternate between writing to two separate instances of WebGLRenderTarget; otherwise, the frame buffer is overwritten. I'm not totally sure why this happens... but here is the solution.
init:
this.rt1 = new THREE.WebGLRenderTarget(512, 512, { minFilter: THREE.LinearFilter, magFilter: THREE.NearestFilter, format: THREE.RGBFormat });
this.rt2 = new THREE.WebGLRenderTarget(512, 512, { minFilter: THREE.LinearFilter, magFilter: THREE.NearestFilter, format: THREE.RGBFormat });
render:
this.renderer.render(this.scene, this.camera);
this.renderer.render(this.scene, this.camera, this.rt1, false);
// swap buffers
var a = this.rt2;
this.rt2 = this.rt1;
this.rt1 = a;
this.shaders.add.uniforms.tDiffuse.value = this.rt2;
Try with this:
this.renderer = new THREE.WebGLRenderer( { preserveDrawingBuffer: true } );
With preserveDrawingBuffer set to true, the canvas keeps its contents between frames instead of clearing them, so the previous frame remains available for compositing.