Load Image Generated From A Function in p5.js canvas

I'm trying to load an image that is returned as the result of a function in a library called gaborgen.js.
The idea is that I should be able to generate a Gabor patch image using this library, store it in a p5 image variable, and display it on the canvas.
The library exposes a function gaborgen() that takes two parameters, so gaborgen(50, 50) returns a Gabor patch with those attributes. It returns the image as a base64 PNG.
I tried to load the image as follows, but it failed. The resulting sketch is just a blank screen.
var img;

function setup() {
  createCanvas(640, 360);
  img = createImg(gaborgen(50, 40));
}

function draw() {
  background(0);
  image(img, 0, 0, img.elt.width, img.elt.height);
}
The gaborgen() function in the Gaborgen.js library is as follows:
gaborgen = function(tilt, sf) {
  var a, aspectratio, b, contrast, gab_x, gab_y, gridArray, i, j, m, multConst, phase, preSinWave, ref, reso, sc, scaledM, sf_max, sf_min, sinWave, tilt_max, tilt_min, varScale, x, x_centered, x_factor, y, y_centered, y_factor;
  if ((tilt > 100 || tilt < 1) || (sf > 100 || sf < 1)) {
    console.log("ERROR: gaborgen arguenment input out of bounds");
  }
  reso = 400;
  phase = 0;
  sc = 50.0;
  contrast = 100.0;
  aspectratio = 1.0;
  tilt_min = 0;
  tilt_max = 90;
  sf_min = .01;
  sf_max = .1;
  tilt = rescale_core(tilt, tilt_min, tilt_max, 1, 100);
  sf = rescale_core(sf, sf_min, sf_max, 1, 100);
  x = reso / 2;
  y = reso / 2;
  a = numeric.cos([deg2rad(tilt)]) * sf * 360;
  b = numeric.sin([deg2rad(tilt)]) * sf * 360;
  multConst = 1 / (numeric.sqrt([2 * pi]) * sc);
  varScale = 2 * numeric.pow([sc], 2);
  gridArray = numeric.linspace(0, reso);
  ref = meshgrid(gridArray), gab_x = ref[0], gab_y = ref[1];
  x_centered = numeric.sub(gab_x, x);
  y_centered = numeric.sub(gab_y, y);
  x_factor = numeric.mul(numeric.pow(x_centered, 2), -1);
  y_factor = numeric.mul(numeric.pow(y_centered, 2), -1);
  preSinWave = numeric.add(numeric.add(numeric.mul(a, x_centered), numeric.mul(b, y_centered)), phase);
  i = 0;
  while (i < reso) {
    j = 0;
    while (j < reso) {
      preSinWave[i][j] = deg2rad(preSinWave[i][j]);
      j += 1;
    }
    i += 1;
  }
  sinWave = numeric.sin(preSinWave);
  m = numeric.add(.5, numeric.mul(contrast, numeric.transpose(numeric.mul(numeric.mul(multConst, numeric.exp(numeric.add(numeric.div(x_factor, varScale), numeric.div(y_factor, varScale)))), sinWave))));
  scaledM = rescale(m, 0, 254);
  return numeric.imageURL([scaledM, scaledM, scaledM]);
};
Any idea how I can load a base64 PNG returned by a function into p5.js like this?

You can use a base64-encoded image directly in p5.js by passing the data URI string into the loadImage() function. Here's an example:
var img;

function setup() {
  createCanvas(400, 400);
  img = loadImage('data:image/png;base64, iVBORw0KGgoAAAANSUhEUgAAAAUAAAAFCAYAAACNbyblAAAAHElEQVQI12P4//8/w38GIAXDIBKE0DHxgljNBAAO9TXL0Y4OHwAAAABJRU5ErkJggg==');
}

function draw() {
  background(220);
  image(img, 0, 0, width, height);
}
Notice the data:image/png;base64, part of the argument.
I don't know exactly what the gaborgen() function returns, so you're going to need to do some debugging to figure out where your code breaks down. Work forward in small steps and check your developer tools for errors.
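Putting those two pieces together, here is a minimal sketch, assuming gaborgen() returns a complete data URI (a string starting with data:image/png;base64,); if it returns only the raw base64 payload, you'd prepend that prefix yourself:

var img;

function setup() {
  createCanvas(640, 360);
  // Assumption: gaborgen() returns a full data URI usable by loadImage().
  loadImage(gaborgen(50, 40), function(loaded) {
    img = loaded; // loadImage is asynchronous, so store the result in the callback
  });
}

function draw() {
  background(0);
  if (img) { // only draw once the image has finished loading
    image(img, 0, 0, img.width, img.height);
  }
}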

Related

Trying to use createGraphics() in p5.js along with eye tracker and it isn't working

I'm still a beginner coder, and I'm making a project where, when the eye positions of two clients (networked over sockets) land on the same position on the canvas, the music note they land on is played. I'm still in the beginning stages of this project; currently I'm trying to draw the clients' eye positions on the canvas while keeping the grid of music notes on a p5 renderer object. I had coded the grid to add an ellipse where the mouse was clicked. The grid is successfully drawn, but I can no longer interact with it, i.e. add or remove ellipses on click.
So now I'm a bit lost on how to solve this issue. Before, when the eye was being drawn on the p5.Renderer, I also tried the clear() function to get rid of the trails, but it didn't work.
So I have two problems: 1) trying to get rid of the eye position trails, and 2) createGraphics() with the mousePressed() function not working in the code below.
// NETWORKED + EYE VARIABLES
let socket = io();
let ctracker;
let clients = {};
let data = {};
let w = 1200;
let h = 600;
let leftEyeX, leftEyeY, rightEyeX, rightEyeY;
let cnvPosX;
let cnvPosY;

/// SOUND VARIABLES //
let bell, bell1, bell2, bell3; // contain the sound sources
let bPat, b1Pat, b2Pat; // arrays of numbers that we can use to make beats // 1 = on, 0 = off
let bPhrase, b1Phrase, b2Phrase; // define how the beat pattern is interpreted
let part; // attach all the instruments to it to make it into a machine, i.e. on/off
let bpmCTRL;
let beatLength; // how big the sequence is before it starts looping
let cellSize;
let cnv;
let overlayCnv;
let bg, largeText, smallText, MleftEye, MRightEye;
let row1 = [];

function preload() {
  // background
  bg = loadImage("https://cdn.glitch.com/e024d4d5-2c9e-473c-acd3-c2e01a1ef9cd%2Fdaw-bg.png?v=1589319965142");
  // main game instruction
  largeText = loadImage("https://cdn.glitch.com/e024d4d5-2c9e-473c-acd3-c2e01a1ef9cd%2FAsset%2015.png?v=1589381930278");
  // small game description
  smallText = loadImage("https://cdn.glitch.com/e024d4d5-2c9e-473c-acd3-c2e01a1ef9cd%2FAsset%203.png?v=1589381926354");
  // sound files
  bell = loadSound(
    "https://cdn.glitch.com/e024d4d5-2c9e-473c-acd3-c2e01a1ef9cd%2F203490__tesabob2001__g-5.mp3?v=1589326854551",
    () => {}
  );
  bell1 = loadSound(
    "https://cdn.glitch.com/e024d4d5-2c9e-473c-acd3-c2e01a1ef9cd%2F203485__tesabob2001__c5.mp3?v=1589326924689",
    () => {}
  );
  bell2 = loadSound(
    "https://cdn.glitch.com/e024d4d5-2c9e-473c-acd3-c2e01a1ef9cd%2F203489__tesabob2001__f5.mp3?v=1589326917439",
    () => {}
  );
  bell3 = loadSound(
    "https://cdn.glitch.com/e024d4d5-2c9e-473c-acd3-c2e01a1ef9cd%2F203491__tesabob2001__g-4.mp3?v=1589326867294",
    () => {}
  );
}

function setup() {
  cnvPosX = 120;
  cnvPosY = 50;
  // set up camera capture
  var videoInput = createCapture(VIDEO);
  videoInput.size(w, h);
  videoInput.position(cnvPosX, cnvPosY);
  videoInput.hide();
  // set up the face tracker
  ctracker = new clm.tracker();
  ctracker.init(pModel);
  ctracker.start(videoInput.elt);
  noStroke();
  socket.on('getUpdate', function(data) {
    clients[data.name] = data;
  });
  cnv = createCanvas(w, h);
  cnv.position(cnvPosX, cnvPosY);
  overlayCnv = createGraphics(w, h);
  // overlayCnv.position(cnvPosX, cnvPosY);
  overlayCnv.mousePressed(canvasPressed);
  beatLength = 6;
  cellSize = 200;
  numOfRows = 3;
  // canvas for eyes
  // basic structure of a DAW
  // time is a scheduled delay in note play time
  bPat = [0, 0, 0, 0, 0, 0];
  b1Pat = [0, 0, 0, 0, 0, 0];
  b2Pat = [0, 0, 0, 0, 0, 0];

  function selectSong() {
    row1 = [bell3, bell];
    selected = row1[floor(random(2))];
    console.log(selected);
    selected.play();
  }

  // name, callback, array
  bPhrase = new p5.Phrase(
    "bell",
    time => {
      selectSong();
    },
    bPat
  );
  b1Phrase = new p5.Phrase(
    "bell1",
    time => {
      // make a variable to go there instead of bell -> use the random function to give a value to the variable
      bell1.play(time);
    },
    b1Pat
  );
  b2Phrase = new p5.Phrase(
    "bell2",
    time => {
      bell2.play(time);
    },
    b2Pat
  );
  part = new p5.Part();
  part.addPhrase(bPhrase);
  part.addPhrase(b1Phrase);
  part.addPhrase(b2Phrase);
  bpmCTRL = createSlider(30, 200, 60, 1); // smallest val, highest val, starting val, incremental val
  bpmCTRL.position(10, 10); // x, y
  bpmCTRL.input(() => {
    part.setBPM(bpmCTRL.value());
  });
  part.setBPM("60");
  drawMatrix();

  ///// user interaction
  function canvasPressed() {
    console.log("mousepressed");
    let rowClicked = floor(numOfRows * (mouseY / height));
    let columnClicked = floor(beatLength * (mouseX / width));
    if (rowClicked === 0) {
      console.log("first row");
      bPat[columnClicked] = +!bPat[columnClicked];
    } else if (rowClicked === 1) {
      console.log("second row");
      b1Pat[columnClicked] = +!b1Pat[columnClicked];
    } else if (rowClicked === 2) {
      console.log("third row");
      b2Pat[columnClicked] = +!b2Pat[columnClicked];
    }
    drawMatrix();
  }

  /// drawing the grid
  function drawMatrix() {
    overlayCnv.background(bg);
    // lines
    overlayCnv.stroke(25, 40, 100);
    overlayCnv.strokeWeight(2);
    for (let i = 0; i < beatLength + 1; i++) {
      overlayCnv.line(i * cellSize, 0, i * cellSize, height);
    }
    for (let i = 0; i < numOfRows + 1; i++) {
      overlayCnv.line(0, (i * height) / numOfRows, width, (i * height) / numOfRows);
    }
    // circles
    overlayCnv.noStroke();
    overlayCnv.fill(25, 40, 100);
    for (let i = 0; i < beatLength; i++) {
      if (bPat[i] === 1) {
        overlayCnv.ellipse(i * cellSize + 0.5 * cellSize, 100, 50);
      }
      if (b1Pat[i] === 1) {
        overlayCnv.ellipse(i * cellSize + 0.5 * cellSize, 300, 40);
      }
      if (b2Pat[i] === 1) {
        overlayCnv.ellipse(i * cellSize + 0.5 * cellSize, 500, 30);
      }
    }
  }

  image(overlayCnv, 0, 0);
}

function draw() {
  let positions = ctracker.getCurrentPosition();
  for (let i = 0; i < positions.length; i++) {
    // draw an ellipse at each eye position point
    leftEyeX = positions[27][0];
    leftEyeY = positions[27][1];
    rightEyeX = positions[32][0];
    rightEyeY = positions[32][1];
    // ellipse(positions[i][0], positions[i][1], 8, 8);
    fill(255);
    ellipse(rightEyeX, rightEyeY, 18, 18);
    fill(255);
    ellipse(leftEyeX, leftEyeY, 18, 18);
    // format each point into a dict entry to send over socket.io
    data[(27).toString() + "." + '0'] = leftEyeX;
    data[(27).toString() + "." + '1'] = leftEyeY;
    data[(32).toString() + "." + '0'] = rightEyeX;
    data[(32).toString() + "." + '1'] = rightEyeY;
    // image(eyeCnv, 0, 0); // x, y relative to canvas x & y
  }
}

/////// conditional to check if all files are loaded
function keyPressed() {
  if (key === " ") {
    if (bell.isLoaded() && bell1.isLoaded() && bell2.isLoaded()) {
      if (!part.isPlaying) {
        part.loop();
      } else {
        part.stop();
      }
    } else {
      console.log("relax");
    }
  }
}
Never mind, the error was just that the background kept being drawn over everything in draw().
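For the record, a minimal sketch of that kind of fix (an assumption based on the code above, not the poster's exact final version): repaint the grid graphics each frame instead of clearing to a flat background, so old eye markers are wiped but the grid stays visible:

function draw() {
  image(overlayCnv, 0, 0); // repainting the grid each frame erases the old eye markers
  let positions = ctracker.getCurrentPosition();
  if (positions) {
    fill(255);
    ellipse(positions[27][0], positions[27][1], 18, 18); // left eye
    ellipse(positions[32][0], positions[32][1], 18, 18); // right eye
  }
}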

Generate the Dominant Colors for an RGB image with XMLHttpRequest

A Note For Readers: This is a long question, but it needs the background to make sense of what's being asked.
The color quantization technique is commonly used to get the dominant colors of an image.
One of the well-known libraries that does color quantization is Leptonica, through the Modified Median Cut Quantization (MMCQ) and octree quantization (OQ) algorithms.
Github's Color-thief by @lokesh is a very simple JavaScript implementation of the MMCQ algorithm:
var colorThief = new ColorThief();
colorThief.getColor(sourceImage);
Technically, the image in an <img/> HTML element is backed by a <canvas/> element:
var CanvasImage = function(image) {
  this.canvas = document.createElement('canvas');
  this.context = this.canvas.getContext('2d');
  document.body.appendChild(this.canvas);
  this.width = this.canvas.width = image.width;
  this.height = this.canvas.height = image.height;
  this.context.drawImage(image, 0, 0, this.width, this.height);
};
And that is the problem with TVML, as we will see later on.
Another implementation I recently came across was linked in the article Using imagemagick, awk and kmeans to find dominant colors in images, which links to Using python to generate awesome linux desktop themes.
The author posted an article about Using python and k-means to find the dominant colors in images that was used there (sorry for all those links, but I'm following my history back...).
The author was super productive and added a JavaScript version too, which I'm posting here: Using JavaScript and k-means to find the dominant colors in images.
In this case we are generating the dominant colors of an image not with the MMCQ (or OQ) algorithm, but with k-means.
The problem is that the image must be on a <canvas/> as well:
<canvas id="canvas" style="display: none;" width="200" height="200"></canvas>
and then
function analyze(img_elem) {
  var ctx = document.getElementById('canvas').getContext('2d'),
      img = new Image();
  img.onload = function() {
    var results = document.getElementById('results');
    results.innerHTML = 'Waiting...';
    var colors = process_image(img, ctx),
        p1 = document.getElementById('c1'),
        p2 = document.getElementById('c2'),
        p3 = document.getElementById('c3');
    p1.style.backgroundColor = colors[0];
    p2.style.backgroundColor = colors[1];
    p3.style.backgroundColor = colors[2];
    results.innerHTML = 'Done';
  }
  img.src = img_elem.src;
}
This is because the canvas has a getContext() method, which exposes the 2D drawing API - see An introduction to the Canvas 2D API.
This context, ctx, is passed to the image processing function:
function process_image(img, ctx) {
  var points = [];
  ctx.drawImage(img, 0, 0, 200, 200);
  data = ctx.getImageData(0, 0, 200, 200).data;
  for (var i = 0, l = data.length; i < l; i += 4) {
    var r = data[i],
        g = data[i + 1],
        b = data[i + 2];
    points.push([r, g, b]);
  }
  var results = kmeans(points, 3, 1),
      hex = [];
  for (var i = 0; i < results.length; i++) {
    hex.push(rgbToHex(results[i][0]));
  }
  return hex;
}
So you can draw an image on the Canvas through the Context and get image data:
ctx.drawImage(img, 0, 0, 200, 200);
data = ctx.getImageData(0, 0, 200, 200).data;
Another nice solution is ColorTunes, written in CoffeeScript, but it uses a <canvas/> as well:
ColorTunes.getColorMap = function(canvas, sx, sy, w, h, nc) {
  var index, indexBase, pdata, pixels, x, y, _i, _j, _ref, _ref1;
  if (nc == null) {
    nc = 8;
  }
  pdata = canvas.getContext("2d").getImageData(sx, sy, w, h).data;
  pixels = [];
  for (y = _i = sy, _ref = sy + h; _i < _ref; y = _i += 1) {
    indexBase = y * w * 4;
    for (x = _j = sx, _ref1 = sx + w; _j < _ref1; x = _j += 1) {
      index = indexBase + (x * 4);
      pixels.push([pdata[index], pdata[index + 1], pdata[index + 2]]);
    }
  }
  return (new MMCQ).quantize(pixels, nc);
};
But, wait, we have no <canvas/> element in TVML!
Of course, there are native solutions like the Objective-C ColorCube and DominantColor (this one uses k-means),
and the very nice and reusable ColorArt by @AaronBrethorst from CocoaControls.
Despite the fact that these could be used in a TVML application through a native-to-JavaScriptCore bridge - see How to bridge TVML/JavaScriptCore to UIKit/Objective-C (Swift)? -
my aim is to make this work completely in TVJS and TVML.
The simplest MMCQ JavaScript implementation does not need a canvas - see the basic JavaScript port of the MMCQ (modified median cut quantization) algorithm by Nick Rabinowitz - but it does need the RGB array of the image:
var cmap = MMCQ.quantize(pixelArray, colorCount);
which is normally taken from the HTML <canvas/>, and that is the reason for it!
function createPalette(sourceImage, colorCount) {
  // Create custom CanvasImage object
  var image = new CanvasImage(sourceImage),
      imageData = image.getImageData(),
      pixels = imageData.data,
      pixelCount = image.getPixelCount();

  // Store the RGB values in an array format suitable for quantize function
  var pixelArray = [];
  for (var i = 0, offset, r, g, b, a; i < pixelCount; i++) {
    offset = i * 4;
    r = pixels[offset + 0];
    g = pixels[offset + 1];
    b = pixels[offset + 2];
    a = pixels[offset + 3];
    // If pixel is mostly opaque and not white
    if (a >= 125) {
      if (!(r > 250 && g > 250 && b > 250)) {
        pixelArray.push([r, g, b]);
      }
    }
  }

  // Send array to quantize function which clusters values
  // using median cut algorithm
  var cmap = MMCQ.quantize(pixelArray, colorCount);
  var palette = cmap.palette();

  // Clean up
  image.removeCanvas();
  return palette;
}
[QUESTION]
How do you generate the dominant colors of an RGB image without using the HTML5 <canvas/>, in pure JavaScript, from an image's ByteArray fetched with XMLHttpRequest?
[UPDATE]
I have posted this question to Color-Thief github repo, adapting the RGB array calculations to the latest codebase.
The solution I tried was this:
ColorThief.prototype.getPaletteNoCanvas = function(sourceImageURL, colorCount, quality, done) {
  var xhr = new XMLHttpRequest();
  xhr.open('GET', sourceImageURL, true);
  xhr.responseType = 'arraybuffer';
  xhr.onload = function(e) {
    if (this.status == 200) {
      var uInt8Array = new Uint8Array(this.response);
      var i = uInt8Array.length;
      var biStr = new Array(i);
      while (i--) {
        biStr[i] = String.fromCharCode(uInt8Array[i]);
      }
      if (typeof colorCount === 'undefined') {
        colorCount = 10;
      }
      if (typeof quality === 'undefined' || quality < 1) {
        quality = 10;
      }
      var pixels = uInt8Array;
      var pixelCount = 152 * 152 * 4; // this should be width*height*4

      // Store the RGB values in an array format suitable for quantize function
      var pixelArray = [];
      for (var i = 0, offset, r, g, b, a; i < pixelCount; i = i + quality) {
        offset = i * 4;
        r = pixels[offset + 0];
        g = pixels[offset + 1];
        b = pixels[offset + 2];
        a = pixels[offset + 3];
        // If pixel is mostly opaque and not white
        if (a >= 125) {
          if (!(r > 250 && g > 250 && b > 250)) {
            pixelArray.push([r, g, b]);
          }
        }
      }

      // Send array to quantize function which clusters values
      // using median cut algorithm
      var cmap = MMCQ.quantize(pixelArray, colorCount);
      var palette = cmap ? cmap.palette() : null;
      done.apply(this, [palette]);
    } // 200
  };
  xhr.send();
}
but it does not give back the right RGB colors array.
[UPDATE]
Thanks to all the suggestions, I got it working. A full example is now available on Github.
The canvas element is being used as a convenient way to decode the image into an RGBA array. You can also use pure JavaScript libraries to do the image decoding.
jpgjs is a JPEG decoder and pngjs is a PNG decoder. It looks like the JPEG decoder will work with TVJS as is. The PNG decoder, however, looks like it's made to work in a Node or web browser environment, so you might have to tweak that one a bit.
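As a rough sketch of how such a decoder would slot into the createPalette() logic from the question (decodeImage() here is a hypothetical helper wrapping whichever decoder you pick; it is assumed to return { width, height, data } with RGBA bytes, like ImageData):

function getPaletteFromBytes(arrayBuffer, colorCount, quality) {
  // decodeImage() is a hypothetical stand-in for jpgjs/pngjs decoding,
  // assumed to return { width, height, data } where data is an RGBA byte array.
  var decoded = decodeImage(new Uint8Array(arrayBuffer));
  var pixels = decoded.data;
  var pixelCount = decoded.width * decoded.height; // pixels, not bytes
  var pixelArray = [];
  for (var i = 0; i < pixelCount; i += quality) {
    var offset = i * 4;
    var r = pixels[offset], g = pixels[offset + 1],
        b = pixels[offset + 2], a = pixels[offset + 3];
    // same opacity / non-white filter as createPalette() above
    if (a >= 125 && !(r > 250 && g > 250 && b > 250)) {
      pixelArray.push([r, g, b]);
    }
  }
  var cmap = MMCQ.quantize(pixelArray, colorCount);
  return cmap ? cmap.palette() : null;
}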

Create a waveform of the full track with Web Audio API

Realtime moving Waveform
I'm currently playing with Web Audio API and made a spectrum using canvas.
function animate() {
  var a = new Uint8Array(analyser.frequencyBinCount),
      y = new Uint8Array(analyser.frequencyBinCount), b, c, d;
  analyser.getByteTimeDomainData(y);
  analyser.getByteFrequencyData(a);
  b = c = a.length;
  d = w / c;
  ctx.clearRect(0, 0, w, h);
  while (b--) {
    var bh = a[b] + 1;
    ctx.fillStyle = 'hsla(' + (b / c * 240) + ',' + (y[b] / 255 * 100 | 0) + '%,50%,1)';
    ctx.fillRect(1 * b, h - bh, 1, bh);
    ctx.fillRect(1 * b, y[b], 1, 1);
  }
  animation = webkitRequestAnimationFrame(animate);
}
Mini question: is there a way to avoid writing new Uint8Array(analyser.frequencyBinCount) twice?
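One way to answer the mini question (a sketch, assuming analyser is already set up): allocate the two buffers once outside animate() and just refill them every frame:

var freqData = new Uint8Array(analyser.frequencyBinCount);
var timeData = new Uint8Array(analyser.frequencyBinCount);

function animate() {
  analyser.getByteFrequencyData(freqData); // refills the same buffer each frame
  analyser.getByteTimeDomainData(timeData);
  // ...draw exactly as before, reading from freqData / timeData...
  animation = webkitRequestAnimationFrame(animate);
}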
DEMO
Add an MP3/MP4 file and wait (tested in Chrome).
http://jsfiddle.net/pc76H/2/
But there are many problems. I can't find proper documentation for the various audio filters.
Also, if you look at the spectrum you will notice that after 70% of the range there is no data. What does that mean? That maybe from 16 kHz to 20 kHz there is no sound? I would like to add text to the canvas to show the various Hz - but where?
I found out that the returned data length is a power of 2, with a max of 2048,
and the height is always 256.
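Those numbers come straight from the AnalyserNode configuration; here is a short sketch of the relationship, and of how you could label the bins in Hz (assuming audioContext and analyser are the ones from the fiddle):

var analyser = audioContext.createAnalyser();
analyser.fftSize = 2048;                  // must be a power of 2
console.log(analyser.frequencyBinCount);  // fftSize / 2 = 1024 bins
// each byte value is 0..255, which is why the height is always 256;
// bin i covers frequencies around i * sampleRate / fftSize Hz:
var hzPerBin = audioContext.sampleRate / analyser.fftSize;
function labelForBin(i) {
  return Math.round(i * hzPerBin) + ' Hz';
}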
BUT the real question is... I want to create a moving waveform like in Traktor.
I already did that some time ago with PHP: it converts the file to a low bitrate, then extracts the data and converts it to an image. I found the script somewhere... but I don't remember where...
Note: it needs LAME.
<?php
$a = $_GET["f"];
if (file_exists($a)) {
  if (file_exists($a.".png")) {
    header("Content-Type: image/png");
    echo file_get_contents($a.".png");
  } else {
    $b = 3000; $c = 300; define("d", 3);
    ini_set("max_execution_time", "30000");
    function n($g, $h) {
      $g = hexdec(bin2hex($g));
      $h = hexdec(bin2hex($h));
      return ($g + ($h * 256));
    };
    $k = substr(md5(time()), 0, 10);
    copy(realpath($a), "/var/www/".$k."_o.mp3");
    exec("lame /var/www/{$k}_o.mp3 -f -m m -b 16 --resample 8 /var/www/{$k}.mp3 && lame --decode /var/www/{$k}.mp3 /var/www/{$k}.wav");
    //system("lame {$k}_o.mp3 -f -m m -b 16 --resample 8 {$k}.mp3 && lame --decode {$k}.mp3 {$k}.wav");
    #unlink("/var/www/{$k}_o.mp3");
    #unlink("/var/www/{$k}.mp3");
    $l = "/var/www/{$k}.wav";
    $m = fopen($l, "r");
    $n[] = fread($m, 4);
    $n[] = bin2hex(fread($m, 4));
    $n[] = fread($m, 4);
    $n[] = fread($m, 4);
    $n[] = bin2hex(fread($m, 4));
    $n[] = bin2hex(fread($m, 2));
    $n[] = bin2hex(fread($m, 2));
    $n[] = bin2hex(fread($m, 4));
    $n[] = bin2hex(fread($m, 4));
    $n[] = bin2hex(fread($m, 2));
    $n[] = bin2hex(fread($m, 2));
    $n[] = fread($m, 4);
    $n[] = bin2hex(fread($m, 4));
    $o = hexdec(substr($n[10], 0, 2));
    $p = $o / 8;
    $q = hexdec(substr($n[6], 0, 2));
    if ($q == 2) { $r = 40; } else { $r = 80; };
    while (!feof($m)) {
      $t = array();
      for ($i = 0; $i < $p; $i++) {
        $t[$i] = fgetc($m);
      };
      switch ($p) {
        case 1: $s[] = n($t[0], $t[1]); break;
        case 2:
          if (ord($t[1]) & 128) { $u = 0; } else { $u = 128; };
          $u = chr((ord($t[1]) & 127) + $u);
          $s[] = floor(n($t[0], $u) / 256);
          break;
      };
      fread($m, $r);
    };
    fclose($m);
    unlink("/var/www/{$k}.wav");
    $x = imagecreatetruecolor(sizeof($s) / d, $c);
    imagealphablending($x, false);
    imagesavealpha($x, true);
    $y = imagecolorallocatealpha($x, 255, 255, 255, 127);
    imagefilledrectangle($x, 0, 0, sizeof($s) / d, $c, $y);
    for ($d = 0; $d < sizeof($s); $d += d) {
      $v = (int)($s[$d] / 255 * $c);
      imageline($x, $d / d, 0 + ($c - $v), $d / d, $c - ($c - $v), imagecolorallocate($x, 255, 0, 255));
    };
    $z = imagecreatetruecolor($b, $c);
    imagealphablending($z, false);
    imagesavealpha($z, true);
    imagefilledrectangle($z, 0, 0, $b, $c, $y);
    imagecopyresampled($z, $x, 0, 0, 0, 0, $b, $c, sizeof($s) / d, $c);
    imagepng($z, realpath($a).".png");
    header("Content-Type: image/png");
    imagepng($z);
    imagedestroy($z);
  };
} else {
  echo $a;
};
?>
The script works... but you are limited to a max image size of 4k pixels,
so you don't get a nice waveform if it should represent only a few milliseconds.
What do I need to store/create a realtime moving waveform like the Traktor app or this PHP script? By the way, Traktor also has a colored waveform (the PHP script does not).
EDIT
I rewrote your script so that it fits my idea... it's relatively fast.
As you can see, inside the createArray function I push the various lines into an object with the x coordinate as the key.
I'm simply taking the highest number.
Here is where we could play with the colors.
var ajaxB, AC, B, LC, op, x, y, ARRAY = {}, W = 1024, H = 256;
var aMax = Math.max.apply.bind(Math.max, Math);

function error(a) {
  console.log(a);
};

function createDrawing() {
  console.log('drawingArray');
  var C = document.createElement('canvas');
  C.width = W;
  C.height = H;
  document.body.appendChild(C);
  var context = C.getContext('2d');
  context.save();
  context.strokeStyle = '#121';
  context.globalCompositeOperation = 'lighter';
  L2 = W * 1;
  while (L2--) {
    context.beginPath();
    context.moveTo(L2, 0);
    context.lineTo(L2 + 1, ARRAY[L2]);
    context.stroke();
  }
  context.restore();
};

function createArray(a) {
  console.log('creatingArray');
  B = a;
  LC = B.getChannelData(0); // Float32Array describing left channel
  L = LC.length;
  op = W / L;
  for (var i = 0; i < L; i++) {
    x = W * i / L | 0;
    y = LC[i] * H / 2;
    if (ARRAY[x]) {
      ARRAY[x].push(y);
    } else {
      !ARRAY[x - 1] || (ARRAY[x - 1] = aMax(ARRAY[x - 1]));
      // the above line contains an array of values
      // which could be converted to a color
      // or just simply create a gradient
      // based on avg max min (frequency???) whatever
      ARRAY[x] = [y];
    }
  };
  createDrawing();
};

function decode() {
  console.log('decodingMusic');
  AC = new webkitAudioContext;
  AC.decodeAudioData(this.response, createArray, error);
};

function loadMusic(url) {
  console.log('loadingMusic');
  ajaxB = new XMLHttpRequest;
  ajaxB.open('GET', url);
  ajaxB.responseType = 'arraybuffer';
  ajaxB.onload = decode;
  ajaxB.send();
}

loadMusic('AudioOrVideo.mp4');
OK, so what I would do is load the sound with an XMLHttpRequest, then decode it using the Web Audio API, then display it 'carefully' to get the colors you are looking for.
I just made a quick version, copy-pasting from various of my projects. It works reasonably well, as you can see in this picture:
The issue is that it is slow as hell. To get (more) decent speed, you'll have to do some computation to reduce the number of lines to draw on the canvas, because at 44,100 Hz you very quickly get too many lines to draw.
// AUDIO CONTEXT
window.AudioContext = window.AudioContext || window.webkitAudioContext;
if (!AudioContext) alert('This site cannot be run in your Browser. Try a recent Chrome or Firefox. ');
var audioContext = new AudioContext();
var currentBuffer = null;

// CANVAS
var canvasWidth = 512, canvasHeight = 120;
var newCanvas = createCanvas(canvasWidth, canvasHeight);
var context = null;
window.onload = appendCanvas;
function appendCanvas() {
  document.body.appendChild(newCanvas);
  context = newCanvas.getContext('2d');
}

// MUSIC LOADER + DECODE
function loadMusic(url) {
  var req = new XMLHttpRequest();
  req.open("GET", url, true);
  req.responseType = "arraybuffer";
  req.onreadystatechange = function(e) {
    if (req.readyState == 4) {
      if (req.status == 200)
        audioContext.decodeAudioData(req.response,
          function(buffer) {
            currentBuffer = buffer;
            displayBuffer(buffer);
          }, onDecodeError);
      else
        alert('error during the load. Wrong url or cross origin issue');
    }
  };
  req.send();
}

function onDecodeError() { alert('error while decoding your file.'); }

// MUSIC DISPLAY
function displayBuffer(buff /* is an AudioBuffer */) {
  var leftChannel = buff.getChannelData(0); // Float32Array describing left channel
  var lineOpacity = canvasWidth / leftChannel.length;
  context.save();
  context.fillStyle = '#222';
  context.fillRect(0, 0, canvasWidth, canvasHeight);
  context.strokeStyle = '#121';
  context.globalCompositeOperation = 'lighter';
  context.translate(0, canvasHeight / 2);
  context.globalAlpha = 0.06; // lineOpacity ;
  for (var i = 0; i < leftChannel.length; i++) {
    // on which line do we get ?
    var x = Math.floor(canvasWidth * i / leftChannel.length);
    var y = leftChannel[i] * canvasHeight / 2;
    context.beginPath();
    context.moveTo(x, 0);
    context.lineTo(x + 1, y);
    context.stroke();
  }
  context.restore();
  console.log('done');
}

function createCanvas(w, h) {
  var newCanvas = document.createElement('canvas');
  newCanvas.width = w;
  newCanvas.height = h;
  return newCanvas;
};

loadMusic('could_be_better.mp3');
Edit: The issue here is that we have too much data to draw. Take a 3-minute mp3: you'll have 3 * 60 * 44100 = about 8,000,000 lines to draw. On a display that has, say, 1024 px of horizontal resolution, that makes about 8,000 lines per pixel...
In the code above, the canvas is doing the 'resampling' by drawing lines with low opacity and in 'lighter' composition mode (i.e. the pixels' r, g, b values add up).
To speed things up, you have to resample yourself. But to get some colors, it's not just down-sampling: you'll have to handle a set of 'buckets' (most probably in a typed array, for performance), one for each horizontal pixel (so, say, 1024), and in every bucket compute the cumulated sound pressure, the variance, min and max. Then, at display time, you decide how to render that with colors.
For instance:
values between 0 and positiveMin are very light (any sample is below that point),
values between positiveMin and positiveAverage - variance are darker,
values between positiveAverage - variance and positiveAverage + variance are darker still,
and values between positiveAverage + variance and positiveMax are lighter.
(same for negative values)
That makes 5 colors for each bucket, and it's still quite some work, for you to code and for the browser to compute.
I don't know if the performance can get decent with this, but I fear that the statistical accuracy and the color coding of the software you mention can't be reached in a browser (obviously not in real time), and that you'll have to make some compromises.
Edit 2:
I tried to get some colors out of the stats, but it quite failed. My guess now is that the guys at Traktor also change the color depending on frequency... quite some work there...
Anyway, just for the record, the code for an average / mean-variation version follows
(variance was too low; I had to use mean variation).
// MUSIC DISPLAY
function displayBuffer2(buff /* is an AudioBuffer */) {
  var leftChannel = buff.getChannelData(0); // Float32Array describing left channel
  // we 'resample' with cumul, count, variance
  // Offset 0 : PositiveCumul  1: PositiveCount  2: PositiveVariance
  //        3 : NegativeCumul  4: NegativeCount  5: NegativeVariance
  // that makes 6 data per bucket
  var resampled = new Float64Array(canvasWidth * 6);
  var i = 0, j = 0, buckIndex = 0;
  var min = 1e3, max = -1e3;
  var thisValue = 0, res = 0;
  var sampleCount = leftChannel.length;
  // first pass for mean
  for (i = 0; i < sampleCount; i++) {
    // in which bucket do we fall ?
    buckIndex = 0 | (canvasWidth * i / sampleCount);
    buckIndex *= 6;
    // positive or negative ?
    thisValue = leftChannel[i];
    if (thisValue > 0) {
      resampled[buckIndex] += thisValue;
      resampled[buckIndex + 1] += 1;
    } else if (thisValue < 0) {
      resampled[buckIndex + 3] += thisValue;
      resampled[buckIndex + 4] += 1;
    }
    if (thisValue < min) min = thisValue;
    if (thisValue > max) max = thisValue;
  }
  // compute mean now
  for (i = 0, j = 0; i < canvasWidth; i++, j += 6) {
    if (resampled[j + 1] != 0) {
      resampled[j] /= resampled[j + 1];
    }
    if (resampled[j + 4] != 0) {
      resampled[j + 3] /= resampled[j + 4];
    }
  }
  // second pass for mean variation (variance is too low)
  for (i = 0; i < leftChannel.length; i++) {
    // in which bucket do we fall ?
    buckIndex = 0 | (canvasWidth * i / leftChannel.length);
    buckIndex *= 6;
    // positive or negative ?
    thisValue = leftChannel[i];
    if (thisValue > 0) {
      resampled[buckIndex + 2] += Math.abs(resampled[buckIndex] - thisValue);
    } else if (thisValue < 0) {
      resampled[buckIndex + 5] += Math.abs(resampled[buckIndex + 3] - thisValue);
    }
  }
  // compute mean variation/variance now
  for (i = 0, j = 0; i < canvasWidth; i++, j += 6) {
    if (resampled[j + 1]) resampled[j + 2] /= resampled[j + 1];
    if (resampled[j + 4]) resampled[j + 5] /= resampled[j + 4];
  }
  context.save();
  context.fillStyle = '#000';
  context.fillRect(0, 0, canvasWidth, canvasHeight);
  context.translate(0.5, canvasHeight / 2);
  context.scale(1, 200);
  for (var i = 0; i < canvasWidth; i++) {
    j = i * 6;
    // draw from positiveAvg - variance to negativeAvg - variance
    context.strokeStyle = '#F00';
    context.beginPath();
    context.moveTo(i, (resampled[j] - resampled[j + 2]));
    context.lineTo(i, (resampled[j + 3] + resampled[j + 5]));
    context.stroke();
    // draw from positiveAvg - variance to positiveAvg + variance
    context.strokeStyle = '#FFF';
    context.beginPath();
    context.moveTo(i, (resampled[j] - resampled[j + 2]));
    context.lineTo(i, (resampled[j] + resampled[j + 2]));
    context.stroke();
    // draw from negativeAvg + variance to negativeAvg - variance
    // context.strokeStyle = '#FFF';
    context.beginPath();
    context.moveTo(i, (resampled[j + 3] + resampled[j + 5]));
    context.lineTo(i, (resampled[j + 3] - resampled[j + 5]));
    context.stroke();
  }
  context.restore();
  console.log('done 231 iyi');
}
Based on the top answer, I got this under control by reducing the number of lines to draw and by rearranging the canvas function calls a little. See the following code for reference.
// AUDIO CONTEXT
window.AudioContext = (window.AudioContext ||
                       window.webkitAudioContext ||
                       window.mozAudioContext ||
                       window.oAudioContext ||
                       window.msAudioContext);
if (!AudioContext) alert('This site cannot be run in your Browser. Try a recent Chrome or Firefox. ');
var audioContext = new AudioContext();
var currentBuffer = null;

// CANVAS
var canvasWidth = window.innerWidth, canvasHeight = 120;
var newCanvas = createCanvas(canvasWidth, canvasHeight);
var context = null;
window.onload = appendCanvas;
function appendCanvas() {
  document.body.appendChild(newCanvas);
  context = newCanvas.getContext('2d');
}

// MUSIC LOADER + DECODE
function loadMusic(url) {
  var req = new XMLHttpRequest();
  req.open("GET", url, true);
  req.responseType = "arraybuffer";
  req.onreadystatechange = function(e) {
    if (req.readyState == 4) {
      if (req.status == 200)
        audioContext.decodeAudioData(req.response,
          function(buffer) {
            currentBuffer = buffer;
            displayBuffer(buffer);
          }, onDecodeError);
      else
        alert('error during the load. Wrong url or cross origin issue');
    }
  };
  req.send();
}

function onDecodeError() { alert('error while decoding your file.'); }

// MUSIC DISPLAY
function displayBuffer(buff /* is an AudioBuffer */) {
  var drawLines = 500;
  var leftChannel = buff.getChannelData(0); // Float32Array describing left channel
  var lineOpacity = canvasWidth / leftChannel.length;
  context.save();
  context.fillStyle = '#080808';
  context.fillRect(0, 0, canvasWidth, canvasHeight);
  context.strokeStyle = '#46a0ba';
  context.globalCompositeOperation = 'lighter';
  context.translate(0, canvasHeight / 2);
  //context.globalAlpha = 0.6 ; // lineOpacity ;
  context.lineWidth = 1;
  var totallength = leftChannel.length;
  var eachBlock = Math.floor(totallength / drawLines);
  var lineGap = (canvasWidth / drawLines);
  context.beginPath();
  for (var i = 0; i <= drawLines; i++) {
    var audioBuffKey = Math.floor(eachBlock * i);
    var x = i * lineGap;
    var y = leftChannel[audioBuffKey] * canvasHeight / 2;
    context.moveTo(x, y);
    context.lineTo(x, (y * -1));
  }
  context.stroke();
  context.restore();
}

function createCanvas(w, h) {
  var newCanvas = document.createElement('canvas');
  newCanvas.width = w;
  newCanvas.height = h;
  return newCanvas;
};

loadMusic('could_be_better.mp3');
This is a bit old, sorry to bump, but it's the only post about displaying a full waveform with the Web Audio API, and I'd like to share the method I used.
This method is not perfect, but it only goes through the displayed audio, and it only goes over it once. It also succeeds in displaying an actual waveform for short files or at a big zoom:
and a convincing loudness chart for bigger files zoomed out:
Here is what it looks like at middle zoom; kind of pleasant too:
Notice that both zooms use the same algorithm.
I still struggle with scales (the zoomed waveform is bigger than the zoomed-out one, though not as much bigger as displayed in the images).
This algorithm, I find, is quite efficient (I can change the zoom on a 4-minute track and it redraws flawlessly every 0.1 s).
function drawWaveform(audioBuffer, canvas, pos = 0.5, zoom = 1) {
  const canvasCtx = canvas.getContext("2d")
  const width = canvas.clientWidth
  const height = canvas.clientHeight
  canvasCtx.clearRect(0, 0, width, height)
  canvasCtx.fillStyle = "rgb(255, 0, 0)"

  // calculate displayed part of audio
  // and slice audio buffer to only process that part
  const bufferLength = audioBuffer.length
  const zoomLength = bufferLength / zoom
  const start = Math.max(0, bufferLength * pos - zoomLength / 2)
  const end = Math.min(bufferLength, start + zoomLength)
  const rawAudioData = audioBuffer.getChannelData(0).slice(start, end)

  // process chunks corresponding to 1 pixel width
  const chunkSize = Math.max(1, Math.floor(rawAudioData.length / width))
  for (let x = 0; x < width; x++) {
    const start = x * chunkSize
    const end = start + chunkSize
    const chunk = rawAudioData.slice(start, end)
    // calculate the total positive and negative area
    let positive = 0
    let negative = 0
    chunk.forEach(val =>
      val > 0 && (positive += val) || val < 0 && (negative += val)
    )
    // make it a mean (this part makes zoomed-out audio smaller, needs improvement)
    negative /= chunk.length
    positive /= chunk.length
    // calculate the amplitude of the wave
    const chunkAmp = -(negative - positive)
    // draw the bar corresponding to this pixel
    canvasCtx.fillRect(
      x,
      height / 2 - positive * height,
      1,
      Math.max(1, chunkAmp * height)
    )
  }
}
To use it:

async function decodeAndDisplayAudio(audioData) {
  const source = audioCtx.createBufferSource()
  source.buffer = await audioCtx.decodeAudioData(audioData)
  drawWaveform(source.buffer, canvas, 0.5, 1)
  // change position (0 = start -> 0.5 = middle -> 1 = end)
  // and zoom (0.5 = full -> 400 = zoomed) as you wish
}

// audioData comes raw from the file (the server sends it in my case)
decodeAndDisplayAudio(audioData)

Blending two ImageData into one ImageData with an offset in Javascript

I'm trying to blend two ImageData objects into a single object in order to obtain a result similar to the pictures shown in this link.
The following is the JavaScript code that creates the two ImageData objects:
var redImage = copy.getImageData((SCREEN_WIDTH - VIDEO_WIDTH) / 2, (SCREEN_HEIGHT - VIDEO_HEIGHT) / 2, VIDEO_WIDTH, VIDEO_HEIGHT);
var blueImage = copy.getImageData((SCREEN_WIDTH - VIDEO_WIDTH) / 2, (SCREEN_HEIGHT - VIDEO_HEIGHT) / 2, VIDEO_WIDTH, VIDEO_HEIGHT);
var redData = redImage.data;
var blueData = blueImage.data;

// Colorize red
for (var i = 0; i < redData.length; i += 4) {
  redData[i] -= (redData[i] - 255);
}
redImage.data = redData;
// Draw the pixels onto the visible canvas
disp.putImageData(redImage, (SCREEN_WIDTH - VIDEO_WIDTH) / 2 - 25, (SCREEN_HEIGHT - VIDEO_HEIGHT) / 2);

// Colorize cyan
for (var i = 1; i < blueData.length; i += 4) {
  blueData[i] -= (blueData[i] - 255);
  blueData[i + 1] -= (blueData[i + 1] - 255);
}
blueImage.data = blueData;
// Draw the pixels onto the visible canvas
disp.putImageData(blueImage, (SCREEN_WIDTH - VIDEO_WIDTH) / 2 + 25, (SCREEN_HEIGHT - VIDEO_HEIGHT) / 2);
How do I merge/blend the redData and blueData before putting them on the canvas?
The formula you can use to mix two images is fairly simple:
newPixel = imageMainPixel * mixFactor + imageSecPixel * (1 - mixFactor)
Example assuming both buffers are of equal length:
var mixFactor = 0.5; // main image is dominant
// we're using the red buffer as the main buffer for this example
for (var i = 0; i < redData.length; i += 4) {
  redData[i] = redData[i] * mixFactor + blueData[i] * (1 - mixFactor);
  redData[i + 1] = redData[i + 1] * mixFactor + blueData[i + 1] * (1 - mixFactor);
  redData[i + 2] = redData[i + 2] * mixFactor + blueData[i + 2] * (1 - mixFactor);
}
Now your red buffer contains the mixed image.
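Because redImage.data references the same buffer the loop just mutated, you can put the blended result straight back on the visible canvas, using the same coordinates as in the question:

// draw the blended pixels onto the visible canvas
disp.putImageData(redImage, (SCREEN_WIDTH - VIDEO_WIDTH) / 2, (SCREEN_HEIGHT - VIDEO_HEIGHT) / 2);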
To add an offset you can simply redraw the images with an offset value, for example:
var offset = 20; //pixels
copy.drawImage(originalImage, -offset, 0); // <--
var redImage = copy.getImageData( /*...*/ );
copy.drawImage(originalImage, offset, 0); // -->
var bluImage = copy.getImageData( /*...*/ );
If you have not only the ImageData objects but also the source canvas elements, you can use this method.
You can obtain base64-encoded image data by calling the canvas's toDataURL() method. Then you can create an Image element from that data and paste that image to the destination canvas via drawImage().
Example code:
function mergeImageData(callback, sources) {
  var canvas = document.createElement('canvas'),
      context,
      images = Array.prototype.slice.call(arguments, 1).map(function(canvas) {
        var img = new Image();
        img.onload = onLoad;
        img.src = canvas.toDataURL();
        return img;
      }),
      imgCounter = 0,
      widths = [],
      heights = [];

  function onLoad() {
    widths.push(this.width);
    heights.push(this.height);
    if (++imgCounter == images.length) {
      merge();
    }
  }

  function merge() {
    canvas.width = Math.max.apply(null, widths);
    canvas.height = Math.max.apply(null, heights);
    context = canvas.getContext('2d');
    images.forEach(function(img) {
      context.drawImage(img, 0, 0, img.width, img.height);
    });
    callback(context.getImageData(0, 0, canvas.width, canvas.height));
  }
}
What about functions for setting the 3D transmission format - from full side-by-side to anaglyph, alternating rows, alternating columns, checkerboard, original side-by-side, and 2D from 3D?

HTML5 canvas putImageData seems to mess with pixels not getting expected results

I am trying to draw an isometric square with some custom code that builds the pixel data up and then puts it on a canvas with putImageData.
But I'm not getting the expected results; after a quick look through the raw pixel data of the canvas, it seems all the pixel data I built up is getting messed with.
What I want from the code below is a red diamond on a black background. Where am I going wrong?
var Drawing = {};

Drawing.DrawIsoMetricSquare = function(cRenderContext, x, y, iWidth, cColor) {
  var iHeight = iWidth / 2;
  var iYPos = Math.floor(iHeight / 2) + 1;
  var iXPos = 0;
  var iRenderGirth = 1;
  cPixelData = cRenderContext.createImageData(iWidth, iHeight);
  var bExpand = true;
  while (iXPos != iWidth) {
    var iCurrentRenderGirth = 0;
    while (iCurrentRenderGirth != iRenderGirth) {
      var iY = iYPos + iCurrentRenderGirth;
      // Draw first pixel then second
      Drawing.ColorPixelAtPos(cPixelData.data, iXPos, iY, iWidth, cColor);
      Drawing.ColorPixelAtPos(cPixelData.data, iXPos + 1, iY, iWidth, cColor);
      iCurrentRenderGirth++;
    }
    // Move to next render start
    iYPos = bExpand ? (iYPos - 1) : (iYPos + 1);
    iXPos += 2;
    iRenderGirth = bExpand ? (iRenderGirth + 2) : (iRenderGirth - 2);
    bExpand &= iRenderGirth < iHeight;
  }
  cRenderContext.putImageData(cPixelData, x, y);
};

Drawing.XYPosToPixelPos = function(x, y, iWidth) {
  return (x + y * iWidth) * 4;
};

Drawing.ColorPixelAtPos = function(cPixelData, x, y, iWidth, cColor) {
  var iPixPos = Drawing.XYPosToPixelPos(x, y, iWidth);
  cPixelData[iPixPos++] = cColor.r;
  cPixelData[iPixPos++] = cColor.g;
  cPixelData[iPixPos++] = cColor.b;
  cPixelData[iPixPos] = 1; // Fixed alpha for now
};

var eCanvas = $("<canvas></canvas>");
eCanvas[0].width = 50;
eCanvas[0].height = 50;
$("#render").append(eCanvas);
var cRenderContext = eCanvas[0].getContext('2d');
cRenderContext.fillStyle = "rgba(1, 1, 1, 1)";
cRenderContext.fillRect(0, 0, 50, 50);
Drawing.DrawIsoMetricSquare(cRenderContext, 0, 0, 42, {
  r: 255,
  g: 0,
  b: 0
});
JSFiddle example here
The problems were:
a math error, which you already fixed;
RGBA values go from 0..255, not 0..1;
you need to fill your pixel data with opaque black pixels (createImageData() gives you fully transparent pixels by default).
I added a few lines of code and made a new fiddle for you.
http://jsfiddle.net/bsssq/1/
Now you should see the red diamond on the black square.
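The linked fiddle isn't reproduced here, but a sketch of the last two fixes (my reconstruction, not the fiddle's exact code) looks like this:

// 1. createImageData() starts fully transparent (all zeros), so pre-fill
//    the pixel data with opaque black before drawing the diamond:
cPixelData = cRenderContext.createImageData(iWidth, iHeight);
for (var i = 3; i < cPixelData.data.length; i += 4) {
  cPixelData.data[i] = 255; // alpha only; r, g, b stay 0 (black)
}

// 2. in ColorPixelAtPos, alpha runs 0..255, not 0..1:
cPixelData[iPixPos] = 255; // fully opaque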
