I'm using the pixi.js render engine for my current JavaScript project. I'm loading a spritesheet defined in JSON using the AssetLoader. The problem is that I need to create sprites or MovieClips well after the AssetLoader's onComplete event has fired, but the texture cache seems not to be accessible outside that callback. Here is some code demonstrating the problem I'm coming across.
// Demonstration of the problem: frames only exist in PIXI's texture cache
// after the AssetLoader has fired onComplete.
var spriteSheet = [ "test.json" ];
// NOTE(review): `loader` is an implicit global here (no var) — confirm intentional.
loader = new PIXI.AssetLoader(spriteSheet); // only using the flappy bird sprite sheet as a test
loader.onComplete = OnAssetsLoaded;
loader.load(); // load() is asynchronous — it returns immediately
// Runs once the sprite sheet JSON and image have been parsed into the cache.
function OnAssetsLoaded() {
var sprite = PIXI.Sprite.fromFrame("someFrame.png"); //this works
}
// Executes synchronously, before load() has finished, so the frame
// is not yet in the texture cache at this point.
var sprite2 = PIXI.Sprite.fromFrame("someFrame2.png"); //This does not work, tells me "someFrame2" is not in the texture cache
The sprite sheet must be fully loaded before the images get stored in the cache. Once the Sprite sheet has loaded, those assets will exist in the cache until you delete them.
The reason your code above fails is because the line var sprite2... executes before the sprite sheet has finished loading.
This example will continuously add a new Sprite to the stage every second.
//build stage
var stage = new PIXI.Stage(0x66FF99);
var renderer = PIXI.autoDetectRenderer(400, 300);
document.body.appendChild(renderer.view);

//update renderer
requestAnimFrame(animate);
function animate() {
    requestAnimFrame(animate);
    renderer.render(stage);
}

//Flag will prevent Sprites from being created until the Sprite sheet is loaded.
var assetsReadyFlag = false;

//load sprite sheet
var loader = new PIXI.AssetLoader(["test.json"]);
loader.onComplete = function () {
    assetsReadyFlag = true;
};
loader.load();

//Add a new bird every second
setInterval(addBird, 1000);

//Creates one sprite from the cached "bird.png" frame and scatters it
//randomly across the viewport. No-op until the sheet has loaded.
function addBird() {
    //assets must be loaded before creating sprites
    if (!assetsReadyFlag) return;

    //create Sprite
    var bird = PIXI.Sprite.fromFrame("bird.png");
    bird.anchor.x = bird.anchor.y = 0.5;
    // FIX: the original used `stage.width` (which is 0 until the stage has
    // children) and an undefined variable `height`. Use the renderer's
    // viewport dimensions instead.
    bird.x = Math.random() * renderer.width;
    bird.y = Math.random() * renderer.height;
    stage.addChild(bird);
}
Related
I'm using Three.js in a website project for a construction company. They want some 360° photos (photospheres) that I made using my phone (Google pixel 5). They also want to show a 3D representation of one of their projects so using Three.js seems to be the best solution.
Here is what it looks like in Google Photos:
Google Photos screenshot
And what it looks like in Three.js:
Three.js screenshot
You can see the colors are really bad (contrast, white balance, idk) compared to the original version.
It's the first time I use Three.js so here is my code:
Scene:
// Custom-element lifecycle hook: builds the Three.js scene, camera, light,
// renderer and controls, then starts the render loop.
// NOTE(review): declared async but contains no await — verify no caller
// relies on the returned Promise before dropping the keyword.
async connectedCallback() {
// Scene
this.scene = new THREE.Scene();
// Background equirectangular texture (the 360° photosphere), if provided.
const background_img = this.getAttribute('background');
if (background_img) this.loadBackground(background_img);
// Camera — aspect ratio taken from this element's current layout size.
this.camera = new THREE.PerspectiveCamera(60, this.clientWidth / this.clientHeight, 1, 5000);
// Lights
// this.scene.add(new THREE.HemisphereLight(0xffeeb1, 0x080820, 0));
const spotLight = new THREE.SpotLight(0xffa95c, 5);
spotLight.position.set(20, 20, 20);
spotLight.castShadow = true;
spotLight.shadow.bias = -0.0001; // small negative bias to reduce shadow acne
spotLight.shadow.mapSize.width = 1024 * 4;
spotLight.shadow.mapSize.height = 1024 * 4;
this.scene.add(spotLight);
// Renderer. NOTE: tone mapping + exposure change the final color output —
// this is part of why the photosphere looks different from the source photo.
this.renderer = new THREE.WebGLRenderer({ antialias: true });
this.renderer.toneMapping = THREE.ReinhardToneMapping;
this.renderer.toneMappingExposure = 2.3;
this.renderer.shadowMap.enabled = true;
this.renderer.setPixelRatio(devicePixelRatio);
this.renderer.setSize(this.clientWidth, this.clientHeight);
this.appendChild(this.renderer.domElement);
// Orbit controls
this.controls = new OrbitControls(this.camera, this.renderer.domElement);
this.controls.autoRotate = true;
// Resize event
addEventListener('resize', e => this.resize());
// Load model
const url = this.getAttribute('model');
if (url) this.loadModel(url);
// Animate
this.resize();
this.animate();
// Resize again in .1s — layout may not be final on the first paint.
setTimeout(() => this.resize(), 100);
// Init observer: toggles the 'visible' class when ≥10% of the element is on screen.
new IntersectionObserver(entries => this.classList.toggle('visible', entries[0].isIntersecting), { threshold: 0.1 }).observe(this);
}
Background (photosphere):
// Loads an equirectangular photo and installs it as the scene background
// (the photosphere).
loadBackground(src) {
const equirectangular = new THREE.TextureLoader().load(src);
equirectangular.mapping = THREE.EquirectangularReflectionMapping;
// Lines Github Copilot suggested; removing them one at a time (see below)
// showed the format override is what breaks the colors.
equirectangular.magFilter = THREE.LinearFilter;
equirectangular.minFilter = THREE.LinearMipMapLinearFilter;
// BUG (identified later in this thread): forcing RGBFormat on a loaded
// RGBA texture corrupts the color output — this is the problematic line.
equirectangular.format = THREE.RGBFormat;
// sRGBEncoding is correct for photos authored in sRGB.
equirectangular.encoding = THREE.sRGBEncoding;
equirectangular.anisotropy = 16;
this.scene.background = equirectangular;
}
As Marquizzo said in a comment:
You’re changing the renderer’s tone mapping and the exposure. It’s normal for the results to be different when you make modifications to the color output.
I kept saying the problem is not the lines Github Copilot suggested because I got the same result when removing those but I tried removing one line at a time and it turns out that this line was the problem:
equirectangular.format = THREE.RGBFormat;
Now my result is a bit dark compared to Google Photos's one but the colors are finally accurate! I still have a lot of things to learn haha.
Three.js screenshot after removing the problematic line
Thank you Marquizzo!
So I was trying to extract the pixels from my PIXI.js canvas, and I inspected them, and while the canvas on the screen had an image on it, the pixels extracted were completely black...
So then I investigated, and here is what I found (I need help):
Background info:
setup:
// Grab the existing #canvas element (jQuery wrapper plus the raw DOM node)
// and hand the DOM node to PIXI as its render target.
this.canvas = $('#canvas');
this.canvasDOM = this.canvas[0];
// Created at 0x0; real dimensions are applied later via updateDimensions().
this.renderer = PIXI.autoDetectRenderer(0, 0, {
view: this.canvasDOM,
resolution: 1
});
Getting Image:
chooseImage(src) {
const self = this;
const image = new Image();
image.src = src;
image.onload = function() {
const base = new PIXI.BaseTexture(image);
const texture = new PIXI.Texture(base);
const sprite = new PIXI.Sprite(texture);
sprite.position.set(0, 0);
self.currentImage = sprite;
self.render();
};
}
rendering image:
render() {
if (this.currentImage) {
this.updateDimensions();
const stage = new PIXI.Container();
stage.addChild(this.currentImage);
stage.filters = this.gatherFilters();
const { width, height } = this.currentImage;
stage.filterArea = new PIXI.Rectangle(0, 0, width, height);
this.renderer.render(stage);
}
}
ok, so you can ignore the code which is before the rendering function, but in particular, the third last line of the rendering function is:
this.renderer.render(stage);
Now, this works perfectly fine! the image pops up and is a bright jellyfish on the canvas, but there is one small issue:
If I add this to the end of the render() function:
// Same render() as above, plus extraction of the freshly rendered frame.
render() {
if (this.currentImage) {
this.updateDimensions();
const stage = new PIXI.Container();
stage.addChild(this.currentImage);
stage.filters = this.gatherFilters();
const { width, height } = this.currentImage;
stage.filterArea = new PIXI.Rectangle(0, 0, width, height);
this.renderer.render(stage);
// Extraction works HERE because the WebGL back buffer still holds the
// frame that render() just drew — see the discussion of the delay below.
const actualCanvas = this.renderer.extract.canvas; // the actual canvas on the screen
console.log(actualCanvas.toDataURL());
}
}
It returns a long string, and if I replace
console.log(actualCanvas.toDataURL());
with: (the download lib)
download(actualCanvas.toDataURL('image/png'), 'image.png', 'image/png');
It downloads the jellyfish image!
now, in another function, I was going to have the image downloaded as either a png or jpg (and the array of possible download types was in another class and decided dynamically, so I couldn't simply cache the pixels after rendering)
To show you my issue, I will change the above code to:
setTimeout(() => download(actualCanvas.toDataURL('image/png'), 'image.png', 'image/png'), 1 /* NOTE: this is 1 ms, not one second — setTimeout's delay is in milliseconds */);
Now the image is blank — black — unlike before, when it worked.
the only thing that changed was the delay I added.
I further inspected this, and I realised that if I did
setTimeout(() => console.log(this.renderer.extract.pixels()), 1); // extract the pixels after some delay for testing
then they were all black, but if i removed the delay, it worked again
again, in my case, I can't simply cache the image for my own reasons (don't ask)
So I was wondering whether there is a workaround so that, after some delay, the pixels I extract are not just black and work like they do without the delay.
Any help is greatly appreciated!
It seems like some kind of browser protection to me. Maybe protection against non-event-driven code accessing certain resources, or maybe protection applied to non-HTTPS web apps (did you try your app over HTTPS, even without a valid certificate?), or maybe it could even be some kind of cross-domain protection (I doubt it, but it is always on the to-check list). Maybe you can deploy a fiddle or similar to test. Hope it helps!
How do we know when this process has finished?
material.map = new THREE.Texture( canvas );
material.map.needsUpdate = true;
Because if it has not finished, the snapshot taken here is sometimes a completely black result:
var snapshotData = renderer.domElement.toDataURL(strMime);
What callback can be used to know the material was successfully changed?
Thank you :)
You can try putting it inside the onload function, which runs only once the texture has loaded:
// Assign the texture only after it has finished loading, using
// TextureLoader's onLoad callback.
var textureLoader = new THREE.TextureLoader();
var texture = textureLoader.load(texturePath, function () {
    // The code below executes only after the texture is successfully loaded.
    mesh.material.map = texture;
    // FIX: needsUpdate belongs on the material (or the texture), not on the
    // mesh — `mesh.needsUpdate = true` has no effect.
    mesh.material.needsUpdate = true;
    // write the code to be executed after the texture is mapped
});
I hope this is helpful.
How can I draw a lot of images on a canvas?
I have a large array of image URLs and need to draw all of them. How do I do this with good performance?
My example code (jsfiddle: http://jsfiddle.net/6sunguw4/):
$(document).ready(function () {
    var bgCanvas = document.getElementById("bgCanvas");
    var bgCtx = bgCanvas.getContext("2d");
    bgCanvas.width = window.innerWidth;
    bgCanvas.height = window.innerHeight + 200;

    // FIX: use a plain 0-indexed array literal (the original filled
    // indices 1..3, leaving a hole at 0) and load in a loop instead of
    // hand-writing one Image/onload pair per URL.
    var urls = [
        'https://developer.chrome.com/webstore/images/calendar.png',
        'http://www.w3schools.com/html/html5.gif',
        'http://www.linosartele.lt/wp-content/uploads/2014/08/images-9.jpg'
    ];

    urls.forEach(function (url, i) {
        var img = new Image();
        img.onload = function () {
            // Draw each image as soon as it finishes loading, 100px apart —
            // same positions/sizes as the original hand-rolled version.
            bgCtx.drawImage(img, i * 100, 0, 100, 100);
        };
        img.src = url;
    });
});
Here's code to load all images from the URLs you put into your array without having to hand code 2000 times new Image/.onload/.drawImage (I call the array imageURLs in the example code below):
// image loader
// put the paths to your images in imageURLs[]
var imageURLs = [];
// push all your image urls!
imageURLs.push('https://developer.chrome.com/webstore/images/calendar.png');
imageURLs.push('http://www.w3schools.com/html/html5.gif');
imageURLs.push('http://www.linosartele.lt/wp-content/uploads/2014/08/images-9.jpg');

// the loaded images will be placed in imgs[]
var imgs = [];
// count of images that have settled (loaded OR failed)
var imagesOK = 0;

loadAllImages(start);

// Starts loading every URL in imageURLs[] and invokes `callback` exactly
// once, after ALL images have settled.
function loadAllImages(callback) {
    // Invoke the callback once every image has either loaded or errored.
    function imageSettled() {
        imagesOK++;
        if (imagesOK >= imageURLs.length) {
            callback();
        }
    }
    for (var i = 0; i < imageURLs.length; i++) {
        var img = new Image();
        imgs.push(img);
        img.onload = imageSettled;
        // FIX: the original never advanced the counter on error, so a single
        // failed image meant `callback` never fired. Count failures too.
        img.onerror = function () {
            alert("image load failed");
            imageSettled();
        };
        // must be set BEFORE src so the request is made with CORS headers
        img.crossOrigin = "anonymous";
        img.src = imageURLs[i];
    }
}

function start() {
    // the imgs[] array now holds fully loaded images
    // the imgs[] are in the same order as imageURLs[]
    // NOTE(review): bgCtx is assumed to be defined by the surrounding page code.
    bgCtx.drawImage(imgs[0], 0, 0, 100, 100);
    bgCtx.drawImage(imgs[1], 100, 0, 100, 100);
    bgCtx.drawImage(imgs[2], 200, 0, 100, 100);
}
For better loading performance:
Browsers typically download only 6-8 files at a time from one domain. So your 2000 images would require 2000/8 == 250 separate sequential calls to your domain to load.
You can combine your 2000 into one or more spritesheets (preferably 6 or less spritesheets). That way the browser can download the 1-6 spritesheets containing your 2000 images in one trip.
You can use the extended version of context.drawImage to pull any desired image from the spritesheet. For example, assume an image you need is located at [200,200] on your spritesheet and is 100x100 in size. You can draw that image on your canvas like this:
bgCtx.drawImage(spritesheet,
    // FIX: the original was missing the comma after this row, which is a
    // syntax error (the comment hid it).
    200, 200, 100, 100, // clip the image from the spritesheet at [200,200] with size 100x100
    125, 250, 100, 100  // draw the clipped image on the canvas at [125,250]
);
There's not much you can do with the code itself. drawImage seems pretty optimized, and it's the raw amount of images what could slow things down.
One thing you can maybe do, depending on your goal, is to prepare composite images. For example, those 3 images could be easily converted into a single PNG image, and then it would require only one drawImage call. However, if you plan to shift their places or some effects, I'm afraid you're stuck with what you have.
I'm using three.js to create a minecraft texture editor, similar to this. I'm just trying to get the basic click-and-paint functionality down, but I can't seem to figure it out. I currently have textures for each face of each cube and apply them by making shader materials with the following functions.
// Builds a cube texture for the given body part from its six face images
// (img/<part>/<side>.png). When `update` is truthy, also flags the texture
// for re-upload to the GPU.
this.createBodyShaderTexture = function (part, update) {
    // FIX: these were all implicit globals in the original (no var), which
    // leaks state between calls and throws in strict mode.
    var sides = ['left', 'right', 'top', 'bottom', 'front', 'back'];
    var images = [];
    for (var i = 0; i < sides.length; i++) {
        images[i] = 'img/' + part + '/' + sides[i] + '.png';
    }
    // FIX: loadTextureCube is a plain function, not a constructor — calling
    // it with `new` was incorrect (it only worked because a constructor call
    // returns the function's own object result).
    var texCube = THREE.ImageUtils.loadTextureCube(images);
    texCube.magFilter = THREE.NearestFilter;
    texCube.minFilter = THREE.LinearMipMapLinearFilter;
    if (update) {
        texCube.needsUpdate = true;
        console.log(texCube);
    }
    return texCube;
};
// Creates a ShaderMaterial based on three.js's built-in 'cube' shader, with
// its tCube uniform pointed at this part's cube texture and the shader
// sources taken from the page's <script> elements.
this.createBodyShaderMaterial = function (part, update) {
    // FIX: `shader` and `material` were implicit globals in the original.
    // NOTE(review): THREE.ShaderLib['cube'] is a SHARED object — mutating its
    // uniforms/sources here affects every other user of that shader; consider
    // THREE.UniformsUtils.clone(shader.uniforms) for per-material uniforms.
    var shader = THREE.ShaderLib['cube'];
    shader.uniforms['tCube'].value = this.createBodyShaderTexture(part, update);
    shader.fragmentShader = document.getElementById("fshader").innerHTML;
    shader.vertexShader = document.getElementById("vshader").innerHTML;
    var material = new THREE.ShaderMaterial({
        fragmentShader: shader.fragmentShader,
        vertexShader: shader.vertexShader,
        uniforms: shader.uniforms
    });
    return material;
};
// Click handler: swaps the head mesh for a brand-new one whose material is
// rebuilt from the (just-modified) texture files.
SkinApp.prototype.onClick =
function(event)
{
event.preventDefault();
this.change(); //makes texture file a simple red square for testing
// Rebuild-and-replace approach: removes the old head and constructs a new
// mesh/material rather than updating the existing texture in place.
this.avatar.remove(this.HEAD);
this.HEAD = new THREE.Mesh(new THREE.CubeGeometry(8, 8, 8), this.createBodyShaderMaterial('head', false));
this.HEAD.position.y = 10;
this.avatar.add(this.HEAD);
// NOTE(review): the material was just created from image FILES — the browser
// may serve the unchanged cached image, which would explain why the edit
// only appears after a page refresh. Confirm cache behavior.
this.HEAD.material.needsUpdate = true;
this.HEAD.dynamic = true;
}
Then, when the user clicks anywhere on the mesh, the texture file itself is updated using canvas. The update occurs, but the change doesn't show up in the browser unless the page is refreshed. I've found plenty of examples of how to change the texture image to a new file, but not of how to show changes to the same texture file during runtime, or even whether it's possible. Is this possible, and if not, what alternatives are there?
When you update a texture — whether it's based on a canvas, a video, or loaded externally — you need to set the following property on the texture to true:
If an object is created like this:
// A canvas-backed texture: draw onto `canvas` via its 2D context, then flag
// the texture for re-upload so three.js pushes the new pixels to the GPU.
var canvas = document.createElement("canvas");
var canvasMap = new THREE.Texture(canvas)
var mat = new THREE.MeshPhongMaterial();
mat.map = canvasMap;
// NOTE(review): `geom` is assumed to be defined by surrounding code.
var mesh = new THREE.Mesh(geom,mat);
After the texture has been changed, set the following to true:
mesh.material.map.needsUpdate = true; // FIX: the object created above is named `mesh` — the original said `cube`, which is not defined in this snippet
And next time you render the scene it'll show the new texture.
Here is all the basics of what you have to know about "updating" stuff in three.js: https://threejs.org/docs/#manual/introduction/How-to-update-things