Below is my code
var uploadIDImage = {
    IDPos: {},
    IDNeg: {}
};

var FR = new FileReader();
FR.onload = function(e) {
    console.log("123");
    console.log(e);
    $("#UploadIDPos img").remove();
    $("#UploadIDPos i").hide();
    $('#UploadIDPos').prepend('<img id="IDPosImg" style="width: 140px; height: 80px;"/>');
    var img = document.getElementById('IDPosImg');
    img.src = FR.result;
    uploadIDImage.IDPos.Files = FR.result.split(",")[1];
    console.log("11111");
};

// (presumably inside the file input's change handler, where e is the change event)
if (e.target.files[0]) {
    FR.readAsDataURL(e.target.files[0]);
    if (originalIDPosSize === undefined) {
        var size = Math.round(e.target.files[0].size / 1024 / 1024);
        originalIDPosSize = size;
    } else {
        totalSize = totalSize + originalIDPosSize;
        var size = Math.round(e.target.files[0].size / 1024 / 1024);
    }
    var remainSize = totalSize - size;
    console.log("Remain size : " + remainSize);
    $("#remain-size").text(totalSize - size);
    totalSize = remainSize;
}

console.log("22222");
console.log(uploadIDImage.IDPos.Files);
What I get in the console is "22222" and undefined printed first, and then "11111".
Why is "11111" not printed first?
When you do
FR.onload = function(e) {... }
you are setting a callback on the FileReader which is called when the reading operation has successfully completed.
Now your script proceeds and runs console.log("22222");.
After a while the read completes, the callback is invoked, and you see "11111".
Your section of code FR.onload = function(e) { ... } just defines a handler for the FR object. FileReader methods like readAsDataURL() are asynchronous: your program continues past the FR.readAsDataURL(...) statement without waiting for the read.
Later, when the file reading is done, the function you assigned to FR.onload runs. Because that callback is queued on the event loop, it can only run after the current script has finished, so your console.log("22222"); statement always executes first.
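If you need to run code only after the read has finished, put that code inside the onload handler, or wrap the FileReader in a Promise. A minimal sketch, assuming nothing beyond the standard FileReader API (readFileAsDataURL is just an illustrative helper name):

function readFileAsDataURL(file) {
    return new Promise(function(resolve, reject) {
        var reader = new FileReader();
        reader.onload = function() { resolve(reader.result); }; // fires after the read completes
        reader.onerror = function() { reject(reader.error); };
        reader.readAsDataURL(file);
    });
}

// Usage: everything that depends on the result goes in the then() callback.
readFileAsDataURL(someFile).then(function(dataURL) {
    uploadIDImage.IDPos.Files = dataURL.split(",")[1];
    console.log("11111"); // guaranteed to run before anything chained after it
});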
I have a long file I need to parse. Because it's very long, I need to do it chunk by chunk. I tried this:
function parseFile(file) {
    var chunkSize = 2000;
    var fileSize = (file.size - 1);
    var foo = function(e) {
        console.log(e.target.result);
    };
    for (var i = 0; i < fileSize; i += chunkSize) {
        (function(fil, start) {
            var reader = new FileReader();
            var blob = fil.slice(start, chunkSize + 1);
            reader.onload = foo;
            reader.readAsText(blob);
        })(file, i);
    }
}
After running it I see only the first chunk in the console. If I change console.log to a jQuery append to some div, I see only the first chunk in that div. What about the other chunks? How can I make it work?
The FileReader API is asynchronous, so you should chain the reads: read one block, and kick off the next read from the onload callback. A for loop wouldn't do the trick, since it wouldn't wait for each read to complete before reading the next chunk.
Here's a working approach.
function parseFile(file, callback) {
    var fileSize = file.size;
    var chunkSize = 64 * 1024; // bytes
    var offset = 0;
    var chunkReaderBlock = null;

    var readEventHandler = function(evt) {
        if (evt.target.error == null) {
            // note: result.length counts UTF-16 characters, not bytes,
            // so the offset can drift on multi-byte text
            offset += evt.target.result.length;
            callback(evt.target.result); // callback for handling read chunk
        } else {
            console.log("Read error: " + evt.target.error);
            return;
        }
        if (offset >= fileSize) {
            console.log("Done reading file");
            return;
        }

        // off to the next chunk
        chunkReaderBlock(offset, chunkSize, file);
    }

    chunkReaderBlock = function(_offset, length, _file) {
        var r = new FileReader();
        var blob = _file.slice(_offset, length + _offset);
        r.onload = readEventHandler;
        r.readAsText(blob);
    }

    // now let's start the read with the first block
    chunkReaderBlock(offset, chunkSize, file);
}
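For instance, wiring it to a file input (a usage sketch; the fileInput id is an assumed element, not part of the answer above):

document.getElementById('fileInput').addEventListener('change', function(e) {
    parseFile(e.target.files[0], function(chunkText) {
        console.log('got chunk of ' + chunkText.length + ' characters');
    });
});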
You can take advantage of Response (part of the Fetch API) to convert most things into anything else (blob, text, json) and also to get a ReadableStream that can help you read the blob in chunks 👍
var dest = new WritableStream({
    write (str) {
        console.log(str)
    }
})

var blob = new Blob(['bloby']);

(blob.stream ? blob.stream() : new Response(blob).body)
    // Decode the binary-encoded response to string
    .pipeThrough(new TextDecoderStream())
    .pipeTo(dest)
    .then(() => {
        console.log('done')
    })
Old answer (from before WritableStream's pipeTo and pipeThrough were implemented)
I came up with an interesting idea that is probably very fast, since it converts the blob to a ReadableByteStreamReader. It's probably much easier too, since you don't need to handle things like chunk size and offset and then do it all recursively in a loop.
function streamBlob(blob) {
    const reader = new Response(blob).body.getReader()
    const pump = reader => reader.read()
        .then(({ value, done }) => {
            if (done) return
            // uint8array chunk (use TextDecoder to read as text)
            console.log(value)
            return pump(reader)
        })
    return pump(reader)
}

streamBlob(new Blob(['bloby'])).then(() => {
    console.log('done')
})
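If you want text instead of Uint8Array chunks, a TextDecoder can decode safely across chunk boundaries when called with { stream: true }. A small sketch along the same lines as the code above:

function streamBlobAsText(blob) {
    const decoder = new TextDecoder()
    const reader = new Response(blob).body.getReader()
    const pump = () => reader.read().then(({ value, done }) => {
        if (done) return
        // stream: true holds back partial multi-byte sequences for the next chunk
        console.log(decoder.decode(value, { stream: true }))
        return pump()
    })
    return pump()
}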
The second argument of slice is actually the end byte. Your code should look something like:
function parseFile(file) {
    var chunkSize = 2000;
    var fileSize = (file.size - 1);
    var foo = function(e) {
        console.log(e.target.result);
    };
    for (var i = 0; i < fileSize; i += chunkSize) {
        (function(fil, start) {
            var reader = new FileReader();
            var blob = fil.slice(start, chunkSize + start);
            reader.onload = foo;
            reader.readAsText(blob);
        })(file, i);
    }
}
Or you can use this BlobReader for an easier interface:
BlobReader(blob)
    .readText(function (text) {
        console.log('The text in the blob is', text);
    });
More information:
README.md
Docs
Revamped @alediaferia's answer as a class (TypeScript version here), returning the result in a promise. Brave coders would even have wrapped it into an async iterator…
class FileStreamer {
    constructor(file) {
        this.file = file;
        this.offset = 0;
        this.defaultChunkSize = 64 * 1024; // bytes
        this.rewind();
    }

    rewind() {
        this.offset = 0;
    }

    isEndOfFile() {
        return this.offset >= this.getFileSize();
    }

    readBlockAsText(length = this.defaultChunkSize) {
        const fileReader = new FileReader();
        const blob = this.file.slice(this.offset, this.offset + length);

        return new Promise((resolve, reject) => {
            fileReader.onloadend = (event) => {
                const target = event.target;
                if (target.error == null) {
                    const result = target.result;
                    // as above: result.length counts characters, not bytes
                    this.offset += result.length;
                    this.testEndOfFile();
                    resolve(result);
                } else {
                    reject(target.error);
                }
            };
            fileReader.readAsText(blob);
        });
    }

    testEndOfFile() {
        if (this.isEndOfFile()) {
            console.log('Done reading file');
        }
    }

    getFileSize() {
        return this.file.size;
    }
}
Example printing a whole file in the console (within an async context):

const fileStreamer = new FileStreamer(aFile);

while (!fileStreamer.isEndOfFile()) {
    const data = await fileStreamer.readBlockAsText();
    console.log(data);
}
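For the async-iterator wrapping teased above, a minimal sketch (my addition, not part of the original class) could be:

async function* streamFileAsText(file) {
    const fileStreamer = new FileStreamer(file);
    while (!fileStreamer.isEndOfFile()) {
        yield await fileStreamer.readBlockAsText();
    }
}

// Usage, again within an async context:
for await (const chunk of streamFileAsText(aFile)) {
    console.log(chunk);
}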
Parsing the large file into small chunks using a simple method:
// Parse large file into small chunks
var parseFile = function (file) {
    var chunkSize = 1024 * 1024 * 16; // 16MB chunk size
    var fileSize = file.size;
    var currentChunk = 1;
    var totalChunks = Math.ceil(fileSize / chunkSize); // Math.ceil takes a single argument

    while (currentChunk <= totalChunks) {
        var offset = (currentChunk - 1) * chunkSize;
        var currentFilePart = file.slice(offset, (offset + chunkSize));
        console.log('Current chunk number is ', currentChunk);
        console.log('Current chunk data', currentFilePart);
        currentChunk++;
    }
};
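Note that file.slice() only creates Blob references; nothing is actually read from disk until each part is handed to a reader. A hedged sketch of reading one of the slices produced above:

var reader = new FileReader();
reader.onload = function (evt) {
    console.log('chunk contents:', evt.target.result);
};
reader.readAsText(currentFilePart); // or readAsArrayBuffer for binary data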
var images;

function preloadTrial(actor, event) {
    return new Promise(function(res) {
        var i = 0;
        images = [];
        var handler = function(resolve, reject) {
            var img = new Image;
            var source = '/static/videos/' + actor + '/' + event + '/' + i + '.png';
            img.onload = function() {
                i++;
                resolve(img);
            }
            img.onerror = function() {
                reject()
            }
            img.src = source;
        }
        var _catch = function() { res(images) }
        var operate = function(value) {
            if (value) images.push(value);
            new Promise(handler).then(operate).catch(_catch);
        }
        operate();
    })
}

function playSequence(time) {
    var delta = (time - currentTime) / 1000;
    currentFrame += (delta * FPS);
    var frameNum = Math.floor(currentFrame);
    if (frameNum >= numFramesPlay) {
        currentFrame = frameNum = 0;
        return;
    } else {
        requestAnimationFrame(playSequence);
        currentImage.src = images[frameNum];
        currentTime = time;
        console.log("display" + currentImage.src);
    }
};

function rightNow() {
    if (window['performance'] && window['performance']['now']) {
        return window['performance']['now']();
    } else {
        return +(new Date());
    }
};

currentImage = document.getElementById("instructionImage");

// Then use like this
preloadTrial('examples', 'ex1').then(function(value) {
    playSequence(currentTime = rightNow());
});
I wrote a JavaScript function that is supposed to load a directory full of numbered .png files. However, I do not know the number of items inside the directory beforehand, so I made a function that keeps storing images until the source gives me an error. But when I run the code, the program does not even enter the .onload and .onerror functions, resulting in an infinite loop.
Edit: This is my current code. It appears that the images are correctly assigned and pushed into the images array. But when I attempt to load one into an img tag (currentImage.src) and run playSequence, it does not display.
You could use promises to handle the pre-loading of the images.
Chain the resolves on the onload event, and reject in onerror to end the cycle.
function preloadImages(baseurl, extension, starter) {
    return new Promise(function(res) {
        var i = starter;
        var images = [];
        // Inner promise handler
        var handler = function(resolve, reject) {
            var img = new Image;
            var source = baseurl + i + '.' + extension;
            img.onload = function() {
                i++;
                resolve(img);
            }
            img.onerror = function() {
                reject('Rejected after ' + i + ' frames.');
            }
            img.src = source;
        }
        // Once you catch the inner promise you resolve the outer one.
        var _catch = function() { res(images) }
        var operate = function(value) {
            if (value) images.push(value);
            // Inner recursive promises chain.
            // Stop with the catch resolving the outer promise.
            new Promise(handler).then(operate).catch(_catch);
        }
        operate();
    })
}
To simulate a video player, you can draw on an HTML5 canvas.
function play(canvas, imagelist, refreshRate, frameWidth, frameHeight) {
    // Since we're using promises, let's promisify the animation too.
    return new Promise(function(resolve) {
        // May need to adjust the framerate.
        // requestAnimationFrame is about 60/120 fps depending on the browser
        // and the refresh rate of the display devices.
        var ctx = canvas.getContext('2d');
        var ts, i = 0, delay = 1000 / refreshRate;
        var roll = function(timestamp) {
            if (!ts || timestamp - ts >= delay) {
                // Since the image was prefetched you need to specify the rect.
                ctx.drawImage(imagelist[i], 0, 0, frameWidth, frameHeight);
                i++;
                ts = timestamp;
            }
            if (i < imagelist.length)
                requestAnimationFrame(roll);
            else
                resolve(i);
        }
        // Kick off via requestAnimationFrame so roll() receives a timestamp.
        requestAnimationFrame(roll);
    })
}
To test I used ffmpeg to cut a video with the following command:
ffmpeg -i input.mp4 -ss 00:00:14.435 -vframes 100 %d.png
And I used devd.io to quickly create a static folder containing the script and images and a basic index.html.
imageroller.js - with the above code.
var preload = preloadImages('/static/videos/examples/testvid/', 'png', 1);
preload.then(function(value) {
    console.log('starting play');
    var canvas = document.getElementById("canvas");
    play(canvas, value, 24, 720, 400) // ~480p 24fps
        .then(function(frame) {
            console.log('roll finished after ' + frame + ' frames.')
        })
});
While the preloading of the images was pretty slow, if you keep the number of frames to an acceptable level you can make some nice loops.
I haven't tested the snippet below (and there are probably cleaner solutions), but the idea should be correct. Basically we have a recursive function loadImages(), and we pass in the images array and a callback function. We wait for the current image to load; if it loads, we push it into images and call loadImages() again. If it throws an error, we know we are finished loading, so we call our callback function. Let me know if you have any questions.
function preloadTrial(actor, event) {
    let images = [];
    loadImages(images, actor, event, function () {
        // code to run when done loading
    });
}

function loadImages(images, actor, event, callback) {
    let img = new Image();
    let i = images.length;
    let source = '/static/videos/' + actor + '/' + event + '/' + i + '.png';
    img.onload = function() {
        images.push(img);
        return loadImages(images, actor, event, callback);
    }
    img.onerror = function() {
        return callback(images);
    }
    img.src = source;
}
The optimal solution would be to provide a server-side API that tells you beforehand how many images there are in the directories.
If that is not possible, you should load the images one after the other to prevent excess requests to the server. In that case I would put the image-loading code in a separate function and call it when the previous image has loaded successfully, like so:
function loadImage(actor, event, i, loadCallback, errorCallback) {
    var image = new Image();
    var source = '/static/videos/' + actor + '/' + event + '/' + i + '.png';
    image.onload = loadCallback;
    image.onerror = errorCallback;
    image.src = source;
    return image;
}
and then call this function once to start, and again from the loadCallback to fetch the next image, as sketched below.
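A minimal sketch of that chaining (loadAllImages and done are illustrative names, not from the answer above):

function loadAllImages(actor, event, done) {
    var images = [];
    var loadNext = function() {
        loadImage(actor, event, images.length, function(e) {
            images.push(e.target); // the Image element that just loaded
            loadNext();            // previous image succeeded, fetch the next one
        }, function() {
            done(images);          // first failure means we have run out of files
        });
    };
    loadNext();
}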
I'm currently implementing an upload for files. Because I have to handle huge files here and there, I've started to slice files and send them in 1MB chunks. This works great as long as files are smaller than ~500MB; after that it seems memory isn't freed anymore, seemingly at random, and I can't figure out what I'm missing.
Prepare chunks
var sliceCount = 0;
var sendCount = 0;
var fileID = generateUUID();
var maxChunks = 0;
var userNotified = false;

function parseFile(file)
{
    var fileSize = file.size;
    var chunkSize = 1024 * 1024; // bytes (previously 64 * 1024)
    var offset = 0;
    var self = this; // we need a reference to the current object
    var chunkReaderBlock = null;
    var numberOfChunks = fileSize / chunkSize;
    maxChunks = Math.ceil(numberOfChunks);

    // gets called once a chunk has been read into memory
    var readEventHandler = function (evt)
    {
        if (evt.target.error == null) {
            offset += evt.target.result.byteLength;
            sendChunkAsBinary(evt.target.result);
        }
        else
        {
            console.log("Read error: " + evt.target.error);
            return;
        }
        if (offset >= fileSize) {
            console.log("Done reading file");
            return;
        }

        // off to the next chunk
        chunkReaderBlock(offset, chunkSize, file);
    }

    chunkReaderBlock = function (_offset, length, _file)
    {
        var r = new FileReader();
        var blob = _file.slice(_offset, length + _offset);
        sliceCount++;
        console.log("Slicecount: " + sliceCount);
        r.onload = readEventHandler;
        r.readAsArrayBuffer(blob);
        blob = null;
        r = null;
    }

    // now let's start the read with the first block
    chunkReaderBlock(offset, chunkSize, file);
}
Send Chunks
function sendChunkAsBinary(chunk)
{
    var progressbar = $("#progressbar"), bar = progressbar.find('.uk-progress-bar');

    // create XHR instance
    var xhr = new XMLHttpRequest();

    // send the file through POST
    xhr.open("POST", 'upload.php', true);

    var progressHandler = function (e)
    {
        // get percentage of how much of the current file has been sent
        var position = e.loaded || e.position;
        var total = e.total || e.totalSize;
        var percentage = Math.round((sendCount / maxChunks) * 100);

        // set bar width to keep track of progress
        bar.css("width", percentage + "%").text(percentage + "%");
    }

    // let's track upload progress
    var eventSource = xhr.upload || xhr;
    eventSource.addEventListener("progress", progressHandler);

    // state change observer - we need to know when and if the file was successfully uploaded
    xhr.onreadystatechange = function ()
    {
        if (xhr.readyState == 4)
        {
            if (xhr.status == 200)
            {
                eventSource.removeEventListener("progress", progressHandler);
                if (sendCount == maxChunks && !userNotified)
                {
                    userNotified = true;
                    notifyUserSuccess("Datei hochgeladen!"); // "File uploaded!"
                    setTimeout(function ()
                    {
                        progressbar.addClass("uk-invisible");
                        bar.css("width", "0%").text("0%");
                    }, 250);
                    updateDocList();
                }
            }
            else
            {
                notifyUser("Fehler beim hochladen der Datei!"); // "Error uploading the file!"
            }
        }
    };

    var blob;
    if (typeof window.Blob == "function") {
        blob = new Blob([chunk]);
    } else {
        var bb = new (window.MozBlobBuilder || window.WebKitBlobBuilder || window.BlobBuilder)();
        bb.append(chunk);
        blob = bb.getBlob();
    }
    sendCount++;

    var formData = new FormData();
    formData.append("chunkNumber", sendCount);
    formData.append("maxChunks", maxChunks);
    formData.append("fileID", fileID);
    formData.append("chunkpart", blob);
    xhr.send(formData);

    progressbar.removeClass("uk-invisible");
    console.log("Sendcount: " + sendCount);
}
If I attach the debugger in Visual Studio 2015, it takes a bit, but soon I get an OutOfMemoryException in the send function, at exactly this line: blob = new Blob([chunk]);. It's always the same line where the exception occurs.
As soon as the exception happens I get POST [...]/upload.php net::ERR_FILE_NOT_FOUND, yet I still receive the chunks in my PHP file.
Here's a timeline graph of my error.
What I don't understand: I'm not able to see increasing memory inside the Task Manager (a few MB, of course, but nothing close to the 16GB of RAM I have).
So can anyone tell me where this leak comes from? What am I missing?
I am using the HTML5 FileReader to read more than one file asynchronously.
I would like to keep track of the loading of individual files, as more than one file can be added at once.
Now I create a div with background 0% for each image, but I am not clear on how to pass this division's id or a reference into the onprogress event, so that I can track progress and update the div content dynamically.
In simple terms: how do I ensure that I am updating the correct progress control associated with each file when multiple files are uploaded simultaneously? I am not getting my JS right.
var up_file = document.getElementById('multiple_file');

if (up_file.files)
{
    for (var x = 0; x < up_file.files.length; x++)
    {
        //console.log(up_file.files.length);
        var file = up_file.files[x];

        var loadingDiv = document.createElement("div");
        var container = document.getElementById('loading_container');
        loadingDiv.className = "loading";
        loadingDiv.id = "loading_" + x;
        loadingDiv.innerHTML = '0%';
        container.appendChild(loadingDiv);

        var reader = new FileReader();
        reader.onprogress = function (evt) {
            if (evt.lengthComputable) {
                console.dir(evt);
                // evt.loaded and evt.total are ProgressEvent properties
                var loaded = (evt.loaded / evt.total);
                if (loaded < 1) {
                    console.log(loaded);
                }
            }
        }

        var img = document.createElement("img");
        img.className = "hide";
        reader.onload = (function(aimg) {
            return function(e) {
                LIB.addEvent(aimg, "load", function() {
                    var scale = 1;
                    scale = aimg.width / 200;
                    aimg.width = aimg.width / scale;
                    aimg.className = "show";
                }, false);
                aimg.src = e.target.result;
                var li = document.createElement("li");
                li.appendChild(aimg);
                document.getElementById('img_packet').appendChild(li);
            };
        })(img);
        reader.readAsDataURL(file);
    }
}
loadingDiv is still visible inside the onprogress function because a closure is formed. The problem is that it's in a loop, so by the time onprogress is called, loadingDiv will probably have been assigned a new value.
To get around this you can use an extra closure to take a copy of the current value of loadingDiv:
reader.onprogress = function(myloadingdiv) {
    return function(evt) {
        if (evt.lengthComputable)
            myloadingdiv.innerHTML = evt.loaded / evt.total * 100 + '%';
    };
}(loadingDiv);
In ECMAScript Fifth Edition, the bind() method does this for you more cleanly:
reader.onprogress = function(myloadingdiv, evt) {
    if (evt.lengthComputable)
        myloadingdiv.innerHTML = evt.loaded / evt.total * 100 + '%';
}.bind(null, loadingDiv); // bind loadingDiv as a leading argument; `this` is not used here
For browsers that don't support bind() yet, you can patch in an implementation thus:
if (!('bind' in Function.prototype)) {
    Function.prototype.bind = function(owner) {
        var that = this;
        if (arguments.length <= 1) {
            return function() {
                return that.apply(owner, arguments);
            };
        } else {
            var args = Array.prototype.slice.call(arguments, 1);
            return function() {
                return that.apply(owner, arguments.length === 0 ? args : args.concat(Array.prototype.slice.call(arguments)));
            };
        }
    };
}
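In modern browsers you can avoid the extra closure entirely: declaring the loop variables with let (or const) gives every iteration its own binding, so each onprogress handler captures its own div. A brief sketch, assuming the same loop as in the question:

for (let x = 0; x < up_file.files.length; x++) {
    const loadingDiv = document.createElement("div");
    // ... set up and append loadingDiv as above ...
    const reader = new FileReader();
    reader.onprogress = function(evt) {
        if (evt.lengthComputable)
            loadingDiv.innerHTML = evt.loaded / evt.total * 100 + '%'; // captures this iteration's div
    };
    reader.readAsDataURL(up_file.files[x]);
}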