I'm currently reading CSV headers using the following JavaScript code:
var r = new FileReader();
r.onload = function(e) {
    var contents = e.target.result;
    contents = contents.split(/\r\n|\n/);
    var i = 0,
        flag = false,
        headers;
    // find the first non-empty line
    while (i < contents.length && !flag) {
        if (contents[i] != '') {
            headers = contents[i];
            flag = true;
        }
        i++;
    }
    // trim each header and strip surrounding quotes
    vm.fileHeaders = headers.split(",").map(function(item) {
        return item.trim().replace(/['"]+/g, '');
    });
    return true;
};
r.readAsText(item);
This code reads the whole file first and then returns the headers, but that takes more time for large files. I want to modify this code so that it reads only the header and not the whole data, so that it takes less time for large files too.
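One library-free sketch is to slice off just the first couple of kilobytes and hand only that slice to the FileReader, on the assumption that the header line fits inside it (reusing the same item file and vm object):

var r = new FileReader();
r.onload = function(e) {
    // only the sliced bytes were read, not the whole file
    var firstLine = e.target.result.split(/\r\n|\n/)[0];
    vm.fileHeaders = firstLine.split(",").map(function(item) {
        return item.trim().replace(/['"]+/g, '');
    });
};
r.readAsText(item.slice(0, 2048)); // read only the first 2 KB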
Found PapaParse as a quick solution!
Papa.parse(item, {
    worker: true,
    skipEmptyLines: true,
    step: function(results, parser) {
        // the first (non-empty) row is the header row; stop parsing immediately
        parser.abort();
        vm.fileHeaders = results.data[0] || undefined;
        results = null;
    },
    complete: function(results) {
        results = null;
    }
});
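If you'd rather not manage the step callback yourself, PapaParse's documented preview option should achieve the same early exit (a sketch under that assumption):

Papa.parse(item, {
    preview: 1,          // parse only the first row, then stop
    skipEmptyLines: true,
    complete: function(results) {
        vm.fileHeaders = results.data[0];
    }
});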
I stored some JPEG files (exactly 350, same files, same size; 336.14 MB in total) as Blobs in IndexedDB. The transaction took around 1 second to complete. Then I read all the data from IndexedDB into an array and stored it to IndexedDB again. This second time it takes around 15 seconds, and I observed this as consistent behavior. Is anything wrong here? I used performance.now() to get the time difference.
Files: 350,
Size of each: 937 KB,
Browser: Chrome and Chromium Edge
//Open
var dbOpen = indexedDB.open(INDEXED_DB_NAME, INDEXED_DB_VERSION);
dbOpen.onupgradeneeded = function (e) {
console.log("onupgradeneeded");
var store = e.currentTarget.result.createObjectStore(
IMAGE_DATA_STORE, { autoIncrement: true });
};
dbOpen.onsuccess = function (e) {
image_data_db = dbOpen.result;
console.log("indexed DB opened");
};
//Initial Write
var inputFiles = document.getElementById('inputFiles');
var fileblobs = [];
for (var i = 0; i < inputFiles.files.length; i++) {
let file = inputFiles.files[i];
var b = new Blob([file], { type: file.type });
fileblobs.push(b);
}
StoreIdb(fileblobs); // < First write
//StoreIdb()
t0 = performance.now();
var trx = image_data_db.transaction(IMAGE_DATA_STORE, 'readwrite');
var imagestore = trx.objectStore(IMAGE_DATA_STORE);
for (var i = 0; i < fileblobs.length; i++) {
var request = imagestore.add(fileblobs[i]);
request.onsuccess = function (e) {
console.log('added');
};
request.onerror = function (e) {
console.error("Request Error", this.error);
};
}
trx.onabort = function (e) {
console.error("Exception:", this.error, this.error.name);
};
trx.oncomplete = function (e) {
console.log('completed');
t1 = performance.now();
timetaken = t1 - t0;
}
//Read
var objectStore = image_data_db.transaction(IMAGE_DATA_STORE).objectStore(IMAGE_DATA_STORE);
objectStore.openCursor().onsuccess = function (e) {
var cursor = e.target.result;
if (cursor) {
blobArray.push(cursor.value); // values were stored as Blobs directly
cursor.continue();
}
else
{
// completed
}
}
// blobArray will be used for the second write << Second Write
I figured it out. The first time it was storing file-backed Blob instances.
I've changed the file-backed Blobs to ArrayBuffer-backed ones, just to ensure the data type is similar in both cases. Now both writes take the same time.
var blobs = [];
for (var i = 0; i < inputFiles.files.length; i++) {
    let file = inputFiles.files[i];
    file.arrayBuffer().then((arrayBuffer) => {
        // re-wrap the bytes so the Blob is backed by in-memory data
        // rather than by the file on disk
        let blob = new Blob([new Uint8Array(arrayBuffer)], { type: file.type });
        blobs.push(blob);
        if (blobs.length == inputFiles.files.length) {
            callback(blobs);
        }
    });
}
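For what it's worth, here is a minimal sketch of the same conversion using Promise.all instead of counting completed reads by hand (assuming the same inputFiles element and callback):

var files = Array.from(inputFiles.files);
Promise.all(files.map(function(file) {
    // read each file fully into memory, then re-wrap it as an in-memory Blob
    return file.arrayBuffer().then(function(buf) {
        return new Blob([new Uint8Array(buf)], { type: file.type });
    });
})).then(function(blobs) {
    callback(blobs); // every Blob is now ArrayBuffer-backed
});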
I am working on a function that will write data to a remote server in chunks using a 3rd-party API. Through some help on Stack Overflow I was able to accomplish this, and it is now working as expected. The problem is that I can only get a single 16kb chunk to write, as I need to advance the pos of where the next bytes will be written.
The initial write starts at 0 easily enough. Due to my unfamiliarity with this, though, I am unsure if the next pos should just be 16 or what. If it helps, the API call writeFileChunk() takes 3 parameters: filepath (str), pos (int64), and data (base64-encoded string).
reader.onload = function(evt)
{
// Get SERVER_ID from URL
var server_id = getUrlParameter('id');
$("#upload_status").text('Uploading File...');
$("#upload_progress").progressbar('value', 0);
var chunkSize = 16<<10;
var buffer = evt.target.result;
var fileSize = buffer.byteLength;
var segments = Math.ceil(fileSize / chunkSize); // How many segments do we need to divide into for upload
var count = 0;
// start the file upload
(function upload()
{
var segSize = Math.min(chunkSize, fileSize - count * chunkSize);
if (segSize > 0)
{
$("#upload_progress").progressbar('value', (count / segments));
var chunk = new Uint8Array(buffer, count++ * chunkSize, segSize); // get a chunk
var chunkEncoded = btoa(String.fromCharCode.apply(null, chunk));
// Send Chunk data to server
$.ajax({
type: "POST",
url: "filemanagerHandler.php",
data: { 'action': 'writeFileChunk', 'server_id': server_id, 'filepath': filepath, 'pos': 0, 'chunk': chunkEncoded },
dataType: 'json',
success: function(data)
{
console.log(data);
setTimeout(upload, 100);
},
error: function(XMLHttpRequest, textStatus, errorThrown)
{
alert("Status: " + textStatus); alert("Error: " + errorThrown); alert("Message: " + XMLHttpRequest.responseText);
}
});
}
else
{
$("#upload_status").text('Finished!');
$("#upload_progress").progressbar('value', 100);
getDirectoryListing(curDirectory);
}
})()
};
The current position in the file on the client side is represented by this line, or more specifically by the second argument, which uses count before it is incremented:
var chunk = new Uint8Array(buffer, count++ * chunkSize, segSize);
Though in this case count advances (count++) before you can reuse it, so if you need the actual position (below as pos) you can extract it by simply rewriting the line into:
var pos = count++ * chunkSize; // here chunkSize = 16kb
var chunk = new Uint8Array(buffer, pos, segSize);
Here each position update will advance by 16kb, as that is the chunk size. Progress is then calculated as pos / fileSize * 100. This of course assumes you use the unencoded buffer size.
The only special case is the last chunk, but when there are no more chunks left to read the position should equal the file length (fileSize), so it should be pretty straightforward.
When the ajax call returns, the server should be at the same position, unless something went wrong (connection, write-access change, disk full, etc.).
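Applied to the loop from the question, the relevant part might look like this sketch (server_id, filepath and the endpoint are taken from the original code; error handling omitted for brevity):

(function upload() {
    var pos = count * chunkSize;                  // byte offset of this chunk
    var segSize = Math.min(chunkSize, fileSize - pos);
    if (segSize > 0) {
        $("#upload_progress").progressbar('value', Math.round(pos / fileSize * 100));
        var chunk = new Uint8Array(buffer, pos, segSize);
        var chunkEncoded = btoa(String.fromCharCode.apply(null, chunk));
        count++;
        $.ajax({
            type: "POST",
            url: "filemanagerHandler.php",
            dataType: 'json',
            data: { 'action': 'writeFileChunk', 'server_id': server_id,
                    'filepath': filepath, 'pos': pos, 'chunk': chunkEncoded },
            success: function() { setTimeout(upload, 100); }
        });
    } else {
        $("#upload_status").text('Finished!');
        $("#upload_progress").progressbar('value', 100);
    }
})();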
You can use the FileReader API to read the file in chunks and send them to your remote server.
HTML
<input type="file" id="files" name="file" /> Read bytes:
<span class="readBytesButtons">
<button>Read entire file in chunks</button>
</span>
Javascript
// Post data to your server.
function postChunk(obj) {
var url = "https://your.remote.server";
return new Promise(function(resolve, reject) {
var xhr = new XMLHttpRequest();
xhr.open('post', url, true);
xhr.responseType = 'json';
xhr.onload = function() {
var status = xhr.status;
if (status == 200) {
resolve(xhr.response);
} else {
reject(status);
}
};
var params = "";
// check that obj has the proper keys and create the url parameters
if (obj.hasOwnProperty('action') && obj.hasOwnProperty('server_id') && obj.hasOwnProperty('filepath') && obj.hasOwnProperty('pos') && obj.hasOwnProperty('chunk')) {
params += "action="+obj.action+"&server_id="+obj.server_id+"&filepath="+obj.filepath+"&pos="+obj.pos+"&chunk="+encodeURIComponent(obj.chunk);
}
if(params.length>0) {
xhr.setRequestHeader('Content-Type', 'application/x-www-form-urlencoded');
xhr.send(params);
} else {
alert('Error');
}
});
}
// add chunk to "obj" object and post it to server
function addChunk(reader,obj,divID) {
reader.onloadend = function(evt) {
if (evt.target.readyState == FileReader.DONE) { // DONE == 2
obj.chunk = evt.target.result;
console.log(obj);
document.getElementById(divID).textContent +=
['Sending bytes: ', obj.pos*16000, ' - ', (obj.pos*16000 + obj.chunk.length),
'\n'].join('');
// post data to server
postChunk(obj).then(function(data) {
if(data!=="" && data!==null && typeof data!=="undefined") {
// chunk was sent successfully
document.getElementById(divID).textContent +=
['Sent bytes: ', obj.pos*16000, ' - ', (obj.pos*16000 + obj.chunk.length), '\n'].join('');
} else {
alert('Error! Empty response');
}
}, function(status) {
alert('Resolve Error');
});
}
};
}
// read and send Chunk
function readChunk() {
var files = document.getElementById('files').files;
if (!files.length) {
alert('Please select a file!');
return;
}
var file = files[0];
var size = parseInt(file.size);
var chunkSize = 16000;
var chunks = Math.ceil(size/chunkSize);
var start, stop = 0;
var blob = [];
for (var i = 0; i < chunks; i++) {
start = i*chunkSize;
stop = (i+1)*chunkSize; // File.slice()'s end index is exclusive, so no -1 here
if (i == (chunks-1)) {
stop = size;
}
var reader = new FileReader();
blob = file.slice(start, stop);
reader.readAsBinaryString(blob);
var obj = {action: 'writeFileChunk', server_id: 'sid', filepath: 'path', pos: i, chunk: ""};
var div = document.createElement('div');
div.id = "bytes"+i;
document.body.appendChild(div);
addChunk(reader,obj,div.id);
}
}
// Check for the various File API support.
if (window.File && window.FileReader && window.FileList && window.Blob) {
console.log(' Great success! All the File APIs are supported.');
} else {
alert('The File APIs are not fully supported in this browser.');
}
document.querySelector('.readBytesButtons').addEventListener('click', function(evt) {
if (evt.target.tagName.toLowerCase() == 'button') {
readChunk();
}
}, false);
You can check this example in this Fiddle
I am trying to load some data from my JSON file using AJAX. The file is called external-file.json. Here is the code; it includes other parts that don't have to do with the data loading. The part I'm not sure of begins in the getViaAjax function. I can't seem to find my error.
function flip(){
if(vlib_front.style.transform){
el.children[1].style.transform = "";
el.children[0].style.transform = "";
} else {
el.children[1].style.transform = "perspective(600px) rotateY(-180deg)";
el.children[0].style.transform = "perspective(600px) rotateY(0deg)";
}
}
var vlib_front = document.getElementById('front');
var el = document.getElementById('flip3D');
el.addEventListener('click', flip);
var word = null; var explanation = null;
var i=0;
function updateDiv(id, content) {
document.getElementById(id).innerHTML = content;
}
updateDiv('the-h',word[i]);
updateDiv('the-p',explanation[i])
function counter (index, step){
if (word[index+step] !== undefined) {
index+=step;
i=index;
updateDiv('the-h',word[index]);
updateDiv('the-p',explanation[index]);
}
}
var decos = document.getElementById('deco');
decos.addEventListener('click', function() {
counter(i,-1);
}, false);
var incos = document.getElementById('inco');
incos.addEventListener('click', function() {
counter(i,+1);
}, false);
function getViaAjax("external-file.json", callback) { // url being the url to external File holding the json
var r = new XMLHttpRequest();
r.open("GET", "external-file.json", true);
r.onload = function() {
if (this.status < 400 && this.status > 199) {
if (typeof callback === "function")
callback(JSON.parse(this.response));
} else {
console.log("err"); // server reached but returned an error status code
}
};
r.onerror = function(err) { console.log("error Ajax.get " + url); console.log(err); };
r.send();
}
function yourLoadingFunction(jsonData) {
word = jsonData.words;
explanation = jsonData.explanation;
updateDiv('the-h',word[i]);
updateDiv('the-p',explanation[i])
// then call whatever it is to trigger the update within the page
}
getViaAjax("external-file.json", yourLoadingFunction)
As #light said, this:
function getViaAjax("external-file.json", callback) { // url being the url to external File holding the json
var r = new XMLHttpRequest();
r.open("GET", "external-file.json", true);
Should be:
function getViaAjax(url, callback) { // url being the url to external File holding the json
var r = new XMLHttpRequest();
r.open("GET", url, true);
I built up a quick sample that I can share that might help you isolate your issue. Stand it up in a local http-server of your choice and you should see JSON.parse(xhr.response) return a JavaScript array containing two objects.
There are two files
data.json
index.html
data.json
[{
"id":1,
"value":"foo"
},
{
"id":2,
"value":"bar"
}]
index.html
<html>
<head>
</head>
<body onload="so.getJsonStuffs()">
<h1>so.json-with-ajax</h1>
<script type="application/javascript">
var so = (function(){
function loadData(data){
var list = document.createElement("ul");
list.id = "data-list";
data.forEach(function(element){
var item = document.createElement("li");
var content = document.createTextNode(JSON.stringify(element));
item.appendChild(content);
list.appendChild(item);
});
document.body.appendChild(list);
}
var load = function()
{
console.log("Initializing xhr");
var xhr = new XMLHttpRequest();
xhr.onload = function(e){
console.log("response has returned");
if(xhr.status >= 200
&& xhr.status < 400) {
var payload = JSON.parse(xhr.response);
console.log(payload);
loadData(payload);
}
}
var uri = "data.json";
console.log("opening resource request");
xhr.open("GET", uri, true);
xhr.send();
}
return {
getJsonStuffs : load
}
})();
</script>
</body>
</html>
Running it will log two JavaScript objects to the Dev Tools console, as well as add a ul to the DOM containing a list item for every object inside the data.json array.
I am experiencing high memory consumption in my Node.js app: when loading ~100MB zip files one after the other, it keeps them in memory as a "NodeBufferReader". The library I am using is called JSZip and is found here: https://stuk.github.io/jszip/
If I access the same zip file twice, it doesn't increase memory usage, but for every 'extra' .zip file I access, the memory increases by approximately the size of the .zip file. The files I am accessing are all around 100MB or larger, so as you can imagine this has the potential to get rather large, rather quickly.
The Node.js application is a WebSocket server that reads files from within .zip files and returns them to the requester as base64 data. The function in question is here:
function handleFileRequest(args, connection_id) {
var zipIndex = 0,
pathLen = 0,
zip_file = "",
zip_subdir = "";
try {
if (args.custom.file.indexOf(".zip") > -1) {
// We have a .zip directory!
zipIndex = args.custom.file.indexOf(".zip") + 4;
pathLen = args.custom.file.length;
zip_file = args.custom.file.substring(0, zipIndex);
zip_subdir = args.custom.file.substring(zipIndex + 1, pathLen);
fs.readFile(zip_file, function (err, data) {
if (!err) {
zipObj.load(data);
if (zipObj.file(zip_subdir)) {
var binary = zipObj.file(zip_subdir).asBinary();
var base64data = btoa(binary);
var extension = args.custom.file.split('.').pop();
var b64Header = "data:" + MIME[extension] + ";base64,";
var tag2 = args.custom.tag2 || "unset";
var tag3 = args.custom.tag3 || "unset";
var rargs = {
action: "getFile",
tag: args.tag,
dialogName: connections[connection_id].dialogName,
custom: {
file: b64Header + base64data,
tag2: tag2,
tag3: tag3
}
};
connections[connection_id].sendUTF(JSON.stringify(rargs));
rargs = null;
binary = null;
base64data = null;
} else {
serverLog(connection_id, "Requested file doesn't exist");
}
} else {
serverLog(connection_id, "There was an error retrieving the zip file data");
}
});
} else {
// File isn't a .zip
}
} catch (e) {
serverLog(connection_id, e);
}
}
Any help would be much appreciated in getting rid of this problem - Thanks!
Working Code Example
function handleFileRequest(args, connection_id) {
var zipIndex = 0,
pathLen = 0,
f = "",
d = "";
try {
if (args.custom.file.indexOf(".zip") > -1) {
// We have a .zip directory!
zipIndex = args.custom.file.indexOf(".zip") + 4;
pathLen = args.custom.file.length;
f = args.custom.file.substring(0, zipIndex);
d = args.custom.file.substring(zipIndex + 1, pathLen);
fs.readFile(f, function (err, data) {
var rargs = null,
binary = null,
base64data = null,
zipObj = null;
if (!err) {
zipObj = new JSZip();
zipObj.load(data);
if (zipObj.file(d)) {
binary = zipObj.file(d).asBinary();
base64data = btoa(binary);
var extension = args.custom.file.split('.').pop();
var b64Header = "data:" + MIME[extension] + ";base64,";
var tag2 = args.custom.tag2 || "unset";
var tag3 = args.custom.tag3 || "unset";
rargs = {
action: "getFile",
tag: args.tag,
dialogName: connections[connection_id].dialogName,
custom: {
file: b64Header + base64data,
tag2: tag2,
tag3: tag3
}
};
connections[connection_id].sendUTF(JSON.stringify(rargs));
} else {
serverLog(connection_id, "Requested file doesn't exist");
}
} else {
serverLog(connection_id, "There was an error retrieving the zip file data");
}
rargs = null;
binary = null;
base64data = null;
zipObj = null;
});
} else {
// Non-Zip file
}
} catch (e) {
serverLog(connection_id, e);
}
}
If you use the same JSZip instance to load each and every file, you will keep everything in memory : the load method doesn't replace the existing content.
Try using a new JSZip instance each time :
var zipObj = new JSZip();
zipObj.load(data);
// or var zipObj = new JSZip(data);
I'm creating a website for mobile phones that resizes photos and uploads them.
$('#ImgPreview canvas').each(function(pIndex) {
vFormData.append(pIndex, canvasToJpegBlob($(this)[0]), vIssueId +'-attachment0'+ pIndex +'.jpg');
});
$.ajax({
url: '/api/ob/issuefileupload',
data: vFormData,
processData: false,
contentType: false,
type: 'POST'
}).done(function(pData) {
window.location = '/issue?id='+ vIssueId;
}).fail(function(pJqXHR) {
alert(My.Strings.UploadFailed);
});
This works in Chrome for Android and in Safari on iOS, but in the native Android browser the files have a Content-Length of 0 and are named Blob plus a UID. When the file is added to the FormData, its size also seems rather large (900k as opposed to 50k in Chrome).
The canvasToJpegBlob function:
function canvasToJpegBlob(pCanvas) {
var vMimeType = "image/jpeg",
vDataURI,
vByteString,
vBlob,
vArrayBuffer,
vUint8Array, i,
vBlobBuilder;
vDataURI = pCanvas.toDataURL(vMimeType, 0.8);
vByteString = atob(vDataURI.split(',')[1]);
vArrayBuffer = new ArrayBuffer(vByteString.length);
vUint8Array = new Uint8Array(vArrayBuffer);
for (i = 0; i < vByteString.length; i++) {
vUint8Array[i] = vByteString.charCodeAt(i);
}
try {
vBlob = new Blob([vUint8Array.buffer], {type : vMimeType});
} catch(e) {
window.BlobBuilder = window.BlobBuilder ||
window.WebKitBlobBuilder ||
window.MozBlobBuilder ||
window.MSBlobBuilder;
if (e.name === 'TypeError' && window.BlobBuilder) {
vBlobBuilder = new BlobBuilder();
vBlobBuilder.append(vUint8Array.buffer);
vBlob = vBlobBuilder.getBlob(vMimeType);
} else if (e.name === 'InvalidStateError') {
vBlob = new Blob([vUint8Array.buffer], {type : vMimeType});
} else {
alert(My.Strings.UnsupportedFile);
}
}
return vBlob;
}
Is there any way to get this working in the native Android browser?
I also ran into this problem and needed to come up with a more generic solution, as in some cases I won't have control over the server-side code.
Eventually I reached a solution that is almost completely transparent. The approach was to polyfill the broken FormData with a blob that appends data in the format required for multipart/form-data. It overrides XHR's send() with a version that reads the blob into a buffer that is then sent in the request.
Here's the main code:
var
// Android native browser uploads blobs as 0 bytes, so we need a test for that
needsFormDataShim = (function () {
var bCheck = ~navigator.userAgent.indexOf('Android')
&& ~navigator.vendor.indexOf('Google')
&& !~navigator.userAgent.indexOf('Chrome');
return bCheck && navigator.userAgent.match(/AppleWebKit\/(\d+)/).pop() <= 534;
})(),
// Test for constructing of blobs using new Blob()
blobConstruct = !!(function () {
try { return new Blob(); } catch (e) {}
})(),
// Fallback to BlobBuilder (deprecated)
XBlob = blobConstruct ? window.Blob : function (parts, opts) {
var bb = new (window.BlobBuilder || window.WebKitBlobBuilder || window.MSBlobBuilder);
parts.forEach(function (p) {
bb.append(p);
});
return bb.getBlob(opts ? opts.type : undefined);
};
function FormDataShim () {
var
// Store a reference to this
o = this,
// Data to be sent
parts = [],
// Boundary parameter for separating the multipart values
boundary = Array(21).join('-') + (+new Date() * (1e16*Math.random())).toString(36),
// Store the current XHR send method so we can safely override it
oldSend = XMLHttpRequest.prototype.send;
this.append = function (name, value, filename) {
parts.push('--' + boundary + '\nContent-Disposition: form-data; name="' + name + '"');
if (value instanceof Blob) {
parts.push('; filename="'+ (filename || 'blob') +'"\nContent-Type: ' + value.type + '\n\n');
parts.push(value);
}
else {
parts.push('\n\n' + value);
}
parts.push('\n');
};
// Override XHR send()
XMLHttpRequest.prototype.send = function (val) {
var fr,
data,
oXHR = this;
if (val === o) {
// Append the final boundary string
parts.push('--' + boundary + '--');
// Create the blob
data = new XBlob(parts);
// Set up and read the blob into an array to be sent
fr = new FileReader();
fr.onload = function () { oldSend.call(oXHR, fr.result); };
fr.onerror = function (err) { throw err; };
fr.readAsArrayBuffer(data);
// Set the multipart content type and boundary
this.setRequestHeader('Content-Type', 'multipart/form-data; boundary=' + boundary);
XMLHttpRequest.prototype.send = oldSend;
}
else {
oldSend.call(this, val);
}
};
}
And just use it like so, calling fd.append(name, value) as normal afterwards:
var fd = needsFormDataShim ? new FormDataShim() : new FormData();
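For example, wired into the upload from the question (a sketch; the selector, endpoint and variable names come from the original code):

var fd = needsFormDataShim ? new FormDataShim() : new FormData();
$('#ImgPreview canvas').each(function(pIndex) {
    // append each canvas blob exactly as before
    fd.append(pIndex, canvasToJpegBlob(this), vIssueId + '-attachment0' + pIndex + '.jpg');
});
$.ajax({
    url: '/api/ob/issuefileupload',
    data: fd,             // the shim's overridden send() handles the rest
    processData: false,
    contentType: false,
    type: 'POST'
});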
How about drawing it on a canvas, using a matrix to scale it to the size you wish, and then sending it to the server using canvas.toDataURL? Check out this question.
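A minimal sketch of that idea (the function name, target width and JPEG quality are illustrative):

function scaleToDataURL(img, targetWidth) {
    var scale = targetWidth / img.width;
    var canvas = document.createElement('canvas');
    canvas.width = targetWidth;
    canvas.height = Math.round(img.height * scale);
    var ctx = canvas.getContext('2d');
    ctx.scale(scale, scale);      // scaling via the context's transform matrix
    ctx.drawImage(img, 0, 0);
    return canvas.toDataURL('image/jpeg', 0.8);
}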
I use this to fix the problem:
// don't use a blob; simply use key/value pairs
var form = new FormData();
// get your content type and raw data from the data URL
form.append( 'content_type', type);
form.append( 'content', raw);
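A sketch of pulling those two pieces out of a canvas data URL (variable names are illustrative):

var dataUrl = canvas.toDataURL('image/jpeg', 0.8); // "data:image/jpeg;base64,/9j/4AAQ..."
var type = dataUrl.split(',')[0].split(':')[1].split(';')[0]; // "image/jpeg"
var raw = dataUrl.split(',')[1];                              // the base64 payload
// then append type and raw to the FormData as above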