Why does the following JavaScript snippet produce an empty zip archive?

The snippet below uses zip.js to create a zip archive, ZippyMcZip.zip, of the strings in contents. contents is an array of objects, e.g.
{name:"my name",contents:"contents to write to file."}
The archive is being created; however, apart from the manifest, it is empty:
$ unzip -l ZippyMcZip.zip
Archive:  ZippyMcZip.zip
  Length     Date    Time   Name
 --------    ----    ----   ----
        0  05-01-13 16:41   File1.txt
        0  05-01-13 16:41   File2.txt
 --------                   -------
        0                   2 files
Does anyone have any pointers as to why the archive would contain empty files?
saveAs is provided by FileSaver.js; I don't think it is the issue, as the file is being written to disk and saveAs works elsewhere.
function createZip(contents) {
    function onProgress(a, b) {
        console.log("current", a, "end", b);
    }
    function onEnd() {
        console.log("on End");
    }
    zip.workerScriptsPath = "/js/zip/";
    zip.useWebWorkers = false;
    var zipper = (function() {
        var zipWriter;
        return {
            addTexts: function(files) {
                function add(text) {
                    zipWriter.add(text.name,
                        new zip.TextReader(text.contents), onEnd, onProgress);
                }
                zip.createWriter(new zip.BlobWriter(), function(writr) {
                    zipWriter = writr;
                });
                _.foreach(files, add);
            },
            getBlob: function(callback) {
                zipWriter.close(callback);
            }
        };
    })();
    zipper.addTexts(contents);
    zipper.getBlob(function(blob) {
        saveAs(blob, "ZippyMcZip.zip");
    });
}

You have two issues, both related to the asynchronous nature of the zip.js API.
First, you try to write multiple files in parallel: that won't work. So, instead of iterating with the synchronous _.foreach function, you have to call the add function recursively from the onEnd callback of the zipWriter.add method (cf. [1]).
Second, you also have to wait for the content to be written before calling the zipWriter.close method, so you have to add a callback parameter (cf. [2]) to the signature of the addTexts method; it is invoked when the recursive process is finished.
Here is your code with these 2 fixes:
function createZip(contents) {
    function onProgress(a, b) {
        console.log("current", a, "end", b);
    }
    zip.workerScriptsPath = "/js/zip/";
    zip.useWebWorkers = false;
    var zipper = (function() {
        var zipWriter;
        return {
            addTexts: function(files, callback /* [2] new parameter */) {
                function add(fileIndex) {
                    if (fileIndex < files.length) {
                        zipWriter.add(files[fileIndex].name,
                            new zip.TextReader(files[fileIndex].contents), function() {
                                add(fileIndex + 1); /* [1] add the next file */
                            }, onProgress);
                    } else {
                        callback(); /* [2] no more files to add: callback is called */
                    }
                }
                zip.createWriter(new zip.BlobWriter(), function(writer) {
                    zipWriter = writer;
                    add(0); /* [1] add the first file */
                });
            },
            getBlob: function(callback) {
                zipWriter.close(callback);
            }
        };
    })();
    zipper.addTexts(contents, function() {
        zipper.getBlob(function(blob) {
            saveAs(blob, "ZippyMcZip.zip");
        });
    });
}
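For completeness, a minimal usage sketch; the file names and contents here are made up for illustration:
// Hypothetical input: an array of {name, contents} objects,
// matching the shape described in the question.
createZip([
    { name: "File1.txt", contents: "first file's text" },
    { name: "File2.txt", contents: "second file's text" }
]);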

Related

ng2-file-upload single upload override file upload

I built a demo based on this tutorial: https://github.com/valor-software/ng2-file-upload. I want single-file upload, but without a remove-file button. After adding File A, when I then add File B, File A should be replaced by File B. Here is my uploader:
this.uploader = new FileUploader({
    url: this.baseURL,
    allowedFileType: ["xls"],
    maxFileSize: 5,
    queueLimit: 1
});
Please advise.
As Arun Muthiyarkath suggested, you can use the onAfterAddingFile, but the shorter code would be:
ngOnInit() {
    this.uploader.onAfterAddingFile = (fileItem: FileItem) => {
        if (this.uploader.queue.length > 1) {
            this.uploader.removeFromQueue(this.uploader.queue[0]);
        }
    };
}
Source: https://github.com/valor-software/ng2-file-upload/issues/703
You can probably use the onAfterAddingFile callback provided by the library. Below is sample code. This always overrides the old file with the latest one, so the queue will always contain one item: the latest file.
ngOnInit() {
    this.uploader.onAfterAddingFile = (fileItem: FileItem) => this.onAfterAddingFile(fileItem);
}

onAfterAddingFile(fileItem: FileItem) {
    let latestFile = this.uploader.queue[this.uploader.queue.length - 1];
    this.uploader.queue = [];
    this.uploader.queue.push(latestFile);
}
this.uploader.onAfterAddingFile = (fileItem: FileItem) => {
    if (this.uploader.queue.length > 0) {
        this.uploader.queue = [fileItem];
    }
};
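For context, here is a minimal, self-contained component sketch wiring the accepted approach together. The selector, template, and upload URL are illustrative assumptions, not taken from the question; the ng2FileSelect directive comes from the library's FileUploadModule. The queueLimit: 1 from the question is omitted here, since a queue limit can stop a second file from ever reaching onAfterAddingFile.
import { Component, OnInit } from '@angular/core';
import { FileUploader, FileItem } from 'ng2-file-upload';

@Component({
    selector: 'app-single-upload', // assumed name
    template: '<input type="file" ng2FileSelect [uploader]="uploader" />'
})
export class SingleUploadComponent implements OnInit {
    // '/api/upload' is a placeholder endpoint.
    uploader = new FileUploader({ url: '/api/upload' });

    ngOnInit() {
        // Keep only the most recently added file in the queue.
        this.uploader.onAfterAddingFile = (fileItem: FileItem) => {
            if (this.uploader.queue.length > 1) {
                this.uploader.removeFromQueue(this.uploader.queue[0]);
            }
        };
    }
}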

Read a large file N lines at a time in Node.js

I have a file with 65,000,000 lines, that is about 2gb in size.
I want to read this file in N lines at a time, perform a db insert operation, and then read the next N, with N being, say, 1000 in this case. Insert order doesn't matter, so synchronous is fine.
What's the best way of doing this? I've only found ways to either load one line at a time, or methods that read the whole file into memory. Sample code below, which I've been using to read the file one line at a time:
var LineByLineReader = require('line-by-line'); // from the line-by-line npm package

var singleFileParser = (file, insertIntoDB) => {
    var lr = new LineByLineReader(file);

    lr.on('error', function(err) {
        // 'err' contains the error object
        console.error(err);
        console.error("Error reading file!");
    });

    lr.on('line', function(line) {
        // 'line' contains the current line without the trailing newline character
        insertIntoDB(line);
    });

    lr.on('end', function() {
        // All lines are read, file is closed now
    });
};
Lines can only be parsed one at a time by the underlying reader. So, if you want 10 at once, you collect them one at a time until you have 10 and then process those 10.
I did not think Jarek's code quite worked right, so here's a different version that collects 10 lines into an array and then calls dbInsert():
var tenLines = [];
lr.on('line', function(line) {
    tenLines.push(line);
    if (tenLines.length === 10) {
        lr.pause();
        dbInsert(<yourSQL>, function(error, returnVal) {
            if (error) {
                // some sort of error handling here
            }
            tenLines = [];
            lr.resume();
        });
    }
});
// process last set of lines in the tenLines buffer (if any)
lr.on('end', function() {
    if (tenLines.length !== 0) {
        // process last set of lines
        dbInsert(...);
    }
});
Jarek's version seems to call dbInsert() on every line event rather than only on every 10th line event, and it does not process any leftover lines at the end of the file if the file is not a perfect multiple of 10 lines long.
Something like this should do:
var cnt = 0;
var tenLines = [];
lr.on('line', function(line) {
    tenLines.push(line);
    if (++cnt >= 10) {
        lr.pause();
        // prepare your SQL statements from tenLines
        dbInsert(<yourSQL>, function(error, returnVal) {
            cnt = 0;
            tenLines = [];
            lr.resume();
        });
    }
});
This is my solution inside an async function:
let multipleLines = [];
const filepath = '<file>';
const numberLines = 50;

const lineReader = require('readline').createInterface({
    input: require('fs').createReadStream(filepath)
});

// process lines in batches of numberLines
for await (const line of lineReader) {
    multipleLines.push(line);
    if (multipleLines.length === numberLines) {
        await dbInsert();
        multipleLines = [];
    }
}

// process the last set of lines (if any)
if (multipleLines.length !== 0) {
    await dbInsert();
}
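Since for await/await only work in an async context, the loop above is assumed to sit inside an async wrapper, roughly like this:
// Hypothetical wrapper around the batching loop above.
async function processFile() {
    // ... the for await loop and final flush from above ...
}

processFile().catch(console.error);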
This is my solution using built-in modules rather than npm's line-by-line package:
var lineReader = require('readline').createInterface({
    input: require('fs').createReadStream(fileToRead)
});

var lines = [];
lineReader.on('line', function(line) {
    lines.push(line.split(","));
    if (lines.length === 10) {
        makeCall();
        lines = [];
    }
});

lineReader.on('close', function() {
    if (lines.length !== 0) {
        makeCall();
        lines = [];
    }
});

function makeCall() {
    // Do something with lines
}
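If the database insert is asynchronous, the built-in readline interface can also apply backpressure with pause()/resume(). A minimal sketch, assuming a hypothetical promise-returning insertBatch(rows) function:
var readline = require('readline');
var fs = require('fs');

// Sketch: batch lines and pause reading while each batch is inserted.
function insertInBatches(filepath, batchSize, insertBatch) {
    var rl = readline.createInterface({ input: fs.createReadStream(filepath) });
    var batch = [];
    var inserting = false;

    rl.on('line', function(line) {
        batch.push(line);
        if (batch.length >= batchSize && !inserting) {
            inserting = true;
            // pause() stops reading, though a few already-buffered lines
            // may still be emitted; they simply join the next batch.
            rl.pause();
            insertBatch(batch.splice(0, batch.length)).then(function() {
                inserting = false;
                rl.resume();
            });
        }
    });

    rl.on('close', function() {
        // Flush the final partial batch, if any.
        if (batch.length) insertBatch(batch);
    });
}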

cordova file check write asynchronously

I use the $cordovaFile methods to test whether a file exists in several directories. When I find one, I do some actions and break out of the for loop.
But $cordovaFile.checkFile (like writeFile and the other methods) works asynchronously, so the CLI shows me (via console.log):
1 890383 log ===== file_created =====
2 890389 log object
3 890395 log null
5 890492 log ===== file_created =====
6 890494 log object
7 890496 log null
9 890556 log ===== file_created =====
10 890558 log object
11 890559 log null
13 890626 log ===== file_created =====
14 890627 log object
15 890628 log null
17 890671 log ===== file_created =====
18 890672 log object
19 890674 log null
Is there a way to run $cordovaFile.checkFile synchronously?
My code (originally in a controller):
dir_list = [
    cordova.file.applicationDirectory,
    cordova.file.externalRootDirectory,
    cordova.file.externalDataDirectory,
    cordova.file.applicationStorageDirectory,
    cordova.file.dataDirectory
];
for (index = 0; index < dir_list.length; ++index) {
    current_dir = dir_list[index];
    file_created = null;
    $cordovaFile.checkFile(current_dir, filename)
        .then(function (success) {
            file_created = true;
        }, function (error) {
            file_created = false;
        });
    console.log('===== file_created =====');
    console.log(typeof file_created);
    console.log(JSON.stringify(file_created));
    if (file_created) {
        // Some actions
        break;
    }
}
Solution, inspired by Aaron Franco's answer:
// This function is run by the backup function
$scope.write_file_list = function(file_list, filename, content) {
    if (file_list.length) {
        current_dir = file_list[0];
        $cordovaFile.writeFile(current_dir, filename, content, true).then(
            function(result) {
                // Action if file saved
            }, function(err) {
                // Try the next directory in the list
                file_list.splice(0, 1);
                $scope.write_file_list(file_list, filename, content);
            });
    } else {
        // Action if no file could be saved
    }
};

// This function is called by the controller
$scope.backup = function() {
    dir_list = [
        cordova.file.applicationDirectory,
        cordova.file.externalRootDirectory,
        cordova.file.externalDataDirectory,
        cordova.file.applicationStorageDirectory,
        cordova.file.dataDirectory
    ];
    content = 'content file';
    filename = 'test.txt';
    $scope.write_file_list(dir_list, filename, content);
};
You should execute your code inside a method that can be called after the file is created.
dir_list = [
    cordova.file.applicationDirectory,
    cordova.file.externalRootDirectory,
    cordova.file.externalDataDirectory,
    cordova.file.applicationStorageDirectory,
    cordova.file.dataDirectory
];
for (index = 0; index < dir_list.length; ++index) {
    current_dir = dir_list[index];
    $cordovaFile.checkFile(current_dir, filename)
        .then(function (success) {
            console.log('===== file_created =====');
            // do some stuff here that deals with the created file
        }, function (error) {
            // do something with the error
        });
}
By doing it this way, there is no need to flag the file creation. You know the file was created when your success function is called.
Cordova is designed to run on the main thread of your application, so anything you do synchronously will block that thread. This is why Cordova apps are written asynchronously, and why JavaScript APIs are largely asynchronous to begin with.
The alternative would be to use a promise.
<script src="https://www.promisejs.org/polyfills/promise-7.0.4.min.js"></script>
You should be able to use it in this way to check one file.
dir_list = [
    cordova.file.applicationDirectory,
    cordova.file.externalRootDirectory,
    cordova.file.externalDataDirectory,
    cordova.file.applicationStorageDirectory,
    cordova.file.dataDirectory
];

function checkIfFileExist(filename) {
    return new Promise(function(resolve, reject) {
        for (index = 0; index < dir_list.length; ++index) {
            current_dir = dir_list[index];
            $cordovaFile.checkFile(current_dir, filename).then(resolve, reject);
        }
    });
}
checkIfFileExist(filename).then(function (successData) {
    // file exists here
}, function (err) {
    // file doesn't exist here
});
That should allow you to check one at a time. For more details on using promises:
http://ramkulkarni.com/blog/using-javascript-promises-with-cordova/
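Note that the loop in checkIfFileExist above settles the promise as soon as any single checkFile call succeeds or fails, so one missing directory can reject the whole check. A sketch that tries the directories sequentially instead, rejecting only after all of them fail (same assumed $cordovaFile API and dir_list as above):
function checkIfFileExistSequentially(filename) {
    function tryDir(index) {
        if (index >= dir_list.length) {
            return Promise.reject(new Error('file not found in any directory'));
        }
        return $cordovaFile.checkFile(dir_list[index], filename).then(
            function (success) {
                return dir_list[index]; // resolve with the directory that matched
            },
            function () {
                return tryDir(index + 1); // try the next directory
            });
    }
    return tryDir(0);
}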

How to cleanup files after multiple nested tests in Mocha, pass or fail

I just started using Mocha. I'm writing a test that requires file cleanup, whether the test succeeds or not. Here's what I'm kind of looking for:
describe('example', function() {
    fs.writeFile(file, 'foo', function() {
        it('should succeed', function(done) {
            done();
        });
        describe('example2', function() {
            it('should do something awesome', function(done) {
                done();
            });
        });
    });
});
afterAllTests(function() {
    fs.unlink(file);
});
What I'm looking for is a way to delete all the files created during the tests. Right now I delete them after my last test completes, but if any test fails, the files don't get deleted. I did look into after(), but it only runs after my first test. The only other option I can see is using afterEach(), setting a variable like var testFinished = false; at the beginning and setting it to true once the test completes. Is this the best way?
Thanks!
I wrote a file system wrapper that tracks the files written and can then delete all of them. Note: I only wrapped the functions I was using. You can easily add more.
// trackedfs.js
var fs = require('fs');

function deleteNoFail(filePath) {
    if (filePath && fs.existsSync(filePath)) {
        if (fs.lstatSync(filePath).isDirectory()) {
            fs.rmdirSync(filePath);
        } else {
            fs.unlinkSync(filePath);
        }
    }
}

function makeTrackedFS(options) {
    options = options || {};
    var f = options.fs || fs;
    var files = [];
    var folders = [];
    var links = [];

    function addFile(file) {
        files.push(file);
    }
    function addFolder(folder) {
        folders.push(folder);
    }
    function addLink(link) {
        links.push(link);
    }

    // Expose these in case we want to manually add stuff,
    // e.g. if we shell out to another program we can add its output
    f.addFile = addFile;
    f.addFolder = addFolder;
    f.addLink = addLink;

    f.mkdirSync = function(origFn) {
        return function() {
            addFolder(arguments[0]);
            return origFn.apply(f, arguments);
        };
    }(f.mkdirSync);

    f.symlinkSync = function(origFn) {
        return function() {
            addLink(arguments[1]);
            return origFn.apply(f, arguments);
        };
    }(f.symlinkSync);

    f.writeFileSync = function(origFn) {
        return function() {
            addFile(arguments[0]);
            return origFn.apply(f, arguments);
        };
    }(f.writeFileSync);

    function deleteList(list) {
        list.reverse(); // because we want files before dirs
        list.forEach(function(entry) {
            deleteNoFail(entry);
        });
    }

    // call to delete everything
    f.cleanup = function(options) {
        deleteList(links);
        deleteList(files);
        deleteList(folders);
        links = [];
        files = [];
        folders = [];
    };
}

exports.makeTrackedFS = makeTrackedFS;
I can now wrap fs with
var trackedfs = require('trackedfs');
trackedfs.makeTrackedFS();
and set up cleanup like this:
function cleanUpOnExit() {
    fs.cleanup();
}

function cleanUpOnExitAndExit() {
    cleanUpOnExit();
    process.exit();
}

process.on('exit', cleanUpOnExit);
process.on('SIGINT', cleanUpOnExitAndExit);
process.on('uncaughtException', cleanUpOnExitAndExit);
Here's a test
var trackedfs = require('../lib/trackedfs');
var fs = require('fs');

trackedfs.makeTrackedFS();

function cleanUpOnExit() {
    fs.cleanup();
}

function cleanUpOnExitAndExit() {
    cleanUpOnExit();
    process.exit();
}

process.on('exit', cleanUpOnExit);
process.on('SIGINT', cleanUpOnExitAndExit);
process.on('uncaughtException', cleanUpOnExitAndExit);

describe("trackedfs", function() {
    it('makes a directory', function() {
        fs.mkdirSync('foo');
    });
    it('writes a file', function() {
        fs.writeFileSync("foo/bar", "hello");
    });
});
Run it and there will be no foo folder or foo/bar file when it is done.
The one issue is that it probably can't delete files that are still open, so if you open a file and then crash your test, it might not be able to delete that file.
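For simpler cases, note that a root-level after() hook in Mocha runs once after all the tests in a file, whether they passed or failed. A minimal sketch (the file name here is illustrative):
var fs = require('fs');
var file = 'test-output.txt'; // hypothetical file created by the tests

// Root-level hook: runs once after every test in this file, pass or fail.
after(function() {
    try {
        fs.unlinkSync(file);
    } catch (err) {
        // the file may not exist if a test never created it
    }
});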

Javascript file dropping and reading directories - Asynchronous Recursion

So I'm trying to create a file dropper web application. Right now, a user can drop files on the screen and I can read them, including all the files in a directory that was dropped. But, I don't know when the script is done reading the files.
Some code:
This first function handles a 'drop' event and will loop through each file and send it to another function that will read its contents.
function readDrop(evt) {
    for (var i = 0; i < evt.dataTransfer.files.length; i++) {
        var entry = evt.dataTransfer.items[i].webkitGetAsEntry();
        if (entry)
            readContents(entry, "");
    }
    // Do stuff after all files and directories have been read.
}
This function is a recursive FileEntry reader. If the entry is a file, I read it. If it is a directory, it loops through the contents and passes each entry back through this function.
function readContents(entry, path) {
    if (entry.isFile) {
        readFileData(entry, path, function(fileData) {
            _MyFiles.push(fileData);
        });
    } else if (entry.isDirectory) {
        var directoryReader = entry.createReader();
        var path = path + entry.name;
        directoryReader.readEntries(function(results) {
            for (var j = 0; j < results.length; j++) {
                readContents(results[j], path);
            }
        }, errorHandler);
    }
}
And here is my function for reading the files. The callback just pushes the fileData object to a global array
function readFileData(entry, path, callback) {
    var fileData = { "name": entry.name, "size": 0, "path": path, "file": entry };
    entry.file(function(file) {
        fileData["size"] = file.size;
        callback(fileData);
    });
}
I'm not sure where to go from here so that I can have a callback when all files and directories have been read.
The FileSystem API doesn't seem well suited for the task of a full recursive traversal, perhaps that's part of the reason why other vendors are not adopting it. Anyway, with an arcane combination of Promises I think I was able to accomplish this goal:
function traverse_directory(entry) {
    let reader = entry.createReader();
    // Resolved when the entire directory is traversed
    return new Promise((resolve_directory) => {
        var iteration_attempts = [];
        (function read_entries() {
            // According to the FileSystem API spec, readEntries() must be
            // called until it calls the callback with an empty array. Seriously??
            reader.readEntries((entries) => {
                if (!entries.length) {
                    // Done iterating this particular directory
                    resolve_directory(Promise.all(iteration_attempts));
                } else {
                    // Add a list of promises for each directory entry. If the entry
                    // is itself a directory, then that promise won't resolve until
                    // it is fully traversed.
                    iteration_attempts.push(Promise.all(entries.map((entry) => {
                        if (entry.isFile) {
                            // DO SOMETHING WITH FILES
                            return entry;
                        } else {
                            // DO SOMETHING WITH DIRECTORIES
                            return traverse_directory(entry);
                        }
                    })));
                    // Try calling readEntries() again for the same dir, per the spec
                    read_entries();
                }
            }, errorHandler);
        })();
    });
}

traverse_directory(my_directory_entry).then(() => {
    // AT THIS POINT THE DIRECTORY SHOULD BE FULLY TRAVERSED.
});
Following on from drarmstr's answer, I modified the function to comply with the Airbnb ESLint standards, and wanted to make some further comments on its usage and results.
Here's the new function:
function traverseDirectory(entry) {
    const reader = entry.createReader();
    // Resolved when the entire directory is traversed
    return new Promise((resolve, reject) => {
        const iterationAttempts = [];
        function readEntries() {
            // According to the FileSystem API spec, readEntries() must be
            // called until it calls the callback with an empty array. Seriously??
            reader.readEntries((entries) => {
                if (!entries.length) {
                    // Done iterating this particular directory
                    resolve(Promise.all(iterationAttempts));
                } else {
                    // Add a list of promises for each directory entry. If the entry
                    // is itself a directory, then that promise won't resolve until
                    // it is fully traversed.
                    iterationAttempts.push(Promise.all(entries.map((ientry) => {
                        if (ientry.isFile) {
                            // DO SOMETHING WITH FILES
                            return ientry;
                        }
                        // DO SOMETHING WITH DIRECTORIES
                        return traverseDirectory(ientry);
                    })));
                    // Try calling readEntries() again for the same dir, per the spec
                    readEntries();
                }
            }, error => reject(error));
        }
        readEntries();
    });
}
Here's a drop event handler:
function dropHandler(evt) {
    evt.preventDefault();
    const data = evt.dataTransfer.items;
    for (let i = 0; i < data.length; i += 1) {
        const item = data[i];
        const entry = item.webkitGetAsEntry();
        traverseDirectory(entry).then(result => console.log(result));
    }
}
The result variable at the end contains an array mirroring the tree structure of the folder you dragged and dropped.
For example, here is a Git repo for my own site, run through the above code. Here's the Git repo for comparison: https://github.com/tomjn/tomjn.com
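If you want a flat list of files rather than the nested arrays, a small helper can flatten the resolved result. A sketch; it assumes the nesting produced by traverseDirectory above, with FileEntry objects at the leaves:
// Flatten the nested arrays returned by traverseDirectory into
// a single depth-first list of FileEntry objects.
function flattenEntries(tree) {
    return tree.reduce(
        (flat, node) => flat.concat(Array.isArray(node) ? flattenEntries(node) : node),
        []
    );
}

traverseDirectory(entry).then((result) => {
    console.log(flattenEntries(result)); // all FileEntry objects
});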
