I have one page where I want to accept one file and 3-4 user inputs. I was able to achieve this using the connect-multiparty middleware, but the name of the uploaded file is gibberish (with the correct extension); the uploaded file's contents are correct, though.
I want to achieve the following:
Set name of file being uploaded
Create copy of file with different name if the file with same name exists in target directory
Set a max limit on file size and restrict the file type
I searched the net but could not find a working example. My complete code is below:
var express = require('express');
var router = express.Router();
var fs = require('fs');
var multiparty = require('connect-multiparty');

var multipartyMiddleware = multiparty({
    uploadDir: '../public/uploads'
});

router.post('/api/user/uploads', multipartyMiddleware, function(req, res) {
    var file = req.files.file;
    console.log(file.name);
    console.log(file.type);
    console.log(file);
    console.log(req.body.test);
    console.log("The file was saved!");
    res.json({
        success: 1
    });
    return;
});

module.exports = router;
You will have to rename the file after it has been saved, using fs.rename(), or modify the source code of multiparty inside node_modules. Inside their code there is a function that does the renaming:
function uploadPath(baseDir, filename) {
    var ext = path.extname(filename).replace(FILE_EXT_RE, '$1');
    var name = randoString(18) + ext;
    return path.join(baseDir, name);
}
I have made some modifications to their code so I could use it a little bit like multer:
https://gist.github.com/Edudjr/999c80df952458cc583272a5161b4d08
You would use it like so:
var EXT_RE = /(\.[_\-a-zA-Z0-9]{0,16}).*/g;
var options = {
    uploadDir: path.join(__dirname, '../public/images'),
    filename: function(filename, callback) {
        var name = filename.replace(EXT_RE, "");
        callback(name + '-YEAH.png');
    }
}
var form = new multiparty.Form(options);
They strongly advise you to save the files in the temp folder to prevent DoS on your server.
https://github.com/pillarjs/multiparty/issues/64
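For reference, here is a minimal sketch (my own, not from the multiparty docs) of the fs.rename() route: let the middleware save into its upload/temp dir, then validate and move the file yourself. The size limit, extension whitelist and the "(n)" suffix scheme are assumptions; also note fs.rename can fail with EXDEV if the temp dir is on a different filesystem, in which case you would copy and unlink instead.

var path = require('path');

var MAX_SIZE = 5 * 1024 * 1024;         // assumed limit: 5 MB
var ALLOWED = ['.png', '.jpg', '.pdf']; // assumed whitelist

router.post('/api/user/uploads', multipartyMiddleware, function(req, res) {
    var file = req.files.file;
    var ext = path.extname(file.name).toLowerCase();

    // restrict type and size, discarding the temp file on failure
    if (ALLOWED.indexOf(ext) === -1 || file.size > MAX_SIZE) {
        fs.unlink(file.path, function() {});
        return res.status(400).json({ success: 0 });
    }

    // keep the original name; append (1), (2), ... if it is taken
    var dir = path.join(__dirname, '../public/uploads');
    var base = path.basename(file.name, ext);
    var target = path.join(dir, file.name);
    var counter = 1;
    while (fs.existsSync(target)) {
        target = path.join(dir, base + '(' + counter++ + ')' + ext);
    }

    fs.rename(file.path, target, function(err) {
        if (err) return res.status(500).json({ success: 0 });
        res.json({ success: 1 });
    });
});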
You can access the path easily; I used this to get the file name:
console.log(req.files.uploads.path.split('\\')[1]);
I am uploading from Angular.
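Note that splitting on '\\' only works with Windows-style paths; path.basename gets the same thing portably (a small sketch, same req shape as above):

var path = require('path');
// works with the platform's separator on Windows and Linux alike
console.log(path.basename(req.files.uploads.path));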
Related
As my self-learning with Node.js is still ongoing, I'm of course struggling with some tasks, and I'm coming to SO again for help.
I intend to create a task that reads the folders inside a specific folder, where each folder has an index.html file.
When it finds the index.html file, it should pass the path to that file to a tag as a link.
Example: mainFolder - subfolder - index.html
<ul><li>./mainFolder/subfolder/index.html</li></ul>
Basically, I started scratching the surface of this task and now I'm stuck.
///// create list of links
const path = require('path');
const fs = require('fs');

// joining path of directory
const directoryPath = path.join(__dirname, './dist/mainFolder');

// passing directoryPath and callback function
fs.readdir(directoryPath, function (err, files) {
    // handling error
    if (err) {
        return console.log('Unable to scan directory: ' + err);
    }
    // listing all files using forEach
    files.forEach(function (file) {
        var listLinks = [file];
        var mainInd = './dist/mainFolder';
        mainInd.ul = document.createElement('ul');
        var l;
        mainInd.document.getElementById('previewList').appendChild(ul);
        listLinks.forEach(renderLinkList);
        function renderLinkList(element) {
            var li = document.createElement('li');
            //var a = document.createElement('a');
            //a.setAttribute('href', 'https://www.mypage.com');
            //li.setAttribute('class','item');
            ul.appendChild(li);
            //li.appendChild(a);
            l = (document.createTextNode(element));
            li.innerHTML = li.innerHTML;
        }
        console.log(file);
    });
});
Any help will be appreciated.
Kind regards,
Fernando
You can use the following code, which:
reads all the files & folders inside the main folder
iterates through the files & folders
skips all files
and finally constructs the path to the index.html file inside each subfolder
This snippet prints the absolute file path for each index.html file.
You can make the file paths relative as well if needed (see the sketch after the update below).
const path = require("path");
const fs = require("fs");

const index_files = [];
const parent_directory_path = "";
const directoryPath = path.join(__dirname, parent_directory_path);

fs.readdir(directoryPath, function (error, files) {
    if (!error) {
        files.forEach(function (file) {
            // resolve each entry against the parent directory, not __dirname,
            // so a non-empty parent_directory_path also works
            const currDirectoryPath = path.join(directoryPath, file);
            if (fs.existsSync(currDirectoryPath) && fs.lstatSync(currDirectoryPath).isDirectory()) {
                index_files.push(currDirectoryPath + "/index.html");
            }
        });
        console.log(index_files);
    }
});
Update
As the fs.readdir API provides all the files and folders inside the main folder, we need a condition to skip plain files.
fs.existsSync(currDirectoryPath) && fs.lstatSync(currDirectoryPath).isDirectory() checks that the current entry exists and is a folder.
Also, this snippet assumes the parent directory is the current directory. If you want to use a different parent directory, set its path in the parent_directory_path variable.
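If you prefer relative paths, as mentioned above, path.relative can derive them from the absolute ones; a small sketch on top of the snippet's index_files array:

// inside the readdir callback, once index_files is filled:
const relative_files = index_files.map(function (file) {
    return "./" + path.relative(__dirname, file);
});
console.log(relative_files); // e.g. ./mainFolder/subfolder/index.html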
I want to be able to drag and drop an Excel file, but for some reason when declaring my workbook with var workbook = XLSX.read(data, {type: rABS ? 'binary' : 'array'}); it says XLSX is not defined.
I think I'm missing something to connect this index.js to server.js, which has var XLSX = require('xlsx'); in it. I've looked and looked online and haven't found the right fix. I would like to avoid using a module to require() inside of HTML.
What I think is the important code:
server.js:
var express = require("express");
var app = express();
var XLSX = require('xlsx');
var fs = require('fs');
var JSON = require('JSON');
var path = require('path');
index.js:
$(document).ready(function() {
    var rABS = true; // true: readAsBinaryString ; false: readAsArrayBuffer
    $excelHolder.on('drop', function(e) {
        e.preventDefault();
        var files = e.originalEvent.dataTransfer.files;
        var file = files[0];
        var reader = new FileReader();
        console.log("got to before reader");
        reader.onload = function(e) {
            console.log("got to reader.onload");
            var data = e.target.result;
            var workbook = XLSX.read(data, {type: rABS ? 'binary' : 'array'});
            var sheet_name_list = workbook.SheetNames;
            var excelObj = XLSX.utils.sheet_to_json(workbook.Sheets[sheet_name_list[0]]);
            var json = JSON.stringify(excelObj);
            var callback = "looks like it worked";
            console.log("did it upload?");
            fs.writeFile('excelfile.json', json, function(err) {
                (err) ? console.error(err) : console.log(callback.toString());
            });
            // preview?
        };
        if (rABS) reader.readAsBinaryString(file); else reader.readAsArrayBuffer(file);
    });
});
index.html:
<div class="huge">22</div>
<div>Uploads!</div>
<input name="uploads[]" type="file" accept=".xls,.xlsx,.ods,.csv" style="display: none;" id="excelInput">
Any help is much appreciated.
I can see a few problems here:
fs and path are modules that are built into Node.js, hence they are not available in the browser.
You'll need some kind of build tool for your JS if you want to use require for client-side code. Browserify and Webpack are good places to start.
If you don't want to get into that (it's complex, so I wouldn't blame you!) you can add the XLSX module to the browser with a <script> tag: https://www.npmjs.com/package/xlsx#installation - it seems like it should work.
There are some examples on the XLSX GitHub page, one of which includes drag & drop and may help you get where you want: https://github.com/SheetJS/js-xlsx (and specifically https://github.com/SheetJS/js-xlsx/tree/master/demos/datagrid)
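Once XLSX is loaded in the browser, the remaining issue is the fs.writeFile call, which also cannot run client-side; the usual pattern is to POST the parsed data to the server and write the file there. A minimal sketch, assuming a hypothetical /upload route (the route name and body shape are mine, not from the question):

// index.js (browser): replace fs.writeFile with a POST to the server
fetch('/upload', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify(excelObj)
}).then(function (res) {
    console.log('upload status:', res.status);
});

// server.js (Node): fs is available here
app.use(express.json()); // Express 4.16+; otherwise use body-parser
app.post('/upload', function (req, res) {
    fs.writeFile('excelfile.json', JSON.stringify(req.body), function (err) {
        err ? res.sendStatus(500) : res.sendStatus(200);
    });
});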
If you forget to add the library via CDN or to install it, you will get this error.
Add this script (or a newer version):
<script type="text/javascript" src="https://unpkg.com/xlsx@0.15.1/dist/xlsx.full.min.js"></script>
I am writing an Express app that takes in a base64-encoded string that represents an image. Right now, I'm not really sure how I can take that string and upload the image to AWS S3, so I'm reading in the encoded image string, decoding it, writing a file using fs, and then trying to upload. I have this working for an endpoint that just takes in a raw file, and all of its content is correctly uploaded to AWS S3.
Now when I try to do what I described above, I'm able to upload to S3, but the file is 0 KB and empty, and I'm not sure why. I tested just taking the string data and writing it to a test file, and that works. However, when I try uploading to S3, the file shows up but it's empty. Here is my code:
router.post('/images/tags/nutritionalInformation/image/base64encoded', function (req, res) {
    console.log(req.body.imageString);
    var base64Stream = req.body.imageString;
    var imgDecodedBuffer = decodeBase64Image(base64Stream);
    console.log(imgDecodedBuffer);

    // write to image file
    var prefix = guid().toString() + ".jpg";
    var filePath = './uploads/' + prefix;
    console.log(filePath);
    fs.writeFile(filePath, imgDecodedBuffer.data, function(err) {
        console.log(err);
    });

    var stream = fs.createReadStream(filePath);
    console.log(stream);
    return s3fsImpl.writeFile(prefix, stream).then(function () {
        fs.unlink(filePath, function (err) {
            if (err) {
                console.error(err);
            }
        });
    });
})
Here are the relevant import statements:
var fs = require('fs');
var s3fs = require('s3fs');
var multiparty = require('connect-multiparty'),
    multipartyMiddleware = multiparty();

var s3fsImpl = new s3fs('blahblah', {
    accessKeyId: 'ACCESS_KEY_ID',
    secretAccessKey: 'SECRET'
});
Any help would be greatly appreciated!
If you just pass in the buffer, which I presume is in your imgDecodedBuffer.data value, it should work.
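A minimal sketch of that change, reusing the names from the question (the response body is my own placeholder); the intermediate file, and the race between fs.writeFile and fs.createReadStream, both go away:

router.post('/images/tags/nutritionalInformation/image/base64encoded', function (req, res) {
    var imgDecodedBuffer = decodeBase64Image(req.body.imageString);
    var prefix = guid().toString() + ".jpg";
    // hand the decoded Buffer straight to s3fs; no temp file needed
    s3fsImpl.writeFile(prefix, imgDecodedBuffer.data).then(function () {
        res.json({ uploaded: prefix });
    }, function (err) {
        console.error(err);
        res.status(500).end();
    });
});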
In our Meteor app, the client uploads some files using collectionFS-filesystem, which I store in an uploads folder in the root directory of my app.
// My CFS uploads collection
uploads = new FS.Collection("uploads", {
    stores: [new FS.Store.FileSystem("uploads", {path: "~/uploads"})]
});
Later, I want to save the files to the database using collectionFS-gridFS.
// My CFS grid collection
files = new FS.Collection("files", {
    stores: [new FS.Store.GridFS("files")]
});
How do I read the data from the file on the server so that I can store the file in the db? Can I use the file from the CFS-filesystem collection and convert it to a CFS-gridFS file in any way?
Thanks in advance.
I have accepted the answer by @perusopersonale. However, below is the approach I used to achieve this, based on documentation from here and here:
uploads.find(document_id).forEach(function (fileObj) {
    var type = fileObj.type();
    var name = fileObj.name();
    var readStream = fileObj.createReadStream(fileObj.collectionName);
    var newFile = new FS.File();
    newFile.attachData(readStream, {type: type});
    newFile.name(name);
    files.insert(newFile);
});
I don't understand why you want to use both. However, I had to implement something similar (read from a CFS filesystem store, do something, then reinsert into another db); here is a modified version that should accomplish what you are trying to do:
var fileObj = uploads.findOne(objId);
var newName = "newfilename"; // new file name
// fileObj.copies.uploads.key contains the filename for the "uploads" store.
// Note: unlike CFS, Node's fs does not expand "~", so you may need to
// spell out the home directory here.
fs.readFile("~/uploads/" + fileObj.copies.uploads.key, function (err, data) {
    var newFile = new FS.File();
    newFile.attachData(data, {type: 'application/octet-stream'}, function (error) {
        newFile.name(newName);
        files.insert(newFile); // the "files" collection from the question
    });
});
I have the following JS function, which serves as a first prototype for a Mozilla Thunderbird extension.
The goal is to connect to a server, download a sample file, then unzip it and store the contents in the Thunderbird profile folder.
Now this all works fine, except that the execution of the function stops after creating the zip file on the file system. So I have to run the function again to get the second part executed, which extracts the user.js file from the zip file.
Any ideas what the problem could be?
function downloadFile(httpLoc) {
    // get profile directory
    var file = Components.classes["@mozilla.org/file/directory_service;1"]
        .getService(Components.interfaces.nsIProperties)
        .get("ProfD", Components.interfaces.nsIFile);
    var profilePath = file.path;
    // change profile directory to native style
    profilePath = profilePath.replace(/\\/gi, "\\\\");
    profilePath = profilePath.toLowerCase();
    // download the zip file
    try {
        // new obj_URI object
        var obj_URI = Components.classes["@mozilla.org/network/io-service;1"]
            .getService(Components.interfaces.nsIIOService)
            .newURI(httpLoc, null, null);
        // new file object
        var obj_TargetFile = Components.classes["@mozilla.org/file/local;1"]
            .createInstance(Components.interfaces.nsILocalFile);
        // set to download the zip file into the profile directory
        obj_TargetFile.initWithPath(profilePath + "\/" + "test.zip");
        // if the zip file doesn't exist, create it
        if (!obj_TargetFile.exists()) {
            alert("zip file is being created");
            obj_TargetFile.create(0x00, 0644);
        }
        // new persistence object
        var obj_Persist = Components.classes["@mozilla.org/embedding/browser/nsWebBrowserPersist;1"]
            .createInstance(Components.interfaces.nsIWebBrowserPersist);
        // with persist flags if desired
        const nsIWBP = Components.interfaces.nsIWebBrowserPersist;
        const flags = nsIWBP.PERSIST_FLAGS_REPLACE_EXISTING_FILES;
        obj_Persist.persistFlags = flags | nsIWBP.PERSIST_FLAGS_FROM_CACHE;
        // save file to target
        obj_Persist.saveURI(obj_URI, null, null, null, null, obj_TargetFile);
    } catch (e) {
        alert(e);
    } finally {
        // unzip the user.js file to the profile directory:
        // create a zipReader, open the zip file
        var zipReader = Components.classes["@mozilla.org/libjar/zip-reader;1"]
            .createInstance(Components.interfaces.nsIZipReader);
        zipReader.open(obj_TargetFile);
        // new file object, that's where the user.js will be extracted
        var obj_UnzipTarget = Components.classes["@mozilla.org/file/local;1"]
            .createInstance(Components.interfaces.nsILocalFile);
        // set path for the user.js
        obj_UnzipTarget.initWithPath(profilePath + "\/" + "user.js");
        // if user.js doesn't exist, create it
        if (!obj_UnzipTarget.exists()) {
            alert("user.js is being created");
            obj_UnzipTarget.create(0x00, 0644);
        }
        // extract the user.js out of the zip file, to the specified path
        zipReader.extract("user.js", obj_UnzipTarget);
        zipReader.close();
    }
}

var hello = {
    click: function() {
        downloadFile("http://pse2.iam.unibe.ch/profiles/profile.zip");
    },
};
saveURI is asynchronous, so you need to set a progress listener on the persist object to know when the transfer has finished, and only start the unzip step then.
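A minimal sketch of such a listener, set before calling saveURI; unzipUserJs() is a hypothetical helper holding the zipReader code that currently sits in the finally block:

obj_Persist.progressListener = {
    onProgressChange: function(aWebProgress, aRequest, aCurSelfProgress,
                               aMaxSelfProgress, aCurTotalProgress,
                               aMaxTotalProgress) {},
    onStateChange: function(aWebProgress, aRequest, aStateFlags, aStatus) {
        // STATE_STOP fires once the download has finished
        if (aStateFlags & Components.interfaces.nsIWebProgressListener.STATE_STOP) {
            unzipUserJs(); // safe to open the zip now
        }
    }
};
obj_Persist.saveURI(obj_URI, null, null, null, null, obj_TargetFile);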