renameSync not working when referencing variables - javascript

I have created a script to migrate a file from GridFS to the local C: drive and then rename it, since I am unable to use variables with writeFileSync to dynamically name the file with its extension included.
The issue I am running into is that when I use any variables in the location path, it returns a "no such file or directory" error. When I explicitly write out the string and use no variables, it finds the file with no issue and renames it correctly. Same path, just two different methods for how the path is created. Might this be an issue caused by JavaScript's pass-by-reference vs. pass-by-value? No idea at this point.
I am debugging by console-logging all paths as I go to confirm that the final destination string is correct, and I have already verified that it is exactly the same when built from variables as when written out as a single string.
fs.writeFileSync(__dirname + `/../public/uploads/${tempFile.metadata.parent}/${tempId}/tempfile`, data);

unprocessedPath1 = (__dirname + `/../public/uploads/${tempFile.metadata.parent}/${tempId}/tempfile`); // DOES NOT WORK
unprocessedPath2 = (__dirname + `/../public/uploads/${tempFile.metadata.parent}/${tempId}/${tempFile.filename}`); // DOES NOT WORK
// RETURNS: C:\Sites\CRM\public\uploads\5d56ebd88f6b3b09f0068f5c\5d56ece48f6b3b09f0068f60\tempfile
// RETURNS: C:\Sites\CRM\public\uploads\5d56ebd88f6b3b09f0068f5c\5d56ece48f6b3b09f0068f60\tempfile1

// unprocessedPath1 = `C:/Sites/CRM/public/uploads/5d56ebd88f6b3b09f0068f5c/5d56ece48f6b3b09f0068f60/tempfile`; // WORKS
// unprocessedPath2 = `C:/Sites/CRM/public/uploads/5d56ebd88f6b3b09f0068f5c/5d56ece48f6b3b09f0068f60/tempfile1`; // WORKS
// RETURNS: C:\Sites\CRM\public\uploads\5d56ebd88f6b3b09f0068f5c\5d56ece48f6b3b09f0068f60\tempfile
// RETURNS: C:\Sites\CRM\public\uploads\5d56ebd88f6b3b09f0068f5c\5d56ece48f6b3b09f0068f60\tempfile1

var correctPath1 = path.normalize(unprocessedPath1);
var correctPath2 = path.normalize(unprocessedPath2);
fs.rename(correctPath1, correctPath2, function(err) {
    if (err) console.log('RENAME ERROR: ' + err);
});
RENAME ERROR: Error: ENOENT: no such file or directory, rename 'C:\Sites\CRM\public\uploads\5d56ebd88f6b3b09f0068f5c\5d56ece48f6b3b09f0068f60\tempfile' -> 'C:\Sites\CRM\public\uploads\5d56ebd88f6b3b09f0068f5c\5d56ece48f6b3b09f0068f60\2019-08-16T10:50.wav'
EDIT: Confirmed that the file exists at the source path before the rename attempt. I added a console.log(fs.existsSync(correctPath1)) line before the rename, which logs:
true
RENAME ERROR: Error: ENOENT: no such file or directory, rename 'C:\Sites\CRM\public\uploads\5d56ebd88f6b3b09f0068f5c\5d56ece48f6b3b09f0068f60\tempfile' -> 'C:\Sites\CRM\public\uploads\5d56ebd88f6b3b09f0068f5c\5d56ece48f6b3b09f0068f60\2019-08-16T10:50.wav'
EDIT: Here is the entire request handler.
//DOWNLOAD GRIDFS DATABASE'S OLD FILES AND SAVE TO LOCAL DRIVE
router.get('/export/gridfs', middleware.isLoggedIn, (req, res) => {
    gfs = Grid(conn.db, mongoose.mongo);
    gfs.collection('uploads').find().toArray((err, files) => {
        console.log("OPENING FILES OBJECT: ");
        console.log(util.inspect(files, false, null, true /* enable colors */));
        files.forEach(file => {
            console.log("FOR EACH FILE");
            tempFile = file;
            console.log("CREATING FILE PRE SAVE");
            File.create(tempFile, function(err, file){
                console.log("FILE PRE CREATED");
                if(err){
                    console.log("ERROR OCCURED: " + err);
                    console.log("SKIPPING FILE");
                } else {
                    console.log("NO ERROR");
                    console.log(util.inspect(file, false, null, true /* enable colors */))
                    console.log(util.inspect(tempFile, false, null, true /* enable colors */))
                    console.log("MAKING NEW FOLDER IN LEAD ID USING FILE ID");
                    console.log('public/uploads/' + tempFile.metadata.parent +"/"+ file._id);
                    mkdirp('public/uploads/' + tempFile.metadata.parent +"/"+ file._id, function() {});
                    console.log("FOLDER CREATED");
                    console.log("File Before");
                    console.log(file);
                    console.log("OPENING file OBJECT: ");
                    console.log(util.inspect(file, {showHidden: false, depth: null}))
                    console.log("OPENING tempfile OBJECT: ");
                    console.log(util.inspect(tempFile, false, null, true /* enable colors */))
                    file.filename = tempFile.filename;
                    file.contentType = tempFile.mimetype;
                    file.fileLocation = `/public/uploads/${tempFile.metadata.parent}/${file._id}/${tempFile.filename}`;
                    file.metadata = { parent: tempFile.metadata.parent };
                    file.createdAt = tempFile.uploadDate;
                    console.log("filename: " + file.filename);
                    console.log("contentType" + file.contentType);
                    console.log("fileLocation" + file.fileLocation);
                    console.log("metadata: " + file.metadata);
                    console.log("File After");
                    console.log("FILE: " + file);
                    console.log("tempFile: " + tempFile);
                    console.log(tempFile.newFileName);
                    console.log(tempFile.originalname);
                    console.log(tempFile.mimetype);
                    console.log(tempFile.contentType);
                    console.log("File After");
                    console.log(file);
                    //save note
                    file.save();
                    console.log("File Saved");
                    const tempId = file._id;
                    console.log("tempId: " + tempId);
                    console.log("OPENING tempId OBJECT: ");
                    console.log(util.inspect(tempId, false, null, true /* enable colors */));
                    console.log("PROCESSING FILE");
                    console.log(util.inspect(file, false, null, true /* enable colors */));
                    console.log("Starting gridfs stream");
                    gfs.files.find({ _id: new ObjectId(file._id) }, (err, file) => {
                        // Check if file
                        console.log("CHECKING FOR FILE ENTRY");
                        if (!file || file.length === 0) {
                            return res.status(404).json({
                                err: 'No file exists'
                            });
                        }
                        console.log("FILE ENTRY FOUND");
                        let data = [];
                        let readstream = gfs.createReadStream({
                            filename: tempFile.filename
                        });
                        console.log("Creating read stream");
                        readstream.on('data', function(chunk) {
                            console.log("PUSHING CHUNK");
                            console.log(chunk);
                            data.push(chunk);
                            console.log("PUSHED CHUNK");
                        });
                        readstream.on('end', function() {
                            console.log("ENDING STREAM");
                            data = Buffer.concat(data);
                            console.log("WRITING TO LOCAL DRIVE");
                            var fileExt = path.extname(tempFile.filename);
                            console.log(fileExt)
                            fs.writeFileSync(__dirname + `/../public/uploads/${tempFile.metadata.parent}/${tempId}/tempfile`, data);
                            console.log("RETURNING FILE TO CLIENT");
                            console.log("RENAMING FILE AT LOCATION WITH EXTENSION");
                            var unprocessedPath1 = (__dirname + `/../public/uploads/${tempFile.metadata.parent}/${tempId}/tempfile`);
                            var unprocessedPath1String = `C:/Sites/CRM/public/uploads/5d56ebd88f6b3b09f0068f5c/5d56ece48f6b3b09f0068f60/tempfile`; //works
                            console.log("Path1: " + unprocessedPath1);
                            console.log("Path1String: " + unprocessedPath1String);
                            var unprocessedPath2 = (__dirname + `/../public/uploads/${tempFile.metadata.parent}/${tempId}/${tempFile.filename}`);
                            // unprocessedPath2 = `C:/Sites/CRM/public/uploads/5d56ebd88f6b3b09f0068f5c/5d56ece48f6b3b09f0068f60/tempfile1`; //works
                            console.log("Path2: " + unprocessedPath2);
                            var correctPath1 = path.normalize(unprocessedPath1);
                            console.log("NORMALIZED Path1: "+ correctPath1);
                            var correctPath2 = path.normalize(unprocessedPath2);
                            console.log("NORMALIZED Path2: " + correctPath2);
                            // correctPath1 = String(correctPath1);
                            // correctPath2 = String(correctPath2);
                            console.log(fs.existsSync(correctPath1))
                            console.log(fs.existsSync(unprocessedPath1String))
                            fs.rename(correctPath1, correctPath2, function(err) {
                                if ( err ) console.log('RENAME ERROR: ' + err);
                                console.log("RENAME COMPLETE");
                            });
                        });
                        readstream.on('error', function(err) {
                            console.log('An error occured!', err);
                            throw err;
                        });
                        res.send("EXPORTED");
                    });
                }
            });
        });
    });
});
EDIT: And here is the log.
OPENING FILES OBJECT:
[ { _id: 5d56ece48f6b3b09f0068f60,
    length: 221228,
    chunkSize: 261120,
    uploadDate: 2019-08-16T17:50:30.212Z,
    filename: '2019-08-16T10:50.wav',
    md5: '47fbec41801f73efc53d7e8f73b4e596',
    contentType: 'audio/wav',
    metadata: { parent: '5d56ebd88f6b3b09f0068f5c' } } ]
FOR EACH FILE
CREATING FILE PRE SAVE
FILE PRE CREATED
NO ERROR
{ _id: 5d56ece48f6b3b09f0068f60,
  filename: '2019-08-16T10:50.wav',
  contentType: 'audio/wav',
  metadata: { parent: '5d56ebd88f6b3b09f0068f5c' },
  createdAt: 2019-08-22T02:50:55.594Z,
  __v: 0 }
{ _id: 5d56ece48f6b3b09f0068f60,
  length: 221228,
  chunkSize: 261120,
  uploadDate: 2019-08-16T17:50:30.212Z,
  filename: '2019-08-16T10:50.wav',
  md5: '47fbec41801f73efc53d7e8f73b4e596',
  contentType: 'audio/wav',
  metadata: { parent: '5d56ebd88f6b3b09f0068f5c' } }
MAKING NEW FOLDER IN LEAD ID USING FILE ID
public/uploads/5d56ebd88f6b3b09f0068f5c/5d56ece48f6b3b09f0068f60
FOLDER CREATED
File Before
{ _id: 5d56ece48f6b3b09f0068f60,
  filename: '2019-08-16T10:50.wav',
  contentType: 'audio/wav',
  metadata: { parent: '5d56ebd88f6b3b09f0068f5c' },
  createdAt: 2019-08-22T02:50:55.594Z,
  __v: 0 }
OPENING file OBJECT:
{ _id: 5d56ece48f6b3b09f0068f60,
  filename: '2019-08-16T10:50.wav',
  contentType: 'audio/wav',
  metadata: { parent: '5d56ebd88f6b3b09f0068f5c' },
  createdAt: 2019-08-22T02:50:55.594Z,
  __v: 0 }
OPENING tempfile OBJECT:
{ _id: 5d56ece48f6b3b09f0068f60,
  length: 221228,
  chunkSize: 261120,
  uploadDate: 2019-08-16T17:50:30.212Z,
  filename: '2019-08-16T10:50.wav',
  md5: '47fbec41801f73efc53d7e8f73b4e596',
  contentType: 'audio/wav',
  metadata: { parent: '5d56ebd88f6b3b09f0068f5c' } }
filename: 2019-08-16T10:50.wav
contentTypeundefined
fileLocation/public/uploads/5d56ebd88f6b3b09f0068f5c/5d56ece48f6b3b09f0068f60/2019-08-16T10:50.wav
metadata: { parent: '5d56ebd88f6b3b09f0068f5c' }
File After
FILE: { _id: 5d56ece48f6b3b09f0068f60,
  filename: '2019-08-16T10:50.wav',
  metadata: { parent: '5d56ebd88f6b3b09f0068f5c' },
  createdAt: 2019-08-16T17:50:30.212Z,
  __v: 0,
  fileLocation: '/public/uploads/5d56ebd88f6b3b09f0068f5c/5d56ece48f6b3b09f0068f60/2019-08-16T10:50.wav' }
tempFile: [object Object]
undefined
undefined
undefined
audio/wav
File After
{ _id: 5d56ece48f6b3b09f0068f60,
  filename: '2019-08-16T10:50.wav',
  metadata: { parent: '5d56ebd88f6b3b09f0068f5c' },
  createdAt: 2019-08-16T17:50:30.212Z,
  __v: 0,
  fileLocation: '/public/uploads/5d56ebd88f6b3b09f0068f5c/5d56ece48f6b3b09f0068f60/2019-08-16T10:50.wav' }
File Saved
tempId: 5d56ece48f6b3b09f0068f60
OPENING tempId OBJECT:
5d56ece48f6b3b09f0068f60
PROCESSING FILE
{ _id: 5d56ece48f6b3b09f0068f60,
  filename: '2019-08-16T10:50.wav',
  metadata: { parent: '5d56ebd88f6b3b09f0068f5c' },
  createdAt: 2019-08-16T17:50:30.212Z,
  __v: 0,
  fileLocation: '/public/uploads/5d56ebd88f6b3b09f0068f5c/5d56ece48f6b3b09f0068f60/2019-08-16T10:50.wav' }
Starting gridfs stream
CHECKING FOR FILE ENTRY
FILE ENTRY FOUND
Creating read stream
(node:3008) DeprecationWarning: GridStore is deprecated, and will be removed in a future version. Please use GridFSBucket instead
PUSHING CHUNK
<Buffer 52 49 46 46 24 60 03 00 57 41 56 45 66 6d 74 20 10 00 00 00 01 00 01 00 80 bb 00 00 00 ee 02 00 02 00 10 00 64 61 74 61 00 60 03 00 00 00 00 00 00 00 ... >
PUSHED CHUNK
(node:3008) DeprecationWarning: GridStore is deprecated, and will be removed in a future version. Please use GridFSBucket instead
PUSHING CHUNK
<Buffer 52 49 46 46 24 60 03 00 57 41 56 45 66 6d 74 20 10 00 00 00 01 00 01 00 80 bb 00 00 00 ee 02 00 02 00 10 00 64 61 74 61 00 60 03 00 00 00 00 00 00 00 ... >
PUSHED CHUNK
PUSHING CHUNK
<Buffer 52 49 46 46 24 60 03 00 57 41 56 45 66 6d 74 20 10 00 00 00 01 00 01 00 80 bb 00 00 00 ee 02 00 02 00 10 00 64 61 74 61 00 60 03 00 00 00 00 00 00 00 ... >
PUSHED CHUNK
ENDING STREAM
WRITING TO LOCAL DRIVE
.wav
RETURNING FILE TO CLIENT
RENAMING FILE AT LOCATION WITH EXTENSION
Path1: C:\Sites\CRM\routes/../public/uploads/5d56ebd88f6b3b09f0068f5c/5d56ece48f6b3b09f0068f60/tempfile
Path1String: C:/Sites/CRM/public/uploads/5d56ebd88f6b3b09f0068f5c/5d56ece48f6b3b09f0068f60/tempfile
Path2: C:\Sites\CRM\routes/../public/uploads/5d56ebd88f6b3b09f0068f5c/5d56ece48f6b3b09f0068f60/2019-08-16T10:50.wav
NORMALIZED Path1: C:\Sites\CRM\public\uploads\5d56ebd88f6b3b09f0068f5c\5d56ece48f6b3b09f0068f60\tempfile
NORMALIZED Path2: C:\Sites\CRM\public\uploads\5d56ebd88f6b3b09f0068f5c\5d56ece48f6b3b09f0068f60\2019-08-16T10:50.wav
true
true
RENAME ERROR: Error: ENOENT: no such file or directory, rename 'C:\Sites\CRM\public\uploads\5d56ebd88f6b3b09f0068f5c\5d56ece48f6b3b09f0068f60\tempfile' -> 'C:\Sites\CRM\public\uploads\5d56ebd88f6b3b09f0068f5c\5d56ece48f6b3b09f0068f60\2019-08-16T10:50.wav'
RENAME COMPLETE

Just saw the new code you added.
This looks to me like shared variables inside a loop using async operations.
Your loop is going to conflict with itself. The .forEach() doesn't wait for your async operations to finish, so you'll have multiple iterations of the loop running and trying to use the same variables. You can fix this either by stopping the parallel running of operations in the loop (see the sequential sketch at the end of this answer) or by very carefully declaring variables with let so they are unique to each iteration of the loop, using NO shared variables that are ever modified.
The reason there's a problem when you assign to a variable is that the variable is in a shared scope, and ALL the iterations of the loop are trying to use the same variable, some of them conflicting with one another.
All variables modified inside the .forEach() need to be declared internally to the .forEach() so they are unique and separate for each iteration of the loop. None of them should be declared at a higher scope.
The variable tempFile is part of the problem. It will be overwritten by subsequent iterations of the .forEach() loop BEFORE some of your asynchronous operations try to use it. You have so many nested async operations that I haven't studied every single variable used inside an async callback to see which other ones may have this same issue.
So, at the point where you do this:
var unprocessedPath1 = (__dirname + `/../public/uploads/${tempFile.metadata.parent}/${tempId}/tempfile`);
tempFile may well have been overwritten by the next iteration of the loop, because this happens in deeply nested asynchronous callbacks that get called after the .forEach() loop has already completed and other values have potentially been written to tempFile.
FYI, you show no declaration at all for the tempFile variable, so perhaps it's declared at a higher scope or is an accidental module-level variable. Just declaring it as:
let tempFile = file;
at the top of your .forEach() callback will give each invocation of the loop its own copy of that variable and will fix at least this first issue.
As a simplified example of the problem, run this snippet:
const data = [1, 2, 3, 4];
const base = "base";
let tempFile;

data.forEach(function(num) {
    tempFile = base + num;
    setTimeout(function() {
        console.log(tempFile);
    }, 1);
});

I use setTimeout() here for simplicity's sake as a simple asynchronous callback, but the issue is the same in your code because you're assigning to tempFile on each iteration of the loop and then referencing it in asynchronous callbacks.
console.log(tempFile) does not show the desired value because the .forEach() loop runs to completion before a single async callback gets called (a product of the node.js event loop and how asynchronous operations use it).
Whereas, if you move the declaration of tempFile into the loop itself, there's a separate copy of that variable unique to each iteration of the loop and you get the desired output:
const data = [1, 2, 3, 4];
const base = "base";

data.forEach(function(num) {
    let tempFile = base + num;
    setTimeout(function() {
        console.log(tempFile);
    }, 1);
});
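And if you'd rather take the first fix, serializing the operations instead of running them in parallel, a minimal sketch uses for...of with async/await. Here processOneFile is a hypothetical Promise-returning helper standing in for all the nested callback work; the real code would need its callback-style steps wrapped in Promises (util.promisify can help with that):

async function processAll(files) {
    // for...of + await runs the iterations one at a time, in order
    for (const file of files) {
        // declared per-iteration, so nothing is shared between iterations
        const tempFile = file;
        await processOneFile(tempFile); // hypothetical helper, not from the original code
    }
}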

Related

How do I get multiple images from aws-s3 in one request and return the base64 datastream? [MERN app]

I have a page in my app which has 3 images stored in AWS S3.
I've been able to get it to work for a single image, but I can't figure out how to get all of them at once. As an aside, I'm using a POST route because I'm sending an array of S3 "key" values, which are stored in my database when the images get uploaded to S3.
// fn to get the object from the bucket
async function getImage(img) {
    const data = s3.getObject({
        Bucket: bucketName,
        Key: img
    }).promise()
    return data
}
// req which receives an array of Keys, of which I'm just using 1 for testing,
// and encodes it into the data stream which I'm using for the src client-side.
router.post('/s3get', (req, res, next) => {
    function encode(data) {
        let buf = Buffer.from(data);
        let base64 = buf.toString('base64');
        return base64
    }
    getImage(req.body.images[0])
        .then((img) => {
            let imageb64 = encode(img.Body);
            res.json({
                image: imageb64
            })
        }).catch((e) => {
            res.send(e)
        })
});
This code works for retrieving a single image, but how do I get all 3?
AWS S3 does not have a built-in method for retrieving multiple objects at once that I am aware of, so I need to retrieve them all one by one and then combine the results.
But how do I do that within a single request, and then send a JSON object back to my front-end containing the 3 base64-encoded image values? Like:
res.json({
    images: [img1, img2, img3]
})
I was experimenting with "Promise.all()" but can't seem to figure out how to work the encoding in there and deal with each one properly.
router.post('/s3get', async (req, res, next) => {
    const { images } = req.body
    function encode(data) {
        let buf = Buffer.from(data);
        let base64 = buf.toString('base64');
        return base64
    }
    const responses = await Promise.all(
        images.map((img) => s3.getObject({
            Bucket: 'energitransport',
            Key: img
        }).promise())
    )
    console.log(responses)
});
I was getting this back, but I was seeing the same data 3 times, so first off I'm not sure why it's not iterating through each Key value. I confirmed each key value was present with a console.log inside the map function... so I don't know. Secondly, I can't figure out how to work the encoding in there and return a single JSON object.
[
  {
    AcceptRanges: 'bytes',
    LastModified: 2022-09-16T07:15:32.000Z,
    ContentLength: 22332,
    ETag: '"notTheActualDataHere"',
    ContentType: 'application/octet-stream',
    Metadata: { fieldname: 'Testing Metadata' },
    Body: <Buffer 89 50 4e 47 0d 0a 1a 0a 00 00 00 0d 49 48 44 52 00 00 00 fc 00 00 01 87 08 06 00 00 00 94 39 3f d1 00 00 00 01 73 52 47 42 00 ae ce 1c e9 00 00 00 04 ... 22282 more bytes>
  },
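For reference, a sketch of one way to combine these pieces: fetch every key in parallel with Promise.all, base64-encode each Body, and return them in one JSON payload. This assumes the router, s3 client, and bucketName from the snippets above; error handling is minimal and untested:

router.post('/s3get', async (req, res, next) => {
    try {
        const { images } = req.body;
        // one getObject per key, all requests in flight at once
        const responses = await Promise.all(
            images.map((img) => s3.getObject({
                Bucket: bucketName,
                Key: img
            }).promise())
        );
        // base64-encode each object's Body buffer and send them back together
        const encoded = responses.map((r) => Buffer.from(r.Body).toString('base64'));
        res.json({ images: encoded });
    } catch (e) {
        res.status(500).send(e.message);
    }
});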

Ionic Upload Error: advanced-http: "params" option needs to be an dictionary style object, <params: {[key: string]: string | string[]}>

I wanted to use only the ionic plugin https://github.com/silkimen/cordova-plugin-advanced-http#uploadFile when uploading a camera/image file from the device to an API using the following method.
While it does not have any syntax error based on the documentation (commented below), the issue was related to the params, and I could not figure out why...
Error: advanced-http: "params" option needs to be an dictionary style object, <params: {[key: string]: string | string[]}>
Method for uploading the image to the API:
async uploadToAPI(imgEntry) {
    var url = environment.api_host + "/activities/log/images"
    var filePath = imgEntry.localURL;
    var name = 'reports';
    var body = {
        user_hash: 'xxxxxxx',
        activities_id: 1,
        activities_log_id: 1
    }
    var headers = {
        'Authorization': 'Bearer ' + environment.api_security_bearer
    }
    // (method) HTTP.uploadFile(url: string, body: any, headers: any, filePath: string | string[], name: string | string[]): Promise<any>
    // @param url — The url to send the request to
    // @param body — The body of the request
    // @param headers — The headers to set for this request
    // @param filePath — The local path(s) of the file(s) to upload
    // @param name — The name(s) of the parameter to pass the file(s) along as
    // @returns — returns a FileEntry promise that will resolve on success, and reject on failure
    this.http.uploadFile(
        url,
        body,
        headers,
        filePath,
        name,
    ).then((data) => {
        console.log(data)
    })
    .catch(error => {
        console.log(error)
    })
}
What could I be missing as a mistake in the code above?
PS: I only wanted to use ionic-native/http/ngx and nothing else.
import { HTTP } from '@ionic-native/http/ngx';
I had a similar error to the one in the OP's post, stating the "params" option needs key: string values.
After further debugging, I found that the ionic capacitor runtime checks only for values of type String | Array, and I realized my body contained a number value. So I changed the values to Strings and everything worked fine.
Just add String() around any non-string value in your body. The same goes for headers if you still get the error.
var body = {
    user_hash: 'xxxxxxx',
    activities_id: String(1),
    activities_log_id: String(1)
}
I reported this to the GitHub repo of the original author, but I later found a way to resolve it while still using ionic-native/http/ngx.
Hope this will help others as well.
this.http.setDataSerializer('multipart')

const formData = new FormData()
formData.set('user_hash', 'xxxxx')
formData.set('activities_id', this.activities_id)

var url = environment.api_host + "/activities/log/images"
this.http.sendRequest(url, {
    method: "post",
    data: formData,
    headers: this.headers,
    timeout: 60,
})
.then(response => {
    console.log(response);
})
.catch(error => {
    console.log(error);
});
This works. The server now receives:

params: {}
body: [Object: null prototype] { user_hash: 'xxxxxx', activities_id: '12' }
files: [ { fieldname: 'reports',
    originalname: '1590642736754.jpg',
    encoding: '7bit',
    mimetype: 'image/jpeg',
    buffer: <Buffer ff d8 ff e0 00 10 4a 46 49 46 00 01 01 00 00 48 00 48 00 00 ff e1 00 58 45 78 69 66 00 00 4d 4d 00 2a 00 00 00 08 00 02 01 12 00 03 00 00 00 01 00 01 ... >,
    size: 10857214 } ]
I faced the same issue without an image upload. I solved it by passing the integer values as Strings. See my code:
var url = this.baseurl;
var headers = {};
var params = {
    "from_date": from_date,
    "to_date": to_date,
    "limit_start": 0,
    "limit": 20,
}
this.http2.get(url, params, headers)
    .then(data => {
        const responceJSon = JSON.parse(data.data);
    })
    .catch(data => {
        console.log("server Response in catch : ");
        console.log(data);
    });
Here I replaced "limit_start": 0 with "limit_start": "0" and "limit": 20 with "limit": "20", and it works!!! 🎉
Final params:
var params = {
    "from_date": from_date,
    "to_date": to_date,
    "limit_start": "0",
    "limit": "20",
}
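The common thread in these answers is that advanced-http only accepts string (or string[]) values in params and headers. A small helper, hypothetical and not part of the plugin, can coerce everything up front:

// Hypothetical helper: coerce every value in a plain object to a string
// (or array of strings) before handing it to advanced-http.
function stringifyValues(obj) {
    const out = {};
    for (const key of Object.keys(obj)) {
        const v = obj[key];
        out[key] = Array.isArray(v) ? v.map(String) : String(v);
    }
    return out;
}

// usage with the last example above:
// this.http2.get(url, stringifyValues(params), headers)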

Weird problem with express-fileupload module saving corrupt image files to my hard drive

I am setting up a database for a website where posts need to be uploaded through an HTML form, which then gets passed to a route that puts the form fields into a Schema data structure.
I am using express-fileupload to achieve this. The files I upload get saved to my database correctly; however, when written to disk they end up being 0 bytes and corrupted.
Can someone please clear this up?
I've tried using other upload modules; however, none of them seem to be as easy to implement as express-fileupload.
POSTS.JS (ROUTE FILE)
router.post('/create', (req, res) =>
{
    let filename = '';
    if (!isEmpty(req.files))
    {
        let file = req.files.fileUpload;
        filename = Date.now() + '-' + file.name;
        let dirUploads = './public/uploads/';
        file.mv(dirUploads + filename, (err) =>
        {
            if (err) throw err;
        });
        console.log(req.files.fileUpload);
    }
});
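For reference, this route assumes the express-fileupload middleware is registered in the app setup, which isn't shown above; a minimal sketch of that setup:

// Minimal setup assumed by the route above (not shown in the question).
const express = require('express');
const fileUpload = require('express-fileupload');

const app = express();
app.use(fileUpload()); // populates req.files for multipart/form-data requests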
CREATE.HANDLEBARS (HTML FILE)
<form action="/admin/posts/create" method="post" enctype="multipart/form-data">
    <!-- File upload section. -->
    <div class="form-group">
        <label for="file">Upload File</label>
        <input name="fileUpload" type="file" class="form-control" id="fileupload">
    </div>
I expect the image file to be saved properly, as I've followed the correct implementation procedure.
The console output seems to verify that the file is properly saved to my database:
{ name: 'MENTORSHIPMARKETINGBLUE.png',
  data: <Buffer 89 50 4e 47 0d 0a 1a 0a 00 00 00 0d 49 48 44 52 00 00 01 f4 00 00 01 f4 08 06 00 00 00 cb d6 df 8a 00 00 20 00 49 44 41 54 78 9c ed dd 77 78 56 f5 fd ... >,
  encoding: '7bit',
  tempFilePath: '\\tmp\\tmp1550292689804',
  truncated: false,
  mimetype: 'image/png',
  md5: [Function: md5],
  mv: [Function: mv] }
{ status: 'public',
  _id: 5c6796d1dc28613b845173e5,
  title: 'weg',
  allowComments: false,
  body: 'weg',
  file: '1550292689806-MENTORSHIPMARKETINGBLUE.png',
  __v: 0 }
However, the files saved are indeed corrupt.
The exact same thing happened to me too. Sometimes when you install the package, it doesn't install properly. Simply uninstall it, then install it again, and make sure you have a good internet connection while doing it.
Hope this helps!

Retrieve MongoDB binData and display as <img> src

I have stored an image in my mongoDB collection, and it looks something like:
{
    "photo" : {
        "image" : BinData(0,"/9j/4AAQS......"),
        "imageType": "image/jpeg"
    }
}
My router looks like:
app.get('/userImage', function(req, res) {
    var username = req.user.username;
    User.getProfilePicture(username, function(err, image) {
        if (err) {
            return res.end('Error fetching photo');
        }
        res.setHeader('Content-Type', image.imageType);
        res.end(image.image.buffer, 'binary');
    });
});
The model looks like:
exports.getProfilePicture = function(username, callback) {
    var collection = db.get().collection('users');
    collection.find({ 'username': username }).toArray(function(err, users) {
        callback(err, users[0].photo);
    });
};
And the ajax request:
$.ajax({
    type: 'GET',
    url: '/userImage',
    success: function(data) {
        console.log(data);
        $image.src = data;
    }
});
In the route, I do:
console.log(image);
And I get:
{ image:
    Binary {
      _bsontype: 'Binary',
      sub_type: 0,
      position: 42461,
      buffer: <Buffer ff d8 ff e0 00 10 4a 46 49 46 00 01 02 00 00 01 00 01 00 00 ff db 00 43 00 05 03 04 04 04 03 05 04 04 04 05 05 05 06 07 0c 08 07 07 07 07 0f 0a 0b 09 ... > },
  imageType: 'image/jpeg' }
Something is going wrong here, although I have followed instructions from other resources and Stack Overflow questions.
The "data" in the success function is empty, although "image.image" is not. Can anybody help me out here?
From this console.log, can anybody tell me how I can get the image to be shown in the HTML?
I think this will be useful to you.
This is my model:
var ImageSchema = new mongoose.Schema({
    "photo": {
        "image": Buffer,
        "imageType": { type: String }
    }
});
var Image = mongoose.model('Error', ImageSchema);
module.exports = Image;
My data is stored like this:
{
    "photo" : {
        "image" : BinData(0,"/9j/4AAQS......"),
        "imageType": "image/jpeg"
    }
}
The GET function:
var Image = require('./Image')
app.get('/image/:imageId', function(req, res) {
    Image.findById(req.params.imageId, function(err, imagedata) {
        if (err)
            return err
        res.end(imagedata.photo.image);
    })
});
finally ajax call
$.ajax({
type: 'GET',
url: '/image/123:',
success: function(data) {
console.log(data);
$image.src = data
}});
And res.end() will send the image data directly; don't use res.send().
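On the client side, since the route serves the raw bytes with the correct Content-Type, a simpler sketch is to skip the ajax call entirely and point the image element straight at the endpoint, letting the browser handle the binary response itself (jQuery's $.ajax treats the response as text by default, which corrupts binary data):

// Sketch: load the image directly from the route instead of fetching
// binary data with $.ajax. 'profilePic' is a hypothetical element id.
document.getElementById('profilePic').src = '/userImage';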

MongoDB: JS Error: out of memory

I am trying to run the group command via the mongo shell:
db.contract.group({
    key: { id: 1 },
    initial: { v: [] },
    reduce: function(obj, prev) {
        prev.v.push(obj.name)
    }
});
and I see the following error on the client:
Thu Nov 17 12:12:49 uncaught exception: group command failed: {
    "errmsg" : "exception: JS_NewObject failed: toJSObject2",
    "code" : 13072,
    "ok" : 0
}
and the mongod log says the following:
Thu Nov 17 12:12:17 [initandlisten] connection accepted from 127.0.0.1:58509 #1
Thu Nov 17 12:12:49 [conn1] JS Error: out of memory
Thu Nov 17 12:12:49 [conn1] Assertion: 13072:JS_NewObject failed: toJSObject2
0x10008de9b 0x1001565bd 0x100156c9e 0x10037011e 0x10037204c 0x10034c4d6 0x10034d877 0x100180cc4 0x100184649 0x1002b9e89 0x1002c3f18 0x100433888 0x100446d74 0x7fff86e00fd6 0x7fff86e00e89
0 mongod 0x000000010008de9b _ZN5mongo11msgassertedEiPKc + 315
1 mongod 0x00000001001565bd _ZN5mongo9Convertor10toJSObjectEPKNS_7BSONObjEb + 1229
2 mongod 0x0000000100156c9e _ZN5mongo7SMScope9setObjectEPKcRKNS_7BSONObjEb + 78
3 mongod 0x000000010037011e _ZN5mongo12GroupCommand5groupESsRKSsRKNS_7BSONObjES3_SsSsPKcS3_SsRSsRNS_14BSONObjBuilderE + 2110
4 mongod 0x000000010037204c _ZN5mongo12GroupCommand3runERKSsRNS_7BSONObjERSsRNS_14BSONObjBuilderEb + 3676
5 mongod 0x000000010034c4d6 _ZN5mongo11execCommandEPNS_7CommandERNS_6ClientEiPKcRNS_7BSONObjERNS_14BSONObjBuilderEb + 1350
6 mongod 0x000000010034d877 _ZN5mongo12_runCommandsEPKcRNS_7BSONObjERNS_10BufBuilderERNS_14BSONObjBuilderEbi + 2151
7 mongod 0x0000000100180cc4 _ZN5mongo11runCommandsEPKcRNS_7BSONObjERNS_5CurOpERNS_10BufBuilderERNS_14BSONObjBuilderEbi + 52
8 mongod 0x0000000100184649 _ZN5mongo8runQueryERNS_7MessageERNS_12QueryMessageERNS_5CurOpES1_ + 10585
9 mongod 0x00000001002b9e89 _ZN5mongo13receivedQueryERNS_6ClientERNS_10DbResponseERNS_7MessageE + 569
10 mongod 0x00000001002c3f18 _ZN5mongo16assembleResponseERNS_7MessageERNS_10DbResponseERKNS_8SockAddrE + 1528
11 mongod 0x0000000100433888 _ZN5mongo10connThreadEPNS_13MessagingPortE + 616
12 mongod 0x0000000100446d74 thread_proxy + 132
13 libSystem.B.dylib 0x00007fff86e00fd6 _pthread_start + 331
14 libSystem.B.dylib 0x00007fff86e00e89 thread_start + 13
Thu Nov 17 12:12:49 [conn1] query staging.$cmd ntoreturn:1 command: { group: { key: { asset_id: 1.0 }, initial: { v: {} }, ns: "contract", $reduce: function (obj, prev) {
prev.v.push(obj.name);
} } } reslen:119 21013ms
I checked whether virtual memory is unlimited, and it is:
bash-3.2$ ulimit -a | egrep virtual\|open
open files (-n) 256
virtual memory (kbytes, -v) unlimited
so I am not sure how to fix this problem.
Thank you
Mongo is grouping in memory, so if you have a large database with many different values for id, things might get too big.
Also, you are piling up all the names for a given id in one array. There is also a limit on the maximum document size in Mongo, which might be the reason for the out-of-memory error if you have many docs for a given id.
I guess the solution is to use map/reduce instead.
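As a rough sketch of that suggestion (field names taken from the group() call above; treat it as a starting point rather than a drop-in replacement), the equivalent map/reduce would look something like:

// Emit each document's name under its id, then merge the arrays.
// Note the reduce function returns the same shape as the emitted values.
db.contract.mapReduce(
    function() { emit(this.id, { v: [this.name] }); },
    function(key, values) {
        var merged = [];
        values.forEach(function(val) { merged = merged.concat(val.v); });
        return { v: merged };
    },
    { out: { inline: 1 } }
);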
