select _id from project where projectName = '***' order by viewCount desc limit 5
I'm still new to Mongoose and have a somewhat intermediate understanding of SQL. Here's my attempt at it; I'm trying to retrieve the ObjectId values from the sorted results.
ProjectModel.find({id}).sort({viewCount: -1}).limit(5).exec(
    function(err, projects) {
        ...
    }
);
ProjectModel.find({projectName: '***'}, ["_id"]).sort({viewCount: -1}).limit(5).exec(
    function(err, projects) {
        ...
    }
);
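Since the goal is the ObjectId values, the callback can map them out of the result. A minimal sketch, assuming the query above succeeds and returns the five _id-only documents:
ProjectModel.find({projectName: '***'}, ["_id"]).sort({viewCount: -1}).limit(5).exec(
    function(err, projects) {
        if (err) return console.error(err);
        // Each document here carries only its _id; collect them in ranked order.
        var ids = projects.map(function(p) { return p._id; });
        console.log(ids); // [ObjectId of most viewed, ..., ObjectId of 5th most viewed]
    }
);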
Related
I have a setup with Node, Express, and Backbone. Everything works fine and I'm able to retrieve records from my MongoDB collections when they are simple, such as getting employees by id or getting all employees. What I'm having trouble understanding is how to get collections from MongoDB that require more complex query syntax, like the following:
db.employees.aggregate(
    [
        { $group : { _id : "$managerName", employees: { $push: "$fullName" } } }
    ]
)
I currently have the following syntax for extracting the data I want to expose as a JSON object and bind to the elements in my HTML page.
exports.findById = function(req, res) {
    var id = parseInt(req.params.id);
    db.collection('employees', function(err, collection) {
        collection.findOne({'id': id}, function(err, item) {
            res.jsonp(item);
        });
    });
};
I want to get a list of all managers and the employees that report to them, and then somehow bind this result set to individual divs that would list a manager as the list heading and all the employees that report to them as list items. The result set will essentially consist of parents and children. I want to do this dynamically using Backbone.js.
Would I be doing something like this?
exports.findRelations = function(req, res) {
    db.collection('employees', function(err, collection) {
        collection.aggregate({ $group : { _id : "$managerName", employees: { $push: "$fullName" } } }, function(err, item) {
            res.jsonp(item);
        });
    });
};
The aggregation pipeline for MongoDB requires that you pass the operations in an Array. This means the correct query would be:
db.collection('employees').aggregate([
    { $group : { _id : "$managerName", employees: { $push: "$fullName" } } }
])
.toArray(function(err, managers){
    if (err){
        throw err;
    }
    res.jsonp(managers);
});
You'll find details for using the aggregation pipeline with the NodeJS MongoDB driver here: https://docs.mongodb.org/getting-started/node/aggregation/
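For reference, the managers array that ends up in res.jsonp(managers) looks roughly like this (a sketch; the manager and employee names below are made up):
[
    { "_id" : "Manager A", "employees" : [ "Employee One", "Employee Two" ] },
    { "_id" : "Manager B", "employees" : [ "Employee Three" ] }
]
Each element can then back one div on the Backbone side, with _id as the heading and the employees array as the list items.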
How do I achieve the following relation in Bookshelf.js?
SELECT user_accounts.user_id, `event_id`
FROM `calendar_events`
JOIN user_accounts ON user_accounts.user_id = calendar_events.`created_by`
LIMIT 10
My Model
var CalendarEvent = bookshelf.Model.extend({
    tableName: 'calendar_events',
    hasTimestamps: ['created_on'],
    user: function() {
        return this.hasOne('UserAccount', 'user_id');
    }
});

var UserAccount = bookshelf.Model.extend({
    tableName: 'user_account'
});
If you want that exact style of query, you can use the Knex query builder and build a query to match your needs.
I have not tested this, but it should be something like the following:
CalendarEvent.forge().query(function(qb){
    qb.join('user_accounts', 'user_accounts.user_id', '=', 'calendar_events.created_by');
    //qb.where() //if you wanted
    //qb.andWhere(); //and if you wanted more
    qb.limit(10);
})
.fetchAll();
(You don't need the .where or .andWhere calls; I just added those for fun.)
It might be possible to do this purely in Bookshelf, but I'm not sure how at the moment.
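For completeness, here is a sketch of actually consuming that query (untested, same caveat as above):
CalendarEvent.forge().query(function(qb){
    qb.join('user_accounts', 'user_accounts.user_id', '=', 'calendar_events.created_by');
    qb.limit(10);
})
.fetchAll()
.then(function(events) {
    // Plain objects for the (up to) 10 joined rows
    console.log(events.toJSON());
})
.catch(function(err) {
    console.error(err);
});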
I suppose you've found the solution since 2016, but here is the answer:
var CalendarEvent = bookshelf.Model.extend({
    tableName: 'calendar_events',
    hasTimestamps: ['created_on'],
    user: function() {
        return this.belongsTo(UserAccount, 'user_id', 'created_by');
    }
});

var UserAccount = bookshelf.Model.extend({
    tableName: 'user_account'
});
Use the belongsTo() method because the foreign key is in the CalendarEvent model, not in the UserAccount model. Add the name of the foreign key to the belongsTo() parameters to specify it explicitly.
Then, use the Bookshelf model like this:
CalendarEvent
    .forge()
    .fetchPage({ pageSize: 10, withRelated: ['user'] })
    .then(data => {
        // Data retrieval
    })
    .catch(exc => {
        // Error management
    });
The fetchPage method uses page and pageSize to create pagination. You will get back a data.pagination object containing row info (number of rows, page number, page size, total page count), and data.toJSON() will return your models.
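Concretely, the then handler might look like this (a sketch; the exact field names on data.pagination are an assumption based on the Bookshelf pagination plugin):
CalendarEvent
    .forge()
    .fetchPage({ pageSize: 10, withRelated: ['user'] })
    .then(data => {
        // Pagination metadata: page, pageSize, rowCount, pageCount (assumed names)
        console.log(data.pagination);
        // The events themselves, each with its related user attached
        console.log(data.toJSON());
    })
    .catch(exc => {
        console.error(exc);
    });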
My User model contains a SubscriptionSchema and an AccessToken schema. I defined these two plugin schemas with { capped : 234556 } too.
var User = new Schema({
    email : String
    , firstName : String
    , password : String
    , isAdmin : Boolean
    , lastSeen : Date
    , subscriptions : [ SubscriptionSchema ]
    , accessTokens : [ AccessToken ]
}, {
    toObject : { virtuals : true }
    , toJSON : { virtuals : true }
    , capped : 234556
});
var streamTest = User.find().limit(1).tailable().stream();
When I try to run the above code, I still get the error:
MongoError: tailable cursor requested on non capped collection
That doesn't look like correct usage of a capped collection or a tailable stream. But first, a little code to demonstrate a working example:
var async = require('async'),
    mongoose = require('mongoose'),
    Schema = mongoose.Schema;

var userSchema = new Schema({
    email: String,
},{
    capped: 2048
});

var User = mongoose.model( "User", userSchema );
mongoose.connect('mongodb://localhost/atest');

var stream;

async.waterfall(
    [
        function(callback) {
            var user = new User({ email: "existing" });
            user.save(function(err,doc) {
                if (err) throw err;
                callback();
            });
        },
        function(callback) {
            User.find({}).sort({ "$natural": -1 }).limit(1).exec(function(err,docs) {
                if (err) throw err;
                console.log( docs );
                callback(err,docs[0]);
            });
        },
        function(doc,callback) {
            stream = User.find({ "_id": { "$gt": doc._id } }).tailable().stream();
            stream.on("data",function(doc) {
                console.log( "Streamed:\n%s", doc );
            });
            callback();
        },
        function(callback) {
            async.eachSeries(
                ['one','two','three'],
                function(item,callback) {
                    var user = new User({ email: item });
                    user.save(function(err,doc) {
                        if (err) throw err;
                        console.log( "Saved:\n%s", doc );
                        callback();
                    });
                },
                function(err) {
                    if (err) throw err;
                    callback();
                }
            );
        }
    ]
);
First things first: there really needs to be something in the capped collection for anything to work. This presumes that the collection does not exist and is going to be initialized as a capped collection, so the first step is making sure something is there.
Generally when you want to "tail", you just want the new documents that are inserted to appear. So before setting up the tailable cursor you want to find the "last" document in the collection.
When you know the last document in the collection, the "tailable stream" is set up to look for anything "greater than" that document, which will be the new documents. If you did not do this, the first "data" events on the stream would dump all of the current collection items. So the .sort() and .limit() options do not apply here; tailable cursors simply initialize and "follow".
Now that the streaming interface is set up and a listener is established, you can add items to the collection. These will then be logged accordingly, but since this is event-driven there is no particular sequence to the logging: either the "save" callback or the stream's "data" event may fire first.
Now onto your implementation. These two lines stand out:
, subscriptions : [ SubscriptionSchema ]
, accessTokens : [ AccessToken ]
Those are embedded arrays; they are not "external" documents in another collection, though it would not matter even if they were. The general problem here is that you are (at least in some way) introducing an array, which seems to imply some concept of "growth".
Unless your intention is to never "grow" these arrays, and only ever insert the content with the new document and never update it, this will cause problems with capped collections.
Documents in capped collections cannot "grow" beyond their initially allocated size. Trying to update a document in a way that makes it grow will result in errors. If you think you are going to be smart about it and "pad" your documents, this is likely to fail when replicated secondary hosts "re-sync". All of this is documented for capped collections.
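As a quick sanity check, you can also ask the driver whether the collection really ended up capped before relying on a tailable cursor. A sketch, assuming the same connection as above and the default "users" collection name that Mongoose derives from the model:
mongoose.connection.on('open', function() {
    // Look up the underlying collection and check its capped status.
    mongoose.connection.db.collection('users', function(err, collection) {
        if (err) throw err;
        collection.isCapped(function(err, capped) {
            if (err) throw err;
            console.log('users capped?', capped); // must be true for .tailable() to work
        });
    });
});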
I am trying to retrieve a document from MongoDB. The document has a field "ownerId" that contains a binary UUID. Using the Mongo console, if I launch the command
db.dataset.find({ownerId: BinData(3,"ZQ6EAOKbQdSnFkRmVUUAAA==")}).pretty()
it returns:
{
    "_id" : BinData(3,"VQ6EAOKbQdSnFkRmVUUAAA=="),
    "name" : "Twitter",
    "objectType" : "Tweet",
    "ownerId" : BinData(3,"ZQ6EAOKbQdSnFkRmVUUAAA==")
}
When I try to retrieve the document from my Node.js program, it fails and does not return any document.
My program is:
var mongo = require('mongoskin');
var db = mongo.db("mongodb://192.168.1.100:27017/test", {native_parser:true});

function HexToBase64(g) {
    ...
}

var uuidstr = "650e8400e29b41d4a716446655450000";
console.info(uuidstr);
base64str = HexToBase64(uuidstr);
console.info(base64str);

db.collection('dataset').find( { ownerId: new mongo.Binary(base64str, 4) } ).toArray(function (err, items) {
    if(err) {
        var msg = "Error getting 'dataset' objects from database.";
        console.info(msg + " " + err);
        return;
    }
    console.info("OK");
    console.info(items);
});
The output is:
650e8400e29b41d4a716446655450000
ZQ6EAOKbQdSnFkRmVUUAAA==
OK
[]
What am I doing wrong?
Firstly, since you're going to query the collection by ownerId, I'd suggest creating an index there. You can achieve that in mongoskin easily enough:
// Create an index on ownerId
db.collection('dataset').ensureIndex([['ownerId', 1]], true, function(err, res) {
    if (err) throw err;
});
Once you've done that, you can now do things like this:
db.collection('dataset').findOne({ ownerId: 'some-uuid' }, callback);
Further, if it's the ObjectId type you're after, I'd recommend you use the provided helper (see this other question) to achieve that:
var ownerId = mongoskin.ObjectID(uuidstr);
Otherwise, I'd go with a different approach, perhaps using node-uuid:
var uuid = require('node-uuid');
var ownerId = uuid.v4();
db.collection('dataset').findOne({ ownerId: ownerId }, callback);
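Here, callback is just a standard node-style handler, for example (a sketch):
function callback(err, doc) {
    if (err) {
        console.error("Error getting 'dataset' object:", err);
        return;
    }
    console.log(doc); // null when nothing matched the query
}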
Hope this helps,
I have a site with multiple projects, and each project has a different view count.
What I want to do is retrieve an array of the top 5 most-viewed projects, in this order: [max, max-1, max-2, max-3, max-4].
Here's the schema:
var mongoose = require('mongoose');

// defines the database schema for this object
var schema = mongoose.Schema({
    projectName : String,
    authorName : String,
    viewCount : Number,
    comment : [{
        id : String,
        authorName : String,
        authorEmailAddress : { type : String, index : true }
    }]
});
// Creates the model from the schema
var ProjectModel = mongoose.model('Project', schema);
// Create a project
exports.create = function (projectJSON) {
    var project = new ProjectModel({
        projectName : projectJSON.projectName,
        authorName : projectJSON.authorName,
        viewCount : projectJSON.viewCount,
        comment : [{
            id : projectJSON.comments.id,
            authorName : projectJSON.comments.authorName,
            authorEmailAddress : projectJSON.authorEmailAddress
        }]
    });

    project.save(function(err) {
        if (err) {
            console.log(err);
        } else {
            console.log("success");
        }
    });
};
Below is my attempt to retrieve an array of the top 5 most-viewed articles in this order: [max, max-1, max-2, max-3, max-4], bearing in mind that article view ranks change in real time.
// Because I am familiar with SQL, I start with a SQL query and convert it to Mongoose later
SQL version:
SELECT MAX(viewCount) FROM project WHERE projectName=1 -- this only gives the MAX, when I want the top 5
Mongoose version:
exports.getTopViewedProject = function(rank, callback)
ProjectModel.findOne({ projectName: 1 }).sort(viewCount, -1).run(
function(err, viewCOunt) {
var max = viewCount;
});
To get the top 5 articles for project 'name' by viewCount:
ProjectModel.find({projectName: 'name'}).sort({viewCount: -1}).limit(5).exec(
    function(err, projects) {
        ...
    }
);