I am learning Node.js and have built a micro-app around an MVC architecture.
I have a router.js file which loads a controller based on the URI; in most cases, the controller loads the views using the "fs" module. The views are the HTML elements making up the web page (basically head, body, and footer) as 3 separate files.
Here is the code for the controller:
var load_view = 'test.html';

function data(response, request, fs){
    response.writeHead(200, {"Content-Type": "text/html"});
    var count = 0;
    var handler = function(error, content){
        count++;
        if(error) console.log(error);
        else response.write(content);
        if(count==3) response.end();
    }
    fs.readFile('view/elements/head.html', handler);   // should load 1st
    fs.readFile('view/'+load_view, handler);           // should load 2nd
    fs.readFile('view/elements/footer.html', handler); // should load 3rd
}
exports.data = data;
As you can see, the HTML elements are supposed to load in order (head.html, then the particular view for this controller - test.html, then footer.html). But they sometimes do NOT.
They load in the "head, body, footer" order most of the time.
Sometimes they load as "head, footer, body".
Other times it's "body, head, footer".
They never seem to load in any other configuration.
Please see screenshots attached.
I am not sure what is happening here. Why are these files being loaded in any order but the one in which they are called?
Please note I am intentionally not using a framework like Express.js, for learning purposes.
You cannot make assumptions about the ordering of responses for asynchronous requests. There are various factors that could affect this ordering (e.g. OS thread scheduling in the case of filesystem requests).
So the solution is to either call them serially (by nesting callbacks, chaining Promises, or using a module like async), or to bind your callbacks to include relevant contextual information so you know which file just got loaded in your callback. An example of the latter might be something like:
function data(res, req, fs) {
    // Files to be loaded, in the order to be written to the response
    var files = [
        'view/elements/head.html',
        'view/' + load_view,
        'view/elements/footer.html'
    ];

    // Start read requests, each with a callback that has an extra
    // argument bound to it so it will be unshifted onto the callback's
    // parameter list
    for (var i = 0; i < files.length; ++i)
        fs.readFile(files[i], handler.bind(null, i));

    var count = 0;
    function handler(index, err, content) {
        // Make sure we don't send more than one response on error
        if (count < 0)
            return;

        if (err) {
            count = -1;
            console.log('Error for file: ' + files[index]);
            console.log(err);
            res.writeHead(500);
            return res.end();
        }

        // Reuse our `files` array by simply replacing filenames with
        // their respective content
        files[index] = content;

        // Check if we've read all the files and write them in order if
        // we are finished
        if (++count === files.length) {
            res.writeHead(200, { 'Content-Type': 'text/html' });
            for (var i = 0; i < files.length; ++i)
                res.write(files[i]);
            res.end();
        }
    }
}
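For comparison, here is a minimal sketch of the serial alternative mentioned above, chaining Promises so each file is read and written strictly in order. This assumes a Node version with built-in Promise support; `load_view` comes from the original controller, and `readFileAsync` is a small wrapper I'm introducing for illustration:

// Wrap fs.readFile in a Promise so reads can be chained
function readFileAsync(fs, path) {
    return new Promise(function (resolve, reject) {
        fs.readFile(path, function (err, content) {
            if (err) reject(err);
            else resolve(content);
        });
    });
}

function data(res, req, fs) {
    var files = [
        'view/elements/head.html',
        'view/' + load_view,
        'view/elements/footer.html'
    ];
    res.writeHead(200, { 'Content-Type': 'text/html' });
    // Build a chain: read file 1, write it, read file 2, write it, ...
    files.reduce(function (chain, file) {
        return chain.then(function () {
            return readFileAsync(fs, file);
        }).then(function (content) {
            res.write(content);
        });
    }, Promise.resolve()).then(function () {
        res.end();
    }, function (err) {
        console.log(err);
        res.end(); // headers were already sent, so just close the response
    });
}

This trades the parallel reads of the bind approach for simplicity: each read only starts after the previous write, so ordering is guaranteed by construction.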
On a related note, you might look into a templating system of some kind, one that supports layouts and including partials (like headers and footers), to avoid doing this kind of manual work.
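For illustration, a minimal sketch of what that could look like with EJS (just one option among many, installed with npm i ejs); the template file name is an assumption, and the partials would need to be EJS files rather than the raw .html files above:

// Hedged sketch using EJS so the template engine handles the partials
// instead of manual fs reads.
var ejs = require('ejs');

// view/test.ejs (assumed name) would contain something like:
//   <%- include('elements/head') %>
//   ...the view's own markup...
//   <%- include('elements/footer') %>
function data(res, req) {
    ejs.renderFile('view/test.ejs', {}, function (err, html) {
        if (err) {
            console.log(err);
            res.writeHead(500);
            return res.end();
        }
        res.writeHead(200, { 'Content-Type': 'text/html' });
        res.end(html);
    });
}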
Related: a link to a similar problem that has no answers, but written in C.
I'm using NodeJS to parse output from ark-server-tools, which is a layer on top of SteamCMD. What I'd like to do is parse the progress of the update and assign it to a variable, which I'll return as a GET call that the client can use to check progress of the update.
I put the log results of an update into a file to run my code against, which I've put in a PasteBin for brevity.
update.js
app.get('/update', function(req, res) {
    var toReturn;
    var outputSoFar;
    var total;
    var startPos;
    var endPos = 0;

    //var proc = spawn('arkmanager', ['update', '--safe']);
    var proc = spawn('./update-log.sh'); //for testing purposes

    proc.stdout.on('data', function(data){
        outputSoFar += data.toString();
        //if server is already updated
        if (outputSoFar.indexOf('Your server is already up to date!') !== -1) {
            toReturn = 'Server is already up-to-date.';
        }
        //find update progress
        if (outputSoFar.indexOf('progress:') !== -1) {
            for(var line in outputSoFar.split('\n')){
                console.log('found progress');
                startPos = outputSoFar[line].indexOf('progress:', endPos) + 10; //get the value right after progress:_, which should be a number
                endPos = outputSoFar[line].indexOf(' (', startPos); //find the end of this value, which is signified by space + (
                console.log(outputSoFar[line].substring(startPos, endPos).trim());
                updatePercent = outputSoFar[line].substring(startPos, endPos).trim(); //returned to the `checkUpdateProgress` endpoint
            }
            toReturn = 'Updating...';
        }
    });
    proc.stderr.on('data', function(data){
        console.log(data);
    });
    proc.on('close', function (code, signal) {
        res.writeHead(200, {'Content-Type': 'text/plain'});
        res.write(JSON.stringify(toReturn));
        res.end();
    });
});
/*
* Returns progress of an update
*/
app.get('/updateProgress', function(req, res){
    console.log('updatePercent: ' + updatePercent);
    res.send(JSON.stringify(updatePercent));
});
Couple questions:
1) Is this the best way to architect my RESTful API? One call for the action of updating and another for checking the progress of the update?
2) I'd love a better way to test the function, as echoing the console log returns the data in one piece, as opposed to a data stream. How do I do this?
3) I'm pretty sure the parsing function itself isn't quite right, but I'm having a hard time testing it because of #2.
If you want to take a look at the project in its entirety, here's the repo.
Thanks in advance for your help!
For one of your questions:
Is this the best way to architect my RESTful API? One call for the
action of updating and another for checking the progress of the
update?
As implemented now, I don't think your service can support concurrent requests correctly. updatePercent is a shared global variable. If I hit the /update endpoint with a single client, it will start the ./update-log.sh command.
If I request /update again, it will start another update and overwrite the global updatePercent. There doesn't seem to be anything mapping an updatePercent to the correct process.
Additionally, there could be serious performance issues with each request spawning a new process. Node might be able to handle hundreds or thousands of concurrent connections using a single thread, but here every request spawns a new process; that's something to profile.
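One hedged sketch of a fix for the shared-state problem: give each update its own job ID and store progress per job. The jobs map and parseProgress helper are hypothetical stand-ins, not part of the original code; `app` and `spawn` come from the question:

// Hypothetical sketch: track each update under its own job ID instead of
// one global updatePercent. `parseProgress` stands in for the existing
// stdout-parsing logic.
var jobs = {};
var nextJobId = 1;

app.get('/update', function (req, res) {
    var jobId = nextJobId++;
    jobs[jobId] = { percent: 0, done: false };

    var proc = spawn('./update-log.sh');
    proc.stdout.on('data', function (data) {
        // parse progress as before, but store it on this job only
        jobs[jobId].percent = parseProgress(data.toString());
    });
    proc.on('close', function () {
        jobs[jobId].done = true;
    });

    // Return the job ID so the client can poll for this job's progress
    res.send(JSON.stringify({ jobId: jobId }));
});

app.get('/updateProgress/:jobId', function (req, res) {
    var job = jobs[req.params.jobId];
    res.send(JSON.stringify(job || { error: 'unknown job' }));
});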
For stekhn, here's the proper link: var location = "http://www.roblox.com/Trade/inventoryhandler.ashx?filter=0&userid=" + i + "&page=1&itemsPerPage=14";
I'm trying to create a JavaScript script where I can search through a user's inventory, detect if they have what I'm looking for in their inventory, and output the user ID if they have it.
If I type in bluesteel, I need a JavaScript script which will search through http://snackyrite.com/site.ashx?userid=1 and detect if the text 'bluesteel' is on it; if it is, I need it to display the user ID, which is 1.
You may be thinking that's easy and I can easily find the script for that. Well, there's a catch: my objective isn't only to get it to search userid=1, I need it to search from userid=1 up to userid=45356.
If the word 'bluesteel' is found on userid=5, userid=3054 and userid=12 (these are just examples), I need it to display 5, 3054 and 12 (the IDs) on the same page where the script was run from.
This is the script I've tried, but the userid won't increase (I'm not sure how to do that).
var location = http://snackyrite.com/site.ashx?userid=1;

if(location.indexOf("bluesteel") > -1) {
    output.userid
}
I do apologize, Javascript isn't my best.
Use a loop:
for (var i = 1; i <= 45356; i++) {
    var loc = "http://snackyrite.com/site.ashx?userid=" + i;
    // get contents of location
    if (contents.indexOf("bluesteel") > -1) {
        console.log(i);
    }
}
Since getting the contents will presumably use AJAX, the if will probably be in the callback function. See Javascript infamous Loop issue? for how to write the loop so that i will be preserved in the callback function.
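For example, a hedged sketch of that pattern, where getContents is a hypothetical stand-in for whatever AJAX helper you use to fetch the page:

// Wrap the loop body in an IIFE so each callback sees its own copy of i.
for (var i = 1; i <= 45356; i++) {
    (function (id) {
        var loc = "http://snackyrite.com/site.ashx?userid=" + id;
        // getContents is a hypothetical async helper that fetches loc
        getContents(loc, function (contents) {
            if (contents.indexOf("bluesteel") > -1) {
                console.log(id); // id is preserved by the closure
            }
        });
    })(i);
}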
This kind of web scraping can't be done in the Browser (client-side JavaScript).
I would suggest building a scraper with Node.js.
Install Node.js
Install request: npm i request
Install cheerio: npm i cheerio
Create a file scraper.js
Run node scraper.js
Code for scraper.js
// Import the scraping libraries
var request = require("request");
var cheerio = require("cheerio");

// Array for the user IDs which match the query
var matches = [];
// Count down outstanding requests so we know when all have finished
var pending = 45356;

// Do this for all possible users. The IIFE gives each request its own
// copy of the user ID, so the async callback reports the right one.
for (var i = 1; i <= 45356; i++) {
    (function (userId) {
        var location = "http://snackyrite.com/site.ashx?userid=" + userId;
        request(location, function (error, response, body) {
            if (!error) {
                // Load the website content
                var $ = cheerio.load(body);
                var bodyText = $("body").text();
                // Search the website content for bluesteel
                if (bodyText.indexOf("bluesteel") > -1) {
                    console.log("Found bluesteel in inventory of user", userId);
                    // Save the user ID, if bluesteel was found
                    matches.push(userId);
                }
            // Something went wrong
            } else {
                console.log(error.message);
            }
            // Only print the summary once every request has completed
            if (--pending === 0) {
                console.log("All users with bluesteel in inventory:", matches);
            }
        });
    })(i);
}
The above code may seem kind of complicated, but I think this is the way it should be done. Of course, you can use any other scraping tool or library.
I am trying to use only Node (no additional npm modules or Express). Here is my current route handler:
function requestHandler(req, res) {
    var localFolder = __dirname + '/views/',
        page404 = localFolder + '404.html',
        fileToServe = "";

    if(/\/posts\/\d+/.test(req.url)){
        fileToServe = __dirname + req.url.match(/\/posts\/\d+/) +'.json';
        fs.stat(fileToServe, function(err, contents){
            if(!err && contents){
                res.end(templateEngine('post', fileToServe));
            } else {
                res.end(templateEngine('error', err))
            }
        });
    } else if (/\/posts\//.test(req.url)){
    } else if (/.+[^\W]$/.test(req.url)){
        fileToServe = __dirname + '/views' + req.url.match(/\/.+[^\W]$/gi);
        fs.stat(fileToServe, function(err, contents){
            if(!err && contents){
                res.end(fs.readFileSync(fileToServe));
            } else {
                res.end(templateEngine('error', err))
            }
        });
    }
}
I have two questions:
In one of my views I have a <link> tag with a CSS file. When I go straight to the path, it is served (this is the regex that catches it: /.+[^\W]$/.test(req.url)). However, as mentioned, one of my views built from the template engine uses the CSS file.
How does the browser work when it sees the link tag? Does it send a GET request to my local server (node)? If it does, why doesn't my server send a response back? When I go directly to the link, it sends the response perfectly fine.
Furthermore, when I try going to the page that uses the css file in the link tag, it just hangs on an empty page. When I kill the server, it says it never received a response (once again, when I go to the link directly, I get the proper file).
Do I have to re-organize my routes? Serve static files separately?
How does the browser work when it sees the link tag? Does it send a GET request to my local server (node)?
Yes. The browser builds the full URL based on the current URL and makes an HTTP GET request, like it does for any other resource.
If it does, why doesn't my server send a response back? When I go directly to the link, it sends the response perfectly fine.
All evidence suggests that the page which links to the CSS is not being captured by your handler's if-blocks. Try putting in a couple of console.logs, one right inside requestHandler and the other inside the block which is supposed to handle the page request. I think you will only see one log show up in the server console.
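A minimal sketch of that debugging step, plus the fall-through response the handler currently lacks (that the hang comes from unmatched URLs is my assumption here, not something confirmed in the question):

// Log every request that reaches the handler, and always send *some*
// response so the browser never hangs waiting on an unmatched URL.
function requestHandler(req, res) {
    console.log('request for:', req.url); // shows the CSS request, if it arrives

    if (/\/posts\/\d+/.test(req.url)) {
        console.log('matched the posts route');
        // ...existing handling...
    } else {
        // Fall-through: without this, URLs that match no branch hang forever.
        res.writeHead(404, { 'Content-Type': 'text/plain' });
        res.end('Not found: ' + req.url);
    }
}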
Basically I want to know if the 'success function' of jQuery's $.get() method is fired when the whole file has finished downloading. I suspect it is, but just want to make sure.
I'm using a series of $.get() requests to load the files of a page (CSS, JavaScript, etc.) while I display a 'loading screen'.
On each success callback of the request, I load the next file, until finally they're all finished and I remove my loading screen.
The issue is that randomly, usually on slow connections (the site is designed for mobile browsing), the loading screen will disappear but the CSS for the site is not applied until ~1-2 seconds later, so the user briefly sees an unstyled site before the CSS is applied to buttons, etc.
Here is the code I'm using to load my resources:
if(!window.MyResourceLoader) window.MyResourceLoader = {};

// list of resources to load
MyResourceLoader.MyResources = new Array(
    "include/resources/.....css",
    "include/resources/.....css",
    "include/modules/.......js",
    "include/...............js",
    "include/...............js");

// reverse the array so we load it in the order above
MyResourceLoader.MyResources.reverse();
MyResourceLoader.resourcesToLoad = MyResourceLoader.MyResources.length;

/**
 * Recursive function - loads the resources
 */
MyResourceLoader.loadMyResources = function()
{
    // exit condition - stop recurring if we've run out of resources to load
    if(this.MyResources.length < 1)
    {
        $(".meter > span").each(function() {
            $(this).stop().animate({
                width: "100%"
            }, 300);
        });
        return;
    }

    // get the next resource to load
    var resource = this.MyResources.pop();

    // if the resource is a css file, append it to the head
    if(resource.match(/css$/))
    {
        // append timestamp to resource
        resource += ("?" + new Date().getTime());

        // ie support
        if(document.createStyleSheet)
            document.createStyleSheet(resource);
        else
            $("head").append($('<link type="text/css" rel="stylesheet" href="' + resource + '">'));

        // recursively load resources
        this.loadMyResources();
    }
    else if(resource.match(/\.js$/))
    {
        // append timestamp to resource
        resource += ("?" + new Date().getTime());

        // if the resource is a js file, make a request for it
        $.get(resource, function()
        {
            // on successful request of the file, make another call to load resources
            MyResourceLoader.loadMyResources();
        });
    }
}
The final JavaScript file is a 'load end' .js file that does the following:
// fade out the loading screen
$('#webapp-loader').css('opacity',0);
setTimeout(function()
{
$('#webapp-loader').remove();
}, 1000);
So as you can see, there is even a 1 second buffer at the end of the loading before it removes the loading screen.
Yes, $.get() downloads the whole file. As I commented above, my issue here was that the resource loader only appends the CSS file to the head of the document, which merely triggers the download of the CSS file; so the above code will not wait for the CSS in the head to finish downloading before it thinks it is 'done loading'.
When loading resources by appending them into the DOM, they don't get loaded immediately. You have to listen for the onload event to know when the element has loaded its contents. The problem is that, unlike the script tag, which has an almost reliable onload and onerror, the link tag doesn't: you can't check if the stylesheet has loaded.
I suggest you load the CSS via AJAX so you have a sense of when the CSS is loaded. You then put the returned CSS into a style tag in the page. As for JS, since you are using jQuery, you can use getScript.
Also, you can load them all at once, in parallel; that way, you'll load them faster. To keep track of them, you can use the promise each jQuery AJAX function returns. You can then use when to check that all the promises have loaded their resources.
Also, you may have over-complicated your approach. Here's a shorter one:
// the resource list
var resources = [
    'include/resources/.....css',
    'include/resources/.....css',
    'include/modules/.......js',
    'include/...............js',
    'include/...............js'
];

// collection of promises to listen for
var promises = [];
var timestamp = '?' + new Date().getTime();
var current, ext, promise;

// load each resource
for(var i = 0; i < resources.length; ++i){
    current = resources[i];

    // RegExp-less extension extraction (assuming they end with extensions)
    ext = current.substr(current.lastIndexOf('.') + 1).toLowerCase();

    // extension check
    if(ext === 'css'){
        promise = $.get(current + timestamp).done(function(css){
            // when the CSS AJAX finishes, append
            $('<style>').html(css).appendTo('head');
        });
    } else if(ext === 'js'){
        promise = $.getScript(current + timestamp);
    }

    // push the promise
    promises.push(promise);
}

// listen for when all are done
$.when.apply(null, promises).done(function(){
    // all done!
});
I tried to play a bit with Node.js and wrote the following code (it doesn't make sense, but that doesn't matter):
var http = require("http"),
    sys = require("sys");

sys.puts("Starting...");

var gRes = null;
var cnt = 0;

var srv = http.createServer(function(req, res){
    res.writeHeader(200, {"Content-Type": "text/plain"});
    gRes = res;
    setTimeout(output, 1000);
    cnt = 0;
}).listen(81);

function output(){
    gRes.write("Hello World!");
    cnt++;
    if(cnt < 10)
        setTimeout(output, 1000);
    else
        gRes.end();
}
I know that there are some bad things in it (like using gRes globally), but my question is: why does this code block a second request until the first has completed?
If I open the URL, it starts writing "Hello World" ten times. But if I open it simultaneously in a second tab, one tab waits to connect until the other tab has finished writing "Hello World" ten times.
I found nothing which could explain this behaviour.
Surely it's your overwriting of the gRes and cnt variables being used by the first request that's doing it?
[EDIT actually, Chrome won't send two at once, as Shadow Wizard said, but the code as is is seriously broken because each new request will reset the counter, and outstanding requests will never get closed].
Instead of using a global, wrap your output function as a closure within the createServer callback. Then it'll have access to the local res variable at all times.
This code works for me:
var http = require("http"),
    sys = require("sys");

sys.puts("Starting...");

var srv = http.createServer(function(req, res){
    res.writeHeader(200, {"Content-Type": "text/plain"});
    var cnt = 0;
    var output = function() {
        res.write("Hello World!\n");
        if (++cnt < 10) {
            setTimeout(output, 1000);
        } else {
            res.end();
        }
    };
    output();
}).listen(81);
Note however that the browser won't render anything until the connection has closed because the relevant headers that tell it to display as it's downloading aren't there. I tested the above using telnet.
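For reference, a minimal sketch of that kind of check in Node itself, a raw TCP client in the spirit of the telnet test (the port matches the server above):

// Raw TCP client that prints each chunk as it arrives, so you can watch
// the ten "Hello World!" writes show up one second apart.
var net = require("net");

var client = net.connect(81, "localhost", function() {
    client.write("GET / HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n\r\n");
});
client.on("data", function(chunk) {
    console.log("chunk:", chunk.toString());
});
client.on("end", function() {
    console.log("connection closed");
});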
I'm not familiar with node.js, but I am familiar with server-side languages in general: when the browser sends a request to the server, the server creates a Session for that request, and any additional requests from the same browser (within the session lifetime) are treated as the same Session.
Probably by design, and for good reason, the requests from the same session are handled sequentially, one after the other: only after the server finishes handling one request will it start handling the next.