Hi guys, I have a problem that I don't really know how to solve. It's also a bit strange :/
Basically I have created this Lambda function to connect to a mysql DB using the node package 'mysql'.
If i run the function from command line on my pc using the command 'sls function run function1' and make different queries everything is fine.
But when I call the function from a web browser using the link, I have to refresh the page 2 times to get the right result, because on the first refresh the server responds with the old result.
I have noticed that from the command line I always get a different threadID, while from the web browser it is always the same.
Also, I don't close the connection in the Lambda function code, because everything is fine when I run the function from the command line. But from the browser I can only make 2 queries, and then I get a message saying that I cannot use a closed connection.
So it seems like Lambda store the old query result when I call it from web browser.
Obviously I'm making some stupid mistake, but I don't know how to solve it.
Does anyone have an idea?
Thanks :)
'use strict';
// npm packages
var mysql = require('mysql');

/**
 * Lambda entry point: looks up City and Address for the e-mail address in
 * `event.email` and returns the result rows through `callback`.
 *
 * Fixes over the original:
 *  - `callback` is now declared as the third handler parameter. The original
 *    called `callback(...)` but its signature was `function(event, context)`,
 *    so every return path threw a ReferenceError.
 *  - The connection and all result state now live inside the handler. Lambda
 *    reuses warm containers between invocations, so the original module-level
 *    variables (`goNext`, `error`, `dataColumnTable`) kept the previous
 *    request's values — the "old result on first refresh" bug.
 *  - The connection is closed after every query, so a reused container never
 *    accumulates open connections or tries to reuse a closed one.
 *  - The deasync busy-wait loop is gone: the handler simply completes from
 *    within the query callback, which is the idiomatic async style.
 */
module.exports.handler = function (event, context, callback) {
  // A fresh connection per invocation: no state survives container reuse.
  var connection = mysql.createConnection({
    host     : 'hostAddress',
    user     : 'Puser',
    password : 'password',
    port     : '3306',
    database : 'database1',
  });

  connection.query('SELECT City, Address FROM Person WHERE E_Mail=?', event.email, function (err, rows) {
    // Close on both paths so a warm container never holds a dead connection.
    connection.end();
    if (err) {
      console.log("Cannot connect to DB");
      console.log(err);
      return callback('Error ' + err);
    }
    console.log("data from column acquired!");
    return callback(null, rows); // JSON-serializable result rows
  });
};
Disclaimer: I'm not very familiar with AWS and/or AWS Lambda.
http://docs.aws.amazon.com/lambda/latest/dg/programming-model-v2.html states (emphasis mine):
Your Lambda function code must be written in a stateless style, and have no affinity with the underlying compute infrastructure. Your code should expect local file system access, child processes, and similar artifacts to be limited to the lifetime of the request. Persistent state should be stored in Amazon S3, Amazon DynamoDB, or another cloud storage service. Requiring functions to be stateless enables AWS Lambda to launch as many copies of a function as needed to scale to the incoming rate of events and requests. These functions may not always run on the same compute instance from request to request, and a given instance of your Lambda function may be used more than once by AWS Lambda.
Opening a connection and storing it in a variable outside your handler function is state. The connection will likely be closed between requests or even before your first request. Your lambda function may be reused (hence identical thread ids).
My assumption would be (and an attempt to solve this problem), that you need to create the connection on every request (i.e., inside your handler) and may not expect any value be as initialized or as on last request. (except for constants probably).
Related
The answer from Why shouldn't I use global variables in JavaScript for something that's constant? listed 8 issues with global variables, the # 2 is
If you have any asynchronous code that modifies globals or
timer-driven code that modifies globals and more than one asynchronous
operation can be in flight at the same time, the multiple async
operations can step on each other through the modification of the same
globals.
While the statement is easy to understand I came across a strange socket.io problem I can't figure out why: I find that if one global socket.io client is used for 2 connections (wrongly), the 2nd connection gets the 1st connection message. I create a sample project https://github.com/qiulang/2sockets to demonstrate the problem.
The server logic is simple, when client connects it needs to send a login message with user_id , if the server finds the same user_id login with a different socket.id, it decides that this is the case that the same user login from a different client so it will emit logoff message to the first socket.io client. When the client gets logoff message it will close its connection.
// Maps each user_id to the socket.id of that user's most recent connection.
let records = {}

io.on("connection", (socket) => {
  // A "login" carries the user's id; if that user already has a live
  // socket recorded, tell the old socket to log off before recording
  // this one as the current connection.
  socket.on("login", ({ user_id, client }) => {
    const previousSocketId = records[user_id];
    if (previousSocketId) {
      log(`The same ${user_id} with ${previousSocketId} has logged in, let him log off first`);
      io.to(previousSocketId).emit('logoff', 'another login');
    }
    records[user_id] = socket.id;
  });

  // Trace every disconnect with the reason socket.io reports.
  socket.on("disconnect", (reason) => {
    log(`${socket.id} disconnected with reason ${reason}`);
  });
});
The client uses a global variable instead of function scope variables. But in main() there are 2 connections, then the second connection gets the logoff message wrongly.
// Demonstrates the bug under discussion: `socket` is assigned without any
// declaration keyword, so it becomes an implicit GLOBAL shared by every
// call to setupWS(). The second call overwrites it, so the handlers
// registered by the first call read the second connection's socket.
function setupWS(client) {
// let socket = io(server, { transports: ['websocket']}) //Should use a function scope variable
socket = io(server, { transports: ['websocket']}) //wrong!! Should NOT use a global variable
socket.on('connect', function () {
// By the time these handlers run, `socket` may already point at a newer
// connection — that is why the log below can show the wrong socket.id.
log(`${client} connected to local ws server with ${socket.id}`)
socket.emit('login', { user_id, client })
})
socket.on('logoff', (msg) => {
// Logs (and closes!) whatever connection the global currently refers to,
// not necessarily the one that received the 'logoff' message.
log(`${socket.id}:${client} should be kicked off for: ${msg}`)
socket.close()
})
}
// Drives the demo: open one connection immediately, then a second one five
// seconds later so the global-variable clobbering can be observed.
function main() {
  const DELAY_MS = 5000;
  setupWS('nodejs');
  setTimeout(function () {
    log('open another connection in 5 seconds');
    setupWS("browser");
  }, DELAY_MS);
}
main();
When run the code from the client side, I will see log like,
nodejs connected to local ws server with ijqTzPl2SXHmB-U0AAAC
browser connected to local ws server with l7vCcbeOmVU5d_TSAAAE
l7vCcbeOmVU5d_TSAAAE:nodejs should be kicked off for: another login
From the server side, I will see log like,
The same 1_2_1000 with ijqTzPl2SXHmB-U0AAAC has logged in, let him log off first
l7vCcbeOmVU5d_TSAAAE disconnected with reason client namespace disconnect
So the server correctly sent to the 1st socket.id ijqTzPl2SXHmB-U0AAAC the logoff message while at the client side the log is l7vCcbeOmVU5d_TSAAAE:nodejs (NOT l7vCcbeOmVU5d_TSAAAE:browser) should be kicked off. And it is indeed the 2nd socket.id l7vCcbeOmVU5d_TSAAAE called close()
--- update ---
With jfriend00 answer I understand my problem. What I want to add is this problem was introduced by #6 problem in his answer Why shouldn't I use global variables in JavaScript for something that's constant?
A simple omission of the keyword "var" on a local variable makes it a
global variable and can confuse the heck out of code
Normally we require each node.js file adding "use strict" the first line, but now I realize running node --use_strict is much safer.
BTW, if I may add one more problem of using global variables to his excellent answer there, I will add To figure out where and how global variables are modified is quite painful.
When you call setupWs() the second time, it overwrites the global socket variable. Then, when a message comes in to the first connection, you log socket.id, but you aren't logging the connection that actually just got the message, you're logging from the socket global variable which now points to the 2nd connection.
So, your logging is at fault here. It will log socket.id from the same socket global variable, no matter which connection is actually getting a message. So, faulty logging is making it appear that a different connection is getting the message than is actually the case.
And, in addition to the logging, the two places you use socket inside a message handler are also referring to the wrong socket. So, you need to store the socket locally. If you want global access to the last socket you created, you can also store it globally.
// Corrected version: the socket lives in a function-scoped const, so each
// call to setupWS() keeps its own reference and later calls cannot clobber
// the handlers registered here. The global `socket` remains only as a
// convenience alias to the most recently created connection.
function setupWS(client) {
  const conn = io(server, { transports: ['websocket']});
  socket = conn; // expose the latest socket for outside code

  conn.on('connect', () => {
    log(`${client} connected to local ws server with ${conn.id}`);
    conn.emit('login', { user_id, client });
  });

  conn.on('logoff', (msg) => {
    log(`${conn.id}:${client} should be kicked off for: ${msg}`);
    conn.close();
  });
}
And, as you have seen and apparently know already, this is an example of the kind of problem using a global variable can create.
Let's say I am building a social app. I want to log into multiple accounts (one per browser instance) without an user interface (all via node), and by calling all respective endpoints to log in and start chatting.
The important part is to test when an user closed the tab or logs out or leaves the group and therefore the websocket's connection closes.
If I understand you correctly.
You would like to make a server-side event happen whenever a client connects or disconnects, without any html,css.... or any other user interface.
You can do it like this in node :
For connection you use :
// Fixed: the original was missing the closing quote and the comma after the
// event name (`"connection,function...`), which is a syntax error.
Server.on("connection", function (ws) { /* some stuff... */ })
The function that is called on connection will get the websocket that connected as parameter by default. I just use a lambda function here you can also call one that will then get the websocket as parameter.
For disconnection you put a function in the Server.on function to monitor when the client disconnected. Like this :
// Fixed: the original was missing the closing quote and the comma after the
// event name, which is a syntax error. Also renamed the close handler's
// parameter: the original named it `ws`, shadowing the socket above.
Server.on("connection", function (ws) {
  // Fires once per client; `ws` is that client's socket.
  ws.onclose = function (event) {
    // some stuff...
    // NOTE(review): the close handler receives a close event here, not the
    // socket itself — confirm against the ws package documentation.
  }
})
You can again replace the lambda function by another one.
Server is in my case equal to this :
// `Server` in the snippets above is the ws package's WebSocket server class;
// `server` is the listening instance. Replace `someport` with a real port.
const wsserver = require("ws").Server
const server = new wsserver({ port: someport })
But it can vary.
All you need to do apart from that is connect the client.
I do it like this but it can vary as well.
// Client side: connect to the server created above (same port as `someport`).
const ws = new WebSocket("ws://localhost:someport");
I hope this is helpful.
I am writing a Node.js app and I use Socket.IO as the data transfer system, so requests should be specific to each user. How can I achieve this?
My actual code;
node:
// One 'connection' event fires per connected client; the socket handed to
// this handler is unique to that client, so per-user traffic is already
// separated at this point.
io.on('connection', function (clientSocket) {
  clientSocket.on('loginP', function (data) {
    console.log(data);
  });
});
js:
// Create a single shared connection for the page. With forceNew:false the
// client reuses an existing manager for the same endpoint rather than
// opening another connection.
var socket = io('', { forceNew : false });

$("#loginbutton").click(function () {
  // Gather the credentials from the login form and emit them once per click.
  var credentials = {
    name : $("#login input[name='username']").val(),
    pass : $("#login input[name='pass']").val()
  };
  socket.emit("loginP", credentials);
});
It returns one additional piece of data per request, and this is a problem for me. Can I solve this with Socket.IO, or should I use another module — and if so, which one?
If I understand your question correctly (It's possible I don't), you want to have just one connection from each user's browser to your nodejs program.
On the nodejs side, your io.on('connection'...) event fires with each new incoming user connection, and gives you the socket for that specific connection. So, keep track of your sockets; you'll have one socket per user.
On the browser side, you should build your code to ensure it only calls
var socket = io(path, ...);
once for each path (your path is ''). The forceNew option is for situations where you have multiple paths from one program.
Google Cloud SQL advertises that it's only $0.0150 per hour for the smallest machine type, and I'm being charged for every hour, not just hours that I'm connected. Is this because I'm using a pool? How do I setup my backend so that it queries the cloud db only when needed so I don't get charged for every hour of the day?
const mysql = require('mysql');
const pool = mysql.createPool({
host : process.env.SQL_IP,
user : 'root',
password : process.env.SQL_PASS,
database : 'mydb',
ssl : {
[redacted]
}
});
function query(queryStatement, cB){
pool.getConnection(function(err, connection) {
// Use the connection
connection.query(queryStatement, function (error, results, fields) {
// And done with the connection.
connection.destroy();
// Callback
cB(error,results,fields);
});
});
}
This is not so much about the pool as it is about the nature of Cloud SQL. Unlike App Engine, Cloud SQL instances are always up. I learned this the hard way one Saturday morning when I'd been away from the project for a week. :)
There's no way to spin them down when they're not being used, unless you explicitly go stop the service.
There's no way to schedule a service stop, at least within the GCP SDK. You could alway write a cron job, or something like that, that runs a little gcloud sql instances patch [INSTANCE_NAME] --activation-policy NEVER command at, for example, 6pm local time, M-F. I was too lazy to do that, so I just set a calendar reminder for myself to shut down my instance at the end of my workday.
Here's the MySQL Instance start/stop/restart page for the current SDK's docs:
https://cloud.google.com/sql/docs/mysql/start-stop-restart-instance
On an additional note, there is an ongoing 'Feature Request' in the GCP Platform to start/stop the Cloud SQL (2nd Gen), according to the traffic as well. You can also visit the link and provide your valuable suggestions/comments there as well.
I took the idea from #ingernet and created a cloud function which starts/stops the CloudSQL instance when needed. It can be triggered via a scheduled job so you can define when the instance goes up or down.
The details are here in this Github Gist (inspiration taken from here). Disclaimer: I'm not a python developer so there might be issues in the code, but at the end it works.
Basically you need to follow these steps:
Create a pub/sub topic which will be used to trigger the cloud function.
Create the cloud function and copy in the code below.
Make sure to set the correct project ID in line 8.
Set the trigger to Pub/Sub and choose the topic created in step 1.
Create a cloud scheduler job to trigger the cloud function on a regular basis.
Choose the frequency when you want the cloud function to be triggered.
Set the target to Pub/Sub and define the topic created in step 1.
The payload should be set to start [CloudSQL instance name] or stop [CloudSQL instance name] to start or stop the specified instance (e.g. start my_cloudsql_instance will start the CloudSQL instance with the name my_cloudsql_instance)
Main.py:
from googleapiclient import discovery
from oauth2client.client import GoogleCredentials
import base64
from pprint import pprint
# Application Default Credentials: resolved automatically inside the Cloud
# Functions runtime.
credentials = GoogleCredentials.get_application_default()
# Cloud SQL Admin API client (v1beta4). NOTE(review): cache_discovery=False
# presumably avoids the discovery file-cache warning in serverless runtimes
# -- confirm against google-api-python-client docs.
service = discovery.build('sqladmin', 'v1beta4', credentials=credentials, cache_discovery=False)
# Must be replaced with the real GCP project ID before deploying.
project = 'INSERT PROJECT_ID HERE'
def start_stop(event, context):
    """Pub/Sub-triggered Cloud Function entry point.

    Expects the message payload to be '<command> <instance_name>', where
    command is 'start' or 'stop' (e.g. 'start my_cloudsql_instance').

    Fix over the original: `split(' ', 1)` raised ValueError when the
    payload contained no space (e.g. just 'start'); `partition` never
    raises, and malformed payloads now fall through to the unknown-command
    branch instead of crashing the function.

    Args:
        event:   Pub/Sub event dict; event['data'] is base64-encoded.
        context: Cloud Functions event metadata (unused).
    """
    print(event)
    pubsub_message = base64.b64decode(event['data']).decode('utf-8')
    print(pubsub_message)
    command, _, instance_name = pubsub_message.partition(' ')
    if command == 'start' and instance_name:
        start(instance_name)
    elif command == 'stop' and instance_name:
        stop(instance_name)
    else:
        print("unknown command " + command)
def start(instance_name):
    """Bring the named Cloud SQL instance up (activation policy ALWAYS)."""
    print("starting " + instance_name)
    patch(instance_name, "ALWAYS")
def stop(instance_name):
    """Shut the named Cloud SQL instance down (activation policy NEVER)."""
    print("stopping " + instance_name)
    patch(instance_name, "NEVER")
def patch(instance, activation_policy):
    """Apply a new activation policy to a Cloud SQL instance.

    Reads the instance's current settingsVersion first, since the Admin API
    requires it on every settings patch, then submits the patch and pretty-
    prints the API's response (an operation resource).

    Args:
        instance:          Cloud SQL instance name within `project`.
        activation_policy: "ALWAYS" to run the instance, "NEVER" to stop it.
    """
    current = service.instances().get(project=project, instance=instance).execute()
    new_settings = {
        "settings": {
            "settingsVersion": current["settings"]["settingsVersion"],
            "activationPolicy": activation_policy,
        }
    }
    outcome = service.instances().patch(
        project=project,
        instance=instance,
        body=new_settings).execute()
    pprint(outcome)
Requirements.txt
google-api-python-client==1.10.0
google-auth-httplib2==0.0.4
google-auth==1.19.2
oauth2client==4.1.3
Trying to automate sending mails to a queue of email ids stored in collection email. I am trying meteor for the first time so please pardon my lack of understanding if I have any.
I am using the following code (on isServer ) :
Meteor.methods({
// Walks the queued addresses (status "no") and calls the mail API once per
// address, synchronously, logging each response body.
// NOTE(review): forEach here runs directly on the reactive cursor returned
// by find(); calling .fetch() first would materialize all results before
// starting the blocking HTTP calls — suspected cause of the early stop.
'sendEmails': function () {
this.unblock();
Emails.find({status: "no"}).forEach(function (obj) {
// Synchronous HTTP call (no callback given), one request per email.
var result = Meteor.http.call("GET", "http://someapidomain/email.php?email=" + obj.email);
console.log(result.content);
});
}
});
This code is called at Meteor.startup.
When this app is run, the API is called and I get the results for 13 emails, sometimes for 5 emails, sometimes 2 emails, and then nothing happens. Please help.
Let me know if more detail is required.
I would suggest doing
Emails.find({status: "no"}).fetch().forEach(...)
Note the fetch() in the chain, which ensures that all Mongo communication has completed prior to doing HTTP calls. find() alone returns a reactive cursor, which means you're mixing Mongo activity and HTTP activity which might not be playing nicely - just a theory.
Reference: http://docs.meteor.com/#/basic/Mongo-Collection-find