I am trying to build a connection between two computers on a local network, one using a slightly modified version of the Boost Asio C++ TCP asynchronous server sample, the other one using NodeJS.
tcp_client.js:
var net = require('net');
var HOST = '127.0.0.1';
var PORT = 14002;
var client = new net.Socket();
client.connect(PORT, HOST, function() {
console.log('CONNECTED TO: ' + HOST + ':' + PORT);
// Write a message to the socket as soon as the client is connected
//the server will receive it as message from the client
client.write('Hello');
});
// Add a 'data' event handler for the client socket
// data is what the server sent to this socket
client.on('data', function(data) {
var fs = require('fs');
fs.writeFile("test.txt", data, function(err) {
if(err) {
return console.log(err);
}
client.write("Data written"); // returns successful
console.log("The file was saved!");
});
});
// Add a 'close' event handler for the client socket
client.on('close', function() {
console.log('Connection closed');
});
tcpServer.cpp:
#include <ctime>
#include <iostream>
#include <fstream>
#include <string>
#include <unistd.h>
#include <boost/bind.hpp>
#include <boost/shared_ptr.hpp>
#include <boost/enable_shared_from_this.hpp>
#include <boost/asio.hpp>
extern std::string _coordinates;
using namespace std;
using boost::asio::ip::tcp;
inline std::string make_daytime_string() {
std::time_t now = std::time(0);
return std::ctime(&now);
}
class tcp_connection
// Using shared_ptr and enable_shared_from_this
// because we want to keep the tcp_connection object alive
// as long as there is an operation that refers to it.
: public boost::enable_shared_from_this<tcp_connection>
{
public:
typedef boost::shared_ptr<tcp_connection> pointer;
static pointer create(boost::asio::io_service& io_service) {
cout << "Creates a pointer for the tcp connection" <<endl;
return pointer(new tcp_connection(io_service));
}
tcp::socket& socket() {
return socket_;
}
// Call boost::asio::async_write() to serve the data to the client.
// We are using boost::asio::async_write(),
// rather than ip::tcp::socket::async_write_some(),
// to ensure that the entire block of data is sent.
void start() {
while(1) {
start_read();
// This is going to read after every 1ms the _coordinates variable
usleep(1000);
m_message = _coordinates;
boost::asio::async_write(
socket_,
boost::asio::buffer(m_message),
boost::bind(
&tcp_connection::handle_write,
shared_from_this(),
boost::asio::placeholders::error,
boost::asio::placeholders::bytes_transferred
)
);
}
}
private:
tcp_connection(boost::asio::io_service& io_service)
: socket_(io_service)
{
}
void start_read() {
// Start an asynchronous operation to read a newline-delimited message.
// When read, handle_read should kick in
boost::asio::async_read_until(
socket_,
input_buffer_,
'\n',
boost::bind(
&tcp_connection::handle_read,
shared_from_this(),
boost::asio::placeholders::error
)
);
}
// When stream is received, handle the message from the client
void handle_read(const boost::system::error_code& ec) {
std::cout << "HANDLE_READ - line 101" << "\n";
messageFromClient_ = "";
if (!ec) {
// Extract the newline-delimited message from the buffer.
std::string line;
std::istream is(&input_buffer_);
std::getline(is, line);
// Empty messages are heartbeats and so ignored.
if (!line.empty()) {
messageFromClient_ += line;
std::cout << "Received: " << line << "\n";
}
start_read();
}
else {
std::cout << "Error on receive: " << ec.message() << "\n";
}
}
// handle_write() is responsible for any further actions
// for this client connection.
void handle_write(const boost::system::error_code& /*error*/, size_t /*bytes_transferred*/) {
m_message += "helloo\n";
}
tcp::socket socket_;
std::string m_message;
boost::asio::streambuf input_buffer_;
std::string messageFromClient_;
};
class tcp_server {
public:
// Constructor: initialises an acceptor to listen on TCP port 14002.
tcp_server(boost::asio::io_service& io_service)
: acceptor_(io_service, tcp::endpoint(tcp::v4(), 14002))
{
// start_accept() creates a socket and
// initiates an asynchronous accept operation
// to wait for a new connection.
start_accept();
}
private:
void start_accept() {
// creates a socket
cout << "creating a new socket for the communication" <<endl;
tcp_connection::pointer new_connection = tcp_connection::create(acceptor_.get_io_service());
// initiates an asynchronous accept operation
// to wait for a new connection.
acceptor_.async_accept(
new_connection->socket(),
boost::bind(
&tcp_server::handle_accept,
this,
new_connection,
boost::asio::placeholders::error
)
);
}
// handle_accept() is called when the asynchronous accept operation
// initiated by start_accept() finishes. It services the client request
void handle_accept(tcp_connection::pointer new_connection, const boost::system::error_code& error) {
if (!error) {
cout << "Starting the new tcp connection" <<endl;
new_connection->start();
}
// Call start_accept() to initiate the next accept operation.
start_accept();
}
tcp::acceptor acceptor_;
};
int inline launch_server() {
try {
boost::asio::io_service io_service;
tcp_server server(io_service);
// Run the io_service object to perform asynchronous operations.
io_service.run();
}
catch (std::exception& e) {
std::cerr << e.what() << std::endl;
}
return 0;
}
Sending messages from C++ to NodeJS works (the async_write(...) in the start() while loop, which sends _coordinates every 1ms), but I can't manage to handle the messages coming from my NodeJS program:
When both programs run on my computer (on localhost), the output of ss -tp | grep 14002 (14002 being the port) shows that the Recv-Q/Send-Q of the NodeJS process are empty (and socket.write(...) returns successfully), while, on the C++ side, the Recv-Q is constantly growing and the Send-Q is empty.
Moreover, none of the cout statements in handle_read() are printed, which means that async_read_until() never calls the handler.
I tried all the overloads of async_read_until(); none of them works. And since the messages are not of constant size, it seems I have no choice but to use read_until.
I hope I didn't forget any useful information. Thank you for your help!
You are basically saturating your CPU with the infinite while loop in your tcp_connection::start method. Not only is it saturating the CPU, it is also a bug in your design: start() is called from a completion handler, and as long as it never returns, io_service.run() can never dispatch any other handler, so handle_read is never invoked and incoming data piles up in the kernel's Recv-Q. Why would you want to continuously attach read handlers and write to the socket in an infinite loop? Most probably you want to write to the socket after receiving a request from the client.
Below are the methods I changed to make it work like a regular client-server:
void start() {
start_read();
// Snapshot the _coordinates variable once here;
// the reply is written from handle_read after each client request.
usleep(1000);
m_message = _coordinates;
}
void start_read() {
// Start an asynchronous operation to read a newline-delimited message.
// When read, handle_read should kick in
boost::asio::async_read_until(
socket_,
input_buffer_,
'\n',
boost::bind(
&tcp_connection::handle_read,
shared_from_this(),
boost::asio::placeholders::error
)
);
}
void handle_read(const boost::system::error_code& ec) {
messageFromClient_ = "";
if (!ec) {
// Extract the newline-delimited message from the buffer.
std::string line;
std::istream is(&input_buffer_);
std::getline(is, line);
// Empty messages are heartbeats and so ignored.
if (!line.empty()) {
messageFromClient_ += line;
std::cout << "Received: " << line << "\n";
}
// Reply to the client, then queue the next read.
boost::asio::async_write(
socket_,
boost::asio::buffer(m_message),
boost::bind(
&tcp_connection::handle_write,
shared_from_this(),
boost::asio::placeholders::error,
boost::asio::placeholders::bytes_transferred
)
);
start_read();
}
else {
std::cout << "Error on receive: " << ec.message() << "\n";
}
}
I am not sure what you are 'actually' trying to do based on the question as it stands now, but the above changes should be a good starting point.
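If what you actually want is the original behaviour, i.e. pushing _coordinates to the client at a fixed interval, the idiomatic Asio tool is a timer rather than a blocking loop. Below is a minimal sketch under that assumption; the timer_ member (a boost::asio::deadline_timer constructed with the same io_service as socket_) and both method names are mine, not part of the code above:
// Assumed new member: boost::asio::deadline_timer timer_;
// initialised in the constructor as timer_(io_service).
void start_timed_write() {
// Arm the timer; handle_timer runs on the io_service thread in ~1ms.
timer_.expires_from_now(boost::posix_time::milliseconds(1));
timer_.async_wait(
boost::bind(
&tcp_connection::handle_timer,
shared_from_this(),
boost::asio::placeholders::error
)
);
}
void handle_timer(const boost::system::error_code& ec) {
if (ec) return; // timer cancelled or connection going away
m_message = _coordinates; // snapshot the shared variable
boost::asio::async_write(
socket_,
boost::asio::buffer(m_message),
boost::bind(
&tcp_connection::handle_write,
shared_from_this(),
boost::asio::placeholders::error,
boost::asio::placeholders::bytes_transferred
)
);
start_timed_write(); // re-arm; never blocks io_service.run()
}
A real implementation should additionally make sure a new async_write is not started while the previous one is still in flight, for example by queueing outgoing messages.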
Context
I am getting high memory usage with the Vert.x circuit breaker. I am just using httpbin.org to get all-success responses. For individual requests it works fine, but while running a load test the JVM old-gen utilization spikes up.
Reproducer
I have the main verticle code, pasting it here itself:
public class CleanServer extends AbstractVerticle {
Logger logger = Logger.getLogger(CleanServer.class.getName());
@Override
public void start(Promise<Void> startPromise) throws Exception {
Router router = Router.router(vertx);
CircuitBreakerCache cbc = new CircuitBreakerCache(vertx);
router.route(HttpMethod.GET, "/get").handler(context -> {
List<String> domains = context.queryParam("user");
String domain = domains.get(0);
CircuitBreaker cb = cbc.getCircuitBreaker(domain + context.request().path());
HttpServerResponse serverResponse =
context.response().setChunked(true);
cb.executeWithFallback(promise -> {
WebClientOptions options = new WebClientOptions().setTryUseCompression(true).setTcpNoDelay(true).setTcpCork(true).setReceiveBufferSize(128).setConnectTimeout(400);
WebClient client = WebClient.create(vertx, options);
client.get(80, "httpbin.org", "/status/200")
.timeout(2000)
.send(ar -> {
if (ar.succeeded()) {
HttpResponse<Buffer> response = ar.result();
int statusCode = response.statusCode();
if (statusCode != 200) {
promise.fail(response.statusMessage());
} else {
serverResponse.end("Hello!!");
promise.complete();
}
} else {
promise.fail(ar.cause().getMessage());
}
});
}, v -> {
// Executed when the circuit is opened
logger.log(Level.INFO, domain + " Failed " + cb.state().toString() + " Error: Circuit open");
serverResponse.setStatusCode(200).setStatusMessage("Circuit Open").end("Circuit Open");
return context;
});
});
// Create the HTTP server
vertx.createHttpServer(new HttpServerOptions().setMaxInitialLineLength(10000))
// Handle every request using the router
.requestHandler(router)
// Start listening
.listen(8080)
// Print the port
.onSuccess(server ->
System.out.println(
"HTTP server started on port " + server.actualPort()
)
);
}
}
Circuit breaker options:
CircuitBreakerOptions()
.setMaxFailures(50)
.setTimeout(5000)
.setFallbackOnFailure(true)
.setResetTimeout(10000)
Steps to reproduce
API used: http://localhost:8080/get?user=abc
When I hit the above API at 50 QPS for 30 minutes, the Java heap fills up.
Extra
<vertx.version>4.2.6</vertx.version>
JVM params used:
-XX:+UseG1GC -Xms4g -Xmx4g -XX:InitiatingHeapOccupancyPercent=70 -XX:MaxGCPauseMillis=200 -XX:ParallelGCThreads=20 -XX:ConcGCThreads=5
JVM memory during the load test (screenshot not included).
Error:
WARNING: Thread Thread[vert.x-eventloop-thread-3,5,main] has been blocked for 3050 ms, time limit is 2000 ms
I think I am blocking the event-loop thread somewhere, but I'm not sure where exactly, as the code seems pretty simple and follows the documentation.
For the sake of simplicity, let's assume I have only one uWebSockets instance running on my server:
struct UserData
{
uWS::WebSocket<true, uWS::SERVER> *ws;
bool logged_in = false;
ID user_id;
};
uWS::SSLApp()
.ws<UserData>(
"/*",
{
.open =
[](auto *ws, auto *req) {
std::cout << "user with ip: " << ws->getRemoteAddress()
<< " connected" << std::endl;
},
.message =
[](auto *ws, std::string_view message,
uWS::OpCode opCode) {
auto userData = static_cast<UserData *>(ws->getUserData());
// give websocket pointer to a session
userData->ws = ws;
Session session;
session.process_message(*userData, message);
}
})
.listen(9001,
[](auto *token) {
if (token)
std::cout << "listening on port 9001" << std::endl;
else
std::cout << "failed to listen on port 9001" << std::endl;
})
.run();
possible implementation of Session:
class Session {
public:
void process_message(UserData &userData, const std::string_view &message) {
// Stand-in for work that takes a long time to finish.
std::this_thread::sleep_for(std::chrono::seconds(1));
}
};
Inside the function Session::process_message, I have code that takes a long time to finish.
How do I return control to the event loop in order for it to process some other sessions?
In other words, how do I design the program to be fully asynchronous/run session concurrently?
The library is asynchronous.
Does it mean that the library will handle the other connections concurrently and I have nothing to worry about?
During the sleep, uWebSockets will not process other network connections.
Change the sleep to fast code that only gets a mutex, adds to a queue, and releases a mutex.
Make another thread (let's call it ProcessingThread) that gets the same mutex, removes messages from that queue, releases the mutex, and processes the messages. ProcessingThread may take as long as it needs to process messages without slowing down the uWebSockets thread.
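A minimal sketch of that hand-off (the MessageQueue type, g_queue, and the worker wiring are illustrative, not part of the uWebSockets API):
#include <condition_variable>
#include <mutex>
#include <queue>
#include <string>
// A thread-safe queue: the .message handler pushes into it and returns
// immediately; ProcessingThread drains it at its own pace.
struct MessageQueue {
std::mutex m;
std::condition_variable cv;
std::queue<std::string> q;
void push(std::string msg) {
{ std::lock_guard<std::mutex> lk(m); q.push(std::move(msg)); }
cv.notify_one();
}
std::string pop() { // blocks the worker, never the event loop
std::unique_lock<std::mutex> lk(m);
cv.wait(lk, [this] { return !q.empty(); });
std::string msg = std::move(q.front());
q.pop();
return msg;
}
};
MessageQueue g_queue;
// In the .message handler: g_queue.push(std::string(message));
// ProcessingThread: std::thread worker([] { for (;;) { auto msg = g_queue.pop(); /* long work */ } });
Keep in mind that any reply sent back over the WebSocket from ProcessingThread has to be marshalled back onto the uWebSockets loop (the library provides Loop::defer for that), because the socket objects are not thread-safe.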
In this example, https://github.com/kljensen/golang-html5-sse-example/blob/d4eba81ddea2b4191f039adb6929086ca6b39c0f/server.go#L130, you can block on a channel. But doing that in my HandleExecute results in the browser/client connection status remaining stuck in a pending state, and nothing gets sent to the client.
If I remove the for loop and use a select statement with a default case that writes back no data, then it works (execute writes and flushes data), except that the handler returns and the client reconnects every 5 seconds instead of maintaining the connection. Done that way, the client gets streamed data only at the reconnect intervals.
So users end up seeing a delay of anywhere up to 5 seconds, depending on when HandleExecuteRequest gets hit: HandleExecute could be ready to write, but the client may not have reconnected yet. I hope that makes sense.
How can I write these handlers to maintain a constant connection instead of reconnecting?
func (s *Server) HandleExecuteRequest(w http.ResponseWriter, r *http.Request) {
err := r.ParseForm()
if err != nil {
log.Printf("Error parsing request: %v", err)
fmt.Fprintf(w, "Error parsing request: %v", err)
return
}
decoder := schema.NewDecoder()
var f Form
err = decoder.Decode(&f, r.PostForm)
if err != nil {
log.Printf("Error decoding form: %v", err)
fmt.Fprintf(w, "Error decoding form: %v", err)
return
}
s.clientMap[r.RemoteAddr] <- buildCommand(&f)
}
func (s *Server) HandleExecute(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "text/event-stream")
w.Header().Set("Cache-Control", "no-cache")
w.Header().Set("Connection", "keep-alive")
s.addClient(r)
for {
cmd := <-s.clientMap[r.RemoteAddr]
execute(w, cmd)
}
}
Javascript
var source = new EventSource('<host>/execute');
source.onmessage = function(e) {
document.querySelector("code").innerHTML +="<span class='font-weight-bold' style='white-space: pre-wrap; padding-left: 200px;'>" + e.data + "</span>" + "<br>"
};
If I update HandleExecute to the below, I get a continuous connection, but then HandleExecuteRequest gets stuck in pending when it is hit. It could be how I'm setting up the clients and handling the channels in this case.
func (s *Server) HandleExecute(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "text/event-stream")
w.Header().Set("Cache-Control", "no-cache")
w.Header().Set("Connection", "keep-alive")
s.addClient(r)
for {
select {
case cmd := <-s.clientMap[r.RemoteAddr]:
execute(w, cmd)
default:
fmt.Fprint(w, ": no data\n\n")
}
}
}
My issue had nothing to do with how the SSE handler was set up. My channel was not being set up correctly and was blocking connections (in Go, a send on a nil channel, such as one returned by a map lookup that misses, blocks forever).
I posted a question earlier asking why my server (written in C++ and boost::asio) can't connect with a client (written in JavaScript). Is the problem that JavaScript WebSockets are different from boost::asio sockets? Does boost::asio not support WebSockets? What is the easiest way to work this out?
Boost.Beast, now part of Boost, is built on top of Boost.Asio and works the way you expect. It comes with example code and documentation. Check it out here: www.boost.org/libs/beast
Here's a complete program that sends a message to the echo server:
#include <boost/beast/core.hpp>
#include <boost/beast/websocket.hpp>
#include <boost/asio/connect.hpp>
#include <boost/asio/ip/tcp.hpp>
#include <cstdlib>
#include <iostream>
#include <string>
namespace beast = boost::beast; // from <boost/beast.hpp>
namespace http = beast::http; // from <boost/beast/http.hpp>
namespace websocket = beast::websocket; // from <boost/beast/websocket.hpp>
namespace net = boost::asio; // from <boost/asio.hpp>
using tcp = boost::asio::ip::tcp; // from <boost/asio/ip/tcp.hpp>
// Sends a WebSocket message and prints the response
int main(int argc, char** argv)
{
try
{
// Check command line arguments.
if(argc != 4)
{
std::cerr <<
"Usage: websocket-client-sync <host> <port> <text>\n" <<
"Example:\n" <<
" websocket-client-sync echo.websocket.org 80 \"Hello, world!\"\n";
return EXIT_FAILURE;
}
std::string host = argv[1];
auto const port = argv[2];
auto const text = argv[3];
// The io_context is required for all I/O
net::io_context ioc;
// These objects perform our I/O
tcp::resolver resolver{ioc};
websocket::stream<tcp::socket> ws{ioc};
// Look up the domain name
auto const results = resolver.resolve(host, port);
// Make the connection on the IP address we get from a lookup
auto ep = net::connect(ws.next_layer(), results);
// Update the host_ string. This will provide the value of the
// Host HTTP header during the WebSocket handshake.
// See https://tools.ietf.org/html/rfc7230#section-5.4
host += ':' + std::to_string(ep.port());
// Set a decorator to change the User-Agent of the handshake
ws.set_option(websocket::stream_base::decorator(
[](websocket::request_type& req)
{
req.set(http::field::user_agent,
std::string(BOOST_BEAST_VERSION_STRING) +
" websocket-client-coro");
}));
// Perform the websocket handshake
ws.handshake(host, "/");
// Send the message
ws.write(net::buffer(std::string(text)));
// This buffer will hold the incoming message
beast::flat_buffer buffer;
// Read a message into our buffer
ws.read(buffer);
// Close the WebSocket connection
ws.close(websocket::close_code::normal);
// If we get here then the connection is closed gracefully
// The make_printable() function helps print a ConstBufferSequence
std::cout << beast::make_printable(buffer.data()) << std::endl;
}
catch(std::exception const& e)
{
std::cerr << "Error: " << e.what() << std::endl;
return EXIT_FAILURE;
}
return EXIT_SUCCESS;
}
This is my first question.
I am trying to build a websocket server that can handle 50-100 concurrent users and send 50 messages per second.
On my dev machine it works for one user, but when uploaded to the server it slows down more and more as users are added.
Thanks for your time.
SuperWebSocket code sample:
public class SuperWs
{
WebSocketServer server;
WebSocketSession[] array = new WebSocketSession[100];
public SuperWs()
{
server = new WebSocketServer();
server.Setup(new ServerConfig
{
Port = 23023,
Name = "super web socket",
LogAllSocketException = true,
LogBasicSessionActivity = true,
});
server.NewSessionConnected += NewSessionConnected;
server.NewMessageReceived += NewMessageReceived;
server.SessionClosed += SessionClosed;
server.Start();
Task.Run(() =>
{
while (true)
{
Thread.Sleep(1000);
Stopwatch sw = new Stopwatch();
sw.Start();
for (int i = 0; i < 50; i++)
{
for (int j = 0; j < array.Length; j++)
{
if (array[j] != null)
array[j].Send(string.Format("Hello! ms: {0}, mes: {1}", sw.ElapsedMilliseconds, i));
}
}
sw.Stop();
}
});
}
private void SessionClosed(WebSocketSession session, CloseReason value)
{
Console.WriteLine("{0} {1}", session.SessionID, value);
}
void NewMessageReceived(WebSocketSession session, string value)
{
Console.WriteLine("{0} {1}", session.SessionID, value);
}
private int count;
private void NewSessionConnected(WebSocketSession session)
{
Console.WriteLine(session);
array[count++] = session;
}
}
First, when client and server are not on the same machine (unlike in your dev environment), the network will introduce some delay. If you have 'n' clients connected, it will take 'n * delay' to send a message to all nodes. You should create a separate loop per session: basically, on NewSessionConnected, create a new loop that sends 50 messages each second and ends when the session is disconnected (rather than while(true)).
Since you are going to have that parallel work, you should use async programming and await Task.Delay(1000) rather than Thread.Sleep(1000): the latter blocks the thread, while the former allows the thread to go and do something else in the meantime. Unfortunately, SuperWebSocket does not support async programming.
Pseudo-code:
private async void NewSessionConnected(WebSocketSession session)
{
while (session.IsConnected)
{
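// 1000 / 50 = 20 ms between sends, i.e. 50 messages per second per session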
await Task.Delay(1000 / 50);
session.Send("Hello");
}
}
Also, you have to start thinking about what you are going to do when nodes disconnect :)
As a side note, I develop a WebSocket framework that supports async programming; that amount of traffic should be no problem for it.