ChatServer
一个TCP服务器必然会有连接的接收,维持,收发数据等逻辑。那我们就要基于asio完成这个服务的搭建。主服务是这个样子的
#include <condition_variable>
#include <csignal>
#include <iostream>
#include <mutex>
#include <string>
#include <thread>
#include "AsioIOServicePool.h"
#include "CServer.h"
#include "ConfigMgr.h"
#include "LogicSystem.h"
using namespace std;
bool bstop = false;
std::condition_variable cond_quit;
std::mutex mutex_quit;
int main()
{
try {
auto &cfg = ConfigMgr::Inst();
auto pool = AsioIOServicePool::GetInstance();
boost::asio::io_context io_context;
boost::asio::signal_set signals(io_context, SIGINT, SIGTERM);
signals.async_wait([&io_context, pool](auto, auto) {
io_context.stop();
pool->Stop();
});
auto port_str = cfg["SelfServer"]["Port"];
CServer s(io_context, atoi(port_str.c_str()));
io_context.run();
}
catch (std::exception& e) {
std::cerr << "Exception: " << e.what() << endl;
}
}
CServer类的声明
#include <boost/asio.hpp>
#include "CSession.h"
#include <memory.h>
#include <map>
#include <mutex>
using namespace std;
using boost::asio::ip::tcp;
class CServer
{
public:
CServer(boost::asio::io_context& io_context, short port);
~CServer();
void ClearSession(std::string);
private:
void HandleAccept(shared_ptr<CSession>, const boost::system::error_code & error);
void StartAccept();
boost::asio::io_context &_io_context;
short _port;
tcp::acceptor _acceptor;
std::map<std::string, shared_ptr<CSession>> _sessions;
std::mutex _mutex;
};
构造函数中监听对方连接
CServer::CServer(boost::asio::io_context& io_context, short port):_io_context(io_context), _port(port),
_acceptor(io_context, tcp::endpoint(tcp::v4(),port))
{
cout << "Server start success, listen on port : " << _port << endl;
StartAccept();
}
接受连接的函数
void CServer::StartAccept() {
auto &io_context = AsioIOServicePool::GetInstance()->GetIOService();
shared_ptr<CSession> new_session = make_shared<CSession>(io_context, this);
_acceptor.async_accept(new_session->GetSocket(), std::bind(&CServer::HandleAccept, this, new_session, placeholders::_1));
}
AsioIOServicePool
从AsioIOServicePool中取出一个可用的io_context来构造Session,然后将新接受连接的socket交给这个Session保管。
AsioIOServicePool已经在前面讲解很多次了,它的声明如下
#include <vector>
#include <boost/asio.hpp>
#include "Singleton.h"
class AsioIOServicePool:public Singleton<AsioIOServicePool>
{
friend Singleton<AsioIOServicePool>;
public:
using IOService = boost::asio::io_context;
using Work = boost::asio::io_context::work;
using WorkPtr = std::unique_ptr<Work>;
~AsioIOServicePool();
AsioIOServicePool(const AsioIOServicePool&) = delete;
AsioIOServicePool& operator=(const AsioIOServicePool&) = delete;
// 使用 round-robin 的方式返回一个 io_service
boost::asio::io_context& GetIOService();
void Stop();
private:
AsioIOServicePool(std::size_t size = std::thread::hardware_concurrency());
std::vector<IOService> _ioServices;
std::vector<WorkPtr> _works;
std::vector<std::thread> _threads;
std::size_t _nextIOService;
};
AsioIOServicePool具体实现
#include "AsioIOServicePool.h"
#include <iostream>
using namespace std;
AsioIOServicePool::AsioIOServicePool(std::size_t size):_ioServices(size),
_works(size), _nextIOService(0){
for (std::size_t i = 0; i < size; ++i) {
_works[i] = std::unique_ptr<Work>(new Work(_ioServices[i]));
}
//遍历多个ioservice,创建多个线程,每个线程内部启动ioservice
for (std::size_t i = 0; i < _ioServices.size(); ++i) {
_threads.emplace_back([this, i]() {
_ioServices[i].run();
});
}
}
AsioIOServicePool::~AsioIOServicePool() {
std::cout << "AsioIOServicePool destruct" << endl;
}
boost::asio::io_context& AsioIOServicePool::GetIOService() {
auto& service = _ioServices[_nextIOService++];
if (_nextIOService == _ioServices.size()) {
_nextIOService = 0;
}
return service;
}
void AsioIOServicePool::Stop(){
//因为仅仅执行work.reset并不能让iocontext从run的状态中退出
//当iocontext已经绑定了读或写的监听事件后,还需要手动stop该服务。
for (auto& work : _works) {
//把服务先停止
work->get_io_context().stop();
work.reset();
}
for (auto& t : _threads) {
t.join();
}
}
CServer的处理连接逻辑
void CServer::HandleAccept(shared_ptr<CSession> new_session, const boost::system::error_code& error){
if (!error) {
new_session->Start();
lock_guard<mutex> lock(_mutex);
_sessions.insert(make_pair(new_session->GetUuid(), new_session));
}
else {
cout << "session accept failed, error is " << error.what() << endl;
}
StartAccept();
}
Session层
上面的逻辑在接受新连接后执行Start函数,由新连接开始接收数据,同时Server继续监听新的连接
void CSession::Start(){
AsyncReadHead(HEAD_TOTAL_LEN);
}
先读取头部数据
void CSession::AsyncReadHead(int total_len)
{
auto self = shared_from_this();
asyncReadFull(HEAD_TOTAL_LEN, [self, this](const boost::system::error_code& ec, std::size_t bytes_transfered) {
try {
if (ec) {
std::cout << "handle read failed, error is " << ec.what() << endl;
Close();
_server->ClearSession(_uuid);
return;
}
if (bytes_transfered < HEAD_TOTAL_LEN) {
std::cout << "read length not match, read [" << bytes_transfered << "] , total ["
<< HEAD_TOTAL_LEN << "]" << endl;
Close();
_server->ClearSession(_uuid);
return;
}
_recv_head_node->Clear();
memcpy(_recv_head_node->_data, _data, bytes_transfered);
//获取头部MSGID数据
short msg_id = 0;
memcpy(&msg_id, _recv_head_node->_data, HEAD_ID_LEN);
//网络字节序转化为本地字节序
msg_id = boost::asio::detail::socket_ops::network_to_host_short(msg_id);
std::cout << "msg_id is " << msg_id << endl;
//id非法
if (msg_id > MAX_LENGTH) {
std::cout << "invalid msg_id is " << msg_id << endl;
_server->ClearSession(_uuid);
return;
}
short msg_len = 0;
memcpy(&msg_len, _recv_head_node->_data + HEAD_ID_LEN, HEAD_DATA_LEN);
//网络字节序转化为本地字节序
msg_len = boost::asio::detail::socket_ops::network_to_host_short(msg_len);
std::cout << "msg_len is " << msg_len << endl;
//id非法
if (msg_len > MAX_LENGTH) {
std::cout << "invalid data length is " << msg_len << endl;
_server->ClearSession(_uuid);
return;
}
_recv_msg_node = make_shared<RecvNode>(msg_len, msg_id);
AsyncReadBody(msg_len);
}
catch (std::exception& e) {
std::cout << "Exception code is " << e.what() << endl;
}
});
}
上面的逻辑里调用asyncReadFull读取完整的头部长度,然后解析收到的数据:前两个字节为消息id,之后两个字节为消息长度,最后是n个字节的消息内容。
//读取完整长度
void CSession::asyncReadFull(std::size_t maxLength, std::function<void(const boost::system::error_code&, std::size_t)> handler )
{
::memset(_data, 0, MAX_LENGTH);
asyncReadLen(0, maxLength, handler);
}
读取指定长度
//读取指定字节数
void CSession::asyncReadLen(std::size_t read_len, std::size_t total_len,
std::function<void(const boost::system::error_code&, std::size_t)> handler)
{
auto self = shared_from_this();
_socket.async_read_some(boost::asio::buffer(_data + read_len, total_len-read_len),
[read_len, total_len, handler, self](const boost::system::error_code& ec, std::size_t bytesTransfered) {
if (ec) {
// 出现错误,调用回调函数
handler(ec, read_len + bytesTransfered);
return;
}
if (read_len + bytesTransfered >= total_len) {
//长度够了就调用回调函数
handler(ec, read_len + bytesTransfered);
return;
}
// 没有错误,且长度不足则继续读取
self->asyncReadLen(read_len + bytesTransfered, total_len, handler);
});
}
读取头部成功后,其回调函数内部调用了读包体的逻辑
void CSession::AsyncReadBody(int total_len)
{
auto self = shared_from_this();
asyncReadFull(total_len, [self, this, total_len](const boost::system::error_code& ec, std::size_t bytes_transfered) {
try {
if (ec) {
std::cout << "handle read failed, error is " << ec.what() << endl;
Close();
_server->ClearSession(_uuid);
return;
}
if (bytes_transfered < total_len) {
std::cout << "read length not match, read [" << bytes_transfered << "] , total ["
<< total_len<<"]" << endl;
Close();
_server->ClearSession(_uuid);
return;
}
memcpy(_recv_msg_node->_data , _data , bytes_transfered);
_recv_msg_node->_cur_len += bytes_transfered;
_recv_msg_node->_data[_recv_msg_node->_total_len] = '\0';
cout << "receive data is " << _recv_msg_node->_data << endl;
//此处将消息投递到逻辑队列中
LogicSystem::GetInstance()->PostMsgToQue(make_shared<LogicNode>(shared_from_this(), _recv_msg_node));
//继续监听头部接受事件
AsyncReadHead(HEAD_TOTAL_LEN);
}
catch (std::exception& e) {
std::cout << "Exception code is " << e.what() << endl;
}
});
}
读取包体完成后,在回调中继续读包头。以此循环往复直到读完所有数据。如果对方不发送数据,则回调函数就不会触发。不影响程序执行其他工作,因为我们采用的是asio异步的读写操作。
当然我们解析完包体后会调用LogicSystem单例将解析好的消息封装为逻辑节点传递给逻辑层进行处理。
LogicSystem
我们在逻辑层处理
void LogicSystem::RegisterCallBacks() {
_fun_callbacks[MSG_CHAT_LOGIN] = std::bind(&LogicSystem::LoginHandler, this,
placeholders::_1, placeholders::_2, placeholders::_3);
}
void LogicSystem::LoginHandler(shared_ptr<CSession> session, const short &msg_id, const string &msg_data) {
Json::Reader reader;
Json::Value root;
reader.parse(msg_data, root);
std::cout << "user login uid is " << root["uid"].asInt() << " user token is "
<< root["token"].asString() << endl;
std::string return_str = root.toStyledString();
session->Send(return_str, msg_id);
}
并在构造函数中注册这些处理流程
LogicSystem::LogicSystem():_b_stop(false){
RegisterCallBacks();
_worker_thread = std::thread (&LogicSystem::DealMsg, this);
}
总结
到此,完成了ChatServer接收QT客户端发来的长连接请求,解析读取到的数据,并将收到的数据通过tcp回发给对端。接下来还要实现ChatServer到GateServer的token验证,判断其是否合法,这个交给之后的文章处理。