Learning brpc: the Echo service

This post walks through the Echo example of brpc (C++). It covers the server side and client side of the synchronous client case, and then the asynchronous client; the server-side code of the asynchronous case is identical to the synchronous one.
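
Both the server and the client include echo.pb.h, which is generated from an echo.proto that is not shown in the post. The following is a minimal sketch of that file, assuming the same definitions as the upstream brpc echo example (the names example::EchoService, EchoRequest and EchoResponse used below come from it):

syntax = "proto2";
package example;

// Needed so that protoc generates the EchoService base class the server derives from.
option cc_generic_services = true;

message EchoRequest {
    required string message = 1;
};

message EchoResponse {
    required string message = 1;
};

service EchoService {
    rpc Echo(EchoRequest) returns (EchoResponse);
};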


Echo synchronous client

Server side

#include <gflags/gflags.h>
#include <json2pb/pb_to_json.h>
#include <brpc/server.h>
#include "butil/endpoint.h"
#include "echo.pb.h"

// gflags used to configure the server
DEFINE_bool(echo_attachment, true, "Echo attachment as well");
DEFINE_int32(port, 8000, "TCP Port of this server");
DEFINE_string(listen_addr, "", "Server listen address, may be IPV4/IPV6/UDS."
            " If this is set, the flag port will be ignored");
DEFINE_int32(idle_timeout_s, -1, "Connection will be closed if there is no "
             "read/write operations during the last `idle_timeout_s'");

class EchoServiceImpl : public example::EchoService {
public:
    EchoServiceImpl() = default;
    virtual ~EchoServiceImpl() = default;


    // Callback executed after the response has been sent
    static void CallAfterRpc(brpc::Controller* controller, 
                            const google::protobuf::Message* req, 
                            const google::protobuf::Message* res) {
        std::string req_str, res_str;

        // At this point cntl/req/res have not been destroyed yet
        json2pb::ProtoMessageToJson(*req, &req_str, NULL);
        json2pb::ProtoMessageToJson(*res, &res_str, NULL);
        LOG(INFO) << "Got "<< "req:" << req_str
                    << "and res:" << res_str;
}


    void Echo(google::protobuf::RpcController* controller,
              const example::EchoRequest* request,
              example::EchoResponse* response,
              google::protobuf::Closure* done) override {
        
        brpc::Controller* cntl = static_cast<brpc::Controller*>(controller); // downcast to brpc::Controller
        brpc::ClosureGuard done_guard(done); // RAII guard: done->Run() is called when this scope is left

        // Log the request
        LOG(INFO) << "Received request[log_id=" << cntl->log_id() 
                  << "] from " << cntl->remote_side() 
                  << " to " << cntl->local_side()
                  << ": " << request->message()
                  << " (attached=" << cntl->request_attachment() << ")";

        // Build the response
        response->set_message("Echo: " + request->message());
        // If attachment echoing is enabled, send the attachment back as well
        if(FLAGS_echo_attachment) {
            cntl->response_attachment().append(cntl->request_attachment());
        }
        // Register a callback to run after the response has been sent
        cntl->set_after_rpc_resp_fn(std::bind(EchoServiceImpl::CallAfterRpc,
                std::placeholders::_1, std::placeholders::_2, std::placeholders::_3));
    }

};


int main(int argc, char* argv[]) {
    // Parse gflags
    GFLAGS_NS::ParseCommandLineFlags(&argc, &argv, true);
    brpc::Server server;
    EchoServiceImpl service_impl; 

    // Add the service. The second argument tells the server whether it owns service_impl;
    // since service_impl lives on the stack, the server must not own (and delete) it.
    if(server.AddService(&service_impl, brpc::SERVER_DOESNT_OWN_SERVICE) != 0) {
        LOG(ERROR) << "Fail to add service";
        return -1;
    }

    // Endpoint to listen on; by default listen on all local addresses
    butil::EndPoint point;
    if(FLAGS_listen_addr.empty()) {
        point = butil::EndPoint(butil::IP_ANY, FLAGS_port);
    } else {
        // Parse the listen address
        if(butil::str2endpoint(FLAGS_listen_addr.c_str(), &point) != 0) {
            LOG(ERROR) << "Invalid listen address:" << FLAGS_listen_addr;
            return -1;
        }
    }

    // Server options
    brpc::ServerOptions options;    
    options.idle_timeout_sec = FLAGS_idle_timeout_s; 

    // Start the server (Start() returns immediately; serving happens in background threads)
    if(server.Start(point, &options) != 0) {
        LOG(ERROR) << "Fail to start EchoServer";
        return -1;
    }

    // Block until asked to quit (e.g. by Ctrl-C)
    server.RunUntilAskedToQuit(); 
    return 0;

}

Client side

#include <cstdio>
#include <gflags/gflags.h>
#include <butil/logging.h>
#include <butil/time.h>
#include <brpc/channel.h>
#include "echo.pb.h"


DEFINE_string(attachment, "attach", "Carry this along with requests");
DEFINE_string(protocol, "baidu_std", "Protocol type. Defined in src/brpc/options.proto");
DEFINE_string(connection_type, "", "Connection type. Available values: single, pooled, short");
DEFINE_string(server, "0.0.0.0:8000", "IP Address of server");
DEFINE_string(load_balancer, "", "The algorithm for load balancing");
DEFINE_int32(timeout_ms, 100, "RPC timeout in milliseconds");
DEFINE_int32(max_retry, 3, "Max retries(not including the first RPC)"); 
DEFINE_int32(interval_ms, 1000, "Milliseconds between consecutive requests");


int main(int argc, char* argv[]) {
    GFLAGS_NS::ParseCommandLineFlags(&argc, &argv, true);

    // Channel options
    brpc::ChannelOptions options;
    options.protocol = FLAGS_protocol;
    options.connection_type = FLAGS_connection_type;
    options.timeout_ms = FLAGS_timeout_ms/*milliseconds*/;
    options.max_retry = FLAGS_max_retry;

    // Initialize the channel
    brpc::Channel channel;
    if((channel.Init(FLAGS_server.c_str(), FLAGS_load_balancer.c_str(), &options)) != 0) {
        LOG(ERROR) << "Fail to initialize channel";
        return -1;
    }

    // Thin wrapper around the channel; it can be shared across threads
    example::EchoService_Stub stub(&channel); 

    // Prepare the request and response
    example::EchoRequest request;
    example::EchoResponse response;
    brpc::Controller cntl;
    char buf[128];
    printf("Enter a message: ");
    scanf("%127s", buf); // limit input to the buffer size
    request.set_message(buf);

    // Attachment carried along with the request
    cntl.request_attachment().append(FLAGS_attachment);

    // Passing NULL as `done` makes the call synchronous: Echo blocks until the response arrives or the RPC fails/times out
    stub.Echo(&cntl, &request, &response, NULL);
    if(cntl.Failed()) {
        LOG(WARNING) << cntl.ErrorText(); // a failure is normally just a WARNING; we return here only for demonstration
        return -1;
    }

    // Log the successful response
    LOG(INFO) << "Received response from " << cntl.remote_side()
                << " to " << cntl.local_side()
                << ": " << response.message() << " (attached="
                << cntl.response_attachment() << ")"
                << " latency=" << cntl.latency_us() << "us";


}
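
Note that FLAGS_interval_ms is declared above but never used: this client sends a single request and exits. The upstream brpc echo client instead wraps the synchronous call in a send loop; below is a minimal sketch of that pattern, reusing the stub and flags from the code above (brpc::IsAskedToQuit() comes from <brpc/server.h>, usleep from <unistd.h>):

    while (!brpc::IsAskedToQuit()) {  // becomes true after Ctrl-C
        example::EchoRequest request;
        example::EchoResponse response;
        brpc::Controller cntl;
        request.set_message("hello");
        cntl.request_attachment().append(FLAGS_attachment);
        stub.Echo(&cntl, &request, &response, NULL);  // synchronous call
        if (cntl.Failed()) {
            LOG(WARNING) << cntl.ErrorText();
        } else {
            LOG(INFO) << "Received: " << response.message()
                      << " latency=" << cntl.latency_us() << "us";
        }
        usleep(FLAGS_interval_ms * 1000L);  // wait before sending the next request
    }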

Echo asynchronous client

The server-side code is identical to that of the synchronous example.

Client side

#include <cstdio>
#include <gflags/gflags.h>
#include <butil/logging.h>
#include <butil/time.h>
#include <brpc/channel.h>
#include <google/protobuf/stubs/callback.h>
#include "brpc/callback.h"
#include "bthread/bthread.h"
#include "echo.pb.h"


DEFINE_string(attachment, "attach", "Carry this along with requests");
DEFINE_string(protocol, "baidu_std", "Protocol type. Defined in src/brpc/options.proto");
DEFINE_string(connection_type, "", "Connection type. Available values: single, pooled, short");
DEFINE_string(server, "0.0.0.0:8000", "IP Address of server");
DEFINE_string(load_balancer, "", "The algorithm for load balancing");
DEFINE_int32(timeout_ms, 100, "RPC timeout in milliseconds");
DEFINE_int32(max_retry, 3, "Max retries(not including the first RPC)"); 
DEFINE_int32(interval_ms, 1000, "Milliseconds between consecutive requests");


void HandleResponse(brpc::Controller *cntl, 
                    example::EchoResponse* response) {
    // Asynchronous calls usually allocate the Controller and EchoResponse on the heap and
    // release them here via unique_ptr. In this example they live on the stack of main and
    // outlive HandleResponse, so the guards below are not needed.
    // std::unique_ptr<brpc::Controller> cntl_guard(cntl);
    // std::unique_ptr<example::EchoResponse> response_guard(response);

    if(cntl->Failed()) {
        LOG(ERROR) << "Fail to send EchoRequest, " << cntl->ErrorText();
        return;
    }

    // Deliberately wait a little while
    bthread_usleep(10);
    // Log the response
    LOG(INFO) << "Received response from " << cntl->remote_side()
        << ": " << response->message() << " (attached="
        << cntl->response_attachment() << ")"
        << " latency=" << cntl->latency_us() << "us";
    
}

int main(int argc, char* argv[]) {
    GFLAGS_NS::ParseCommandLineFlags(&argc, &argv, true);

    // Channel options
    brpc::ChannelOptions options;
    options.protocol = FLAGS_protocol;
    options.connection_type = FLAGS_connection_type;
    options.timeout_ms = FLAGS_timeout_ms/*milliseconds*/;
    options.max_retry = FLAGS_max_retry;

    // Initialize the channel
    brpc::Channel channel;
    if((channel.Init(FLAGS_server.c_str(), FLAGS_load_balancer.c_str(), &options)) != 0) {
        LOG(ERROR) << "Fail to initialize channel";
        return -1;
    }

    // Thin wrapper around the channel; it can be shared across threads
    example::EchoService_Stub stub(&channel); 

    // Prepare the request and response
    example::EchoRequest request;
    example::EchoResponse response;
    brpc::Controller cntl;
    char buf[128];
    printf("Enter a message: ");
    scanf("%127s", buf); // limit input to the buffer size
    request.set_message(buf);

    // Attachment carried along with the request
    cntl.request_attachment().append(FLAGS_attachment);

    // Create the closure that will be invoked once the response arrives
    google::protobuf::Closure* done = brpc::NewCallback(&HandleResponse, &cntl, &response);
    
    // A non-NULL `done` makes the call asynchronous: Echo returns immediately and `done` runs when the response arrives
    stub.Echo(&cntl, &request, &response, done);
    
    // Keep the process (and the stack-allocated cntl/response) alive for the callback
    while(true) {
        bthread_usleep(5);
        printf("do something\n"); // 可以看到,stub.Echo是立即返回的,不影响后续执行
    }
    
}
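
Because cntl and response live on the stack of main, main must not return before the callback has run, which is why the example above spins forever. The upstream async example instead waits for completion with brpc::Join on the call id recorded before the call is issued. A minimal sketch that could replace the done/Echo/while part above, reusing the same stub, cntl, request and response:

    // The call id must be saved *before* starting the RPC: by the time Echo
    // returns, the RPC may already have finished and run the callback.
    const brpc::CallId cid = cntl.call_id();
    stub.Echo(&cntl, &request, &response,
              brpc::NewCallback(&HandleResponse, &cntl, &response));

    // Do other work here, then block until the callback has finished.
    brpc::Join(cid);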