Linux: inter process communication by tcp 127.0.0.1 on the same node very slow

1

TCP communication is very slow via 127.0.0.1 or the eth IP (e.g. 10.10.253.12) on the same host. The server listens on 0.0.0.0:2000; the client connects to 127.0.0.1:2000 or the local eth IP 10.10.253.12:2000, and the client–server transfer speed is only 100 KB per second. A program written in C using libevent and one written in Java using Netty show the same behavior. The programs work as follows:

  1. server accept connect, echo everything it received.
  2. client sends arbitrary 128-byte data, when socket writable, send another 128-byte data; read and discard what it received.

This client/server pair works fine when run on two different machines, with a transfer speed of about 30 MB per second.

But zeromq pair communication by 127.0.0.1 has no such issue.

Server side code is:

---start listener
/* Create the listening socket on the `leader` event base and enter the event
 * loop. LEV_OPT_REUSEABLE sets SO_REUSEADDR; LEV_OPT_CLOSE_ON_FREE closes the
 * listening fd when the listener is freed. */
struct evconnlistener *listener = evconnlistener_new_bind(leader,
    listener_cb, NULL,
    LEV_OPT_REUSEABLE | LEV_OPT_CLOSE_ON_FREE, s_backlog, &addr,
    addrlen);
if (!listener) {
    logit("Could not create a listener!");
    return 1;
}
int fd = evconnlistener_get_fd(listener);
int keepAlive = 0; // 0 DISABLES SO_KEEPALIVE on the listening socket (the
                   // original comment said "non-zero enables keepalive", but
                   // the value passed here is 0). NOTE(review): whether
                   // accepted sockets inherit this is platform-dependent —
                   // confirm; listener_cb sets it per-connection anyway.
setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, (void *)&keepAlive, sizeof(keepAlive));
/* Run until an explicit exit is requested; EVLOOP_NO_EXIT_ON_EMPTY keeps the
 * loop alive even when no events are pending. */
do{
    if (event_base_loop(leader, EVLOOP_NO_EXIT_ON_EMPTY) < 0){
        break;
    }
}while(!event_base_got_exit(leader));

---connect processing
/* Accept callback for the server listener.
 *
 * Tunes the freshly accepted socket (optional buffer sizes, TCP_NODELAY,
 * keepalive) and wraps it in a bufferevent on the `s_worker` base; the
 * installed conn_readcb echoes everything received back to the client.
 *
 * Fixes vs. original: setsockopt() results were silently ignored (a failed
 * SO_SNDBUF/SO_RCVBUF request is exactly the kind of misconfiguration that
 * caused the reported slowdown), and the keepalive comment claimed
 * "non-zero enables keepalive" while the code passes 0, which disables it.
 */
static void listener_cb(struct evconnlistener *listener, evutil_socket_t fd, struct sockaddr *sa, int socklen, void *user_data) {
    if (s_rcvbufsize > 0){
        if (setsockopt(fd, SOL_SOCKET, SO_RCVBUF, (void *)&s_rcvbufsize, sizeof(s_rcvbufsize)) != 0){
            logit("setsockopt(SO_RCVBUF) failed");
        }
    }
    if (s_sndbufsize > 0){
        if (setsockopt(fd, SOL_SOCKET, SO_SNDBUF, (void *)&s_sndbufsize, sizeof(s_sndbufsize)) != 0){
            logit("setsockopt(SO_SNDBUF) failed");
        }
    }
    if (setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, (char*)&s_tcpnodelay, sizeof(s_tcpnodelay)) != 0){
        logit("setsockopt(TCP_NODELAY) failed");
    }
    /* 0 DISABLES SO_KEEPALIVE; pass a non-zero value to enable it. */
    int keepAlive = 0;
    if (setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, (void *)&keepAlive, sizeof(keepAlive)) != 0){
        logit("setsockopt(SO_KEEPALIVE) failed");
    }

    /* BEV_OPT_CLOSE_ON_FREE ties the fd's lifetime to the bufferevent. */
    struct bufferevent *bev = bufferevent_socket_new(s_worker, fd, BEV_OPT_CLOSE_ON_FREE|BEV_OPT_THREADSAFE);
    if (!bev) {
        logit("Error constructing bufferevent!");
        evutil_closesocket(fd);
        return;
    }
    bufferevent_setcb(bev, conn_readcb, conn_writecb, conn_eventcb, NULL);
    bufferevent_enable(bev, EV_READ);
}

---read\write processing
/* Write callback (server side): intentionally empty. The echo server has
 * nothing to do when the output buffer drains — conn_readcb refills the
 * output buffer as new data arrives. */
static void conn_writecb(struct bufferevent *bev, void *user_data) {
}
/* Read callback (server side): echo everything received back to the peer.
 *
 * evbuffer_add_buffer() MOVES (does not copy) all data from `input` to
 * `output`, so this is a zero-copy echo.
 *
 * Fixes vs. original: removed the unused local `len` (an `int` receiving the
 * `size_t` result of evbuffer_get_length, never read afterwards) and check
 * the evbuffer_add_buffer() return value instead of ignoring it. */
static void conn_readcb(struct bufferevent *bev, void *user_data) {
    struct evbuffer *input = bufferevent_get_input(bev);
    struct evbuffer *output = bufferevent_get_output(bev);

    if (evbuffer_add_buffer(output, input) != 0) {
        logit("evbuffer_add_buffer failed");
    }
}

Client side code is:

---init connection
/* Create a client bufferevent with no fd yet (-1); the socket itself is
 * created by bufferevent_socket_connect() below. BEV_OPT_CLOSE_ON_FREE makes
 * bufferevent_free() close the fd. */
struct bufferevent* bev= bufferevent_socket_new(s_event_base, -1, BEV_OPT_CLOSE_ON_FREE|BEV_OPT_THREADSAFE);
if (!bev){
    return 1;
}
struct timeval tv;
tv.tv_sec = 30; // intended as the connect timeout. NOTE(review): the
                // signature is bufferevent_set_timeouts(bev, read_to,
                // write_to), so &tv below is installed as the WRITE
                // timeout — confirm this is the intent.
tv.tv_usec = 0;
bufferevent_set_timeouts(bev, NULL, &tv);
/* connect_eventcb is a one-shot event callback; it installs the real data
 * callbacks once BEV_EVENT_CONNECTED fires. */
bufferevent_setcb(bev, NULL, NULL, connect_eventcb, (void*)s_event_base);
int flag = bufferevent_socket_connect(bev, &s_target_sockaddr, s_target_socklen);
if (-1 == flag ){
    bufferevent_free(bev);
    return 1;
}

---connected processing
static void connect_eventcb(struct bufferevent *bev, short events, void *user_data) {
    if (events & (BEV_EVENT_EOF | BEV_EVENT_ERROR | BEV_EVENT_TIMEOUT)){
        bufferevent_free(bev);
    }else if (events & BEV_EVENT_CONNECTED) {
        int fd = bufferevent_getfd(bev);
        if (s_sorcvbufsize > 0){
            setsockopt(fd, SOL_SOCKET, SO_RCVBUF, (void *)&s_sorcvbufsize, sizeof(s_sorcvbufsize));
        }
        if (s_sosndbufsize > 0){
            setsockopt(fd, SOL_SOCKET, SO_SNDBUF, (void *)&s_sosndbufsize, sizeof(s_sosndbufsize));
        }
        setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, (char*)&s_tcpnodelay, sizeof(s_tcpnodelay));
        int keepAlive = 0;    // 非0值,开启keepalive属性
        setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, (void *)&keepAlive, sizeof(keepAlive));

        bufferevent_setwatermark(bev, EV_WRITE, s_snd_wmark_l, s_snd_wmark_h);
        bufferevent_setcb(bev, conn_readcb, conn_writecb, conn_eventcb, NULL);

        bufferevent_enable(bev, EV_READ|EV_WRITE);
        bufferevent_trigger(bev, EV_WRITE, BEV_TRIG_IGNORE_WATERMARKS|BEV_OPT_DEFER_CALLBACKS);
    }
}

---read/write processing
/* Write callback (client side): keep the output buffer topped up to the
 * high write watermark. Each invocation appends s_sendsize-byte chunks of
 * s_send_buf until the pending output reaches s_snd_wmark_h or a write
 * fails. */
static void conn_writecb(struct bufferevent *bev, void *user_data) {
    struct evbuffer *output = bufferevent_get_output(bev);
    int pending = evbuffer_get_length(output);

    while (pending < s_snd_wmark_h) {
        if (bufferevent_write(bev, s_send_buf, s_sendsize) != 0) {
            break;
        }
        pending += s_sendsize;
    }
}
/* Read callback (client side): discard everything received.
 *
 * Fix vs. original: drained a magic 0x7FFFFFFF bytes instead of the
 * buffer's actual length. evbuffer_drain() takes a size_t, so the INT32
 * cap is both a magic number and a (theoretical) truncation for buffers of
 * 2 GiB or more; draining evbuffer_get_length(input) bytes always empties
 * the buffer exactly. */
static void conn_readcb(struct bufferevent *bev, void *user_data) {
    struct evbuffer *input = bufferevent_get_input(bev);
    evbuffer_drain(input, evbuffer_get_length(input));
}

Tshark capture shows there are many KeepAliveReq segments no matter how SO_KEEPALIVE is set: Tshark capture result 1

Tshark capture result 2

networking
server
tcp
client
loopback
asked on Stack Overflow Jan 11, 2021 by ljn • edited Jan 11, 2021 by Hasip Timurtas

1 Answer

0

I tested and resolved it now: the main reason is that the send buffer size on the server side is too small (8K) — smaller than the receive size — which causes the server's sends to congest.

When I adjust both buffer size to 32K, the problem disappeared.

answered on Stack Overflow Jan 12, 2021 by ljn

User contributions licensed under CC BY-SA 3.0