From 44423f45966795c51ebc7f1ee4e18d8f38ed0cb5 Mon Sep 17 00:00:00 2001
From: Daniel Collins
Date: Wed, 24 Oct 2018 00:57:09 +0100
Subject: [PATCH] Avoid spinning the workers whenever a peer is connected.

Calling WSAEventSelect() causes the event to become signalled even if
there is nothing new, probably due to the socket being writable. Due to
this, io_peer_recv() signals the event object whenever it claims
recv_busy just to check for pending data, and again when releasing it,
which leads to another worker immediately waking up and claiming it to
check for new messages, starting the cycle again.

Only claim recv_busy and mask FD_READ events if some data is actually
read from the socket.
---
 src/DirectPlay8Peer.cpp | 74 ++++++++++++++++++++---------------------
 1 file changed, 36 insertions(+), 38 deletions(-)

diff --git a/src/DirectPlay8Peer.cpp b/src/DirectPlay8Peer.cpp
index 100ce81..5559d17 100644
--- a/src/DirectPlay8Peer.cpp
+++ b/src/DirectPlay8Peer.cpp
@@ -2514,32 +2514,39 @@ void DirectPlay8Peer::io_peer_recv(std::unique_lock<std::mutex> &l, unsigned in
 	
 	while((peer = get_peer_by_peer_id(peer_id)) != NULL)
 	{
-		if(!rb_claimed)
+		if(!rb_claimed && peer->recv_busy)
 		{
-			if(peer->recv_busy)
-			{
-				/* Another thread is already processing data from this socket.
-				 *
-				 * Only one thread at a time is allowed to handle reads, even when the
-				 * other thread is in the application callback, so that the order of
-				 * messages is preserved.
-				 */
-				return;
-			}
-			else{
-				/* No other thread is processing data from this peer, we shall
-				 * claim the throne and temporarily disable FD_READ events from it
-				 * to avoid other workers spinning against the recv_busy lock.
-				 */
-				
-				peer->recv_busy = true;
-				rb_claimed = true;
-				
-				peer->disable_events(FD_READ | FD_CLOSE);
-			}
+			/* Another thread is already processing data from this socket.
+			 *
+			 * Only one thread at a time is allowed to handle reads, even when the
+			 * other thread is in the application callback, so that the order of
+			 * messages is preserved.
+			 */
+			return;
 		}
 		
 		int r = recv(peer->sock, (char*)(peer->recv_buf) + peer->recv_buf_cur, sizeof(peer->recv_buf) - peer->recv_buf_cur, 0);
+		DWORD err = WSAGetLastError();
+		
+		if(r < 0 && err == WSAEWOULDBLOCK)
+		{
+			/* Nothing to read. */
+			break;
+		}
+		
+		if(!rb_claimed)
+		{
+			/* No other thread is processing data from this peer, we shall
+			 * claim the throne and temporarily disable FD_READ events from it
+			 * to avoid other workers spinning against the recv_busy lock.
+			 */
+			
+			peer->recv_busy = true;
+			rb_claimed = true;
+			
+			peer->disable_events(FD_READ | FD_CLOSE);
+		}
+		
 		if(r == 0)
 		{
 			/* When the remote end initiates a graceful close, it will no longer
@@ -2551,22 +2558,13 @@ void DirectPlay8Peer::io_peer_recv(std::unique_lock<std::mutex> &l, unsigned in
 		}
 		else if(r < 0)
 		{
-			DWORD err = WSAGetLastError();
+			/* Read error. */
 			
-			if(err == WSAEWOULDBLOCK)
-			{
-				/* Nothing to read */
-				break;
-			}
-			else{
-				/* Read error. */
-				
-				log_printf("Read error on peer %u: %s", peer_id, win_strerror(err).c_str());
-				log_printf("Closing connection");
-				
-				peer_destroy(l, peer_id, DPNERR_CONNECTIONLOST, DPNDESTROYPLAYERREASON_CONNECTIONLOST);
-				return;
-			}
+			log_printf("Read error on peer %u: %s", peer_id, win_strerror(err).c_str());
+			log_printf("Closing connection");
+			
+			peer_destroy(l, peer_id, DPNERR_CONNECTIONLOST, DPNDESTROYPLAYERREASON_CONNECTIONLOST);
+			return;
 		}
 		
 		if(peer->state == Peer::PS_CLOSING)
@@ -2715,7 +2713,7 @@ void DirectPlay8Peer::io_peer_recv(std::unique_lock<std::mutex> &l, unsigned in
 		}
 	}
 	
-	if(peer != NULL)
+	if(peer != NULL && rb_claimed)
 	{
 		peer->enable_events(FD_READ | FD_CLOSE);
 		peer->recv_busy = false;
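
For reference, a minimal sketch of what the enable_events() and
disable_events() helpers used in the patch might look like. This is an
assumption for illustration only (the real implementations live
elsewhere in the repository, and the member names event and events_mask
are invented here), but it shows why every claim and release of
recv_busy used to re-issue WSAEventSelect():

	/* Hypothetical sketch of the per-peer event-mask helpers.
	 * Assumed members: sock (SOCKET), event (WSAEVENT) and
	 * events_mask (long, the currently registered FD_* bits).
	 */
	void Peer::enable_events(long events)
	{
		events_mask |= events;
		WSAEventSelect(sock, event, events_mask);
	}
	
	void Peer::disable_events(long events)
	{
		events_mask &= ~events;
		
		/* Re-registering re-evaluates pending network events, so
		 * the event object may become signalled immediately even
		 * though nothing new has arrived, e.g. FD_WRITE for a
		 * socket that is still writable. This is what woke the
		 * other workers on every claim/release of recv_busy.
		 */
		WSAEventSelect(sock, event, events_mask);
	}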
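
And a self-contained repro sketch of the underlying behaviour
(hypothetical, not part of this patch): it connects a loopback TCP
pair, then re-issues WSAEventSelect() in a loop and checks the event
object each time. In practice the event tends to come back signalled on
every pass, even though there is never anything to recv(), which is the
spin described in the commit message:

	/* spin-repro.cpp - hypothetical demonstration, not part of
	 * the patch. Build with: cl spin-repro.cpp ws2_32.lib
	 */
	#include <winsock2.h>
	#include <stdio.h>
	#include <string.h>
	
	int main()
	{
		WSADATA wsa;
		WSAStartup(MAKEWORD(2, 2), &wsa);
		
		/* Set up a connected loopback pair: listener -> client -> server. */
		SOCKET listener = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
		
		struct sockaddr_in addr;
		memset(&addr, 0, sizeof(addr));
		addr.sin_family      = AF_INET;
		addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
		addr.sin_port        = 0;
		
		bind(listener, (struct sockaddr*)(&addr), sizeof(addr));
		listen(listener, 1);
		
		int addrlen = sizeof(addr);
		getsockname(listener, (struct sockaddr*)(&addr), &addrlen);
		
		SOCKET client = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
		connect(client, (struct sockaddr*)(&addr), sizeof(addr));
		
		SOCKET server = accept(listener, NULL, NULL);
		
		WSAEVENT ev = WSACreateEvent();
		
		for(int i = 0; i < 3; ++i)
		{
			/* Each registration re-evaluates pending network
			 * events. With FD_WRITE in the mask, a writable socket
			 * appears to signal the event straight away, despite
			 * there being no data to read.
			 */
			WSAEventSelect(server, ev, FD_READ | FD_WRITE | FD_CLOSE);
			
			DWORD r = WaitForSingleObject(ev, 0);
			printf("pass %d: %s\n", i, (r == WAIT_OBJECT_0) ? "signalled" : "not signalled");
			
			WSAResetEvent(ev);
		}
		
		WSACloseEvent(ev);
		closesocket(server);
		closesocket(client);
		closesocket(listener);
		WSACleanup();
		
		return 0;
	}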