mirror of
https://github.com/libuv/libuv
synced 2025-03-28 21:13:16 +00:00
unix: remove busy loop from uv_async_send (#3879)
The current fix (libuv#2231) was found to be slow in certain cases. This change should improve scalability a bit by only incurring the spin-loop delay while closing a UV_ASYNC. It is also intended to slightly improve behavior after uv_loop_close is called: all of the pending flags are parked as set, so that uv_async_send will not access the loop at all (until the uv_async_t memory is freed, which is still left to the responsibility of the user).

Note that this bug appears to still exist on Win32, though it is harder to address there without the refactoring done to this code on libuv master.

Takes some inspiration from https://github.com/libuv/libuv/pull/2654
Takes some inspiration from https://github.com/libuv/libuv/pull/2656

Refs: https://github.com/libuv/libuv/pull/2231
This commit is contained in:
parent a40058dbd1
commit fe7ee4a624
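For orientation, a minimal standalone sketch (not part of the commit) of the protocol the patch moves to: `pending` coalesces wakeups, while `busy` counts in-flight senders, so only the closing side ever spins. All names here are illustrative stand-ins for the fields in the diff below (`handle->pending` and `handle->u.fd`), and wake_event_loop is a hypothetical stub for uv__async_send.

    #include <stdatomic.h>

    static _Atomic int pending; /* 0 = idle, 1 = wakeup queued (handle->pending) */
    static _Atomic int busy;    /* count of threads inside send (handle->u.fd) */

    static void wake_event_loop(void) { /* stand-in for uv__async_send() */ }

    void send_async(void) {                /* any thread, never spins */
      if (atomic_load_explicit(&pending, memory_order_relaxed) != 0)
        return;                            /* cheap early-out, wakeup already queued */
      atomic_fetch_add(&busy, 1);          /* enter the busy window */
      if (atomic_exchange(&pending, 1) == 0)
        wake_event_loop();                 /* only the first sender wakes the loop */
      atomic_fetch_add(&busy, -1);         /* leave the busy window */
    }

    void close_async(void) {               /* loop thread only */
      atomic_store(&pending, 1);           /* park: no new wakeups can start */
      while (atomic_load(&busy) != 0)
        ;                                  /* wait out in-flight senders; only now
                                              may the handle memory be released */
    }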
src/unix/async.c
@@ -53,6 +53,7 @@ int uv_async_init(uv_loop_t* loop, uv_async_t* handle, uv_async_cb async_cb) {
   uv__handle_init(loop, (uv_handle_t*)handle, UV_ASYNC);
   handle->async_cb = async_cb;
   handle->pending = 0;
+  handle->u.fd = 0; /* This will be used as a busy flag. */

   QUEUE_INSERT_TAIL(&loop->async_handles, &handle->queue);
   uv__handle_start(handle);
@@ -63,53 +64,50 @@ int uv_async_init(uv_loop_t* loop, uv_async_t* handle, uv_async_cb async_cb) {

 int uv_async_send(uv_async_t* handle) {
   _Atomic int* pending;
-  int expected;
+  _Atomic int* busy;

   pending = (_Atomic int*) &handle->pending;
+  busy = (_Atomic int*) &handle->u.fd;

   /* Do a cheap read first. */
   if (atomic_load_explicit(pending, memory_order_relaxed) != 0)
     return 0;

-  /* Tell the other thread we're busy with the handle. */
-  expected = 0;
-  if (!atomic_compare_exchange_strong(pending, &expected, 1))
-    return 0;
+  /* Set the loop to busy. */
+  atomic_fetch_add(busy, 1);

   /* Wake up the other thread's event loop. */
-  uv__async_send(handle->loop);
+  if (atomic_exchange(pending, 1) == 0)
+    uv__async_send(handle->loop);

-  /* Tell the other thread we're done. */
-  expected = 1;
-  if (!atomic_compare_exchange_strong(pending, &expected, 2))
-    abort();
+  /* Set the loop to not-busy. */
+  atomic_fetch_add(busy, -1);

   return 0;
 }


-/* Only call this from the event loop thread. */
-static int uv__async_spin(uv_async_t* handle) {
+/* Wait for the busy flag to clear before closing.
+ * Only call this from the event loop thread. */
+static void uv__async_spin(uv_async_t* handle) {
   _Atomic int* pending;
-  int expected;
+  _Atomic int* busy;
   int i;

   pending = (_Atomic int*) &handle->pending;
+  busy = (_Atomic int*) &handle->u.fd;
+
+  /* Set the pending flag first, so no new events will be added by other
+   * threads after this function returns. */
+  atomic_store(pending, 1);

   for (;;) {
-    /* 997 is not completely chosen at random. It's a prime number, acyclical
-     * by nature, and should therefore hopefully dampen sympathetic resonance.
+    /* 997 is not completely chosen at random. It's a prime number, acyclic by
+     * nature, and should therefore hopefully dampen sympathetic resonance.
      */
     for (i = 0; i < 997; i++) {
-      /* rc=0 -- handle is not pending.
-       * rc=1 -- handle is pending, other thread is still working with it.
-       * rc=2 -- handle is pending, other thread is done.
-       */
-      expected = 2;
-      atomic_compare_exchange_strong(pending, &expected, 0);
-
-      if (expected != 1)
-        return expected;
+      if (atomic_load(busy) == 0)
+        return;

       /* Other thread is busy with this handle, spin until it's done. */
       uv__cpu_relax();
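As a usage-level companion to the hunk above, a small, hypothetical example of the public API whose fast path changed: uv_async_send() is documented as callable from any thread, and uv_close() on the handle is what funnels into uv__async_spin(). Error handling is elided; this is a sketch, not part of the commit.

    #include <stdio.h>
    #include <uv.h>

    static void async_cb(uv_async_t* handle) {
      printf("woken up on the loop thread\n");
      /* Closing the handle is the only operation that may now spin-wait. */
      uv_close((uv_handle_t*) handle, NULL);
    }

    static void worker(void* arg) {
      /* Multiple sends may coalesce into a single callback invocation. */
      uv_async_send((uv_async_t*) arg);
    }

    int main(void) {
      uv_async_t async;
      uv_thread_t thread;

      uv_async_init(uv_default_loop(), &async, async_cb);
      uv_thread_create(&thread, worker, &async);
      uv_run(uv_default_loop(), UV_RUN_DEFAULT);
      uv_thread_join(&thread);
      return uv_loop_close(uv_default_loop());
    }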
@@ -137,6 +135,7 @@ static void uv__async_io(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
   QUEUE queue;
   QUEUE* q;
   uv_async_t* h;
+  _Atomic int *pending;

   assert(w == &loop->async_io_watcher);

@@ -166,8 +165,10 @@ static void uv__async_io(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
     QUEUE_REMOVE(q);
     QUEUE_INSERT_TAIL(&loop->async_handles, q);

-    if (0 == uv__async_spin(h))
-      continue;  /* Not pending. */
+    /* Atomically fetch and clear pending flag */
+    pending = (_Atomic int*) &h->pending;
+    if (atomic_exchange(pending, 0) == 0)
+      continue;

     if (h->async_cb == NULL)
       continue;
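The loop thread no longer calls uv__async_spin() here: with pending reduced to a boolean, one atomic_exchange() both reads and clears it. A tiny self-contained demo of that fetch-and-clear idiom (illustrative only, not libuv code):

    #include <assert.h>
    #include <stdatomic.h>

    int main(void) {
      _Atomic int pending = 0;

      assert(atomic_exchange(&pending, 1) == 0); /* first send sees 0, must wake loop */
      assert(atomic_exchange(&pending, 1) == 1); /* later sends coalesce, no wakeup */
      assert(atomic_exchange(&pending, 0) == 1); /* loop drains: run callback once */
      assert(atomic_exchange(&pending, 0) == 0); /* nothing pending: skip handle */
      return 0;
    }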
@@ -239,20 +240,28 @@ static int uv__async_start(uv_loop_t* loop) {
 }


-int uv__async_fork(uv_loop_t* loop) {
-  if (loop->async_io_watcher.fd == -1) /* never started */
-    return 0;
-
-  uv__async_stop(loop);
-
-  return uv__async_start(loop);
-}
-
-
 void uv__async_stop(uv_loop_t* loop) {
+  QUEUE queue;
+  QUEUE* q;
+  uv_async_t* h;
+
   if (loop->async_io_watcher.fd == -1)
     return;

+  /* Make sure no other thread is accessing the async handle fd after the loop
+   * cleanup.
+   */
+  QUEUE_MOVE(&loop->async_handles, &queue);
+  while (!QUEUE_EMPTY(&queue)) {
+    q = QUEUE_HEAD(&queue);
+    h = QUEUE_DATA(q, uv_async_t, queue);
+
+    QUEUE_REMOVE(q);
+    QUEUE_INSERT_TAIL(&loop->async_handles, q);
+
+    uv__async_spin(h);
+  }
+
   if (loop->async_wfd != -1) {
     if (loop->async_wfd != loop->async_io_watcher.fd)
       uv__close(loop->async_wfd);
@@ -265,6 +274,48 @@ void uv__async_stop(uv_loop_t* loop) {
 }


+int uv__async_fork(uv_loop_t* loop) {
+  QUEUE queue;
+  QUEUE* q;
+  uv_async_t* h;
+
+  if (loop->async_io_watcher.fd == -1) /* never started */
+    return 0;
+
+  QUEUE_MOVE(&loop->async_handles, &queue);
+  while (!QUEUE_EMPTY(&queue)) {
+    q = QUEUE_HEAD(&queue);
+    h = QUEUE_DATA(q, uv_async_t, queue);
+
+    QUEUE_REMOVE(q);
+    QUEUE_INSERT_TAIL(&loop->async_handles, q);
+
+    /* The state of any thread that set pending is now likely corrupt in this
+     * child because the user called fork, so just clear these flags and move
+     * on. Calling most libc functions after `fork` is declared to be undefined
+     * behavior anyways, unless async-signal-safe, for multithreaded programs
+     * like libuv, and nothing interesting in pthreads is async-signal-safe.
+     */
+    h->pending = 0;
+    /* This is the busy flag, and we just abruptly lost all other threads. */
+    h->u.fd = 0;
+  }
+
+  /* Recreate these, since they still exist, but belong to the wrong pid now. */
+  if (loop->async_wfd != -1) {
+    if (loop->async_wfd != loop->async_io_watcher.fd)
+      uv__close(loop->async_wfd);
+    loop->async_wfd = -1;
+  }
+
+  uv__io_stop(loop, &loop->async_io_watcher, POLLIN);
+  uv__close(loop->async_io_watcher.fd);
+  loop->async_io_watcher.fd = -1;
+
+  return uv__async_start(loop);
+}
+
+
 static void uv__cpu_relax(void) {
 #if defined(__i386__) || defined(__x86_64__)
   __asm__ __volatile__ ("rep; nop" ::: "memory");  /* a.k.a. PAUSE */
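The user-facing entry point into uv__async_fork() is uv_loop_fork(), which a child process must call before reusing a loop after fork(). A hedged sketch follows; names other than the libuv and POSIX calls are illustrative, and error handling is condensed.

    #include <unistd.h>
    #include <uv.h>

    /* Hypothetical helper: continue running an existing loop in the child. */
    int run_loop_in_child(uv_loop_t* loop) {
      pid_t pid = fork();
      if (pid == -1)
        return -1;
      if (pid == 0) {
        /* Recreates the async wakeup fds (they belong to the parent's pid)
         * and, with this patch, zeroes each handle's pending and busy flags. */
        if (uv_loop_fork(loop) != 0)
          _exit(1);
        return uv_run(loop, UV_RUN_DEFAULT);
      }
      return 0; /* parent continues unchanged */
    }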