mirror of
https://github.com/libuv/libuv
synced 2025-03-28 21:13:16 +00:00
linux: eliminate a read on eventfd per wakeup (#4400)
Register the eventfd with EPOLLET to enable edge-triggered notification, which lets us eliminate the overhead of reading the eventfd via a system call on each wakeup event. When the eventfd counter reaches the maximum value of an unsigned 64-bit integer — which may never happen during the entire lifetime of the process — we rewind the counter and retry. This optimization saves one system call on each event-loop wakeup, eliminating the overhead of read(2) as well as the extra latency of each epoll wakeup.
This commit is contained in:
parent
63b22be083
commit
e5cb1d3d3d
@ -130,8 +130,10 @@ void uv__async_close(uv_async_t* handle) {
|
||||
|
||||
|
||||
static void uv__async_io(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
|
||||
#ifndef __linux__
|
||||
char buf[1024];
|
||||
ssize_t r;
|
||||
#endif
|
||||
struct uv__queue queue;
|
||||
struct uv__queue* q;
|
||||
uv_async_t* h;
|
||||
@ -139,6 +141,7 @@ static void uv__async_io(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
|
||||
|
||||
assert(w == &loop->async_io_watcher);
|
||||
|
||||
#ifndef __linux__
|
||||
for (;;) {
|
||||
r = read(w->fd, buf, sizeof(buf));
|
||||
|
||||
@ -156,6 +159,7 @@ static void uv__async_io(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
|
||||
|
||||
abort();
|
||||
}
|
||||
#endif /* !__linux__ */
|
||||
|
||||
uv__queue_move(&loop->async_handles, &queue);
|
||||
while (!uv__queue_empty(&queue)) {
|
||||
@ -179,34 +183,45 @@ static void uv__async_io(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
|
||||
|
||||
|
||||
static void uv__async_send(uv_loop_t* loop) {
|
||||
const void* buf;
|
||||
ssize_t len;
|
||||
int fd;
|
||||
int r;
|
||||
ssize_t r;
|
||||
#ifdef __linux__
|
||||
uint64_t val;
|
||||
|
||||
buf = "";
|
||||
len = 1;
|
||||
fd = loop->async_wfd;
|
||||
|
||||
#if defined(__linux__)
|
||||
if (fd == -1) {
|
||||
static const uint64_t val = 1;
|
||||
buf = &val;
|
||||
len = sizeof(val);
|
||||
fd = loop->async_io_watcher.fd; /* eventfd */
|
||||
fd = loop->async_io_watcher.fd; /* eventfd */
|
||||
for (val = 1; /* empty */; val = 1) {
|
||||
r = write(fd, &val, sizeof(uint64_t));
|
||||
if (r < 0) {
|
||||
/* When EAGAIN occurs, the eventfd counter hits the maximum value of the unsigned 64-bit.
|
||||
* We need to first drain the eventfd and then write again.
|
||||
*
|
||||
* Check out https://man7.org/linux/man-pages/man2/eventfd.2.html for details.
|
||||
*/
|
||||
if (errno == EAGAIN) {
|
||||
/* It's ready to retry. */
|
||||
if (read(fd, &val, sizeof(uint64_t)) > 0 || errno == EAGAIN) {
|
||||
continue;
|
||||
}
|
||||
}
|
||||
/* Unknown error occurs. */
|
||||
break;
|
||||
}
|
||||
return;
|
||||
}
|
||||
#endif
|
||||
#else
|
||||
|
||||
fd = loop->async_wfd; /* write end of the pipe */
|
||||
do
|
||||
r = write(fd, buf, len);
|
||||
r = write(fd, "x", 1);
|
||||
while (r == -1 && errno == EINTR);
|
||||
|
||||
if (r == len)
|
||||
if (r == 1)
|
||||
return;
|
||||
|
||||
if (r == -1)
|
||||
if (errno == EAGAIN || errno == EWOULDBLOCK)
|
||||
return;
|
||||
#endif
|
||||
|
||||
abort();
|
||||
}
|
||||
|
@ -1385,6 +1385,12 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
|
||||
|
||||
w->events = w->pevents;
|
||||
e.events = w->pevents;
|
||||
if (w == &loop->async_io_watcher)
|
||||
/* Enable edge-triggered mode on async_io_watcher(eventfd),
|
||||
* so that we're able to eliminate the overhead of reading
|
||||
* the eventfd via system call on each event loop wakeup.
|
||||
*/
|
||||
e.events |= EPOLLET;
|
||||
e.data.fd = w->fd;
|
||||
fd = w->fd;
|
||||
|
||||
@ -1632,12 +1638,12 @@ int uv_resident_set_memory(size_t* rss) {
|
||||
long val;
|
||||
int rc;
|
||||
int i;
|
||||
|
||||
|
||||
/* rss: 24th element */
|
||||
rc = uv__slurp("/proc/self/stat", buf, sizeof(buf));
|
||||
if (rc < 0)
|
||||
return rc;
|
||||
|
||||
|
||||
/* find the last ')' */
|
||||
s = strrchr(buf, ')');
|
||||
if (s == NULL)
|
||||
@ -2256,7 +2262,7 @@ uint64_t uv_get_available_memory(void) {
|
||||
}
|
||||
|
||||
|
||||
static int uv__get_cgroupv2_constrained_cpu(const char* cgroup,
|
||||
static int uv__get_cgroupv2_constrained_cpu(const char* cgroup,
|
||||
uv__cpu_constraint* constraint) {
|
||||
char path[256];
|
||||
char buf[1024];
|
||||
@ -2267,7 +2273,7 @@ static int uv__get_cgroupv2_constrained_cpu(const char* cgroup,
|
||||
|
||||
if (strncmp(cgroup, "0::/", 4) != 0)
|
||||
return UV_EINVAL;
|
||||
|
||||
|
||||
/* Trim ending \n by replacing it with a 0 */
|
||||
cgroup_trimmed = cgroup + sizeof("0::/") - 1; /* Skip the prefix "0::/" */
|
||||
cgroup_size = (int)strcspn(cgroup_trimmed, "\n"); /* Find the first slash */
|
||||
@ -2319,7 +2325,7 @@ static char* uv__cgroup1_find_cpu_controller(const char* cgroup,
|
||||
return cgroup_cpu;
|
||||
}
|
||||
|
||||
static int uv__get_cgroupv1_constrained_cpu(const char* cgroup,
|
||||
static int uv__get_cgroupv1_constrained_cpu(const char* cgroup,
|
||||
uv__cpu_constraint* constraint) {
|
||||
char path[256];
|
||||
char buf[1024];
|
||||
@ -2337,8 +2343,8 @@ static int uv__get_cgroupv1_constrained_cpu(const char* cgroup,
|
||||
cgroup_size, cgroup_cpu);
|
||||
|
||||
if (uv__slurp(path, buf, sizeof(buf)) < 0)
|
||||
return UV_EIO;
|
||||
|
||||
return UV_EIO;
|
||||
|
||||
if (sscanf(buf, "%lld", &constraint->quota_per_period) != 1)
|
||||
return UV_EINVAL;
|
||||
|
||||
@ -2360,7 +2366,7 @@ static int uv__get_cgroupv1_constrained_cpu(const char* cgroup,
|
||||
/* Read cpu.shares */
|
||||
if (uv__slurp(path, buf, sizeof(buf)) < 0)
|
||||
return UV_EIO;
|
||||
|
||||
|
||||
if (sscanf(buf, "%u", &shares) != 1)
|
||||
return UV_EINVAL;
|
||||
|
||||
|
Loading…
x
Reference in New Issue
Block a user