1
0
mirror of https://github.com/libuv/libuv synced 2025-03-28 21:13:16 +00:00

kqueue: DRY file descriptor deletion logic (#3746)

Remove the TODO that says to batch up kevent(EV_DELETE) system calls.
It's unduly complicated in case of errors.

If you pass an out array to kevent(), it will store error records that
tell you which file descriptor caused the error but it also stores new
events in the array, complicating state management.

If you don't pass an out array, it simply doesn't tell you anything
except that _something_ failed. Optimistically trying batch deletion
and falling back to one-by-one deletion is probably possible but is
something of a deoptimization and also feels somewhat dangerous.

macOS has a mostly-undocumented kevent_qos() system call that accepts a
KEVENT_FLAG_ERROR_EVENTS flag that fixes the first problem but that of
course isn't portable.

Long story short, it seems like too much hassle for too little payoff.
Libuv has been doing one-by-one deletion for over a decade now and no
one complained about performance so far so let's just stick with that.
This commit is contained in:
Ben Noordhuis 2022-09-12 22:59:43 +02:00 committed by GitHub
parent 9d898acc56
commit 0f478a7de7
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23

View File

@ -109,6 +109,21 @@ int uv__io_check_fd(uv_loop_t* loop, int fd) {
}
/* Disarm a single kqueue event: submit an EV_DELETE change for the
 * ident/filter pair carried by |ev| on the kqueue |kqfd|.
 *
 * EBADF (the fd was already closed) and ENOENT (the event was already
 * removed) are expected races and silently ignored; any other failure
 * indicates a programming error, so abort.
 */
static void uv__kqueue_delete(int kqfd, const struct kevent *ev) {
  struct kevent del;

  EV_SET(&del, ev->ident, ev->filter, EV_DELETE, 0, 0, 0);

  if (kevent(kqfd, &del, 1, NULL, 0, NULL) == 0)
    return;

  if (errno != EBADF && errno != ENOENT)
    abort();
}
void uv__io_poll(uv_loop_t* loop, int timeout) {
struct kevent events[1024];
struct kevent* ev;
@ -307,15 +322,8 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
w = loop->watchers[fd];
if (w == NULL) {
/* File descriptor that we've stopped watching, disarm it.
* TODO: batch up. */
struct kevent events[1];
EV_SET(events + 0, fd, ev->filter, EV_DELETE, 0, 0, 0);
if (kevent(loop->backend_fd, events, 1, NULL, 0, NULL))
if (errno != EBADF && errno != ENOENT)
abort();
/* File descriptor that we've stopped watching, disarm it. */
uv__kqueue_delete(loop->backend_fd, ev);
continue;
}
@ -331,44 +339,27 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
revents = 0;
if (ev->filter == EVFILT_READ) {
if (w->pevents & POLLIN) {
if (w->pevents & POLLIN)
revents |= POLLIN;
} else {
/* TODO batch up */
struct kevent events[1];
EV_SET(events + 0, fd, ev->filter, EV_DELETE, 0, 0, 0);
if (kevent(loop->backend_fd, events, 1, NULL, 0, NULL))
if (errno != ENOENT)
abort();
}
else
uv__kqueue_delete(loop->backend_fd, ev);
if ((ev->flags & EV_EOF) && (w->pevents & UV__POLLRDHUP))
revents |= UV__POLLRDHUP;
}
if (ev->filter == EV_OOBAND) {
if (w->pevents & UV__POLLPRI) {
if (w->pevents & UV__POLLPRI)
revents |= UV__POLLPRI;
} else {
/* TODO batch up */
struct kevent events[1];
EV_SET(events + 0, fd, ev->filter, EV_DELETE, 0, 0, 0);
if (kevent(loop->backend_fd, events, 1, NULL, 0, NULL))
if (errno != ENOENT)
abort();
}
else
uv__kqueue_delete(loop->backend_fd, ev);
}
if (ev->filter == EVFILT_WRITE) {
if (w->pevents & POLLOUT) {
if (w->pevents & POLLOUT)
revents |= POLLOUT;
} else {
/* TODO batch up */
struct kevent events[1];
EV_SET(events + 0, fd, ev->filter, EV_DELETE, 0, 0, 0);
if (kevent(loop->backend_fd, events, 1, NULL, 0, NULL))
if (errno != ENOENT)
abort();
}
else
uv__kqueue_delete(loop->backend_fd, ev);
}
if (ev->flags & EV_ERROR)