1
0
mirror of https://github.com/libuv/libuv synced 2025-03-28 21:13:16 +00:00

linux: always use io_uring for epoll batching (#4638)

io_uring support was disabled by default because of numerous kernel bugs,
but those are all in the sqpoll (file I/O) parts of io_uring.

Batching of epoll_ctl calls through io_uring works fine, is a nice
optimization, and is therefore unconditionally enabled again.

The UV_USE_IO_URING environment variable now only affects sqpoll, and
only when the UV_LOOP_ENABLE_IO_URING_SQPOLL event loop flag is set.

Fixes: https://github.com/libuv/libuv/issues/4616
This commit is contained in:
Ben Noordhuis 2024-12-06 00:11:05 +01:00 committed by GitHub
parent c431bc39c3
commit 69bad8201b
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194

View File

@ -455,7 +455,7 @@ int uv__io_uring_register(int fd, unsigned opcode, void* arg, unsigned nargs) {
}
static int uv__use_io_uring(void) {
static int uv__use_io_uring(uint32_t flags) {
#if defined(__ANDROID_API__)
return 0; /* Possibly available but blocked by seccomp. */
#elif defined(__arm__) && __SIZEOF_POINTER__ == 4
@ -470,25 +470,27 @@ static int uv__use_io_uring(void) {
char* val;
int use;
#if defined(__hppa__)
/* io_uring first supported on parisc in 6.1, functional in .51
* https://lore.kernel.org/all/cb912694-b1fe-dbb0-4d8c-d608f3526905@gmx.de/
*/
if (uv__kernel_version() < /*6.1.51*/0x060133)
return 0;
#endif
/* SQPOLL is all kinds of buggy but epoll batching should work fine. */
if (0 == (flags & UV__IORING_SETUP_SQPOLL))
return 1;
/* Older kernels have a bug where the sqpoll thread uses 100% CPU. */
if (uv__kernel_version() < /*5.10.186*/0x050ABA)
return 0;
use = atomic_load_explicit(&use_io_uring, memory_order_relaxed);
if (use == 0) {
use = uv__kernel_version() >=
#if defined(__hppa__)
/* io_uring first supported on parisc in 6.1, functional in .51 */
/* https://lore.kernel.org/all/cb912694-b1fe-dbb0-4d8c-d608f3526905@gmx.de/ */
/* 6.1.51 */ 0x060133
#else
/* Older kernels have a bug where the sqpoll thread uses 100% CPU. */
/* 5.10.186 */ 0x050ABA
#endif
? 1 : -1;
/* But users can still enable it if they so desire. */
val = getenv("UV_USE_IO_URING");
if (val != NULL)
use = atoi(val) ? 1 : -1;
use = val != NULL && atoi(val) > 0 ? 1 : -1;
atomic_store_explicit(&use_io_uring, use, memory_order_relaxed);
}
@ -518,7 +520,7 @@ static void uv__iou_init(int epollfd,
sq = MAP_FAILED;
sqe = MAP_FAILED;
if (!uv__use_io_uring())
if (!uv__use_io_uring(flags))
return;
kernel_version = uv__kernel_version();
@ -766,14 +768,13 @@ static struct uv__io_uring_sqe* uv__iou_get_sqe(struct uv__iou* iou,
*/
if (iou->ringfd == -2) {
/* By default, the SQPOLL is not created. Enable only if the loop is
* configured with UV_LOOP_ENABLE_IO_URING_SQPOLL.
* configured with UV_LOOP_ENABLE_IO_URING_SQPOLL and the UV_USE_IO_URING
* environment variable is unset or a positive number.
*/
if ((loop->flags & UV_LOOP_ENABLE_IO_URING_SQPOLL) == 0) {
iou->ringfd = -1;
return NULL;
}
if (loop->flags & UV_LOOP_ENABLE_IO_URING_SQPOLL)
if (uv__use_io_uring(UV__IORING_SETUP_SQPOLL))
uv__iou_init(loop->backend_fd, iou, 64, UV__IORING_SETUP_SQPOLL);
uv__iou_init(loop->backend_fd, iou, 64, UV__IORING_SETUP_SQPOLL);
if (iou->ringfd == -2)
iou->ringfd = -1; /* "failed" */
}