
sunos: restore use of event ports

The sunos platform currently covers at least the Solaris and illumos
operating systems. Although these diverged 11 years ago, they still
share some common features, such as support for event ports.

illumos also has a compatibility wrapper for epoll, but its use is not
recommended over event ports. From the NOTES section of
https://illumos.org/man/5/epoll:

	The epoll facility is implemented for purposes of offering
	compatibility to and portability of Linux-borne
	applications; native applications should continue to prefer
	using event ports... In particular, use of epoll in a
	multithreaded environment is fraught with peril...

Restore the event ports code so that libuv can continue to be used
on Solaris, and to avoid the problems that come with using epoll()
on illumos. The separation of epoll into src/unix/epoll.c has been
retained.

Fixes: https://github.com/libuv/libuv/issues/3241
PR-URL: https://github.com/libuv/libuv/pull/3242
Reviewed-By: Jameson Nash <vtjnash@gmail.com>
Andy Fiddaman 2021-07-20 02:19:24 +00:00 committed by GitHub
parent 8ea8f12438
commit 5fe597268e
3 changed files with 276 additions and 5 deletions
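
For readers who have not used them, event ports are the native readiness-notification
facility on Solaris and illumos, driven by port_create(), port_associate() and
port_get()/port_getn(). As a rough standalone sketch of the interface (illustrative
only, not part of this change), waiting for one descriptor to become readable looks
roughly like this:

    /* Illustrative sketch, not libuv code: wait up to five seconds for
     * stdin to become readable via an event port. */
    #include <poll.h>
    #include <port.h>
    #include <stdio.h>
    #include <time.h>
    #include <unistd.h>

    int main(void) {
      port_event_t ev;
      struct timespec timeout = { 5, 0 };
      int port;

      port = port_create();
      if (port == -1)
        return 1;

      /* Associations are oneshot: retrieving the event consumes it. */
      if (port_associate(port, PORT_SOURCE_FD, STDIN_FILENO, POLLIN, NULL))
        return 1;

      if (port_get(port, &ev, &timeout) == 0)
        printf("fd %d is readable (events 0x%x)\n",
               (int) ev.portev_object, (unsigned) ev.portev_events);

      close(port);
      return 0;
    }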

CMakeLists.txt

@@ -329,8 +329,7 @@ if(CMAKE_SYSTEM_NAME STREQUAL "SunOS")
   list(APPEND uv_libraries kstat nsl sendfile socket)
   list(APPEND uv_sources
        src/unix/no-proctitle.c
-       src/unix/sunos.c
-       src/unix/epoll.c)
+       src/unix/sunos.c)
 endif()
 
 if(CMAKE_SYSTEM_NAME STREQUAL "Haiku")

Makefile.am

@@ -519,7 +519,6 @@ libuv_la_CFLAGS += -D__EXTENSIONS__ \
                    -D_XOPEN_SOURCE=500 \
                    -D_REENTRANT
 libuv_la_SOURCES += src/unix/no-proctitle.c \
-                    src/unix/epoll.c \
                     src/unix/sunos.c
 endif

src/unix/sunos.c

@@ -65,9 +65,24 @@
 int uv__platform_loop_init(uv_loop_t* loop) {
-  loop->fs_fd = -1;
+  int err;
+  int fd;
 
-  return uv__epoll_init(loop);
+  loop->fs_fd = -1;
+  loop->backend_fd = -1;
+
+  fd = port_create();
+  if (fd == -1)
+    return UV__ERR(errno);
+
+  err = uv__cloexec(fd, 1);
+  if (err) {
+    uv__close(fd);
+    return err;
+  }
+  loop->backend_fd = fd;
+
+  return 0;
 }
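
The restored initialiser makes the event port the loop's backend descriptor and
marks it close-on-exec through uv__cloexec(). Purely as an illustration (libuv's
internal helper may be implemented differently), that close-on-exec step amounts
to roughly the following fcntl() sequence:

    /* Illustrative only: approximately what uv__cloexec(fd, 1) achieves,
     * assuming a plain fcntl()-based implementation. */
    #include <fcntl.h>

    static int set_cloexec(int fd) {
      int flags;

      flags = fcntl(fd, F_GETFD);
      if (flags == -1)
        return -1;

      /* Keep the descriptor from being inherited across exec(). */
      return fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
    }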
@@ -96,6 +111,264 @@ int uv__io_fork(uv_loop_t* loop) {
}


void uv__platform_invalidate_fd(uv_loop_t* loop, int fd) {
  struct port_event* events;
  uintptr_t i;
  uintptr_t nfds;

  assert(loop->watchers != NULL);
  assert(fd >= 0);

  events = (struct port_event*) loop->watchers[loop->nwatchers];
  nfds = (uintptr_t) loop->watchers[loop->nwatchers + 1];
  if (events == NULL)
    return;

  /* Invalidate events with same file descriptor */
  for (i = 0; i < nfds; i++)
    if ((int) events[i].portev_object == fd)
      events[i].portev_object = -1;
}


int uv__io_check_fd(uv_loop_t* loop, int fd) {
  if (port_associate(loop->backend_fd, PORT_SOURCE_FD, fd, POLLIN, 0))
    return UV__ERR(errno);

  if (port_dissociate(loop->backend_fd, PORT_SOURCE_FD, fd)) {
    perror("(libuv) port_dissociate()");
    abort();
  }

  return 0;
}


void uv__io_poll(uv_loop_t* loop, int timeout) {
  struct port_event events[1024];
  struct port_event* pe;
  struct timespec spec;
  QUEUE* q;
  uv__io_t* w;
  sigset_t* pset;
  sigset_t set;
  uint64_t base;
  uint64_t diff;
  uint64_t idle_poll;
  unsigned int nfds;
  unsigned int i;
  int saved_errno;
  int have_signals;
  int nevents;
  int count;
  int err;
  int fd;
  int user_timeout;
  int reset_timeout;

  if (loop->nfds == 0) {
    assert(QUEUE_EMPTY(&loop->watcher_queue));
    return;
  }

  while (!QUEUE_EMPTY(&loop->watcher_queue)) {
    q = QUEUE_HEAD(&loop->watcher_queue);
    QUEUE_REMOVE(q);
    QUEUE_INIT(q);

    w = QUEUE_DATA(q, uv__io_t, watcher_queue);
    assert(w->pevents != 0);

    if (port_associate(loop->backend_fd,
                       PORT_SOURCE_FD,
                       w->fd,
                       w->pevents,
                       0)) {
      perror("(libuv) port_associate()");
      abort();
    }

    w->events = w->pevents;
  }

  pset = NULL;
  if (loop->flags & UV_LOOP_BLOCK_SIGPROF) {
    pset = &set;
    sigemptyset(pset);
    sigaddset(pset, SIGPROF);
  }

  assert(timeout >= -1);
  base = loop->time;
  count = 48; /* Benchmarks suggest this gives the best throughput. */

  if (uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME) {
    reset_timeout = 1;
    user_timeout = timeout;
    timeout = 0;
  } else {
    reset_timeout = 0;
  }

  for (;;) {
    /* Only need to set the provider_entry_time if timeout != 0. The function
     * will return early if the loop isn't configured with UV_METRICS_IDLE_TIME.
     */
    if (timeout != 0)
      uv__metrics_set_provider_entry_time(loop);

    if (timeout != -1) {
      spec.tv_sec = timeout / 1000;
      spec.tv_nsec = (timeout % 1000) * 1000000;
    }

    /* Work around a kernel bug where nfds is not updated. */
    events[0].portev_source = 0;

    nfds = 1;
    saved_errno = 0;

    if (pset != NULL)
      pthread_sigmask(SIG_BLOCK, pset, NULL);

    err = port_getn(loop->backend_fd,
                    events,
                    ARRAY_SIZE(events),
                    &nfds,
                    timeout == -1 ? NULL : &spec);

    if (pset != NULL)
      pthread_sigmask(SIG_UNBLOCK, pset, NULL);

    if (err) {
      /* Work around another kernel bug: port_getn() may return events even
       * on error.
       */
      if (errno == EINTR || errno == ETIME) {
        saved_errno = errno;
      } else {
        perror("(libuv) port_getn()");
        abort();
      }
    }

    /* Update loop->time unconditionally. It's tempting to skip the update when
     * timeout == 0 (i.e. non-blocking poll) but there is no guarantee that the
     * operating system didn't reschedule our process while in the syscall.
     */
    SAVE_ERRNO(uv__update_time(loop));

    if (events[0].portev_source == 0) {
      if (reset_timeout != 0) {
        timeout = user_timeout;
        reset_timeout = 0;
      }

      if (timeout == 0)
        return;

      if (timeout == -1)
        continue;

      goto update_timeout;
    }

    if (nfds == 0) {
      assert(timeout != -1);
      return;
    }

    have_signals = 0;
    nevents = 0;

    assert(loop->watchers != NULL);
    loop->watchers[loop->nwatchers] = (void*) events;
    loop->watchers[loop->nwatchers + 1] = (void*) (uintptr_t) nfds;

    for (i = 0; i < nfds; i++) {
      pe = events + i;
      fd = pe->portev_object;

      /* Skip invalidated events, see uv__platform_invalidate_fd */
      if (fd == -1)
        continue;

      assert(fd >= 0);
      assert((unsigned) fd < loop->nwatchers);

      w = loop->watchers[fd];

      /* File descriptor that we've stopped watching, ignore. */
      if (w == NULL)
        continue;

      /* Run signal watchers last. This also affects child process watchers
       * because those are implemented in terms of signal watchers.
       */
      if (w == &loop->signal_io_watcher) {
        have_signals = 1;
      } else {
        uv__metrics_update_idle_time(loop);
        w->cb(loop, w, pe->portev_events);
      }

      nevents++;

      if (w != loop->watchers[fd])
        continue;  /* Disabled by callback. */

      /* Events Ports operates in oneshot mode, rearm timer on next run. */
      if (w->pevents != 0 && QUEUE_EMPTY(&w->watcher_queue))
        QUEUE_INSERT_TAIL(&loop->watcher_queue, &w->watcher_queue);
    }

    if (reset_timeout != 0) {
      timeout = user_timeout;
      reset_timeout = 0;
    }

    if (have_signals != 0) {
      uv__metrics_update_idle_time(loop);
      loop->signal_io_watcher.cb(loop, &loop->signal_io_watcher, POLLIN);
    }

    loop->watchers[loop->nwatchers] = NULL;
    loop->watchers[loop->nwatchers + 1] = NULL;

    if (have_signals != 0)
      return;  /* Event loop should cycle now so don't poll again. */

    if (nevents != 0) {
      if (nfds == ARRAY_SIZE(events) && --count != 0) {
        /* Poll for more events but don't block this time. */
        timeout = 0;
        continue;
      }
      return;
    }

    if (saved_errno == ETIME) {
      assert(timeout != -1);
      return;
    }

    if (timeout == 0)
      return;

    if (timeout == -1)
      continue;

update_timeout:
    assert(timeout > 0);

    diff = loop->time - base;
    if (diff >= (uint64_t) timeout)
      return;

    timeout -= diff;
  }
}


uint64_t uv__hrtime(uv_clocktype_t type) {
  return gethrtime();
}
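
A couple of details of uv__io_poll() above are worth spelling out: associations
are oneshot, which is why fired watchers are pushed back onto loop->watcher_queue
for re-association on the next iteration, and port_getn() treats nget as an
in/out parameter (minimum number of events to wait for on entry, number actually
retrieved on return) that may be filled in even when the call fails with EINTR
or ETIME. A standalone sketch of that calling convention (illustrative only,
not libuv code):

    /* Illustrative only: the port_getn() calling convention used above.
     * nget is in/out: desired minimum on entry, events retrieved on return. */
    #include <errno.h>
    #include <port.h>
    #include <time.h>

    static int collect_events(int port, port_event_t* list, unsigned int max) {
      struct timespec no_wait = { 0, 0 };
      unsigned int nget = 1;               /* wait for at least one event */

      if (port_getn(port, list, max, &nget, &no_wait) != 0 &&
          errno != ETIME && errno != EINTR)
        return -1;

      /* Like uv__io_poll(), trust nget even on ETIME/EINTR: events may
       * have been delivered despite the error return. */
      return (int) nget;
    }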