1
0
mirror of https://github.com/libuv/libuv synced 2025-03-28 21:13:16 +00:00

core: add API to measure event loop idle time

The API addition `uv_metrics_idle_time()` is a thread safe call that
allows the user to retrieve the amount of time the event loop has spent
in the kernel's event provider (i.e. poll). It was done this way to
allow retrieving this value without needing to interrupt the execution
of the event loop. This option can be enabled by passing
`UV_METRICS_IDLE_TIME` to `uv_loop_configure()`.

One important aspect of this change is, when enabled, to always first
call the event provider with a `timeout == 0`. This allows libuv to know
whether any events were waiting in the event queue when the event
provider was called. The importance of this is because libuv is tracking
the amount of "idle time", not "poll time". Thus the provider entry time
is not recorded when `timeout == 0` (the event provider never idles in
this case).

While this does add a small amount of overhead when enabled, the
overhead decreases as the event loop's load increases. This is
because poll events will already be waiting when the event provider is
called, so the provider entry time is never actually recorded.

Checking if `uv_loop_t` is configured with `UV_METRICS_IDLE_TIME` always
happens in `uv__metrics_set_provider_entry_time()` and
`uv__metrics_update_idle_time()`. Making the conditional logic wrapping
each call simpler and allows for instrumentation to always hook into
those two function calls.

Rather than placing the fields directly on `uv__loop_internal_fields_t`
add the struct `uv__loop_metrics_t` as a location for future metrics API
additions.

Tests and additional documentation have been included.

PR-URL: https://github.com/libuv/libuv/pull/2725
Reviewed-By: Fedor Indutny <fedor.indutny@gmail.com>
Reviewed-By: Santiago Gimeno <santiago.gimeno@gmail.com>
Reviewed-By: Jameson Nash <vtjnash@gmail.com>
This commit is contained in:
Trevor Norris 2020-03-26 17:15:13 -06:00 committed by Jameson Nash
parent 70bbc093f1
commit e8effd4556
19 changed files with 572 additions and 15 deletions

View File

@ -418,6 +418,7 @@ if(LIBUV_BUILD_TESTS)
test/test-loop-handles.c
test/test-loop-stop.c
test/test-loop-time.c
test/test-metrics.c
test/test-multiple-listen.c
test/test-mutexes.c
test/test-not-readable-nor-writable-on-read-error.c

View File

@ -203,6 +203,7 @@ test_run_tests_SOURCES = test/blackhole-server.c \
test/test-loop-stop.c \
test/test-loop-time.c \
test/test-loop-configure.c \
test/test-metrics.c \
test/test-multiple-listen.c \
test/test-mutexes.c \
test/test-not-readable-nor-writable-on-read-error.c \

View File

@ -32,4 +32,5 @@ API documentation
dll
threading
misc
metrics

View File

@ -68,6 +68,11 @@ API
to suppress unnecessary wakeups when using a sampling profiler.
Requesting other signals will fail with UV_EINVAL.
- UV_METRICS_IDLE_TIME: Accumulate the amount of idle time the event loop
spends in the event provider.
This option is necessary to use :c:func:`uv_metrics_idle_time`.
.. c:function:: int uv_loop_close(uv_loop_t* loop)
Releases all internal loop resources. Call this function only when the loop

25
docs/src/metrics.rst Normal file
View File

@ -0,0 +1,25 @@
.. _metrics:
Metrics operations
======================
libuv provides a metrics API to track the amount of time the event loop has
spent idle in the kernel's event provider.
API
---
.. c:function:: uint64_t uv_metrics_idle_time(uv_loop_t* loop)
Retrieve the amount of time the event loop has been idle in the kernel's
event provider (e.g. ``epoll_wait``). The call is thread safe.
The return value is the accumulated time spent idle in the kernel's event
provider starting from when the :c:type:`uv_loop_t` was configured to
collect the idle time.
.. note::
The event loop will not begin accumulating the event provider's idle
time until calling :c:type:`uv_loop_configure` with
:c:type:`UV_METRICS_IDLE_TIME`.

View File

@ -247,7 +247,8 @@ typedef struct uv_utsname_s uv_utsname_t;
typedef struct uv_statfs_s uv_statfs_t;
typedef enum {
UV_LOOP_BLOCK_SIGNAL
UV_LOOP_BLOCK_SIGNAL = 0,
UV_METRICS_IDLE_TIME
} uv_loop_option;
typedef enum {
@ -1244,6 +1245,7 @@ UV_EXTERN int uv_os_gethostname(char* buffer, size_t* size);
UV_EXTERN int uv_os_uname(uv_utsname_t* buffer);
UV_EXTERN uint64_t uv_metrics_idle_time(uv_loop_t* loop);
typedef enum {
UV_FS_UNKNOWN = -1,

View File

@ -145,6 +145,8 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
int i;
int rc;
int add_failed;
int user_timeout;
int reset_timeout;
if (loop->nfds == 0) {
assert(QUEUE_EMPTY(&loop->watcher_queue));
@ -214,7 +216,21 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
base = loop->time;
count = 48; /* Benchmarks suggest this gives the best throughput. */
if (uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME) {
reset_timeout = 1;
user_timeout = timeout;
timeout = 0;
} else {
reset_timeout = 0;
}
for (;;) {
/* Only need to set the provider_entry_time if timeout != 0. The function
* will return early if the loop isn't configured with UV_METRICS_IDLE_TIME.
*/
if (timeout != 0)
uv__metrics_set_provider_entry_time(loop);
nfds = pollset_poll(loop->backend_fd,
events,
ARRAY_SIZE(events),
@ -227,6 +243,15 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
SAVE_ERRNO(uv__update_time(loop));
if (nfds == 0) {
if (reset_timeout != 0) {
timeout = user_timeout;
reset_timeout = 0;
if (timeout == -1)
continue;
if (timeout > 0)
goto update_timeout;
}
assert(timeout != -1);
return;
}
@ -236,6 +261,11 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
abort();
}
if (reset_timeout != 0) {
timeout = user_timeout;
reset_timeout = 0;
}
if (timeout == -1)
continue;
@ -280,16 +310,25 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
/* Run signal watchers last. This also affects child process watchers
* because those are implemented in terms of signal watchers.
*/
if (w == &loop->signal_io_watcher)
if (w == &loop->signal_io_watcher) {
have_signals = 1;
else
} else {
uv__metrics_update_idle_time(loop);
w->cb(loop, w, pe->revents);
}
nevents++;
}
if (have_signals != 0)
if (reset_timeout != 0) {
timeout = user_timeout;
reset_timeout = 0;
}
if (have_signals != 0) {
uv__metrics_update_idle_time(loop);
loop->signal_io_watcher.cb(loop, &loop->signal_io_watcher, POLLIN);
}
loop->watchers[loop->nwatchers] = NULL;
loop->watchers[loop->nwatchers + 1] = NULL;

View File

@ -383,6 +383,14 @@ int uv_run(uv_loop_t* loop, uv_run_mode mode) {
timeout = uv_backend_timeout(loop);
uv__io_poll(loop, timeout);
/* Run one final update on the provider_idle_time in case uv__io_poll
* returned because the timeout expired, but no events were received. This
* call will be ignored if the provider_entry_time was either never set (if
* the timeout == 0) or was already updated b/c an event was received.
*/
uv__metrics_update_idle_time(loop);
uv__run_check(loop);
uv__run_closing_handles(loop);

View File

@ -129,6 +129,8 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
int fd;
int op;
int i;
int user_timeout;
int reset_timeout;
if (loop->nfds == 0) {
assert(QUEUE_EMPTY(&loop->watcher_queue));
@ -202,7 +204,21 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
base = loop->time;
count = 48; /* Benchmarks suggest this gives the best throughput. */
if (uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME) {
reset_timeout = 1;
user_timeout = timeout;
timeout = 0;
} else {
reset_timeout = 0;
}
for (;; nevents = 0) {
/* Only need to set the provider_entry_time if timeout != 0. The function
* will return early if the loop isn't configured with UV_METRICS_IDLE_TIME.
*/
if (timeout != 0)
uv__metrics_set_provider_entry_time(loop);
if (timeout != -1) {
spec.tv_sec = timeout / 1000;
spec.tv_nsec = (timeout % 1000) * 1000000;
@ -228,6 +244,15 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
SAVE_ERRNO(uv__update_time(loop));
if (nfds == 0) {
if (reset_timeout != 0) {
timeout = user_timeout;
reset_timeout = 0;
if (timeout == -1)
continue;
if (timeout > 0)
goto update_timeout;
}
assert(timeout != -1);
return;
}
@ -236,6 +261,11 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
if (errno != EINTR)
abort();
if (reset_timeout != 0) {
timeout = user_timeout;
reset_timeout = 0;
}
if (timeout == 0)
return;
@ -276,6 +306,7 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
if (ev->filter == EVFILT_VNODE) {
assert(w->events == POLLIN);
assert(w->pevents == POLLIN);
uv__metrics_update_idle_time(loop);
w->cb(loop, w, ev->fflags); /* XXX always uv__fs_event() */
nevents++;
continue;
@ -337,16 +368,25 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
/* Run signal watchers last. This also affects child process watchers
* because those are implemented in terms of signal watchers.
*/
if (w == &loop->signal_io_watcher)
if (w == &loop->signal_io_watcher) {
have_signals = 1;
else
} else {
uv__metrics_update_idle_time(loop);
w->cb(loop, w, revents);
}
nevents++;
}
if (have_signals != 0)
if (reset_timeout != 0) {
timeout = user_timeout;
reset_timeout = 0;
}
if (have_signals != 0) {
uv__metrics_update_idle_time(loop);
loop->signal_io_watcher.cb(loop, &loop->signal_io_watcher, POLLIN);
}
loop->watchers[loop->nwatchers] = NULL;
loop->watchers[loop->nwatchers + 1] = NULL;

View File

@ -218,6 +218,8 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
int fd;
int op;
int i;
int user_timeout;
int reset_timeout;
if (loop->nfds == 0) {
assert(QUEUE_EMPTY(&loop->watcher_queue));
@ -273,6 +275,14 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
count = 48; /* Benchmarks suggest this gives the best throughput. */
real_timeout = timeout;
if (uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME) {
reset_timeout = 1;
user_timeout = timeout;
timeout = 0;
} else {
reset_timeout = 0;
}
/* You could argue there is a dependency between these two but
* ultimately we don't care about their ordering with respect
* to one another. Worst case, we make a few system calls that
@ -283,6 +293,12 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
no_epoll_wait = uv__load_relaxed(&no_epoll_wait_cached);
for (;;) {
/* Only need to set the provider_entry_time if timeout != 0. The function
* will return early if the loop isn't configured with UV_METRICS_IDLE_TIME.
*/
if (timeout != 0)
uv__metrics_set_provider_entry_time(loop);
/* See the comment for max_safe_timeout for an explanation of why
* this is necessary. Executive summary: kernel bug workaround.
*/
@ -327,6 +343,14 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
if (nfds == 0) {
assert(timeout != -1);
if (reset_timeout != 0) {
timeout = user_timeout;
reset_timeout = 0;
}
if (timeout == -1)
continue;
if (timeout == 0)
return;
@ -346,6 +370,11 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
if (errno != EINTR)
abort();
if (reset_timeout != 0) {
timeout = user_timeout;
reset_timeout = 0;
}
if (timeout == -1)
continue;
@ -425,17 +454,26 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
/* Run signal watchers last. This also affects child process watchers
* because those are implemented in terms of signal watchers.
*/
if (w == &loop->signal_io_watcher)
if (w == &loop->signal_io_watcher) {
have_signals = 1;
else
} else {
uv__metrics_update_idle_time(loop);
w->cb(loop, w, pe->events);
}
nevents++;
}
}
if (have_signals != 0)
if (reset_timeout != 0) {
timeout = user_timeout;
reset_timeout = 0;
}
if (have_signals != 0) {
uv__metrics_update_idle_time(loop);
loop->signal_io_watcher.cb(loop, &loop->signal_io_watcher, POLLIN);
}
loop->watchers[loop->nwatchers] = NULL;
loop->watchers[loop->nwatchers + 1] = NULL;

View File

@ -42,6 +42,10 @@ int uv_loop_init(uv_loop_t* loop) {
return UV_ENOMEM;
loop->internal_fields = lfields;
err = uv_mutex_init(&lfields->loop_metrics.lock);
if (err)
goto fail_metrics_mutex_init;
heap_init((struct heap*) &loop->timer_heap);
QUEUE_INIT(&loop->wq);
QUEUE_INIT(&loop->idle_handles);
@ -72,7 +76,7 @@ int uv_loop_init(uv_loop_t* loop) {
err = uv__platform_loop_init(loop);
if (err)
return err;
goto fail_platform_init;
uv__signal_global_once_init();
err = uv_signal_init(loop, &loop->child_watcher);
@ -111,6 +115,11 @@ fail_rwlock_init:
fail_signal_init:
uv__platform_loop_delete(loop);
fail_platform_init:
uv_mutex_destroy(&lfields->loop_metrics.lock);
fail_metrics_mutex_init:
uv__free(lfields);
loop->internal_fields = NULL;
@ -193,12 +202,21 @@ void uv__loop_close(uv_loop_t* loop) {
loop->nwatchers = 0;
lfields = uv__get_internal_fields(loop);
uv_mutex_destroy(&lfields->loop_metrics.lock);
uv__free(lfields);
loop->internal_fields = NULL;
}
int uv__loop_configure(uv_loop_t* loop, uv_loop_option option, va_list ap) {
uv__loop_internal_fields_t* lfields;
lfields = uv__get_internal_fields(loop);
if (option == UV_METRICS_IDLE_TIME) {
lfields->flags |= UV_METRICS_IDLE_TIME;
return 0;
}
if (option != UV_LOOP_BLOCK_SIGNAL)
return UV_ENOSYS;

View File

@ -818,6 +818,8 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
int fd;
int op;
int i;
int user_timeout;
int reset_timeout;
if (loop->nfds == 0) {
assert(QUEUE_EMPTY(&loop->watcher_queue));
@ -870,8 +872,22 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
real_timeout = timeout;
int nevents = 0;
if (uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME) {
reset_timeout = 1;
user_timeout = timeout;
timeout = 0;
} else {
reset_timeout = 0;
}
nfds = 0;
for (;;) {
/* Only need to set the provider_entry_time if timeout != 0. The function
* will return early if the loop isn't configured with UV_METRICS_IDLE_TIME.
*/
if (timeout != 0)
uv__metrics_set_provider_entry_time(loop);
if (sizeof(int32_t) == sizeof(long) && timeout >= max_safe_timeout)
timeout = max_safe_timeout;
@ -887,6 +903,14 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
if (nfds == 0) {
assert(timeout != -1);
if (reset_timeout != 0) {
timeout = user_timeout;
reset_timeout = 0;
}
if (timeout == -1)
continue;
if (timeout == 0)
return;
@ -901,6 +925,11 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
if (errno != EINTR)
abort();
if (reset_timeout != 0) {
timeout = user_timeout;
reset_timeout = 0;
}
if (timeout == -1)
continue;
@ -955,6 +984,7 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
pe->events |= w->pevents & (POLLIN | POLLOUT);
if (pe->events != 0) {
uv__metrics_update_idle_time(loop);
w->cb(loop, w, pe->events);
nevents++;
}
@ -962,6 +992,11 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
loop->watchers[loop->nwatchers] = NULL;
loop->watchers[loop->nwatchers + 1] = NULL;
if (reset_timeout != 0) {
timeout = user_timeout;
reset_timeout = 0;
}
if (nevents != 0) {
if (nfds == ARRAY_SIZE(events) && --count != 0) {
/* Poll for more events but don't block this time. */

View File

@ -144,6 +144,8 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
int have_signals;
struct pollfd* pe;
int fd;
int user_timeout;
int reset_timeout;
if (loop->nfds == 0) {
assert(QUEUE_EMPTY(&loop->watcher_queue));
@ -177,11 +179,25 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
assert(timeout >= -1);
time_base = loop->time;
if (uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME) {
reset_timeout = 1;
user_timeout = timeout;
timeout = 0;
} else {
reset_timeout = 0;
}
/* Loop calls to poll() and processing of results. If we get some
* results from poll() but they turn out not to be interesting to
* our caller then we need to loop around and poll() again.
*/
for (;;) {
/* Only need to set the provider_entry_time if timeout != 0. The function
* will return early if the loop isn't configured with UV_METRICS_IDLE_TIME.
*/
if (timeout != 0)
uv__metrics_set_provider_entry_time(loop);
if (pset != NULL)
if (pthread_sigmask(SIG_BLOCK, pset, NULL))
abort();
@ -197,6 +213,15 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
SAVE_ERRNO(uv__update_time(loop));
if (nfds == 0) {
if (reset_timeout != 0) {
timeout = user_timeout;
reset_timeout = 0;
if (timeout == -1)
continue;
if (timeout > 0)
goto update_timeout;
}
assert(timeout != -1);
return;
}
@ -205,6 +230,11 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
if (errno != EINTR)
abort();
if (reset_timeout != 0) {
timeout = user_timeout;
reset_timeout = 0;
}
if (timeout == -1)
continue;
@ -254,6 +284,7 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
if (w == &loop->signal_io_watcher) {
have_signals = 1;
} else {
uv__metrics_update_idle_time(loop);
w->cb(loop, w, pe->revents);
}
@ -261,8 +292,15 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
}
}
if (have_signals != 0)
if (reset_timeout != 0) {
timeout = user_timeout;
reset_timeout = 0;
}
if (have_signals != 0) {
uv__metrics_update_idle_time(loop);
loop->signal_io_watcher.cb(loop, &loop->signal_io_watcher, POLLIN);
}
loop->poll_fds_iterating = 0;

View File

@ -154,6 +154,7 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
sigset_t set;
uint64_t base;
uint64_t diff;
uint64_t idle_poll;
unsigned int nfds;
unsigned int i;
int saved_errno;
@ -162,6 +163,8 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
int count;
int err;
int fd;
int user_timeout;
int reset_timeout;
if (loop->nfds == 0) {
assert(QUEUE_EMPTY(&loop->watcher_queue));
@ -199,7 +202,21 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
base = loop->time;
count = 48; /* Benchmarks suggest this gives the best throughput. */
if (uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME) {
reset_timeout = 1;
user_timeout = timeout;
timeout = 0;
} else {
reset_timeout = 0;
}
for (;;) {
/* Only need to set the provider_entry_time if timeout != 0. The function
* will return early if the loop isn't configured with UV_METRICS_IDLE_TIME.
*/
if (timeout != 0)
uv__metrics_set_provider_entry_time(loop);
if (timeout != -1) {
spec.tv_sec = timeout / 1000;
spec.tv_nsec = (timeout % 1000) * 1000000;
@ -242,6 +259,11 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
SAVE_ERRNO(uv__update_time(loop));
if (events[0].portev_source == 0) {
if (reset_timeout != 0) {
timeout = user_timeout;
reset_timeout = 0;
}
if (timeout == 0)
return;
@ -282,10 +304,12 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
/* Run signal watchers last. This also affects child process watchers
* because those are implemented in terms of signal watchers.
*/
if (w == &loop->signal_io_watcher)
if (w == &loop->signal_io_watcher) {
have_signals = 1;
else
} else {
uv__metrics_update_idle_time(loop);
w->cb(loop, w, pe->portev_events);
}
nevents++;
@ -297,8 +321,15 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
QUEUE_INSERT_TAIL(&loop->watcher_queue, &w->watcher_queue);
}
if (have_signals != 0)
if (reset_timeout != 0) {
timeout = user_timeout;
reset_timeout = 0;
}
if (have_signals != 0) {
uv__metrics_update_idle_time(loop);
loop->signal_io_watcher.cb(loop, &loop->signal_io_watcher, POLLIN);
}
loop->watchers[loop->nwatchers] = NULL;
loop->watchers[loop->nwatchers + 1] = NULL;

View File

@ -867,3 +867,62 @@ void uv_library_shutdown(void) {
uv__threadpool_cleanup();
uv__store_relaxed(&was_shutdown, 1);
}
/* Fold the time since the last recorded provider entry into the loop's
 * accumulated idle time, then clear the entry timestamp so the same idle
 * period is never counted twice. No-op unless the loop was configured with
 * UV_METRICS_IDLE_TIME, and no-op when no entry time is pending (e.g. the
 * provider was last called with timeout == 0).
 */
void uv__metrics_update_idle_time(uv_loop_t* loop) {
  uv__loop_metrics_t* loop_metrics;
  uint64_t entry_time;
  uint64_t exit_time;

  /* Cheap early-out keeps the disabled path nearly free. */
  if (!(uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME))
    return;

  loop_metrics = uv__get_loop_metrics(loop);
  /* The thread running uv__metrics_update_idle_time() is always the same
   * thread that sets provider_entry_time. So it's unnecessary to lock before
   * retrieving this value.
   */
  if (loop_metrics->provider_entry_time == 0)
    return;

  /* Sample the clock before taking the lock so lock contention (e.g. a
   * concurrent uv_metrics_idle_time() reader) is not counted as idle time.
   */
  exit_time = uv_hrtime();

  /* The lock protects readers on other threads (uv_metrics_idle_time()). */
  uv_mutex_lock(&loop_metrics->lock);
  entry_time = loop_metrics->provider_entry_time;
  loop_metrics->provider_entry_time = 0;
  loop_metrics->provider_idle_time += exit_time - entry_time;
  uv_mutex_unlock(&loop_metrics->lock);
}
/* Record the moment the event loop is about to block in the kernel's event
 * provider. Does nothing unless UV_METRICS_IDLE_TIME was configured on the
 * loop. The stored timestamp is later consumed (and cleared) by
 * uv__metrics_update_idle_time().
 */
void uv__metrics_set_provider_entry_time(uv_loop_t* loop) {
  uv__loop_metrics_t* metrics;
  uint64_t timestamp;

  if (!(uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME))
    return;

  /* Take the timestamp before acquiring the lock so time spent waiting on
   * the mutex is not attributed to the idle period.
   */
  timestamp = uv_hrtime();
  metrics = uv__get_loop_metrics(loop);

  uv_mutex_lock(&metrics->lock);
  metrics->provider_entry_time = timestamp;
  uv_mutex_unlock(&metrics->lock);
}
/* Public, thread-safe accessor for the loop's accumulated provider idle
 * time in nanoseconds. If the loop is currently blocked in the provider
 * (a pending entry timestamp exists), the in-progress idle period is added
 * on top of the stored total so callers see a live value.
 */
uint64_t uv_metrics_idle_time(uv_loop_t* loop) {
  uv__loop_metrics_t* metrics;
  uint64_t accumulated;
  uint64_t entered_at;

  metrics = uv__get_loop_metrics(loop);

  uv_mutex_lock(&metrics->lock);
  accumulated = metrics->provider_idle_time;
  entered_at = metrics->provider_entry_time;
  uv_mutex_unlock(&metrics->lock);

  /* entered_at != 0 means the provider is idling right now; include the
   * partial period that has elapsed so far.
   */
  if (entered_at > 0)
    accumulated += uv_hrtime() - entered_at;

  return accumulated;
}

View File

@ -336,6 +336,9 @@ void uv__threadpool_cleanup(void);
#define uv__get_internal_fields(loop) \
((uv__loop_internal_fields_t*) loop->internal_fields)
#define uv__get_loop_metrics(loop) \
(&uv__get_internal_fields(loop)->loop_metrics)
/* Allocator prototypes */
void *uv__calloc(size_t count, size_t size);
char *uv__strdup(const char* s);
@ -345,10 +348,21 @@ void uv__free(void* ptr);
void* uv__realloc(void* ptr, size_t size);
void* uv__reallocf(void* ptr, size_t size);
typedef struct uv__loop_metrics_s uv__loop_metrics_t;
typedef struct uv__loop_internal_fields_s uv__loop_internal_fields_t;

/* Per-loop metrics state; lives inside uv__loop_internal_fields_t so future
 * metrics can be added without touching the public uv_loop_t layout.
 */
struct uv__loop_metrics_s {
  /* uv_hrtime() timestamp taken just before blocking in the event provider;
   * 0 when no idle period is in progress. Written only by the loop thread.
   */
  uint64_t provider_entry_time;
  /* Total nanoseconds spent idle in the event provider since
   * UV_METRICS_IDLE_TIME was enabled on the loop.
   */
  uint64_t provider_idle_time;
  /* Guards the two fields above against concurrent readers calling
   * uv_metrics_idle_time() from other threads.
   */
  uv_mutex_t lock;
};

/* Accumulate elapsed idle time and clear provider_entry_time. */
void uv__metrics_update_idle_time(uv_loop_t* loop);
/* Stamp provider_entry_time before blocking in the event provider. */
void uv__metrics_set_provider_entry_time(uv_loop_t* loop);

struct uv__loop_internal_fields_s {
  unsigned int flags;
  uv__loop_metrics_t loop_metrics;
};
#endif /* UV_COMMON_H_ */

View File

@ -222,6 +222,7 @@ static void uv_init(void) {
int uv_loop_init(uv_loop_t* loop) {
uv__loop_internal_fields_t* lfields;
struct heap* timer_heap;
int err;
@ -238,6 +239,10 @@ int uv_loop_init(uv_loop_t* loop) {
return UV_ENOMEM;
loop->internal_fields = lfields;
err = uv_mutex_init(&lfields->loop_metrics.lock);
if (err)
goto fail_metrics_mutex_init;
/* To prevent uninitialized memory access, loop->time must be initialized
* to zero before calling uv_update_time for the first time.
*/
@ -302,6 +307,9 @@ fail_mutex_init:
loop->timer_heap = NULL;
fail_timers_alloc:
uv_mutex_destroy(&lfields->loop_metrics.lock);
fail_metrics_mutex_init:
uv__free(lfields);
loop->internal_fields = NULL;
CloseHandle(loop->iocp);
@ -324,6 +332,7 @@ void uv__once_init(void) {
void uv__loop_close(uv_loop_t* loop) {
uv__loop_internal_fields_t* lfields;
size_t i;
uv__loops_remove(loop);
@ -355,6 +364,7 @@ void uv__loop_close(uv_loop_t* loop) {
loop->timer_heap = NULL;
lfields = uv__get_internal_fields(loop);
uv_mutex_destroy(&lfields->loop_metrics.lock);
uv__free(lfields);
loop->internal_fields = NULL;
@ -363,6 +373,14 @@ void uv__loop_close(uv_loop_t* loop) {
int uv__loop_configure(uv_loop_t* loop, uv_loop_option option, va_list ap) {
uv__loop_internal_fields_t* lfields;
lfields = uv__get_internal_fields(loop);
if (option == UV_METRICS_IDLE_TIME) {
lfields->flags |= UV_METRICS_IDLE_TIME;
return 0;
}
return UV_ENOSYS;
}
@ -404,16 +422,44 @@ static void uv__poll_wine(uv_loop_t* loop, DWORD timeout) {
uv_req_t* req;
int repeat;
uint64_t timeout_time;
uint64_t user_timeout;
int reset_timeout;
timeout_time = loop->time + timeout;
if (uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME) {
reset_timeout = 1;
user_timeout = timeout;
timeout = 0;
} else {
reset_timeout = 0;
}
for (repeat = 0; ; repeat++) {
/* Only need to set the provider_entry_time if timeout != 0. The function
* will return early if the loop isn't configured with UV_METRICS_IDLE_TIME.
*/
if (timeout != 0)
uv__metrics_set_provider_entry_time(loop);
GetQueuedCompletionStatus(loop->iocp,
&bytes,
&key,
&overlapped,
timeout);
if (reset_timeout != 0) {
timeout = user_timeout;
reset_timeout = 0;
}
/* Placed here because on success the loop will break whether there is an
* empty package or not, or if GetQueuedCompletionStatus returned early then
* the timeout will be updated and the loop will run again. In either case
* the idle time will need to be updated.
*/
uv__metrics_update_idle_time(loop);
if (overlapped) {
/* Package was dequeued */
req = uv_overlapped_to_req(overlapped);
@ -456,10 +502,26 @@ static void uv__poll(uv_loop_t* loop, DWORD timeout) {
ULONG i;
int repeat;
uint64_t timeout_time;
uint64_t user_timeout;
int reset_timeout;
timeout_time = loop->time + timeout;
if (uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME) {
reset_timeout = 1;
user_timeout = timeout;
timeout = 0;
} else {
reset_timeout = 0;
}
for (repeat = 0; ; repeat++) {
/* Only need to set the provider_entry_time if timeout != 0. The function
* will return early if the loop isn't configured with UV_METRICS_IDLE_TIME.
*/
if (timeout != 0)
uv__metrics_set_provider_entry_time(loop);
success = pGetQueuedCompletionStatusEx(loop->iocp,
overlappeds,
ARRAY_SIZE(overlappeds),
@ -467,6 +529,18 @@ static void uv__poll(uv_loop_t* loop, DWORD timeout) {
timeout,
FALSE);
if (reset_timeout != 0) {
timeout = user_timeout;
reset_timeout = 0;
}
/* Placed here because on success the loop will break whether there is an
* empty package or not, or if GetQueuedCompletionStatus returned early then
* the timeout will be updated and the loop will run again. In either case
* the idle time will need to be updated.
*/
uv__metrics_update_idle_time(loop);
if (success) {
for (i = 0; i < count; i++) {
/* Package was dequeued, but see if it is not a empty package
@ -545,6 +619,12 @@ int uv_run(uv_loop_t *loop, uv_run_mode mode) {
else
uv__poll_wine(loop, timeout);
/* Run one final update on the provider_idle_time in case uv__poll*
* returned because the timeout expired, but no events were received. This
* call will be ignored if the provider_entry_time was either never set (if
* the timeout == 0) or was already updated b/c an event was received.
*/
uv__metrics_update_idle_time(loop);
uv_check_invoke(loop);
uv_process_endgames(loop);

View File

@ -524,6 +524,10 @@ TEST_DECLARE (idna_toascii)
TEST_DECLARE (utf8_decode1)
TEST_DECLARE (uname)
TEST_DECLARE (metrics_idle_time)
TEST_DECLARE (metrics_idle_time_thread)
TEST_DECLARE (metrics_idle_time_zero)
TASK_LIST_START
TEST_ENTRY_CUSTOM (platform_output, 0, 1, 5000)
@ -1116,6 +1120,10 @@ TASK_LIST_START
TEST_ENTRY (not_readable_on_eof)
TEST_HELPER (not_readable_on_eof, tcp4_echo_server)
TEST_ENTRY (metrics_idle_time)
TEST_ENTRY (metrics_idle_time_thread)
TEST_ENTRY (metrics_idle_time_zero)
#if 0
/* These are for testing the test runner. */
TEST_ENTRY (fail_always)

114
test/test-metrics.c Normal file
View File

@ -0,0 +1,114 @@
#include "uv.h"
#include "task.h"
#include <string.h> /* memset */
#define NS_MS 1000000
/* Timer callback that bumps the counter pointed to by handle->data and then
 * busy-waits, keeping the loop out of the idle state while it runs.
 */
static void timer_spin_cb(uv_timer_t* handle) {
  uint64_t t;

  (*(int*) handle->data)++;
  t = uv_hrtime();
  /* Spin for 600 ms to spin loop time out of the delta check. */
  while (uv_hrtime() - t < 600 * NS_MS) { }
}
/* Verify that a loop configured with UV_METRICS_IDLE_TIME reports an idle
 * time close to the timer's timeout: the loop idles in the provider until
 * the timer fires, then spins (non-idle) in timer_spin_cb.
 */
TEST_IMPL(metrics_idle_time) {
  const uint64_t timeout = 1000;
  uint64_t measured_idle;
  uv_timer_t timer;
  int spin_count;

  spin_count = 0;
  timer.data = &spin_count;

  ASSERT_EQ(0, uv_loop_configure(uv_default_loop(), UV_METRICS_IDLE_TIME));
  ASSERT_EQ(0, uv_timer_init(uv_default_loop(), &timer));
  ASSERT_EQ(0, uv_timer_start(&timer, timer_spin_cb, timeout, 0));
  ASSERT_EQ(0, uv_run(uv_default_loop(), UV_RUN_DEFAULT));

  /* The spin callback must have fired at least once. */
  ASSERT_GT(spin_count, 0);

  measured_idle = uv_metrics_idle_time(uv_default_loop());

  /* Permissive check that the idle time matches within the timeout ±500 ms. */
  ASSERT((measured_idle >= (timeout - 500) * NS_MS) &&
         (measured_idle <= (timeout + 500) * NS_MS));

  MAKE_VALGRIND_HAPPY();
  return 0;
}
/* Thread entry point: run a private loop with idle-time metrics enabled and
 * check the measured idle time against a lower bound only, since thread
 * contention can legitimately extend the idle phase.
 */
static void metrics_routine_cb(void* arg) {
  const uint64_t timeout = 1000;
  uint64_t measured_idle;
  uv_loop_t loop;
  uv_timer_t timer;
  int spin_count;

  spin_count = 0;
  timer.data = &spin_count;

  ASSERT_EQ(0, uv_loop_init(&loop));
  ASSERT_EQ(0, uv_loop_configure(&loop, UV_METRICS_IDLE_TIME));
  ASSERT_EQ(0, uv_timer_init(&loop, &timer));
  ASSERT_EQ(0, uv_timer_start(&timer, timer_spin_cb, timeout, 0));
  ASSERT_EQ(0, uv_run(&loop, UV_RUN_DEFAULT));
  ASSERT_GT(spin_count, 0);

  measured_idle = uv_metrics_idle_time(&loop);

  /* Only checking that idle time is greater than the lower bound since there
   * may have been thread contention, causing the event loop to be delayed in
   * the idle phase longer than expected.
   */
  ASSERT_GE(measured_idle, (timeout - 500) * NS_MS);

  close_loop(&loop);
  ASSERT_EQ(0, uv_loop_close(&loop));
}
/* Run metrics_routine_cb on several threads concurrently to exercise the
 * thread safety of uv_metrics_idle_time() against per-thread loops.
 */
TEST_IMPL(metrics_idle_time_thread) {
  uv_thread_t threads[5];
  int i;

  for (i = 0; i < 5; i++) {
    ASSERT_EQ(0, uv_thread_create(&threads[i], metrics_routine_cb, NULL));
  }

  for (i = 0; i < 5; i++) {
    /* Check the join result too; the original silently ignored it, unlike
     * every other libuv call in this file. uv_thread_join returns 0 on
     * success.
     */
    ASSERT_EQ(0, uv_thread_join(&threads[i]));
  }

  return 0;
}
/* Timer callback that only increments the counter in handle->data; it does
 * no work, so the loop never accumulates idle time with a zero timeout.
 */
static void timer_noop_cb(uv_timer_t* handle) {
  int* invocations = handle->data;
  *invocations += 1;
}
/* A timer that fires immediately (timeout 0) means the provider is always
 * entered with work pending, so the reported idle time must stay zero.
 */
TEST_IMPL(metrics_idle_time_zero) {
  uv_timer_t timer;
  int fire_count;

  fire_count = 0;
  timer.data = &fire_count;

  ASSERT_EQ(0, uv_loop_configure(uv_default_loop(), UV_METRICS_IDLE_TIME));
  ASSERT_EQ(0, uv_timer_init(uv_default_loop(), &timer));
  ASSERT_EQ(0, uv_timer_start(&timer, timer_noop_cb, 0, 0));
  ASSERT_EQ(0, uv_run(uv_default_loop(), UV_RUN_DEFAULT));

  ASSERT_GT(fire_count, 0);
  ASSERT_EQ(0, uv_metrics_idle_time(uv_default_loop()));

  MAKE_VALGRIND_HAPPY();
  return 0;
}