From: Fam Zheng
Subject: Re: [Qemu-block] [PATCH 2/3] aio-posix: compute timeout before polling
Date: Thu, 13 Sep 2018 15:16:15 +0800
User-agent: Mutt/1.10.1 (2018-07-13)

On Wed, 09/12 19:10, Paolo Bonzini wrote:
> This is a preparation for the next patch, and also a very small
> optimization. Compute the timeout only once, before invoking
> try_poll_mode, and adjust it in run_poll_handlers. The adjustment
> is the polling time when polling fails, or zero (non-blocking) if
> polling succeeds.
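
The scheme is easy to restate outside QEMU.  In the sketch below every name
(now_ns, poll_handlers_once, busy_poll) is invented for illustration; it only
mirrors what the commit message describes: the caller computes the timeout
once, a successful poll forces it to zero, and an unsuccessful poll is charged
against it, so the eventual blocking wait keeps the original deadline.

#include <stdbool.h>
#include <stdint.h>
#include <time.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

static int64_t now_ns(void)
{
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return ts.tv_sec * INT64_C(1000000000) + ts.tv_nsec;
}

/* Stand-in for one pass over the io_poll() handlers; true means progress. */
static bool poll_handlers_once(int64_t *timeout)
{
    bool progress = false;    /* no real handlers in this sketch */

    if (progress) {
        *timeout = 0;         /* success makes the later wait non-blocking */
    }
    return progress;
}

/*
 * Busy-poll for at most max_ns.  On failure the elapsed time is subtracted
 * from *timeout so the deadline chosen by the caller is preserved;
 * *timeout == -1 means "wait forever" and is left alone.
 */
bool busy_poll(int64_t max_ns, int64_t *timeout)
{
    int64_t start = now_ns(), elapsed;
    bool progress;

    do {
        progress = poll_handlers_once(timeout);
        elapsed = now_ns() - start;
    } while (!progress && elapsed < max_ns);

    if (*timeout != -1) {
        *timeout -= MIN(*timeout, elapsed);
    }
    return progress;
}

A non-blocking caller simply passes timeout == 0: the polling budget collapses
to zero, busy polling is skipped, and one pass over the handlers is enough to
avoid ppoll(2)/epoll_wait(2), which is what the new
if (timeout || atomic_read(&ctx->poll_disable_cnt)) check in aio_poll() below
expresses.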
>
> Fixes: 70232b5253a3c4e03ed1ac47ef9246a8ac66c6fa
> Signed-off-by: Paolo Bonzini <address@hidden>
> ---
> util/aio-posix.c | 59 +++++++++++++++++++++++++++--------------------
> util/trace-events | 4 ++--
> 2 files changed, 36 insertions(+), 27 deletions(-)
>
> diff --git a/util/aio-posix.c b/util/aio-posix.c
> index 5c29380575..21d8987179 100644
> --- a/util/aio-posix.c
> +++ b/util/aio-posix.c
> @@ -490,7 +490,7 @@ static void add_pollfd(AioHandler *node)
> npfd++;
> }
>
> -static bool run_poll_handlers_once(AioContext *ctx)
> +static bool run_poll_handlers_once(AioContext *ctx, int64_t *timeout)
> {
> bool progress = false;
> AioHandler *node;
> @@ -500,6 +500,7 @@ static bool run_poll_handlers_once(AioContext *ctx)
> aio_node_check(ctx, node->is_external) &&
> node->io_poll(node->opaque) &&
> node->opaque != &ctx->notifier) {
> + *timeout = 0;
> progress = true;
> }
>
> @@ -522,31 +523,38 @@ static bool run_poll_handlers_once(AioContext *ctx)
> *
> * Returns: true if progress was made, false otherwise
> */
> -static bool run_poll_handlers(AioContext *ctx, int64_t max_ns)
> +static bool run_poll_handlers(AioContext *ctx, int64_t max_ns, int64_t *timeout)
> {
> bool progress;
> - int64_t end_time;
> + int64_t start_time, elapsed_time;
>
> assert(ctx->notify_me);
> assert(qemu_lockcnt_count(&ctx->list_lock) > 0);
>
> - trace_run_poll_handlers_begin(ctx, max_ns);
> -
> - end_time = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + max_ns;
> + trace_run_poll_handlers_begin(ctx, max_ns, *timeout);
>
> + start_time = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
> do {
> - progress = run_poll_handlers_once(ctx);
> - } while (!progress && qemu_clock_get_ns(QEMU_CLOCK_REALTIME) < end_time
> + progress = run_poll_handlers_once(ctx, timeout);
> + elapsed_time = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - start_time;
> + } while (!progress && elapsed_time < max_ns
> && !atomic_read(&ctx->poll_disable_cnt));
>
> - trace_run_poll_handlers_end(ctx, progress);
> + /* If time has passed with no successful polling, adjust *timeout to
> + * keep the same ending time.
> + */
> + if (*timeout != -1) {
> + *timeout -= MIN(*timeout, elapsed_time);
> + }
>
> + trace_run_poll_handlers_end(ctx, progress, *timeout);
> return progress;
> }
>
> /* try_poll_mode:
> * @ctx: the AioContext
> - * @blocking: busy polling is only attempted when blocking is true
> + * @timeout: timeout for blocking wait, computed by the caller and updated if
> + * polling succeeds.
> *
> * ctx->notify_me must be non-zero so this function can detect aio_notify().
> *
> @@ -554,19 +562,16 @@ static bool run_poll_handlers(AioContext *ctx, int64_t max_ns)
> *
> * Returns: true if progress was made, false otherwise
> */
> -static bool try_poll_mode(AioContext *ctx, bool blocking)
> +static bool try_poll_mode(AioContext *ctx, int64_t *timeout)
> {
> - if (blocking && ctx->poll_max_ns && !atomic_read(&ctx->poll_disable_cnt)) {
> - /* See qemu_soonest_timeout() uint64_t hack */
> - int64_t max_ns = MIN((uint64_t)aio_compute_timeout(ctx),
> - (uint64_t)ctx->poll_ns);
> + /* See qemu_soonest_timeout() uint64_t hack */
> + int64_t max_ns = MIN((uint64_t)*timeout, (uint64_t)ctx->poll_ns);
>
> - if (max_ns) {
> - poll_set_started(ctx, true);
> + if (max_ns && !atomic_read(&ctx->poll_disable_cnt)) {
> + poll_set_started(ctx, true);
>
> - if (run_poll_handlers(ctx, max_ns)) {
> - return true;
> - }
> + if (run_poll_handlers(ctx, max_ns, timeout)) {
> + return true;
> }
> }
>
> @@ -575,7 +580,7 @@ static bool try_poll_mode(AioContext *ctx, bool blocking)
> /* Even if we don't run busy polling, try polling once in case it can make
> * progress and the caller will be able to avoid ppoll(2)/epoll_wait(2).
> */
> - return run_poll_handlers_once(ctx);
> + return run_poll_handlers_once(ctx, timeout);
> }
>
> bool aio_poll(AioContext *ctx, bool blocking)
> @@ -605,8 +610,14 @@ bool aio_poll(AioContext *ctx, bool blocking)
> start = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
> }
>
> - progress = try_poll_mode(ctx, blocking);
> - if (!progress) {
> + timeout = blocking ? aio_compute_timeout(ctx) : 0;
> + progress = try_poll_mode(ctx, &timeout);
> + assert(!(timeout && progress));
> +
> + /* If polling is allowed, non-blocking aio_poll does not need the
> + * system call---a single round of run_poll_handlers_once suffices.
> + */
> + if (timeout || atomic_read(&ctx->poll_disable_cnt)) {
> assert(npfd == 0);
>
> /* fill pollfds */
> @@ -620,8 +631,6 @@ bool aio_poll(AioContext *ctx, bool blocking)
> }
> }
>
> - timeout = blocking ? aio_compute_timeout(ctx) : 0;
> -
> /* wait until next event */
> if (aio_epoll_check_poll(ctx, pollfds, npfd, timeout)) {
> AioHandler epoll_handler;
> diff --git a/util/trace-events b/util/trace-events
> index 4822434c89..79569b7fdf 100644
> --- a/util/trace-events
> +++ b/util/trace-events
> @@ -1,8 +1,8 @@
> # See docs/devel/tracing.txt for syntax documentation.
>
> # util/aio-posix.c
> -run_poll_handlers_begin(void *ctx, int64_t max_ns) "ctx %p max_ns %"PRId64
> -run_poll_handlers_end(void *ctx, bool progress) "ctx %p progress %d"
> +run_poll_handlers_begin(void *ctx, int64_t max_ns, int64_t timeout) "ctx %p max_ns %"PRId64 " timeout %"PRId64
> +run_poll_handlers_end(void *ctx, bool progress, int64_t timeout) "ctx %p progress %d new timeout %"PRId64
> poll_shrink(void *ctx, int64_t old, int64_t new) "ctx %p old %"PRId64" new %"PRId64
> poll_grow(void *ctx, int64_t old, int64_t new) "ctx %p old %"PRId64" new %"PRId64
>
> --
> 2.17.1
>
>
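For completeness, the caller side of the same idea, again with invented names:
blocking_wait() stands in for the ppoll(2)/epoll_wait(2) path, busy_poll() is
stubbed out the way it was sketched earlier in this message, and
aio_poll_sketch() is only the shape that aio_poll() takes after this patch,
not the real function.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

/* Trivial stand-in for the busy_poll() sketched earlier in this message. */
static bool busy_poll(int64_t max_ns, int64_t *timeout)
{
    (void)max_ns;
    (void)timeout;
    return false;
}

/* Stand-in for the ppoll(2)/epoll_wait(2) path; true means an fd fired. */
static bool blocking_wait(int64_t timeout)
{
    (void)timeout;
    return false;
}

bool aio_poll_sketch(bool blocking, bool poll_disabled,
                     int64_t deadline_ns, int64_t poll_ns)
{
    /* Compute the timeout exactly once, before any polling. */
    int64_t timeout = blocking ? deadline_ns : 0;
    /* Same trick as qemu_soonest_timeout(): -1 ("wait forever") sorts last. */
    int64_t max_ns = MIN((uint64_t)timeout, (uint64_t)poll_ns);
    bool progress = false;

    if (max_ns && !poll_disabled) {
        progress = busy_poll(max_ns, &timeout);   /* may zero the timeout */
    }

    /* Polling progress implies the timeout was forced to zero. */
    assert(!(timeout && progress));

    /* The system call is needed only if there is still time to wait, or if
     * polling is disabled and the file descriptors must be consulted anyway.
     */
    if (timeout || poll_disabled) {
        progress |= blocking_wait(timeout);
    }
    return progress;
}
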
Reviewed-by: Fam Zheng <address@hidden>