[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[Qemu-devel] [PATCH 14/16] aio-posix: partially inline aio_dispatch into aio_poll
From: Paolo Bonzini
Subject: [Qemu-devel] [PATCH 14/16] aio-posix: partially inline aio_dispatch into aio_poll
Date: Fri, 13 Jan 2017 14:17:29 +0100
This patch prepares for the removal of unnecessary lockcnt inc/dec pairs.
Extract the dispatching loop for file descriptor handlers into a new
function aio_dispatch_handlers, and then inline aio_dispatch into
aio_poll.
aio_dispatch can now become void.
Signed-off-by: Paolo Bonzini <address@hidden>
---
aio-posix.c | 44 ++++++++++++++------------------------------
aio-win32.c | 13 ++++---------
async.c | 2 +-
include/block/aio.h | 6 +-----
4 files changed, 20 insertions(+), 45 deletions(-)
diff --git a/aio-posix.c b/aio-posix.c
index 6beebcd..51e92b8 100644
--- a/aio-posix.c
+++ b/aio-posix.c
@@ -386,12 +386,6 @@ static bool aio_dispatch_handlers(AioContext *ctx)
AioHandler *node, *tmp;
bool progress = false;
- /*
- * We have to walk very carefully in case aio_set_fd_handler is
- * called while we're walking.
- */
- qemu_lockcnt_inc(&ctx->list_lock);
-
QLIST_FOREACH_SAFE_RCU(node, &ctx->aio_handlers, node, tmp) {
int revents;
@@ -426,33 +420,18 @@ static bool aio_dispatch_handlers(AioContext *ctx)
}
}
- qemu_lockcnt_dec(&ctx->list_lock);
return progress;
}
-/*
- * Note that dispatch_fds == false has the side-effect of post-poning the
- * freeing of deleted handlers.
- */
-bool aio_dispatch(AioContext *ctx, bool dispatch_fds)
+void aio_dispatch(AioContext *ctx)
{
- bool progress;
-
- /*
- * If there are callbacks left that have been queued, we need to call them.
- * Do not call select in this case, because it is possible that the caller
- * does not need a complete flush (as is the case for aio_poll loops).
- */
- progress = aio_bh_poll(ctx);
+ aio_bh_poll(ctx);
- if (dispatch_fds) {
- progress |= aio_dispatch_handlers(ctx);
- }
-
- /* Run our timers */
- progress |= timerlistgroup_run_timers(&ctx->tlg);
+ qemu_lockcnt_inc(&ctx->list_lock);
+ aio_dispatch_handlers(ctx);
+ qemu_lockcnt_dec(&ctx->list_lock);
- return progress;
+ timerlistgroup_run_timers(&ctx->tlg);
}
/* These thread-local variables are used only in a small part of aio_poll
@@ -701,11 +680,16 @@ bool aio_poll(AioContext *ctx, bool blocking)
npfd = 0;
qemu_lockcnt_dec(&ctx->list_lock);
- /* Run dispatch even if there were no readable fds to run timers */
- if (aio_dispatch(ctx, ret > 0)) {
- progress = true;
+ progress |= aio_bh_poll(ctx);
+
+ if (ret > 0) {
+ qemu_lockcnt_inc(&ctx->list_lock);
+ progress |= aio_dispatch_handlers(ctx);
+ qemu_lockcnt_dec(&ctx->list_lock);
}
+ progress |= timerlistgroup_run_timers(&ctx->tlg);
+
return progress;
}
diff --git a/aio-win32.c b/aio-win32.c
index 20b63ce..442a179 100644
--- a/aio-win32.c
+++ b/aio-win32.c
@@ -309,16 +309,11 @@ static bool aio_dispatch_handlers(AioContext *ctx, HANDLE event)
return progress;
}
-bool aio_dispatch(AioContext *ctx, bool dispatch_fds)
+void aio_dispatch(AioContext *ctx)
{
- bool progress;
-
- progress = aio_bh_poll(ctx);
- if (dispatch_fds) {
- progress |= aio_dispatch_handlers(ctx, INVALID_HANDLE_VALUE);
- }
- progress |= timerlistgroup_run_timers(&ctx->tlg);
- return progress;
+ aio_bh_poll(ctx);
+ aio_dispatch_handlers(ctx, INVALID_HANDLE_VALUE);
+ timerlistgroup_run_timers(&ctx->tlg);
}
bool aio_poll(AioContext *ctx, bool blocking)
diff --git a/async.c b/async.c
index 0243ca9..1839aa5 100644
--- a/async.c
+++ b/async.c
@@ -257,7 +257,7 @@ aio_ctx_dispatch(GSource *source,
AioContext *ctx = (AioContext *) source;
assert(callback == NULL);
- aio_dispatch(ctx, true);
+ aio_dispatch(ctx);
return true;
}
diff --git a/include/block/aio.h b/include/block/aio.h
index 614cbc6..677b6ff 100644
--- a/include/block/aio.h
+++ b/include/block/aio.h
@@ -310,12 +310,8 @@ bool aio_pending(AioContext *ctx);
/* Dispatch any pending callbacks from the GSource attached to the AioContext.
*
* This is used internally in the implementation of the GSource.
- *
- * @dispatch_fds: true to process fds, false to skip them
- * (can be used as an optimization by callers that know there
- * are no fds ready)
*/
-bool aio_dispatch(AioContext *ctx, bool dispatch_fds);
+void aio_dispatch(AioContext *ctx);
/* Progress in completing AIO work to occur. This can issue new pending
* aio as a result of executing I/O completion or bh callbacks.
--
2.9.3
- Re: [Qemu-devel] [PATCH 10/16] block: explicitly acquire aiocontext in timers that need it, (continued)
[Qemu-devel] [PATCH 12/16] block: explicitly acquire aiocontext in bottom halves that need it, Paolo Bonzini, 2017/01/13
[Qemu-devel] [PATCH 11/16] block: explicitly acquire aiocontext in callbacks that need it, Paolo Bonzini, 2017/01/13
[Qemu-devel] [PATCH 14/16] aio-posix: partially inline aio_dispatch into aio_poll,
Paolo Bonzini <=
Re: [Qemu-devel] [PATCH 00/16] aio_context_acquire/release pushdown, part 2, Fam Zheng, 2017/01/16
Re: [Qemu-devel] [PATCH 00/16] aio_context_acquire/release pushdown, part 2, Stefan Hajnoczi, 2017/01/18
Re: [Qemu-devel] [PATCH 00/16] aio_context_acquire/release pushdown, part 2, Stefan Hajnoczi, 2017/01/18