From: Paolo Bonzini
Subject: [Qemu-devel] [PATCH 16/16] aio: push aio_context_acquire/release down to dispatching
Date: Fri, 15 Jan 2016 16:12:19 +0100
The AioContext data structures are now protected by list_lock and/or
they are walked with FOREACH_RCU primitives. There is no longer any need
to acquire the AioContext for the entire duration of aio_dispatch.
Instead, acquire and release it just around the invocation of each callback.
The next step is then to push it further down.
Signed-off-by: Paolo Bonzini <address@hidden>
---
aio-posix.c | 15 ++++++---------
aio-win32.c | 15 +++++++--------
async.c | 2 ++
3 files changed, 15 insertions(+), 17 deletions(-)
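Note for reviewers (illustrative only, not code from this series): the
resulting shape of the dispatch path is that the handler list is walked
under list_lock/RCU, while the AioContext lock is taken only around each
callback. The helper and field names used below (qemu_lockcnt_inc/dec,
QLIST_FOREACH_RCU, ctx->aio_handlers, node->io_read, node->opaque) are
assumed to match the tree this series applies to; the loop body is
simplified and omits the revents checks.

    /* Sketch of a read-side dispatch loop in the style of aio-posix.c.
     * The list walk is protected by list_lock/RCU; the AioContext lock
     * is held only while a callback runs.
     */
    static bool dispatch_sketch(AioContext *ctx)
    {
        AioHandler *node;
        bool progress = false;

        qemu_lockcnt_inc(&ctx->list_lock);      /* protect the list walk */
        QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
            if (!node->deleted && node->io_read) {
                aio_context_acquire(ctx);       /* held only around the callback */
                node->io_read(node->opaque);
                aio_context_release(ctx);
                progress = true;
            }
        }
        qemu_lockcnt_dec(&ctx->list_lock);

        return progress;
    }

With this shape, aio_poll_internal() no longer holds the lock across the
poll/select wait at all; only the callback invocations and
timerlistgroup_run_timers() run with the AioContext acquired.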
diff --git a/aio-posix.c b/aio-posix.c
index e088173..7f290cd 100644
--- a/aio-posix.c
+++ b/aio-posix.c
@@ -333,7 +333,9 @@ bool aio_dispatch(AioContext *ctx)
if (!node->deleted &&
(revents & (G_IO_IN | G_IO_HUP | G_IO_ERR)) &&
node->io_read) {
+ aio_context_acquire(ctx);
node->io_read(node->opaque);
+ aio_context_release(ctx);
/* aio_notify() does not count as progress */
if (node->opaque != &ctx->notifier) {
@@ -343,7 +345,9 @@ bool aio_dispatch(AioContext *ctx)
if (!node->deleted &&
(revents & (G_IO_OUT | G_IO_ERR)) &&
node->io_write) {
+ aio_context_acquire(ctx);
node->io_write(node->opaque);
+ aio_context_release(ctx);
progress = true;
}
@@ -359,7 +363,9 @@ bool aio_dispatch(AioContext *ctx)
qemu_lockcnt_dec(&ctx->list_lock);
/* Run our timers */
+ aio_context_acquire(ctx);
progress |= timerlistgroup_run_timers(&ctx->tlg);
+ aio_context_release(ctx);
return progress;
}
@@ -417,7 +423,6 @@ bool aio_poll_internal(AioContext *ctx, bool blocking)
bool progress;
int64_t timeout;
- aio_context_acquire(ctx);
progress = false;
/* aio_notify can avoid the expensive event_notifier_set if
@@ -446,9 +451,6 @@ bool aio_poll_internal(AioContext *ctx, bool blocking)
timeout = blocking ? aio_compute_timeout(ctx) : 0;
/* wait until next event */
- if (timeout) {
- aio_context_release(ctx);
- }
if (aio_epoll_check_poll(ctx, pollfds, npfd, timeout)) {
AioHandler epoll_handler;
@@ -463,9 +465,6 @@ bool aio_poll_internal(AioContext *ctx, bool blocking)
if (blocking) {
atomic_sub(&ctx->notify_me, 2);
}
- if (timeout) {
- aio_context_acquire(ctx);
- }
aio_notify_accept(ctx);
@@ -484,8 +483,6 @@ bool aio_poll_internal(AioContext *ctx, bool blocking)
progress = true;
}
- aio_context_release(ctx);
-
return progress;
}
diff --git a/aio-win32.c b/aio-win32.c
index 9deaf24..f7c6509 100644
--- a/aio-win32.c
+++ b/aio-win32.c
@@ -244,7 +244,9 @@ static bool aio_dispatch_handlers(AioContext *ctx, HANDLE event)
if (!node->deleted &&
(revents || event_notifier_get_handle(node->e) == event) &&
node->io_notify) {
+ aio_context_acquire(ctx);
node->io_notify(node->e);
+ aio_context_release(ctx);
/* aio_notify() does not count as progress */
if (node->e != &ctx->notifier) {
@@ -255,11 +257,15 @@ static bool aio_dispatch_handlers(AioContext *ctx, HANDLE event)
if (!node->deleted &&
(node->io_read || node->io_write)) {
if ((revents & G_IO_IN) && node->io_read) {
+ aio_context_acquire(ctx);
node->io_read(node->opaque);
+ aio_context_release(ctx);
progress = true;
}
if ((revents & G_IO_OUT) && node->io_write) {
+ aio_context_acquire(ctx);
node->io_write(node->opaque);
+ aio_context_release(ctx);
progress = true;
}
@@ -304,7 +310,6 @@ bool aio_poll_internal(AioContext *ctx, bool blocking)
int count;
int timeout;
- aio_context_acquire(ctx);
progress = false;
/* aio_notify can avoid the expensive event_notifier_set if
@@ -346,17 +351,11 @@ bool aio_poll_internal(AioContext *ctx, bool blocking)
timeout = blocking && !have_select_revents
? qemu_timeout_ns_to_ms(aio_compute_timeout(ctx)) : 0;
- if (timeout) {
- aio_context_release(ctx);
- }
ret = WaitForMultipleObjects(count, events, FALSE, timeout);
if (blocking) {
assert(first);
atomic_sub(&ctx->notify_me, 2);
}
- if (timeout) {
- aio_context_acquire(ctx);
- }
if (first) {
aio_notify_accept(ctx);
@@ -379,8 +378,8 @@ bool aio_poll_internal(AioContext *ctx, bool blocking)
progress |= aio_dispatch_handlers(ctx, event);
} while (count > 0);
+ aio_context_acquire(ctx);
progress |= timerlistgroup_run_timers(&ctx->tlg);
-
aio_context_release(ctx);
return progress;
}
diff --git a/async.c b/async.c
index 7c8a792..64a135d 100644
--- a/async.c
+++ b/async.c
@@ -87,7 +87,9 @@ int aio_bh_poll(AioContext *ctx)
ret = 1;
}
bh->idle = 0;
+ aio_context_acquire(ctx);
aio_bh_call(bh);
+ aio_context_release(ctx);
}
}
--
2.5.0