From: Stefan Hajnoczi
Subject: [Qemu-devel] [PATCH for-2.4 2/2] AioContext: force event loop iteration using BH
Date: Mon, 27 Jul 2015 17:33:39 +0100

The notify_me optimization introduced in commit eabc97797310
("AioContext: fix broken ctx->dispatching optimization") skips
event_notifier_set() calls when the event loop thread is not blocked in
ppoll(2).
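
For reference, here is a condensed sketch of the aio_notify() fast path
after that commit (paraphrased from async.c; not verbatim):

  void aio_notify(AioContext *ctx)
  {
      /* Write e.g. bh->scheduled before reading ctx->notify_me.
       * Pairs with the atomic_add in aio_poll().
       */
      smp_mb();
      if (ctx->notify_me) {
          event_notifier_set(&ctx->notifier);
          atomic_mb_set(&ctx->notified, true);
      }
  }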

This optimization causes a deadlock if two aio_context_acquire() calls
race.  notify_me is 0 during the race, so the winning thread can enter
ppoll(2) unaware that the other thread is waiting its turn to acquire
the AioContext.
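
Roughly, the lost wakeup looks like this (illustrative timeline, not
verbatim code):

  thread A                          thread B
  --------                          --------
  aio_context_acquire(ctx)
                                    aio_context_acquire(ctx) blocks;
                                    rfifolock contention callback runs:
                                      aio_notify(ctx) sees notify_me == 0
                                      and skips event_notifier_set()
  aio_poll(ctx, blocking=true)
    raises notify_me
    ppoll(2) blocks forever         still waiting for the lock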

This patch forces ppoll(2) to return by scheduling a BH instead of
calling aio_notify().
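
A scheduled non-idle BH works where aio_notify() does not because
aio_compute_timeout() returns 0 whenever such a BH is pending, so the
event loop cannot block across the race window.  A condensed sketch of
that check (paraphrased from async.c; timer deadlines elided):

  int64_t aio_compute_timeout(AioContext *ctx)
  {
      int64_t timeout = -1;
      QEMUBH *bh;

      for (bh = ctx->first_bh; bh; bh = bh->next) {
          if (!bh->deleted && bh->scheduled) {
              if (bh->idle) {
                  /* idle BHs only need polling every 10 ms */
                  timeout = 10000000;
              } else {
                  /* pending non-idle BH: do not block at all */
                  return 0;
              }
          }
      }

      /* the real code folds in the timer list deadline here */
      return timeout;
  }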

The following deadlock with virtio-blk dataplane is fixed:

  qemu ... -object iothread,id=iothread0 \
           -drive if=none,id=drive0,file=test.img,... \
           -device virtio-blk-pci,iothread=iothread0,drive=drive0

Without this patch, this command line hangs early on.

Thanks to Paolo Bonzini <address@hidden> for investigating this bug
with me.

Cc: Christian Borntraeger <address@hidden>
Cc: Cornelia Huck <address@hidden>
Cc: Paolo Bonzini <address@hidden>
Signed-off-by: Stefan Hajnoczi <address@hidden>
---
 async.c             | 17 +++++++++++++++--
 include/block/aio.h |  3 +++
 2 files changed, 18 insertions(+), 2 deletions(-)

diff --git a/async.c b/async.c
index 9fab4c6..9ca7095 100644
--- a/async.c
+++ b/async.c
@@ -79,8 +79,10 @@ int aio_bh_poll(AioContext *ctx)
          * aio_notify again if necessary.
          */
         if (!bh->deleted && atomic_xchg(&bh->scheduled, 0)) {
-            if (!bh->idle)
+            /* Idle BHs and the notify BH don't count as progress */
+            if (!bh->idle && bh != ctx->notify_dummy_bh) {
                 ret = 1;
+            }
             bh->idle = 0;
             bh->cb(bh->opaque);
         }
@@ -230,6 +232,8 @@ aio_ctx_finalize(GSource     *source)
 {
     AioContext *ctx = (AioContext *) source;
 
+    qemu_bh_delete(ctx->notify_dummy_bh);
+
     qemu_mutex_lock(&ctx->bh_lock);
     while (ctx->first_bh) {
         QEMUBH *next = ctx->first_bh->next;
@@ -297,8 +301,15 @@ static void aio_timerlist_notify(void *opaque)
 
 static void aio_rfifolock_cb(void *opaque)
 {
+    AioContext *ctx = opaque;
+
     /* Kick owner thread in case they are blocked in aio_poll() */
-    aio_notify(opaque);
+    qemu_bh_schedule(ctx->notify_dummy_bh);
+}
+
+static void notify_dummy_bh(void *opaque)
+{
+    /* Do nothing, we were invoked just to force the event loop to iterate */
 }
 
 static void event_notifier_dummy_cb(EventNotifier *e)
@@ -325,6 +336,8 @@ AioContext *aio_context_new(Error **errp)
     rfifolock_init(&ctx->lock, aio_rfifolock_cb, ctx);
     timerlistgroup_init(&ctx->tlg, aio_timerlist_notify, ctx);
 
+    ctx->notify_dummy_bh = aio_bh_new(ctx, notify_dummy_bh, NULL);
+
     return ctx;
 }
 
diff --git a/include/block/aio.h b/include/block/aio.h
index 9dd32e0..400b1b0 100644
--- a/include/block/aio.h
+++ b/include/block/aio.h
@@ -114,6 +114,9 @@ struct AioContext {
     bool notified;
     EventNotifier notifier;
 
+    /* Scheduling this BH forces the event loop to iterate */
+    QEMUBH *notify_dummy_bh;
+
     /* Thread pool for performing work and receiving completion callbacks */
     struct ThreadPool *thread_pool;
 
-- 
2.4.3