qemu-block
[Top][All Lists]
Advanced

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Qemu-block] [PATCH v2] block: make BDRV_POLL_WHILE() re-entrancy safe


From: Stefan Hajnoczi
Subject: [Qemu-block] [PATCH v2] block: make BDRV_POLL_WHILE() re-entrancy safe
Date: Wed, 7 Mar 2018 12:46:19 +0000

Nested BDRV_POLL_WHILE() calls can occur.  Currently
assert(!wait_->need_kick) fails in AIO_WAIT_WHILE() when this happens.

This patch converts the bool wait_->need_kick flag to an unsigned
wait_->num_waiters counter.

Nesting works correctly because outer AIO_WAIT_WHILE() callers evaluate
the condition again after the inner caller completes (invoking the inner
caller counts as aio_poll() progress).

Reported-by: "fuweiwei (C)" <address@hidden>
Cc: Paolo Bonzini <address@hidden>
Signed-off-by: Stefan Hajnoczi <address@hidden>
---
v2:
 * Rebase onto qemu.git/master now that AIO_WAIT_WHILE() has landed
   [Kevin]

 include/block/aio-wait.h | 61 ++++++++++++++++++++++++------------------------
 util/aio-wait.c          |  2 +-
 2 files changed, 31 insertions(+), 32 deletions(-)

diff --git a/include/block/aio-wait.h b/include/block/aio-wait.h
index a48c744fa8..74cde07bef 100644
--- a/include/block/aio-wait.h
+++ b/include/block/aio-wait.h
@@ -50,8 +50,8 @@
  *   }
  */
 typedef struct {
-    /* Is the main loop waiting for a kick?  Accessed with atomic ops. */
-    bool need_kick;
+    /* Number of waiting AIO_WAIT_WHILE() callers. Accessed with atomic ops. */
+    unsigned num_waiters;
 } AioWait;
 
 /**
@@ -71,35 +71,34 @@ typedef struct {
  * wait on conditions between two IOThreads since that could lead to deadlock,
  * go via the main loop instead.
  */
-#define AIO_WAIT_WHILE(wait, ctx, cond) ({                  \
-    bool waited_ = false;                                   \
-    bool busy_ = true;                                      \
-    AioWait *wait_ = (wait);                                \
-    AioContext *ctx_ = (ctx);                               \
-    if (in_aio_context_home_thread(ctx_)) {                 \
-        while ((cond) || busy_) {                           \
-            busy_ = aio_poll(ctx_, (cond));                 \
-            waited_ |= !!(cond) | busy_;                    \
-        }                                                   \
-    } else {                                                \
-        assert(qemu_get_current_aio_context() ==            \
-               qemu_get_aio_context());                     \
-        assert(!wait_->need_kick);                          \
-        /* Set wait_->need_kick before evaluating cond.  */ \
-        atomic_mb_set(&wait_->need_kick, true);             \
-        while (busy_) {                                     \
-            if ((cond)) {                                   \
-                waited_ = busy_ = true;                     \
-                aio_context_release(ctx_);                  \
-                aio_poll(qemu_get_aio_context(), true);     \
-                aio_context_acquire(ctx_);                  \
-            } else {                                        \
-                busy_ = aio_poll(ctx_, false);              \
-                waited_ |= busy_;                           \
-            }                                               \
-        }                                                   \
-        atomic_set(&wait_->need_kick, false);               \
-    }                                                       \
+#define AIO_WAIT_WHILE(wait, ctx, cond) ({                         \
+    bool waited_ = false;                                          \
+    bool busy_ = true;                                             \
+    AioWait *wait_ = (wait);                                       \
+    AioContext *ctx_ = (ctx);                                      \
+    if (in_aio_context_home_thread(ctx_)) {                        \
+        while ((cond) || busy_) {                                  \
+            busy_ = aio_poll(ctx_, (cond));                        \
+            waited_ |= !!(cond) | busy_;                           \
+        }                                                          \
+    } else {                                                       \
+        assert(qemu_get_current_aio_context() ==                   \
+               qemu_get_aio_context());                            \
+        /* Increment wait_->num_waiters before evaluating cond. */ \
+        atomic_inc(&wait_->num_waiters);                           \
+        while (busy_) {                                            \
+            if ((cond)) {                                          \
+                waited_ = busy_ = true;                            \
+                aio_context_release(ctx_);                         \
+                aio_poll(qemu_get_aio_context(), true);            \
+                aio_context_acquire(ctx_);                         \
+            } else {                                               \
+                busy_ = aio_poll(ctx_, false);                     \
+                waited_ |= busy_;                                  \
+            }                                                      \
+        }                                                          \
+        atomic_dec(&wait_->num_waiters);                           \
+    }                                                              \
     waited_; })
 
 /**
diff --git a/util/aio-wait.c b/util/aio-wait.c
index a487cdb852..be85a32273 100644
--- a/util/aio-wait.c
+++ b/util/aio-wait.c
@@ -34,7 +34,7 @@ static void dummy_bh_cb(void *opaque)
 void aio_wait_kick(AioWait *wait)
 {
     /* The barrier (or an atomic op) is in the caller.  */
-    if (atomic_read(&wait->need_kick)) {
+    if (atomic_read(&wait->num_waiters)) {
         aio_bh_schedule_oneshot(qemu_get_aio_context(), dummy_bh_cb, NULL);
     }
 }
-- 
2.14.3




reply via email to

[Prev in Thread] Current Thread [Next in Thread]