qemu-block
[Top][All Lists]
Advanced

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Qemu-block] [PATCH 37/40] async: optimize aio_bh_poll


From: Paolo Bonzini
Subject: [Qemu-block] [PATCH 37/40] async: optimize aio_bh_poll
Date: Tue, 24 Nov 2015 19:01:28 +0100

Avoid entering the slow path of qemu_lockcnt_dec_and_lock if
no bottom half has to be deleted.  If a bottom half deletes itself,
it will be picked up on the next traversal of the list, or when the
AioContext itself is finalized.

Signed-off-by: Paolo Bonzini <address@hidden>
---
 async.c | 13 +++++++++++--
 1 file changed, 11 insertions(+), 2 deletions(-)

diff --git a/async.c b/async.c
index 4c1f658..529934c 100644
--- a/async.c
+++ b/async.c
@@ -69,19 +69,24 @@ int aio_bh_poll(AioContext *ctx)
 {
     QEMUBH *bh, **bhp, *next;
     int ret;
+    bool deleted = false;
 
     qemu_lockcnt_inc(&ctx->list_lock);
 
     ret = 0;
     for (bh = atomic_rcu_read(&ctx->first_bh); bh; bh = next) {
         next = atomic_rcu_read(&bh->next);
+        if (bh->deleted) {
+            deleted = true;
+            continue;
+        }
         /* The atomic_xchg is paired with the one in qemu_bh_schedule.  The
          * implicit memory barrier ensures that the callback sees all writes
          * done by the scheduling thread.  It also ensures that the scheduling
          * thread sees the zero before bh->cb has run, and thus will call
          * aio_notify again if necessary.
          */
-        if (!bh->deleted && atomic_xchg(&bh->scheduled, 0)) {
+        if (atomic_xchg(&bh->scheduled, 0)) {
             /* Idle BHs don't count as progress */
             if (!bh->idle) {
                 ret = 1;
@@ -92,6 +97,11 @@ int aio_bh_poll(AioContext *ctx)
     }
 
     /* remove deleted bhs */
+    if (!deleted) {
+        qemu_lockcnt_dec(&ctx->list_lock);
+        return ret;
+    }
+
     if (qemu_lockcnt_dec_and_lock(&ctx->list_lock)) {
         bhp = &ctx->first_bh;
         while (*bhp) {
@@ -105,7 +115,6 @@ int aio_bh_poll(AioContext *ctx)
         }
         qemu_lockcnt_unlock(&ctx->list_lock);
     }
-
     return ret;
 }
 
-- 
1.8.3.1





reply via email to

[Prev in Thread] Current Thread [Next in Thread]