From: Vladimir Sementsov-Ogievskiy
Subject: [Qemu-devel] [PATCH v2 7/7] block/nbd-client: do not yield from nbd_read_reply_entry
Date: Mon, 18 Sep 2017 16:59:35 +0300

Refactor the NBD client to not yield from nbd_read_reply_entry. This
is now possible because all reading is done in nbd_read_reply_entry
and all request-related data is stored in the per-request
s->requests[i].

Additional handling of s->requests[i].ret and s->quit is needed here
so that requests are not failed and read errors are not reported on a
normal disconnect.

Signed-off-by: Vladimir Sementsov-Ogievskiy <address@hidden>
---
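
[Editor's note, not part of the patch: below is a minimal standalone
sketch of the pattern this change moves to, with pthreads standing in
for QEMU coroutines and all names hypothetical. One reader loop owns
the connection; it stores the result in the per-request slot (pre-set
to -EIO when the request is sent, as in this patch) and wakes the
parked request directly, never yielding and waiting to be re-entered.]

    /* sketch.c -- standalone illustration (hypothetical names; pthreads
     * stand in for QEMU coroutines).  Build: gcc sketch.c -lpthread */
    #include <errno.h>
    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_REQS 2

    struct session {
        pthread_mutex_t lock;
        pthread_cond_t  wake;
        bool receiving[MAX_REQS];   /* request is parked, awaiting reply */
        int  ret[MAX_REQS];         /* pre-set to -EIO when request is sent */
        bool quit;
    };

    /* Stand-in for nbd_receive_reply(): returns the slot index of the
     * next reply; here one reply for slot 0, then a disconnect (-1). */
    static int receive_reply_index(void)
    {
        static int calls;
        return calls++ == 0 ? 0 : -1;
    }

    /* The reader owns the connection.  It fills the per-request slot and
     * wakes that request directly; it never parks itself waiting to be
     * re-entered by the request it just completed. */
    static void *read_reply_entry(void *opaque)
    {
        struct session *s = opaque;

        for (;;) {
            int i = receive_reply_index();

            pthread_mutex_lock(&s->lock);
            if (i < 0 || s->quit) {
                s->quit = true;               /* fail still-parked requests */
                pthread_cond_broadcast(&s->wake);
                pthread_mutex_unlock(&s->lock);
                return NULL;
            }
            s->ret[i] = 0;                    /* reply read successfully */
            s->receiving[i] = false;
            pthread_cond_broadcast(&s->wake); /* wake request i; no yield */
            pthread_mutex_unlock(&s->lock);
        }
    }

    /* Request side: park until the reader clears .receiving (or the
     * session quits), then pick up whatever result the reader stored. */
    static int wait_for_reply(struct session *s, int i)
    {
        pthread_mutex_lock(&s->lock);
        while (s->receiving[i] && !s->quit) {
            pthread_cond_wait(&s->wake, &s->lock);
        }
        int ret = s->ret[i];
        pthread_mutex_unlock(&s->lock);
        return ret;
    }

    int main(void)
    {
        static struct session s = {
            .lock = PTHREAD_MUTEX_INITIALIZER,
            .wake = PTHREAD_COND_INITIALIZER,
            .receiving = { true },
            .ret = { -EIO },
        };
        pthread_t reader;

        pthread_create(&reader, NULL, read_reply_entry, &s);
        printf("request 0 completed with ret=%d\n", wait_for_reply(&s, 0));
        pthread_join(reader, NULL);
        return 0;
    }

[The pre-set -EIO plays the same role as s->requests[i].ret in the
patch: a request woken by teardown rather than by its reply sees -EIO,
while a normal disconnect stops the loop without reporting an error.]
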
 block/nbd-client.c | 29 ++++++++++-------------------
 1 file changed, 10 insertions(+), 19 deletions(-)

diff --git a/block/nbd-client.c b/block/nbd-client.c
index f80a4c5564..bf20d5d5e6 100644
--- a/block/nbd-client.c
+++ b/block/nbd-client.c
@@ -79,7 +79,11 @@ static coroutine_fn void nbd_read_reply_entry(void *opaque)
     while (!s->quit) {
         ret = nbd_receive_reply(s->ioc, &reply, &local_err);
         if (ret < 0) {
-            error_report_err(local_err);
+            if (s->quit) {
+                error_free(local_err);
+            } else {
+                error_report_err(local_err);
+            }
         }
         if (ret <= 0 || s->quit) {
             break;
@@ -112,19 +116,8 @@ static coroutine_fn void nbd_read_reply_entry(void *opaque)
             }
         }
 
-        /* We're woken up again by the request itself.  Note that there
-         * is no race between yielding and reentering read_reply_co.  This
-         * is because:
-         *
-         * - if the request runs on the same AioContext, it is only
-         *   entered after we yield
-         *
-         * - if the request runs on a different AioContext, reentering
-         *   read_reply_co happens through a bottom half, which can only
-         *   run after we yield.
-         */
+        s->requests[i].receiving = false;
         aio_co_wake(s->requests[i].coroutine);
-        qemu_coroutine_yield();
     }
 
     s->quit = true;
@@ -157,6 +150,7 @@ static int nbd_co_send_request(BlockDriverState *bs,
 
     s->requests[i].coroutine = qemu_coroutine_self();
     s->requests[i].receiving = false;
+    s->requests[i].ret = -EIO;
 
     request->handle = INDEX_TO_HANDLE(s, i);
     s->requests[i].request = request;
@@ -210,7 +204,7 @@ static int nbd_co_receive_reply(NBDClientSession *s, NBDRequest *request)
     s->requests[i].receiving = true;
     qemu_coroutine_yield();
     s->requests[i].receiving = false;
-    if (!s->ioc || s->quit) {
+    if (!s->ioc) {
         ret = -EIO;
     } else {
         ret = s->requests[i].ret;
@@ -218,11 +212,6 @@ static int nbd_co_receive_reply(NBDClientSession *s, NBDRequest *request)
 
     s->requests[i].coroutine = NULL;
 
-    /* Kick the read_reply_co to get the next reply.  */
-    if (s->read_reply_co) {
-        aio_co_wake(s->read_reply_co);
-    }
-
     qemu_co_mutex_lock(&s->send_mutex);
     s->in_flight--;
     qemu_co_queue_next(&s->free_sema);
@@ -364,6 +353,8 @@ void nbd_client_close(BlockDriverState *bs)
 
     nbd_send_request(client->ioc, &request);
 
+    client->quit = true;
+
     nbd_teardown_connection(bs);
 }
 
-- 
2.11.1



