From: Michael Tokarev
Subject: [Qemu-devel] [PATCH v2] Consolidate reads and writes in nbd block device into one common routine
Date: Tue, 28 Feb 2012 14:24:08 +0400

This removes quite a bit of duplicated code.

v2 fixes a bug (uninitialized reply.error) and makes the loop more natural.
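
For readers skimming the diff below, here is a small standalone sketch (not part of
the patch; split_request and its printf output are purely illustrative) of the
chunking arithmetic the consolidated routine performs: a request is issued in pieces
of at most NBD_MAX_SECTORS (2040) sectors, i.e. 2040 * 512 = 1044480 bytes, which is
just under the ~1M qemu-nbd limit and a multiple of 4K. A 5000-sector request, for
example, becomes three NBD requests of 2040, 2040 and 920 sectors.

#include <stdio.h>

#define NBD_MAX_SECTORS 2040                 /* same cap as in the patch below */
#define MIN(a, b) ((a) < (b) ? (a) : (b))

/* Illustrative only: print the sub-requests a large request is split into. */
static void split_request(long long sector_num, int nb_sectors)
{
    int offset = 0;                          /* byte offset into the caller's iovec */

    do {
        int len = MIN(nb_sectors, NBD_MAX_SECTORS);

        printf("from=%lld len=%d sectors (iovec offset %d bytes)\n",
               sector_num * 512LL, len, offset);

        offset     += NBD_MAX_SECTORS * 512;
        sector_num += NBD_MAX_SECTORS;
        nb_sectors -= NBD_MAX_SECTORS;
    } while (nb_sectors > 0);
}

int main(void)
{
    split_request(0, 5000);                  /* -> 2040 + 2040 + 920 sectors */
    return 0;
}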

Signed-off-by: Michael Tokarev <address@hidden>
---
 block/nbd.c |   95 +++++++++++++++++++---------------------------------------
 1 files changed, 31 insertions(+), 64 deletions(-)

diff --git a/block/nbd.c b/block/nbd.c
index 161b299..a78e212 100644
--- a/block/nbd.c
+++ b/block/nbd.c
@@ -320,91 +320,58 @@ static int nbd_open(BlockDriverState *bs, const char* filename, int flags)
     return result;
 }
 
-static int nbd_co_readv_1(BlockDriverState *bs, int64_t sector_num,
-                          int nb_sectors, QEMUIOVector *qiov,
-                          int offset)
-{
-    BDRVNBDState *s = bs->opaque;
-    struct nbd_request request;
-    struct nbd_reply reply;
-
-    request.type = NBD_CMD_READ;
-    request.from = sector_num * 512;
-    request.len = nb_sectors * 512;
-
-    nbd_coroutine_start(s, &request);
-    if (nbd_co_send_request(s, &request, NULL, 0) == -1) {
-        reply.error = errno;
-    } else {
-        nbd_co_receive_reply(s, &request, &reply, qiov->iov, offset);
-    }
-    nbd_coroutine_end(s, &request);
-    return -reply.error;
-
-}
+/* qemu-nbd has a limit of slightly less than 1M per request.  Try to
+ * remain aligned to 4K. */
+#define NBD_MAX_SECTORS 2040
 
-static int nbd_co_writev_1(BlockDriverState *bs, int64_t sector_num,
-                           int nb_sectors, QEMUIOVector *qiov,
-                           int offset)
+static int nbd_co_rwv(BlockDriverState *bs, int64_t sector_num,
+                      int nb_sectors, QEMUIOVector *qiov, int iswrite)
 {
     BDRVNBDState *s = bs->opaque;
     struct nbd_request request;
     struct nbd_reply reply;
+    int offset = 0;
 
-    request.type = NBD_CMD_WRITE;
-    if (!bdrv_enable_write_cache(bs) && (s->nbdflags & NBD_FLAG_SEND_FUA)) {
+    request.type = iswrite ? NBD_CMD_WRITE : NBD_CMD_READ;
+    if (iswrite && !bdrv_enable_write_cache(bs) && (s->nbdflags & NBD_FLAG_SEND_FUA)) {
         request.type |= NBD_CMD_FLAG_FUA;
     }
+    reply.error = 0;
+
+    /* we split the request into pieces of at most NBD_MAX_SECTORS size
+     * and process them in a loop... */
+    do {
+        request.from = sector_num * 512;
+        request.len = MIN(nb_sectors, NBD_MAX_SECTORS) * 512;
+
+        nbd_coroutine_start(s, &request);
+        if (nbd_co_send_request(s, &request, iswrite ? qiov->iov : NULL, offset) == -1) {
+            reply.error = errno;
+        } else {
+            nbd_co_receive_reply(s, &request, &reply, iswrite ? NULL : qiov->iov, offset);
+        }
+        nbd_coroutine_end(s, &request);
 
-    request.from = sector_num * 512;
-    request.len = nb_sectors * 512;
+        offset += NBD_MAX_SECTORS * 512;
+        sector_num += NBD_MAX_SECTORS;
+        nb_sectors -= NBD_MAX_SECTORS;
+
+    /* ...until we hit an error or there's nothing more to process */
+    } while (reply.error == 0 && nb_sectors > 0);
 
-    nbd_coroutine_start(s, &request);
-    if (nbd_co_send_request(s, &request, qiov->iov, offset) == -1) {
-        reply.error = errno;
-    } else {
-        nbd_co_receive_reply(s, &request, &reply, NULL, 0);
-    }
-    nbd_coroutine_end(s, &request);
     return -reply.error;
 }
 
-/* qemu-nbd has a limit of slightly less than 1M per request.  Try to
- * remain aligned to 4K. */
-#define NBD_MAX_SECTORS 2040
-
 static int nbd_co_readv(BlockDriverState *bs, int64_t sector_num,
                         int nb_sectors, QEMUIOVector *qiov)
 {
-    int offset = 0;
-    int ret;
-    while (nb_sectors > NBD_MAX_SECTORS) {
-        ret = nbd_co_readv_1(bs, sector_num, NBD_MAX_SECTORS, qiov, offset);
-        if (ret < 0) {
-            return ret;
-        }
-        offset += NBD_MAX_SECTORS * 512;
-        sector_num += NBD_MAX_SECTORS;
-        nb_sectors -= NBD_MAX_SECTORS;
-    }
-    return nbd_co_readv_1(bs, sector_num, nb_sectors, qiov, offset);
+    return nbd_co_rwv(bs, sector_num, nb_sectors, qiov, 0);
 }
 
 static int nbd_co_writev(BlockDriverState *bs, int64_t sector_num,
                          int nb_sectors, QEMUIOVector *qiov)
 {
-    int offset = 0;
-    int ret;
-    while (nb_sectors > NBD_MAX_SECTORS) {
-        ret = nbd_co_writev_1(bs, sector_num, NBD_MAX_SECTORS, qiov, offset);
-        if (ret < 0) {
-            return ret;
-        }
-        offset += NBD_MAX_SECTORS * 512;
-        sector_num += NBD_MAX_SECTORS;
-        nb_sectors -= NBD_MAX_SECTORS;
-    }
-    return nbd_co_writev_1(bs, sector_num, nb_sectors, qiov, offset);
+    return nbd_co_rwv(bs, sector_num, nb_sectors, qiov, 1);
 }
 
 static int nbd_co_flush(BlockDriverState *bs)
-- 
1.7.9



