[Qemu-devel] [PATCH 2/6] change vectored block I/O API to plain iovecs


From: Christoph Hellwig
Subject: [Qemu-devel] [PATCH 2/6] change vectored block I/O API to plain iovecs
Date: Sat, 14 Mar 2009 20:28:28 +0100
User-agent: Mutt/1.3.28i


QEMUIOVector is a useful helper for the dma-helpers.c internals, but for a generic
block API it's more of a hindrance.  Some top-level consumers like virtio-blk
already have the plain iovec and segment count at hand and can pass them down
directly, and for those that just have a single element and need to fake up
a vector, a plain iovec is also a lot easier to construct.

Last but not least, we want to push vectored I/O down to the lowest level,
and if posix-aio-compat.c wants to stay somewhat true to its goal of
being an enhanced POSIX AIO API, it should stick to POSIX types.
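
For illustration only (not part of this patch): a minimal sketch, assuming the
new signature above, of how a caller with a single contiguous buffer could fake
up a one-element vector.  The request structure and function names below are
made up for the example; the iovec lives in caller-owned state because the read
completion path still dereferences it when copying data back out of the bounce
buffer.

#include <sys/uio.h>
#include "qemu-common.h"
#include "block.h"

/* hypothetical caller-owned request state; the iovec must stay valid
 * until the completion callback runs, since bdrv_aio_rw_vector_cb()
 * copies the bounce buffer back through it on reads */
typedef struct SingleBufReq {
    struct iovec iov;
} SingleBufReq;

static void single_buf_read_cb(void *opaque, int ret)
{
    SingleBufReq *req = opaque;

    if (ret < 0) {
        /* handle the I/O error here */
    }
    qemu_free(req);
}

static BlockDriverAIOCB *read_one_buffer(BlockDriverState *bs,
                                         int64_t sector_num,
                                         uint8_t *buf, int nb_sectors)
{
    SingleBufReq *req = qemu_mallocz(sizeof(*req));

    req->iov.iov_base = buf;
    req->iov.iov_len  = nb_sectors * 512;

    /* new calling convention: plain iovec pointer plus element count */
    return bdrv_aio_readv(bs, sector_num, &req->iov, 1, nb_sectors,
                          single_buf_read_cb, req);
}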


Signed-off-by: Christoph Hellwig <address@hidden>

Index: qemu/block.c
===================================================================
--- qemu.orig/block.c   2009-03-14 14:01:44.000000000 +0100
+++ qemu/block.c        2009-03-14 14:32:55.000000000 +0100
@@ -1253,7 +1253,9 @@ char *bdrv_snapshot_dump(char *buf, int 
 /* async I/Os */
 
 typedef struct VectorTranslationState {
-    QEMUIOVector *iov;
+    struct iovec *iov;
+    int nr_iov;
+    int size;
     uint8_t *bounce;
     int is_write;
     BlockDriverAIOCB *aiocb;
@@ -1265,7 +1267,7 @@ static void bdrv_aio_rw_vector_cb(void *
     VectorTranslationState *s = opaque;
 
     if (!s->is_write) {
-        qemu_iovec_from_buffer(s->iov, s->bounce, s->iov->size);
+        iovec_from_buffer(s->iov, s->nr_iov, s->bounce, s->size);
     }
     qemu_vfree(s->bounce);
     s->this_aiocb->cb(s->this_aiocb->opaque, ret);
@@ -1274,7 +1276,8 @@ static void bdrv_aio_rw_vector_cb(void *
 
 static BlockDriverAIOCB *bdrv_aio_rw_vector(BlockDriverState *bs,
                                             int64_t sector_num,
-                                            QEMUIOVector *iov,
+                                            struct iovec *iov,
+                                           int nr_iov,
                                             int nb_sectors,
                                             BlockDriverCompletionFunc *cb,
                                             void *opaque,
@@ -1286,10 +1289,12 @@ static BlockDriverAIOCB *bdrv_aio_rw_vec
 
     s->this_aiocb = aiocb;
     s->iov = iov;
-    s->bounce = qemu_memalign(512, nb_sectors * 512);
+    s->nr_iov = nr_iov;
+    s->size = nb_sectors * 512;
+    s->bounce = qemu_memalign(512, s->size);
     s->is_write = is_write;
     if (is_write) {
-        qemu_iovec_to_buffer(s->iov, s->bounce);
+        iovec_to_buffer(s->iov, s->nr_iov, s->bounce);
         s->aiocb = bdrv_aio_write(bs, sector_num, s->bounce, nb_sectors,
                                   bdrv_aio_rw_vector_cb, s);
     } else {
@@ -1300,24 +1305,24 @@ static BlockDriverAIOCB *bdrv_aio_rw_vec
 }
 
 BlockDriverAIOCB *bdrv_aio_readv(BlockDriverState *bs, int64_t sector_num,
-                                 QEMUIOVector *iov, int nb_sectors,
+                                 struct iovec *iov, int nr_iov, int nb_sectors,
                                  BlockDriverCompletionFunc *cb, void *opaque)
 {
     if (bdrv_check_request(bs, sector_num, nb_sectors))
         return NULL;
 
-    return bdrv_aio_rw_vector(bs, sector_num, iov, nb_sectors,
+    return bdrv_aio_rw_vector(bs, sector_num, iov, nr_iov, nb_sectors,
                               cb, opaque, 0);
 }
 
 BlockDriverAIOCB *bdrv_aio_writev(BlockDriverState *bs, int64_t sector_num,
-                                  QEMUIOVector *iov, int nb_sectors,
+                                  struct iovec *iov, int nr_iov, int nb_sectors,
                                   BlockDriverCompletionFunc *cb, void *opaque)
 {
     if (bdrv_check_request(bs, sector_num, nb_sectors))
         return NULL;
 
-    return bdrv_aio_rw_vector(bs, sector_num, iov, nb_sectors,
+    return bdrv_aio_rw_vector(bs, sector_num, iov, nr_iov, nb_sectors,
                               cb, opaque, 1);
 }
 
Index: qemu/block.h
===================================================================
--- qemu.orig/block.h   2009-03-14 14:00:54.000000000 +0100
+++ qemu/block.h        2009-03-14 14:32:55.000000000 +0100
@@ -87,10 +87,10 @@ typedef struct BlockDriverAIOCB BlockDri
 typedef void BlockDriverCompletionFunc(void *opaque, int ret);
 
 BlockDriverAIOCB *bdrv_aio_readv(BlockDriverState *bs, int64_t sector_num,
-                                 QEMUIOVector *iov, int nb_sectors,
+                                 struct iovec *iov, int nr_iov, int nb_sectors,
                                  BlockDriverCompletionFunc *cb, void *opaque);
 BlockDriverAIOCB *bdrv_aio_writev(BlockDriverState *bs, int64_t sector_num,
-                                  QEMUIOVector *iov, int nb_sectors,
+                                  struct iovec *iov, int nr_iov, int nb_sectors,
                                   BlockDriverCompletionFunc *cb, void *opaque);
 
 BlockDriverAIOCB *bdrv_aio_read(BlockDriverState *bs, int64_t sector_num,
Index: qemu/cutils.c
===================================================================
--- qemu.orig/cutils.c  2009-03-14 14:00:54.000000000 +0100
+++ qemu/cutils.c       2009-03-14 14:32:55.000000000 +0100
@@ -135,28 +135,29 @@ void qemu_iovec_reset(QEMUIOVector *qiov
     qiov->size = 0;
 }
 
-void qemu_iovec_to_buffer(QEMUIOVector *qiov, void *buf)
+void iovec_to_buffer(struct iovec *iov, int nr_iov, void *buf)
 {
     uint8_t *p = (uint8_t *)buf;
     int i;
 
-    for (i = 0; i < qiov->niov; ++i) {
-        memcpy(p, qiov->iov[i].iov_base, qiov->iov[i].iov_len);
-        p += qiov->iov[i].iov_len;
+    for (i = 0; i < nr_iov; ++i) {
+        memcpy(p, iov[i].iov_base, iov[i].iov_len);
+        p += iov[i].iov_len;
     }
 }
 
-void qemu_iovec_from_buffer(QEMUIOVector *qiov, const void *buf, size_t count)
+void iovec_from_buffer(struct iovec *iov, int nr_iov,
+        const void *buf, size_t count)
 {
     const uint8_t *p = (const uint8_t *)buf;
     size_t copy;
     int i;
 
-    for (i = 0; i < qiov->niov && count; ++i) {
+    for (i = 0; i < nr_iov && count; ++i) {
         copy = count;
-        if (copy > qiov->iov[i].iov_len)
-            copy = qiov->iov[i].iov_len;
-        memcpy(qiov->iov[i].iov_base, p, copy);
+        if (copy > iov[i].iov_len)
+            copy = iov[i].iov_len;
+        memcpy(iov[i].iov_base, p, copy);
         p     += copy;
         count -= copy;
     }
Index: qemu/dma-helpers.c
===================================================================
--- qemu.orig/dma-helpers.c     2009-03-14 14:00:54.000000000 +0100
+++ qemu/dma-helpers.c  2009-03-14 14:32:55.000000000 +0100
@@ -110,10 +110,10 @@ static void dma_bdrv_cb(void *opaque, in
     }
 
     if (dbs->is_write) {
-        bdrv_aio_writev(dbs->bs, dbs->sector_num, &dbs->iov,
+        bdrv_aio_writev(dbs->bs, dbs->sector_num, dbs->iov.iov, dbs->iov.niov,
                         dbs->iov.size / 512, dma_bdrv_cb, dbs);
     } else {
-        bdrv_aio_readv(dbs->bs, dbs->sector_num, &dbs->iov,
+        bdrv_aio_readv(dbs->bs, dbs->sector_num, dbs->iov.iov, dbs->iov.niov,
                        dbs->iov.size / 512, dma_bdrv_cb, dbs);
     }
 }
Index: qemu/qemu-common.h
===================================================================
--- qemu.orig/qemu-common.h     2009-03-14 14:00:54.000000000 +0100
+++ qemu/qemu-common.h  2009-03-14 14:32:55.000000000 +0100
@@ -197,8 +197,10 @@ void qemu_iovec_init(QEMUIOVector *qiov,
 void qemu_iovec_add(QEMUIOVector *qiov, void *base, size_t len);
 void qemu_iovec_destroy(QEMUIOVector *qiov);
 void qemu_iovec_reset(QEMUIOVector *qiov);
-void qemu_iovec_to_buffer(QEMUIOVector *qiov, void *buf);
-void qemu_iovec_from_buffer(QEMUIOVector *qiov, const void *buf, size_t count);
+
+void iovec_to_buffer(struct iovec *iov, int nr_iov, void *buf);
+void iovec_from_buffer(struct iovec *iov, int nr_iov,
+        const void *buf, size_t count);
 
 struct Monitor;
 typedef struct Monitor Monitor;



