From: Stefan Hajnoczi
Subject: [Qemu-devel] [PATCH v6 03/16] block: make copy-on-read a per-request flag
Date: Wed, 18 Jan 2012 14:40:42 +0000

Previously copy-on-read could only be enabled for all requests to a
block device.  This meant that requests coming from the guest, as well
as QEMU's internal requests, performed copy-on-read whenever it was
enabled.

For image streaming we want to support finer-grained behavior than just
populating the image file from its backing image.  Image streaming
supports partial streaming, where a common backing image is preserved.
In this case guest requests should not perform copy-on-read, because
they would indiscriminately copy data that should remain in an image
further down the backing chain.

Introduce a per-request flag for copy-on-read so that a block device can
process both regular and copy-on-read requests.  Overlapping reads and
writes still need to be serialized for correctness when copy-on-read is
happening, so add an in-flight reference count to track this.
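
To illustrate the intended split: guest I/O keeps going through
bdrv_co_readv(), which passes no flags, while an internal user such as
image streaming can opt in per request via the new
bdrv_co_copy_on_readv() wrapper.  A minimal caller sketch follows; the
stream_one_chunk() helper is hypothetical and not part of this patch:

    /* Hypothetical caller: pull one chunk of backing-file data into the
     * top image using the per-request copy-on-read flag.  Runs in
     * coroutine context like the other bdrv_co_* functions.
     */
    static int coroutine_fn stream_one_chunk(BlockDriverState *bs,
                                             int64_t sector_num,
                                             int nb_sectors)
    {
        QEMUIOVector qiov;
        struct iovec iov;
        int ret;

        iov.iov_len = nb_sectors * BDRV_SECTOR_SIZE;
        iov.iov_base = qemu_blockalign(bs, iov.iov_len);
        qemu_iovec_init_external(&qiov, &iov, 1);

        /* This read requests copy-on-read for itself only; concurrent
         * guest reads issued through bdrv_co_readv() pass flags == 0
         * and populate nothing unless bs->copy_on_read is set globally.
         */
        ret = bdrv_co_copy_on_readv(bs, sector_num, nb_sectors, &qiov);

        qemu_vfree(iov.iov_base);
        return ret;
    }

Because the flag travels with the request, the global bs->copy_on_read
setting no longer has to be toggled to get this behavior, and the
copy_on_read_in_flight counter serializes overlapping reads and writes
only while such requests are actually pending.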

Signed-off-by: Stefan Hajnoczi <address@hidden>
---
 block.c      |   49 ++++++++++++++++++++++++++++++++++++++-----------
 block.h      |    2 ++
 block_int.h  |    3 +++
 trace-events |    3 ++-
 4 files changed, 45 insertions(+), 12 deletions(-)

diff --git a/block.c b/block.c
index 43f6484..edfab49 100644
--- a/block.c
+++ b/block.c
@@ -48,6 +48,10 @@
 
 #define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */
 
+typedef enum {
+    BDRV_REQ_COPY_ON_READ = 0x1,
+} BdrvRequestFlags;
+
 static void bdrv_dev_change_media_cb(BlockDriverState *bs, bool load);
 static BlockDriverAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
         int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
@@ -62,7 +66,8 @@ static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs,
                                          int64_t sector_num, int nb_sectors,
                                          QEMUIOVector *iov);
 static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
-    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov);
+    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
+    BdrvRequestFlags flags);
 static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
     int64_t sector_num, int nb_sectors, QEMUIOVector *qiov);
 static BlockDriverAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
@@ -1292,7 +1297,7 @@ static void coroutine_fn bdrv_rw_co_entry(void *opaque)
 
     if (!rwco->is_write) {
         rwco->ret = bdrv_co_do_readv(rwco->bs, rwco->sector_num,
-                                     rwco->nb_sectors, rwco->qiov);
+                                     rwco->nb_sectors, rwco->qiov, 0);
     } else {
         rwco->ret = bdrv_co_do_writev(rwco->bs, rwco->sector_num,
                                       rwco->nb_sectors, rwco->qiov);
@@ -1500,7 +1505,7 @@ int bdrv_pwrite_sync(BlockDriverState *bs, int64_t offset,
     return 0;
 }
 
-static int coroutine_fn bdrv_co_copy_on_readv(BlockDriverState *bs,
+static int coroutine_fn bdrv_co_do_copy_on_readv(BlockDriverState *bs,
         int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
 {
     /* Perform I/O through a temporary buffer so that users who scribble over
@@ -1523,8 +1528,8 @@ static int coroutine_fn bdrv_co_copy_on_readv(BlockDriverState *bs,
     round_to_clusters(bs, sector_num, nb_sectors,
                       &cluster_sector_num, &cluster_nb_sectors);
 
-    trace_bdrv_co_copy_on_readv(bs, sector_num, nb_sectors,
-                                cluster_sector_num, cluster_nb_sectors);
+    trace_bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors,
+                                   cluster_sector_num, cluster_nb_sectors);
 
     iov.iov_len = cluster_nb_sectors * BDRV_SECTOR_SIZE;
     iov.iov_base = bounce_buffer = qemu_blockalign(bs, iov.iov_len);
@@ -1559,7 +1564,8 @@ err:
  * Handle a read request in coroutine context
  */
 static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
-    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
+    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
+    BdrvRequestFlags flags)
 {
     BlockDriver *drv = bs->drv;
     BdrvTrackedRequest req;
@@ -1578,12 +1584,19 @@ static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
     }
 
     if (bs->copy_on_read) {
+        flags |= BDRV_REQ_COPY_ON_READ;
+    }
+    if (flags & BDRV_REQ_COPY_ON_READ) {
+        bs->copy_on_read_in_flight++;
+    }
+
+    if (bs->copy_on_read_in_flight) {
         wait_for_overlapping_requests(bs, sector_num, nb_sectors);
     }
 
     tracked_request_begin(&req, bs, sector_num, nb_sectors, false);
 
-    if (bs->copy_on_read) {
+    if (flags & BDRV_REQ_COPY_ON_READ) {
         int pnum;
 
         ret = bdrv_co_is_allocated(bs, sector_num, nb_sectors, &pnum);
@@ -1592,7 +1605,7 @@ static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
         }
 
         if (!ret || pnum != nb_sectors) {
-            ret = bdrv_co_copy_on_readv(bs, sector_num, nb_sectors, qiov);
+            ret = bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors, qiov);
             goto out;
         }
     }
@@ -1601,6 +1614,11 @@ static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
 
 out:
     tracked_request_end(&req);
+
+    if (flags & BDRV_REQ_COPY_ON_READ) {
+        bs->copy_on_read_in_flight--;
+    }
+
     return ret;
 }
 
@@ -1609,7 +1627,16 @@ int coroutine_fn bdrv_co_readv(BlockDriverState *bs, int64_t sector_num,
 {
     trace_bdrv_co_readv(bs, sector_num, nb_sectors);
 
-    return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov);
+    return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov, 0);
+}
+
+int coroutine_fn bdrv_co_copy_on_readv(BlockDriverState *bs,
+    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
+{
+    trace_bdrv_co_copy_on_readv(bs, sector_num, nb_sectors);
+
+    return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov,
+                            BDRV_REQ_COPY_ON_READ);
 }
 
 /*
@@ -1637,7 +1664,7 @@ static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
         bdrv_io_limits_intercept(bs, true, nb_sectors);
     }
 
-    if (bs->copy_on_read) {
+    if (bs->copy_on_read_in_flight) {
         wait_for_overlapping_requests(bs, sector_num, nb_sectors);
     }
 
@@ -3144,7 +3171,7 @@ static void coroutine_fn bdrv_co_do_rw(void *opaque)
 
     if (!acb->is_write) {
         acb->req.error = bdrv_co_do_readv(bs, acb->req.sector,
-            acb->req.nb_sectors, acb->req.qiov);
+            acb->req.nb_sectors, acb->req.qiov, 0);
     } else {
         acb->req.error = bdrv_co_do_writev(bs, acb->req.sector,
             acb->req.nb_sectors, acb->req.qiov);
diff --git a/block.h b/block.h
index 3bd4398..a3b0b80 100644
--- a/block.h
+++ b/block.h
@@ -142,6 +142,8 @@ int bdrv_pwrite_sync(BlockDriverState *bs, int64_t offset,
     const void *buf, int count);
 int coroutine_fn bdrv_co_readv(BlockDriverState *bs, int64_t sector_num,
     int nb_sectors, QEMUIOVector *qiov);
+int coroutine_fn bdrv_co_copy_on_readv(BlockDriverState *bs,
+    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov);
 int coroutine_fn bdrv_co_writev(BlockDriverState *bs, int64_t sector_num,
     int nb_sectors, QEMUIOVector *qiov);
 int coroutine_fn bdrv_co_is_allocated(BlockDriverState *bs, int64_t sector_num,
diff --git a/block_int.h b/block_int.h
index 311bd2a..07d67ed 100644
--- a/block_int.h
+++ b/block_int.h
@@ -218,6 +218,9 @@ struct BlockDriverState {
     BlockDriverState *backing_hd;
     BlockDriverState *file;
 
+    /* number of in-flight copy-on-read requests */
+    unsigned int copy_on_read_in_flight;
+
     /* async read/write emulation */
 
     void *sync_aiocb;
diff --git a/trace-events b/trace-events
index c18435b..656127c 100644
--- a/trace-events
+++ b/trace-events
@@ -65,9 +65,10 @@ bdrv_aio_readv(void *bs, int64_t sector_num, int nb_sectors, void *opaque) "bs %
 bdrv_aio_writev(void *bs, int64_t sector_num, int nb_sectors, void *opaque) "bs %p sector_num %"PRId64" nb_sectors %d opaque %p"
 bdrv_lock_medium(void *bs, bool locked) "bs %p locked %d"
 bdrv_co_readv(void *bs, int64_t sector_num, int nb_sector) "bs %p sector_num %"PRId64" nb_sectors %d"
+bdrv_co_copy_on_readv(void *bs, int64_t sector_num, int nb_sector) "bs %p sector_num %"PRId64" nb_sectors %d"
 bdrv_co_writev(void *bs, int64_t sector_num, int nb_sector) "bs %p sector_num %"PRId64" nb_sectors %d"
 bdrv_co_io_em(void *bs, int64_t sector_num, int nb_sectors, int is_write, void *acb) "bs %p sector_num %"PRId64" nb_sectors %d is_write %d acb %p"
-bdrv_co_copy_on_readv(void *bs, int64_t sector_num, int nb_sectors, int64_t cluster_sector_num, int cluster_nb_sectors) "bs %p sector_num %"PRId64" nb_sectors %d cluster_sector_num %"PRId64" cluster_nb_sectors %d"
+bdrv_co_do_copy_on_readv(void *bs, int64_t sector_num, int nb_sectors, int64_t cluster_sector_num, int cluster_nb_sectors) "bs %p sector_num %"PRId64" nb_sectors %d cluster_sector_num %"PRId64" cluster_nb_sectors %d"
 
 # hw/virtio-blk.c
 virtio_blk_req_complete(void *req, int status) "req %p status %d"
-- 
1.7.8.3