qemu-devel
[Top][All Lists]
Advanced

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Qemu-devel] [PATCH 09/16] block: wait for all pending I/O when doing synchronous requests


From: Paolo Bonzini
Subject: [Qemu-devel] [PATCH 09/16] block: wait for all pending I/O when doing synchronous requests
Date: Tue, 16 Feb 2016 18:56:21 +0100

Synchronous I/O should in general happen either in the main thread (e.g.
for bdrv_open and bdrv_create) or between bdrv_drained_begin and
bdrv_drained_end.  Therefore, the simplest way to wait for it to finish
is to wait for _all_ pending I/O to complete.

In fact, there was one case in bdrv_close where we explicitly drained
after bdrv_flush; this is now unnecessary.  And we should probably have
called bdrv_drain_all after calls to bdrv_flush_all, which is now
unnecessary too.

On the other hand, there was a case where a test was relying on doing
a synchronous read after a breakpoint had suspended an asynchronous read.
This is easily fixed.

This decouples synchronous I/O from aio_poll.  When the request used
not to be tracked as part of bdrv_drain (e.g. bdrv_co_get_block_status)
we need to update the in_flight count.

Signed-off-by: Paolo Bonzini <address@hidden>
---
 block.c                    |  1 -
 block/io.c                 | 39 ++++++++++++---------------------------
 block/qed-table.c          | 16 ++++------------
 block/qed.c                |  4 +++-
 tests/qemu-iotests/060     |  8 ++++++--
 tests/qemu-iotests/060.out |  4 +++-
 6 files changed, 28 insertions(+), 44 deletions(-)

diff --git a/block.c b/block.c
index b4f328f..fb02d7f 100644
--- a/block.c
+++ b/block.c
@@ -2152,7 +2152,6 @@ static void bdrv_close(BlockDriverState *bs)
 
     bdrv_drained_begin(bs); /* complete I/O */
     bdrv_flush(bs);
-    bdrv_drain(bs); /* in case flush left pending I/O */
 
     bdrv_release_named_dirty_bitmaps(bs);
     assert(QLIST_EMPTY(&bs->dirty_bitmaps));
diff --git a/block/io.c b/block/io.c
index e0c9215..04b52c8 100644
--- a/block/io.c
+++ b/block/io.c
@@ -593,13 +593,9 @@ static int bdrv_prwv_co(BlockDriverState *bs, int64_t offset,
         /* Fast-path if already in coroutine context */
         bdrv_rw_co_entry(&rwco);
     } else {
-        AioContext *aio_context = bdrv_get_aio_context(bs);
-
         co = qemu_coroutine_create(bdrv_rw_co_entry);
         qemu_coroutine_enter(co, &rwco);
-        while (rwco.ret == NOT_DONE) {
-            aio_poll(aio_context, true);
-        }
+        bdrv_drain(bs);
     }
 
     bdrv_no_throttling_end(bs);
@@ -1545,17 +1541,19 @@ static int64_t coroutine_fn bdrv_co_get_block_status(BlockDriverState *bs,
     }
 
     *file = NULL;
+    bdrv_inc_in_flight(bs);
     ret = bs->drv->bdrv_co_get_block_status(bs, sector_num, nb_sectors, pnum,
                                             file);
     if (ret < 0) {
         *pnum = 0;
-        return ret;
+        goto out;
     }
 
     if (ret & BDRV_BLOCK_RAW) {
         assert(ret & BDRV_BLOCK_OFFSET_VALID);
-        return bdrv_get_block_status(bs->file->bs, ret >> BDRV_SECTOR_BITS,
-                                     *pnum, pnum, file);
+        ret = bdrv_get_block_status(bs->file->bs, ret >> BDRV_SECTOR_BITS,
+                                    *pnum, pnum, file);
+        goto out;
     }
 
     if (ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO)) {
@@ -1597,6 +1595,8 @@ static int64_t coroutine_fn bdrv_co_get_block_status(BlockDriverState *bs,
         }
     }
 
+out:
+    bdrv_dec_in_flight(bs);
     return ret;
 }
 
@@ -1662,13 +1662,9 @@ int64_t bdrv_get_block_status_above(BlockDriverState *bs,
         /* Fast-path if already in coroutine context */
         bdrv_get_block_status_above_co_entry(&data);
     } else {
-        AioContext *aio_context = bdrv_get_aio_context(bs);
-
         co = qemu_coroutine_create(bdrv_get_block_status_above_co_entry);
         qemu_coroutine_enter(co, &data);
-        while (!data.done) {
-            aio_poll(aio_context, true);
-        }
+        bdrv_drain(bs);
     }
     return data.ret;
 }
@@ -2469,13 +2465,9 @@ int bdrv_flush(BlockDriverState *bs)
         /* Fast-path if already in coroutine context */
         bdrv_flush_co_entry(&rwco);
     } else {
-        AioContext *aio_context = bdrv_get_aio_context(bs);
-
         co = qemu_coroutine_create(bdrv_flush_co_entry);
         qemu_coroutine_enter(co, &rwco);
-        while (rwco.ret == NOT_DONE) {
-            aio_poll(aio_context, true);
-        }
+        bdrv_drain(bs);
     }
 
     return rwco.ret;
@@ -2592,13 +2584,9 @@ int bdrv_discard(BlockDriverState *bs, int64_t sector_num, int nb_sectors)
         /* Fast-path if already in coroutine context */
         bdrv_discard_co_entry(&rwco);
     } else {
-        AioContext *aio_context = bdrv_get_aio_context(bs);
-
         co = qemu_coroutine_create(bdrv_discard_co_entry);
         qemu_coroutine_enter(co, &rwco);
-        while (rwco.ret == NOT_DONE) {
-            aio_poll(aio_context, true);
-        }
+        bdrv_drain(bs);
     }
 
     return rwco.ret;
@@ -2673,11 +2661,8 @@ int bdrv_ioctl(BlockDriverState *bs, unsigned long int req, void *buf)
         bdrv_co_ioctl_entry(&data);
     } else {
         Coroutine *co = qemu_coroutine_create(bdrv_co_ioctl_entry);
-
         qemu_coroutine_enter(co, &data);
-        while (data.ret == -EINPROGRESS) {
-            aio_poll(bdrv_get_aio_context(bs), true);
-        }
+        bdrv_drain(bs);
     }
     return data.ret;
 }
diff --git a/block/qed-table.c b/block/qed-table.c
index 802945f..3a8566f 100644
--- a/block/qed-table.c
+++ b/block/qed-table.c
@@ -173,9 +173,7 @@ int qed_read_l1_table_sync(BDRVQEDState *s)
 
     qed_read_table(s, s->header.l1_table_offset,
                    s->l1_table, qed_sync_cb, &ret);
-    while (ret == -EINPROGRESS) {
-        aio_poll(bdrv_get_aio_context(s->bs), true);
-    }
+    bdrv_drain(s->bs);
 
     return ret;
 }
@@ -194,9 +192,7 @@ int qed_write_l1_table_sync(BDRVQEDState *s, unsigned int index,
     int ret = -EINPROGRESS;
 
     qed_write_l1_table(s, index, n, qed_sync_cb, &ret);
-    while (ret == -EINPROGRESS) {
-        aio_poll(bdrv_get_aio_context(s->bs), true);
-    }
+    bdrv_drain(s->bs);
 
     return ret;
 }
@@ -267,9 +263,7 @@ int qed_read_l2_table_sync(BDRVQEDState *s, QEDRequest *request, uint64_t offset
     int ret = -EINPROGRESS;
 
     qed_read_l2_table(s, request, offset, qed_sync_cb, &ret);
-    while (ret == -EINPROGRESS) {
-        aio_poll(bdrv_get_aio_context(s->bs), true);
-    }
+    bdrv_drain(s->bs);
 
     return ret;
 }
@@ -289,9 +283,7 @@ int qed_write_l2_table_sync(BDRVQEDState *s, QEDRequest *request,
     int ret = -EINPROGRESS;
 
     qed_write_l2_table(s, request, index, n, flush, qed_sync_cb, &ret);
-    while (ret == -EINPROGRESS) {
-        aio_poll(bdrv_get_aio_context(s->bs), true);
-    }
+    bdrv_drain(s->bs);
 
     return ret;
 }
diff --git a/block/qed.c b/block/qed.c
index ebba220..e90792f 100644
--- a/block/qed.c
+++ b/block/qed.c
@@ -352,7 +352,9 @@ static void qed_start_need_check_timer(BDRVQEDState *s)
 static void qed_cancel_need_check_timer(BDRVQEDState *s)
 {
     trace_qed_cancel_need_check_timer(s);
-    timer_del(s->need_check_timer);
+    if (s->need_check_timer) {
+        timer_del(s->need_check_timer);
+    }
 }
 
 static void bdrv_qed_detach_aio_context(BlockDriverState *bs)
diff --git a/tests/qemu-iotests/060 b/tests/qemu-iotests/060
index c81319c..dffe35a 100755
--- a/tests/qemu-iotests/060
+++ b/tests/qemu-iotests/060
@@ -164,8 +164,12 @@ echo "open -o file.driver=blkdebug $TEST_IMG
 break cow_read 0
 aio_write 0k 1k
 wait_break 0
-write 64k 64k
-resume 0" | $QEMU_IO | _filter_qemu_io
+break pwritev_done 1
+aio_write 64k 64k
+wait_break 1
+resume 1
+resume 0
+aio_flush" | $QEMU_IO | _filter_qemu_io
 
 echo
 echo "=== Testing unallocated image header ==="
diff --git a/tests/qemu-iotests/060.out b/tests/qemu-iotests/060.out
index 5d40206..a0a9a11 100644
--- a/tests/qemu-iotests/060.out
+++ b/tests/qemu-iotests/060.out
@@ -105,7 +105,9 @@ discard 65536/65536 bytes at offset 0
 64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
qcow2: Marking image as corrupt: Preventing invalid write on metadata (overlaps with active L2 table); further corruption events will be suppressed
 blkdebug: Suspended request '0'
-write failed: Input/output error
+blkdebug: Suspended request '1'
+blkdebug: Resuming request '1'
+aio_write failed: Input/output error
 blkdebug: Resuming request '0'
 aio_write failed: No medium found
 
-- 
2.5.0





reply via email to

[Prev in Thread] Current Thread [Next in Thread]