qemu-devel
From: Pavel Dovgalyuk
Subject: [Qemu-devel] [RFC PATCH v5 24/24] replay: don't drain/flush bdrv queue while RR is working
Date: Tue, 23 Jan 2018 11:55:33 +0300
User-agent: StGit/0.17.1-dirty

In record/replay mode the bdrv queue is controlled by the replay mechanism,
which does not allow stopping the VM or saving/loading snapshots while the
bdrv queue is non-empty. Draining and flushing the queue is therefore
unnecessary (and may cause deadlocks in replay mode).
This patch disables bdrv_drain_all and bdrv_flush_all in replay mode.

Signed-off-by: Pavel Dovgalyuk <address@hidden>
---
 block/io.c |   22 ++++++++++++++++++++++
 cpus.c     |    2 --
 2 files changed, 22 insertions(+), 2 deletions(-)

diff --git a/block/io.c b/block/io.c
index 7ea4023..e773e68 100644
--- a/block/io.c
+++ b/block/io.c
@@ -31,6 +31,7 @@
 #include "qemu/cutils.h"
 #include "qapi/error.h"
 #include "qemu/error-report.h"
+#include "sysemu/replay.h"
 
 #define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */
 
@@ -407,6 +408,13 @@ void bdrv_drain_all_begin(void)
     BdrvNextIterator it;
     GSList *aio_ctxs = NULL, *ctx;
 
+    /* bdrv queue is managed by record/replay,
+       waiting for finishing the I/O requests may
+       be infinite */
+    if (replay_events_enabled()) {
+        return;
+    }
+
     /* BDRV_POLL_WHILE() for a node can only be called from its own I/O thread
      * or the main loop AioContext. We potentially use BDRV_POLL_WHILE() on
      * nodes in several different AioContexts, so make sure we're in the main
@@ -458,6 +466,13 @@ void bdrv_drain_all_end(void)
     BlockDriverState *bs;
     BdrvNextIterator it;
 
+    /* bdrv queue is managed by record/replay,
+       waiting for finishing the I/O requests may
+       be endless */
+    if (replay_events_enabled()) {
+        return;
+    }
+
     for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
         AioContext *aio_context = bdrv_get_aio_context(bs);
 
@@ -1839,6 +1854,13 @@ int bdrv_flush_all(void)
     BlockDriverState *bs = NULL;
     int result = 0;
 
+    /* bdrv queue is managed by record/replay,
+       creating new flush request for stopping
+       the VM may break the determinism */
+    if (replay_events_enabled()) {
+        return result;
+    }
+
     for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
         AioContext *aio_context = bdrv_get_aio_context(bs);
         int ret;
diff --git a/cpus.c b/cpus.c
index a1f8808..e84f041 100644
--- a/cpus.c
+++ b/cpus.c
@@ -1005,7 +1005,6 @@ static int do_vm_stop(RunState state)
     }
 
     bdrv_drain_all();
-    replay_disable_events();
     ret = bdrv_flush_all();
 
     return ret;
@@ -1988,7 +1987,6 @@ int vm_prepare_start(void)
         qapi_event_send_stop(&error_abort);
         res = -1;
     } else {
-        replay_enable_events();
         cpu_enable_ticks();
         runstate_set(RUN_STATE_RUNNING);
         vm_state_notify(1, RUN_STATE_RUNNING);
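
For readers skimming the diff, here is a standalone sketch of the guard pattern
the patch introduces: each global drain/flush entry point returns early while
record/replay events are enabled. Everything below (the replay_active flag, the
in_flight counter, the *_sketch function names) is a made-up stand-in for
illustration, not QEMU code; only replay_events_enabled() and the bdrv_* entry
points touched in the diff above are real.

/* Standalone illustration of the early-return guard; all names here are
 * simplified stand-ins, not QEMU internals. */
#include <stdbool.h>
#include <stdio.h>

static bool replay_active = true;   /* stand-in for replay_events_enabled() */
static int  in_flight     = 3;      /* requests still owned by the replay queue */

static void drain_all_begin_sketch(void)
{
    if (replay_active) {
        /* Replay will complete these requests deterministically later;
         * polling here for in_flight to reach zero could spin forever. */
        return;
    }
    while (in_flight > 0) {
        in_flight--;                /* normal path: wait until the queue drains */
    }
}

static int flush_all_sketch(void)
{
    if (replay_active) {
        /* Issuing new flush requests at vm_stop time would add I/O that was
         * not present during recording and so break determinism. */
        return 0;
    }
    return 0;                       /* normal path would flush every block device */
}

int main(void)
{
    drain_all_begin_sketch();
    printf("requests left for replay: %d, flush result: %d\n",
           in_flight, flush_all_sketch());
    return 0;
}

The early return (rather than, say, an assertion) matches how the patch keeps
vm_stop usable under record/replay: do_vm_stop() still calls bdrv_drain_all()
and bdrv_flush_all(), they simply become no-ops while replay owns the queue.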



