From: Uri Lublin
Subject: [Qemu-devel] [PATCH] ram_save_live: add a no-progress convergence rule
Date: Tue, 19 May 2009 14:09:07 +0300

Currently the live part (section QEMU_VM_SECTION_PART) of
ram_save_live has only one convergence rule: the number of
dirty pages must drop below a threshold.

When the guest keeps dirtying more memory pages than the threshold
(e.g. playing a movie, copying files, or sending/receiving many
packets), it may take a very long time to converge under this rule.

This patch (re)introduces a no-progress convergence rule, which
limits the number of times the migration process makes no progress
(or even regresses) with regard to the number of dirty pages.
"No progress" means that more pages became dirty than were
transferred to the destination during the last transfer.
This rule applies only after the first round (in which most
memory pages are transferred).

This patch also raises the dirty-page threshold of the first
convergence rule from 10 to 50 pages.
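
For illustration only, here is a condensed, standalone sketch of the
convergence check described above. It is not part of the patch; the
identifiers mirror those in the diff below, but the surrounding state
handling is simplified.

/* Illustrative sketch only -- simplified from the patch below. */
#include <stdint.h>

typedef uint64_t ram_addr_t;                    /* stand-in for QEMU's ram_addr_t  */

static ram_addr_t ram_save_threshold = 50;      /* rule 1: few dirty pages left    */
static unsigned ram_save_no_progress_max = 10;  /* rule 2: too many bad iterations */
static unsigned ram_save_no_progress;
static ram_addr_t ram_save_rounds;              /* full passes over guest RAM      */

/* 'count' is the number of pages still dirty, i.e. ram_save_remaining(). */
static int ram_save_is_converged(ram_addr_t count)
{
    static ram_addr_t last_count;

    /* After the first full round, a growing dirty-page count means the
     * guest dirties memory faster than it is transferred: no progress. */
    if (count > last_count && ram_save_rounds > 0)
        ram_save_no_progress++;
    last_count = count;

    return count < ram_save_threshold ||
           ram_save_no_progress > ram_save_no_progress_max;
}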

Signed-off-by: Uri Lublin <address@hidden>
---
 vl.c |   25 +++++++++++++++++++++++--
 1 files changed, 23 insertions(+), 2 deletions(-)

diff --git a/vl.c b/vl.c
index 40b1d8b..5f145a0 100644
--- a/vl.c
+++ b/vl.c
@@ -3181,6 +3181,11 @@ static void ram_decompress_close(RamDecompressState *s)
 #define RAM_SAVE_FLAG_PAGE     0x08
 #define RAM_SAVE_FLAG_EOS      0x10
 
+static ram_addr_t ram_save_threshold = 50;
+static unsigned ram_save_no_progress_max = 10;
+static unsigned ram_save_no_progress = 0;
+static ram_addr_t ram_save_rounds = 0;
+
 static int is_dup_page(uint8_t *page, uint8_t ch)
 {
     uint32_t val = ch << 24 | ch << 16 | ch << 8 | ch;
@@ -3225,12 +3230,13 @@ static int ram_save_block(QEMUFile *f)
         }
         addr += TARGET_PAGE_SIZE;
         current_addr = (saved_addr + addr) % last_ram_offset;
+        if (current_addr == 0)
+            ram_save_rounds++;
     }
 
     return found;
 }
 
-static ram_addr_t ram_save_threshold = 10;
 
 static ram_addr_t ram_save_remaining(void)
 {
@@ -3245,11 +3251,26 @@ static ram_addr_t ram_save_remaining(void)
     return count;
 }
 
+static int ram_save_is_converged(void)
+{
+    const ram_addr_t count = ram_save_remaining();
+    static ram_addr_t last_count = 0;
+
+    if ((count > last_count) && (ram_save_rounds > 0))
+        ram_save_no_progress++;
+    last_count = count;
+
+    return ((count < ram_save_threshold) ||
+            (ram_save_no_progress > ram_save_no_progress_max));
+}
+
 static int ram_save_live(QEMUFile *f, int stage, void *opaque)
 {
     ram_addr_t addr;
 
     if (stage == 1) {
+        ram_save_rounds = 0;
+        ram_save_no_progress = 0;
         /* Make sure all dirty bits are set */
         for (addr = 0; addr < last_ram_offset; addr += TARGET_PAGE_SIZE) {
             if (!cpu_physical_memory_get_dirty(addr, MIGRATION_DIRTY_FLAG))
@@ -3281,7 +3302,7 @@ static int ram_save_live(QEMUFile *f, int stage, void *opaque)
 
     qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
 
-    return (stage == 2) && (ram_save_remaining() < ram_save_threshold);
+    return (stage == 2) && ram_save_is_converged();
 }
 
 static int ram_load_dead(QEMUFile *f, void *opaque)
-- 
1.6.0.6