From: Yoshiaki Tamura
Subject: [Qemu-devel] [PATCH 6/6] qemu-kvm: Use cpu_physical_memory_get_dirty_range() to check multiple dirty pages.
Date: Tue, 16 Mar 2010 19:53:59 +0900

Modify ram_save_block() and ram_save_remaining() to use
cpu_physical_memory_get_dirty_range(), which checks multiple dirty and
non-dirty pages at once, instead of calling cpu_physical_memory_get_dirty()
once per page.

Signed-off-by: Yoshiaki Tamura <address@hidden>
Signed-off-by: OHMURA Kei <address@hidden>
---
 vl.c |   55 +++++++++++++++++++++++++++++++++++--------------------
 1 files changed, 35 insertions(+), 20 deletions(-)
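
The helper this patch calls, cpu_physical_memory_get_dirty_range(), is added
earlier in the series; its contract can be inferred from the call sites in
the diff below: scan at most HOST_LONG_BITS pages starting at the given
address, return the number of dirty pages found with their addresses in the
supplied array, and, when none are dirty, return 0 with the byte length of
the clean stretch in element 0 so the caller can skip it in one step. The
following self-contained sketch illustrates one way such a word-at-a-time
bitmap scan can work; the names get_dirty_range and dirty_bitmap, the page
size, and all other details are illustrative assumptions, not QEMU's actual
implementation.

#include <stdint.h>
#include <stdio.h>

#define TARGET_PAGE_BITS 12                      /* assumed 4 KiB pages */
#define TARGET_PAGE_SIZE (1UL << TARGET_PAGE_BITS)
#define HOST_LONG_BITS   (8 * (int)sizeof(unsigned long))

typedef uintptr_t ram_addr_t;

/* One bit per RAM page; a set bit marks the page dirty. */
static unsigned long dirty_bitmap[64];

/*
 * Hypothetical stand-in for cpu_physical_memory_get_dirty_range():
 * examine at most one bitmap word (HOST_LONG_BITS pages) starting at
 * `start`, store the addresses of the dirty pages in dirty_addrs[] and
 * return their count.  If no page in the word is dirty, store the byte
 * length of the clean stretch in dirty_addrs[0] and return 0 so the
 * caller can advance past all of it in one step.
 */
static int get_dirty_range(ram_addr_t start, ram_addr_t end,
                           ram_addr_t *dirty_addrs, int len)
{
    ram_addr_t page = start >> TARGET_PAGE_BITS;
    int offset = page % HOST_LONG_BITS;
    int remaining = HOST_LONG_BITS - offset;
    unsigned long word = dirty_bitmap[page / HOST_LONG_BITS] >> offset;
    ram_addr_t addr = start;
    int found = 0, i;

    if (word == 0) {
        /* The rest of this word is clean: report it as one span. */
        ram_addr_t span = (ram_addr_t)remaining * TARGET_PAGE_SIZE;
        if (start + span > end) {
            span = end - start;
        }
        dirty_addrs[0] = span;
        return 0;
    }
    for (i = 0; i < remaining && addr < end && found < len;
         i++, addr += TARGET_PAGE_SIZE) {
        if (word & (1UL << i)) {
            dirty_addrs[found++] = addr;
        }
    }
    if (!found) {
        dirty_addrs[0] = addr - start;   /* clean bytes before `end` */
    }
    return found;
}

int main(void)
{
    ram_addr_t dirty[HOST_LONG_BITS];
    int i, n;

    dirty_bitmap[0] = 0x5;               /* mark pages 0 and 2 dirty */
    n = get_dirty_range(0, 64 * TARGET_PAGE_SIZE, dirty, HOST_LONG_BITS);
    for (i = 0; i < n; i++) {
        printf("dirty page at 0x%lx\n", (unsigned long)dirty[i]);
    }
    return 0;
}

Because a single bitmap word covers HOST_LONG_BITS pages, a clean region is
dismissed with one compare instead of HOST_LONG_BITS separate per-page
checks, which is the saving this patch exploits in ram_save_block() and
ram_save_remaining().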

diff --git a/vl.c b/vl.c
index 6e35cc6..e9ad7c9 100644
--- a/vl.c
+++ b/vl.c
@@ -2779,7 +2779,8 @@ static int ram_save_block(QEMUFile *f)
     static ram_addr_t current_addr = 0;
     ram_addr_t saved_addr = current_addr;
     ram_addr_t addr = 0;
-    int found = 0;
+    ram_addr_t dirty_rams[HOST_LONG_BITS];
+    int i, found = 0;
 
     while (addr < last_ram_offset) {
         if (kvm_enabled() && current_addr == 0) {
@@ -2791,28 +2792,35 @@ static int ram_save_block(QEMUFile *f)
                 return 0;
             }
         }
-        if (cpu_physical_memory_get_dirty(current_addr, MIGRATION_DIRTY_FLAG)) {
+        if ((found = cpu_physical_memory_get_dirty_range(
+                 current_addr, last_ram_offset, dirty_rams, HOST_LONG_BITS,
+                 MIGRATION_DIRTY_FLAG))) {
             uint8_t *p;
 
-            cpu_physical_memory_reset_dirty(current_addr,
-                                            current_addr + TARGET_PAGE_SIZE,
-                                            MIGRATION_DIRTY_FLAG);
+            for (i = 0; i < found; i++) {
+                ram_addr_t page_addr = dirty_rams[i];
+                cpu_physical_memory_reset_dirty(page_addr,
+                                                page_addr + TARGET_PAGE_SIZE,
+                                                MIGRATION_DIRTY_FLAG);
 
-            p = qemu_get_ram_ptr(current_addr);
+                p = qemu_get_ram_ptr(page_addr);
 
-            if (is_dup_page(p, *p)) {
-                qemu_put_be64(f, current_addr | RAM_SAVE_FLAG_COMPRESS);
-                qemu_put_byte(f, *p);
-            } else {
-                qemu_put_be64(f, current_addr | RAM_SAVE_FLAG_PAGE);
-                qemu_put_buffer(f, p, TARGET_PAGE_SIZE);
+                if (is_dup_page(p, *p)) {
+                    qemu_put_be64(f, page_addr |
+                                  RAM_SAVE_FLAG_COMPRESS);
+                    qemu_put_byte(f, *p);
+                } else {
+                    qemu_put_be64(f, page_addr |
+                                  RAM_SAVE_FLAG_PAGE);
+                    qemu_put_buffer(f, p, TARGET_PAGE_SIZE);
+                }
             }
-
-            found = 1;
+
             break;
+        } else {
+            addr += dirty_rams[0];
+            current_addr = (saved_addr + addr) % last_ram_offset;
         }
-        addr += TARGET_PAGE_SIZE;
-        current_addr = (saved_addr + addr) % last_ram_offset;
     }
 
     return found;
@@ -2822,12 +2830,19 @@ static uint64_t bytes_transferred;
 
 static ram_addr_t ram_save_remaining(void)
 {
-    ram_addr_t addr;
+    ram_addr_t addr = 0;
     ram_addr_t count = 0;
+    ram_addr_t dirty_rams[HOST_LONG_BITS];
+    int found = 0;
 
-    for (addr = 0; addr < last_ram_offset; addr += TARGET_PAGE_SIZE) {
-        if (cpu_physical_memory_get_dirty(addr, MIGRATION_DIRTY_FLAG))
-            count++;
+    while (addr < last_ram_offset) {
+        if ((found = cpu_physical_memory_get_dirty_range(addr, last_ram_offset,
+            dirty_rams, HOST_LONG_BITS, MIGRATION_DIRTY_FLAG))) {
+            count += found;
+            addr = dirty_rams[found - 1] + TARGET_PAGE_SIZE;
+        } else {
+            addr += dirty_rams[0];
+        }
     }
 
     return count;
-- 
1.7.0.31.g1df487