qemu-devel
[Top][All Lists]
Advanced

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Qemu-devel] [PATCH 4/5] Separate migration dirty bitmap


From: Umesh Deshpande
Subject: [Qemu-devel] [PATCH 4/5] Separate migration dirty bitmap
Date: Sat, 27 Aug 2011 14:09:47 -0400

This patch creates a migration bitmap, which is periodically kept in sync with
the qemu bitmap. A separate copy of the dirty bitmap for the migration avoids
concurrent access to the qemu bitmap from the iothread and the migration thread.

Signed-off-by: Umesh Deshpande <address@hidden>
---
 arch_init.c |   17 ++++++++---------
 cpu-all.h   |   37 +++++++++++++++++++++++++++++++++++++
 exec.c      |   57 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 102 insertions(+), 9 deletions(-)

diff --git a/arch_init.c b/arch_init.c
index 9d02270..b5b852b 100644
--- a/arch_init.c
+++ b/arch_init.c
@@ -124,13 +124,13 @@ static int ram_save_block(QEMUFile *f)
     current_addr = block->offset + offset;
 
     do {
-        if (cpu_physical_memory_get_dirty(current_addr, MIGRATION_DIRTY_FLAG)) {
+        if (migration_bitmap_get_dirty(current_addr, MIGRATION_DIRTY_FLAG)) {
             uint8_t *p;
             int cont = (block == last_block) ? RAM_SAVE_FLAG_CONTINUE : 0;
 
-            cpu_physical_memory_reset_dirty(current_addr,
-                                            current_addr + TARGET_PAGE_SIZE,
-                                            MIGRATION_DIRTY_FLAG);
+            migration_bitmap_reset_dirty(current_addr,
+                                         current_addr + TARGET_PAGE_SIZE,
+                                         MIGRATION_DIRTY_FLAG);
 
             p = block->host + offset;
 
@@ -187,7 +187,7 @@ static ram_addr_t ram_save_remaining(void)
         ram_addr_t addr;
         for (addr = block->offset; addr < block->offset + block->length;
              addr += TARGET_PAGE_SIZE) {
-            if (cpu_physical_memory_get_dirty(addr, MIGRATION_DIRTY_FLAG)) {
+            if (migration_bitmap_get_dirty(addr, MIGRATION_DIRTY_FLAG)) {
                 count++;
             }
         }
@@ -267,6 +267,8 @@ int ram_save_live(Monitor *mon, QEMUFile *f, int stage, void *opaque)
         return 0;
     }
 
+    sync_migration_bitmap(0, TARGET_PHYS_ADDR_MAX);
+
     if (stage == 1) {
         RAMBlock *block;
         bytes_transferred = 0;
@@ -279,10 +281,7 @@ int ram_save_live(Monitor *mon, QEMUFile *f, int stage, void *opaque)
         QLIST_FOREACH(block, &ram_list.blocks, next) {
             for (addr = block->offset; addr < block->offset + block->length;
                  addr += TARGET_PAGE_SIZE) {
-                if (!cpu_physical_memory_get_dirty(addr,
-                                                   MIGRATION_DIRTY_FLAG)) {
-                    cpu_physical_memory_set_dirty(addr);
-                }
+                migration_bitmap_set_dirty(addr);
             }
         }
 
diff --git a/cpu-all.h b/cpu-all.h
index b85483f..8181f8b 100644
--- a/cpu-all.h
+++ b/cpu-all.h
@@ -935,6 +935,7 @@ typedef struct RAMBlock {
 typedef struct RAMList {
     QemuMutex mutex;    /* Protects RAM block list */
     uint8_t *phys_dirty;
+    uint8_t *migration_bitmap; /* Dedicated bitmap for migration thread */
     uint32_t version;   /* To detect ram block addition/removal */
     QLIST_HEAD(ram, RAMBlock) blocks;
     QLIST_HEAD(, RAMBlock) blocks_mru;
@@ -1009,8 +1010,44 @@ static inline void cpu_physical_memory_mask_dirty_range(ram_addr_t start,
     }
 }
 
+
+
 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                      int dirty_flags);
+
+static inline int migration_bitmap_get_dirty(ram_addr_t addr,
+                                             int dirty_flags)
+{
+    return ram_list.migration_bitmap[addr >> TARGET_PAGE_BITS] & dirty_flags;
+}
+
+static inline void migration_bitmap_set_dirty(ram_addr_t addr)
+{
+    ram_list.migration_bitmap[addr >> TARGET_PAGE_BITS] = 0xff;
+}
+
+static inline void migration_bitmap_mask_dirty_range(ram_addr_t start,
+                                                     int length,
+                                                     int dirty_flags)
+{
+    int i, mask, len;
+    uint8_t *p;
+
+    len = length >> TARGET_PAGE_BITS;
+    mask = ~dirty_flags;
+    p = ram_list.migration_bitmap + (start >> TARGET_PAGE_BITS);
+    for (i = 0; i < len; i++) {
+        p[i] &= mask;
+    }
+}
+
+
+void migration_bitmap_reset_dirty(ram_addr_t start,
+                                  ram_addr_t end,
+                                  int dirty_flags);
+
+void sync_migration_bitmap(ram_addr_t start, ram_addr_t end);
+
 void cpu_tlb_update_dirty(CPUState *env);
 
 int cpu_physical_memory_set_dirty_tracking(int enable);
diff --git a/exec.c b/exec.c
index 7627483..8dfbdbc 100644
--- a/exec.c
+++ b/exec.c
@@ -2111,6 +2111,10 @@ void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
         abort();
     }
 
+    if (kvm_enabled()) {
+        return;
+    }
+
     for(env = first_cpu; env != NULL; env = env->next_cpu) {
         int mmu_idx;
         for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
@@ -2119,8 +2123,54 @@ void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                       start1, length);
         }
     }
+
+}
+
+void migration_bitmap_reset_dirty(ram_addr_t start, ram_addr_t end,
+                                  int dirty_flags)
+{
+    unsigned long length;
+
+    start &= TARGET_PAGE_MASK;
+    end = TARGET_PAGE_ALIGN(end);
+
+    length = end - start;
+    if (length == 0) {
+        return;
+    }
+
+    migration_bitmap_mask_dirty_range(start, length, dirty_flags);
+}
+
+/* Synchronizes the migration bitmap with the qemu dirty bitmap.
+ * Must be called with the iothread mutex held. */
+
+void sync_migration_bitmap(ram_addr_t start, ram_addr_t end)
+{
+    unsigned long length, len, i;
+    ram_addr_t addr;
+    start &= TARGET_PAGE_MASK;
+    end = TARGET_PAGE_ALIGN(end);
+
+    length = end - start;
+    if (length == 0) {
+        return;
+    }
+
+    len = length >> TARGET_PAGE_BITS;
+    for (i = 0; i < len; i++) {
+        addr = i << TARGET_PAGE_BITS;
+        if (cpu_physical_memory_get_dirty(addr, MIGRATION_DIRTY_FLAG)) {
+            migration_bitmap_set_dirty(addr);
+            cpu_physical_memory_reset_dirty(addr, addr + TARGET_PAGE_SIZE,
+                                            MIGRATION_DIRTY_FLAG);
+        }
+    }
+
 }
 
+
+
 int cpu_physical_memory_set_dirty_tracking(int enable)
 {
     int ret = 0;
@@ -2999,6 +3049,13 @@ ram_addr_t qemu_ram_alloc_from_ptr(DeviceState *dev, const char *name,
     memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
            0xff, size >> TARGET_PAGE_BITS);
 
+    ram_list.migration_bitmap = qemu_realloc(ram_list.phys_dirty,
+                                             last_ram_offset() >>
+                                             TARGET_PAGE_BITS);
+
+    memset(ram_list.migration_bitmap + (new_block->offset >> TARGET_PAGE_BITS),
+           0xff, size >> TARGET_PAGE_BITS);
+
     qemu_mutex_unlock_migrate_ram();
 
     if (kvm_enabled())
-- 
1.7.4.1




reply via email to

[Prev in Thread] Current Thread [Next in Thread]