From: zhanghailiang
Subject: [Qemu-devel] [PATCH COLO-Frame v8 15/34] COLO RAM: Flush cached RAM into SVM's memory
Date: Wed, 29 Jul 2015 16:45:25 +0800

While the VM is running, the PVM and the SVM may each dirty some pages. At the
next checkpoint we transfer the PVM's dirty pages to the SVM and store them in
the SVM's RAM cache, so after every checkpoint the content of the SVM's RAM
cache is the same as the PVM's memory.

Instead of flushing the entire content of the SVM's RAM cache into the SVM's
memory, we do this more efficiently: we flush only the pages that have been
dirtied by the PVM or the SVM since the last checkpoint. This is sufficient to
make the SVM's memory identical to the PVM's.

Besides, we must make sure the RAM cache is flushed before the device state is
loaded.
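
To make the idea concrete, the flush reduces to the following standalone
sketch (illustrative only: PAGE_SIZE, NR_PAGES and the two boolean arrays are
invented for this example; the real code below walks QEMU's per-RAMBlock
migration and host dirty bitmaps instead):

    /*
     * Sketch, not QEMU code: copy just the pages that either side has
     * dirtied since the last checkpoint, then clear their dirty marks.
     */
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    #define PAGE_SIZE 4096
    #define NR_PAGES  1024

    static bool pvm_dirty[NR_PAGES]; /* updated in the RAM cache by the PVM */
    static bool svm_dirty[NR_PAGES]; /* dirtied by the SVM while it ran */

    static void flush_ram_cache(uint8_t *svm_ram, const uint8_t *ram_cache)
    {
        size_t i;

        for (i = 0; i < NR_PAGES; i++) {
            /* A page needs flushing if either side touched it. */
            if (pvm_dirty[i] || svm_dirty[i]) {
                memcpy(svm_ram + i * PAGE_SIZE,
                       ram_cache + i * PAGE_SIZE, PAGE_SIZE);
                pvm_dirty[i] = svm_dirty[i] = false;
            }
        }
    }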

Signed-off-by: zhanghailiang <address@hidden>
Signed-off-by: Li Zhijian <address@hidden>
Signed-off-by: Yang Hongyang <address@hidden>
Signed-off-by: Gonglei <address@hidden>
---
 include/migration/colo.h |  1 +
 migration/colo.c         |  2 --
 migration/ram.c          | 87 ++++++++++++++++++++++++++++++++++++++++++++++++
 trace-events             |  1 +
 4 files changed, 89 insertions(+), 2 deletions(-)

diff --git a/include/migration/colo.h b/include/migration/colo.h
index 3b1eff9..79b5381 100644
--- a/include/migration/colo.h
+++ b/include/migration/colo.h
@@ -32,5 +32,6 @@ void *colo_process_incoming_checkpoints(void *opaque);
 bool migration_incoming_in_colo_state(void);
 /* ram cache */
 int create_and_init_ram_cache(void);
+void colo_flush_ram_cache(void);
 void release_ram_cache(void);
 #endif
diff --git a/migration/colo.c b/migration/colo.c
index e4e1671..bcbe748 100644
--- a/migration/colo.c
+++ b/migration/colo.c
@@ -429,8 +429,6 @@ void *colo_process_incoming_checkpoints(void *opaque)
         }
         qemu_mutex_unlock_iothread();
 
-        /* TODO: flush vm state */
-
         ret = colo_ctl_put(ctl, COLO_CHECKPOINT_LOADED);
         if (ret < 0) {
             goto out;
diff --git a/migration/ram.c b/migration/ram.c
index a0f6348..1111893 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -1522,6 +1522,7 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
     int flags = 0, ret = 0;
     static uint64_t seq_iter;
     int len = 0;
+    bool need_flush = false;
 
     seq_iter++;
 
@@ -1590,6 +1591,8 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
                 ret = -EINVAL;
                 break;
             }
+
+            need_flush = true;
             ch = qemu_get_byte(f);
             ram_handle_compressed(host, ch, TARGET_PAGE_SIZE);
             break;
@@ -1600,6 +1603,8 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
                 ret = -EINVAL;
                 break;
             }
+
+            need_flush = true;
             qemu_get_buffer(f, host, TARGET_PAGE_SIZE);
             break;
         case RAM_SAVE_FLAG_COMPRESS_PAGE:
@@ -1632,6 +1637,7 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
                 ret = -EINVAL;
                 break;
             }
+            need_flush = true;
             break;
         case RAM_SAVE_FLAG_EOS:
             /* normal exit */
@@ -1651,6 +1657,11 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
     }
 
     rcu_read_unlock();
+
+    if (!ret && ram_cache_enable && need_flush) {
+        DPRINTF("Flush ram_cache\n");
+        colo_flush_ram_cache();
+    }
     DPRINTF("Completed load of VM with exit code %d seq iteration "
             "%" PRIu64 "\n", ret, seq_iter);
     return ret;
@@ -1736,6 +1747,82 @@ static void *memory_region_get_ram_cache_ptr(MemoryRegion *mr, RAMBlock *block)
     return block->host_cache + (addr - block->offset);
 }
 
+/* FIXME: should this helper be merged with
+ * migration_bitmap_find_and_reset_dirty?
+ */
+static inline
+ram_addr_t host_bitmap_find_and_reset_dirty(MemoryRegion *mr,
+                                            ram_addr_t start)
+{
+    unsigned long base = mr->ram_addr >> TARGET_PAGE_BITS;
+    unsigned long nr = base + (start >> TARGET_PAGE_BITS);
+    uint64_t mr_size = TARGET_PAGE_ALIGN(memory_region_size(mr));
+    unsigned long size = base + (mr_size >> TARGET_PAGE_BITS);
+
+    unsigned long next;
+
+    next = find_next_bit(ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION],
+                         size, nr);
+    if (next < size) {
+        clear_bit(next, ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION]);
+    }
+    return (next - base) << TARGET_PAGE_BITS;
+}
+
+/*
+ * Flush the content of the RAM cache into the SVM's memory.
+ * Only flush the pages that have been dirtied by the PVM, the SVM, or both.
+ * TODO: optional walk optimization:
+ *   1. bitmap = host_bitmap | migration_bitmap
+ *   2. walk bitmap and find offset
+ *   3. memcpy
+ */
+void colo_flush_ram_cache(void)
+{
+    RAMBlock *block = NULL;
+    void *dst_host;
+    void *src_host;
+    ram_addr_t cache_offset = 0, host_offset = 0, offset = 0;
+    int64_t host_dirty = 0, remote_dirty = 0, both_dirty = 0;
+
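+    /*
+     * Sync the dirty log first, so that pages the SVM dirtied while it
+     * was running are visible in the host bitmap walked below.
+     */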
+    address_space_sync_dirty_bitmap(&address_space_memory);
+    rcu_read_lock();
+    block = QLIST_FIRST_RCU(&ram_list.blocks);
+    while (block) {
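+        /*
+         * offset is the page we copied last (0 when entering a block);
+         * re-scan only the bitmap(s) whose cursor still points at it.
+         */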
+        if (offset == cache_offset) {
+            cache_offset =
+                migration_bitmap_find_and_reset_dirty(block->mr, cache_offset);
+        }
+        if (offset == host_offset) {
+            host_offset =
+                host_bitmap_find_and_reset_dirty(block->mr, host_offset);
+        }
+
+        if (cache_offset >= block->used_length &&
+            host_offset >= block->used_length) {
+            offset = host_offset = cache_offset = 0;
+            block = QLIST_NEXT_RCU(block, next);
+        } else {
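+            /*
+             * Flush the lower of the two offsets; a page dirtied by both
+             * sides is copied (and counted in both_dirty) only once.
+             */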
+            if (host_offset <= cache_offset) {
+                offset = host_offset;
+                host_dirty++;
+                both_dirty += (host_offset == cache_offset);
+            } else {
+                offset = cache_offset;
+                remote_dirty++;
+            }
+
+            dst_host = memory_region_get_ram_ptr(block->mr) + offset;
+            src_host = memory_region_get_ram_cache_ptr(block->mr, block)
+                       + offset;
+            memcpy(dst_host, src_host, TARGET_PAGE_SIZE);
+        }
+    }
+    rcu_read_unlock();
+    assert(migration_dirty_pages == 0);
+    trace_colo_flush_ram_cache(host_dirty, remote_dirty, both_dirty);
+}
+
 static SaveVMHandlers savevm_ram_handlers = {
     .save_live_setup = ram_save_setup,
     .save_live_iterate = ram_save_iterate,
diff --git a/trace-events b/trace-events
index 4487633..86d4c49 100644
--- a/trace-events
+++ b/trace-events
@@ -1216,6 +1216,7 @@ qemu_file_fclose(void) ""
 migration_bitmap_sync_start(void) ""
 migration_bitmap_sync_end(uint64_t dirty_pages) "dirty_pages %" PRIu64""
 migration_throttle(void) ""
+colo_flush_ram_cache(int64_t host_dirty, int64_t remote_dirty, int64_t both_dirty) "secondary vm dirty pages:%" PRId64" primary vm dirty pages: %"PRId64" both dirty:%"PRId64
 
 # hw/display/qxl.c
 disable qxl_interface_set_mm_time(int qid, uint32_t mm_time) "%d %d"
-- 
1.8.3.1