From: zhanghailiang
Subject: Re: [Qemu-devel] [PATCH COLO-Frame v10 16/38] COLO: Flush PVM's cached RAM into SVM's memory
Date: Mon, 16 Nov 2015 20:46:22 +0800
User-agent: Mozilla/5.0 (Windows NT 6.1; WOW64; rv:38.0) Gecko/20100101 Thunderbird/38.1.0

On 2015/11/14 0:38, Dr. David Alan Gilbert wrote:
* zhanghailiang (address@hidden) wrote:
While the VM is running, the PVM may dirty some pages; we transfer the PVM's
dirty pages to the SVM and store them into the SVM's RAM cache at the next
checkpoint time. So the content of the SVM's RAM cache is always the same as
the PVM's memory right after a checkpoint.

Instead of flushing the whole RAM cache into the SVM's memory, we do this in a
more efficient way: only flush the pages that were dirtied by the PVM since the
last checkpoint. In this way, we ensure that the SVM's memory is the same as
the PVM's.

Besides, we must make sure the RAM cache is flushed before the device state is
loaded.
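
Conceptually the flush is just a dirty-bitmap walk over the cache. As a
standalone illustration of the idea (not the QEMU code itself; the names,
sizes and bitmap layout below are invented for the sketch):

    /* Illustration of "flush only the dirty pages"; not the QEMU/COLO API. */
    #include <stdint.h>
    #include <string.h>

    #define PAGE_SIZE 4096
    #define NR_PAGES  16

    /* One bit per page, set when the PVM dirtied the page since the last
     * checkpoint, i.e. when the cached copy differs from the SVM's memory. */
    static uint8_t dirty_bitmap[(NR_PAGES + 7) / 8];

    static int test_and_clear_dirty(unsigned int page)
    {
        uint8_t mask = 1u << (page % 8);
        int was_dirty = (dirty_bitmap[page / 8] & mask) != 0;

        dirty_bitmap[page / 8] &= (uint8_t)~mask;
        return was_dirty;
    }

    /* Copy only the dirty pages from the RAM cache into the SVM's memory;
     * clean pages are already identical and are skipped. */
    static void flush_ram_cache(uint8_t *svm_ram, const uint8_t *ram_cache)
    {
        unsigned int page;

        for (page = 0; page < NR_PAGES; page++) {
            if (test_and_clear_dirty(page)) {
                memcpy(svm_ram + (size_t)page * PAGE_SIZE,
                       ram_cache + (size_t)page * PAGE_SIZE,
                       PAGE_SIZE);
            }
        }
    }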

Yes, just a couple of minor comments below; mostly OK.

Signed-off-by: zhanghailiang <address@hidden>
Signed-off-by: Li Zhijian <address@hidden>
Signed-off-by: Gonglei <address@hidden>
---
v10: trace the number of dirty pages that are received.
---
  include/migration/colo.h |  1 +
  migration/colo.c         |  2 --
  migration/ram.c          | 40 ++++++++++++++++++++++++++++++++++++++++
  trace-events             |  1 +
  4 files changed, 42 insertions(+), 2 deletions(-)

diff --git a/include/migration/colo.h b/include/migration/colo.h
index 8edd5f1..be2890b 100644
--- a/include/migration/colo.h
+++ b/include/migration/colo.h
@@ -32,4 +32,5 @@ bool migration_incoming_in_colo_state(void);
  /* ram cache */
  int colo_init_ram_cache(void);
  void colo_release_ram_cache(void);
+void colo_flush_ram_cache(void);
  #endif
diff --git a/migration/colo.c b/migration/colo.c
index 1339774..0efab21 100644
--- a/migration/colo.c
+++ b/migration/colo.c
@@ -381,8 +381,6 @@ void *colo_process_incoming_thread(void *opaque)
          }
          qemu_mutex_unlock_iothread();

-        /* TODO: flush vm state */
-
          ret = colo_ctl_put(mis->to_src_file, COLO_COMMAND_VMSTATE_LOADED, 0);
          if (ret < 0) {
              goto out;
diff --git a/migration/ram.c b/migration/ram.c
index 70879bd..d7e0e37 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -1601,6 +1601,7 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
      int flags = 0, ret = 0;
      static uint64_t seq_iter;
      int len = 0;
+    bool need_flush = false;

      seq_iter++;

@@ -1669,6 +1670,8 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
                  ret = -EINVAL;
                  break;
              }
+
+            need_flush = true;
              ch = qemu_get_byte(f);
              ram_handle_compressed(host, ch, TARGET_PAGE_SIZE);
              break;
@@ -1679,6 +1682,8 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
                  ret = -EINVAL;
                  break;
              }
+
+            need_flush = true;
              qemu_get_buffer(f, host, TARGET_PAGE_SIZE);
              break;
          case RAM_SAVE_FLAG_COMPRESS_PAGE:
@@ -1711,6 +1716,7 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
                  ret = -EINVAL;
                  break;
              }
+            need_flush = true;

You can probably move the 'need_flush' setting into the big 'if' near the top
of the loop in the current version.


Good catch, I will fix it in the next version.
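Something like the following, setting the flag once in the common branch that
looks up 'host' (only a sketch against my understanding of the current
ram_load() shape; the lookup helper may be named differently after your
series):

            if (flags & (RAM_SAVE_FLAG_COMPRESS | RAM_SAVE_FLAG_PAGE |
                         RAM_SAVE_FLAG_COMPRESS_PAGE | RAM_SAVE_FLAG_XBZRLE)) {
                host = host_from_stream_offset(f, addr, flags);
                if (!host) {
                    error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
                    ret = -EINVAL;
                    break;
                }
                /* each of these cases writes a page, so one flush of the
                 * RAM cache at the end of ram_load() is needed */
                need_flush = true;
            }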

              break;
          case RAM_SAVE_FLAG_EOS:
              /* normal exit */
@@ -1730,6 +1736,11 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
      }

      rcu_read_unlock();
+
+    if (!ret  && ram_cache_enable && need_flush) {
+        DPRINTF("Flush ram_cache\n");

Please use a trace_ event here rather than DPRINTF.

Got it.


+        colo_flush_ram_cache();
+    }
      DPRINTF("Completed load of VM with exit code %d seq iteration "
              "%" PRIu64 "\n", ret, seq_iter);
      return ret;
@@ -1799,6 +1810,35 @@ void colo_release_ram_cache(void)
      rcu_read_unlock();
  }

+/*
+ * Flush the content of the RAM cache into the SVM's memory.
+ * Only flush the pages that were dirtied by the PVM, the SVM, or both.
+ */
+void colo_flush_ram_cache(void)
+{
+    RAMBlock *block = NULL;
+    void *dst_host;
+    void *src_host;
+    ram_addr_t  offset = 0;
+
+    trace_colo_flush_ram_cache(migration_dirty_pages);
+    rcu_read_lock();
+    block = QLIST_FIRST_RCU(&ram_list.blocks);
+    while (block) {
+        offset = migration_bitmap_find_and_reset_dirty(block, offset);

You'll need to rework that a little (I split that into
migration_bitmap_find_dirty and migration_bitmap_clear_dirty)


Yes, I have rebased it in my private branch after your post-copy series was merged. ;)
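In my rebased version the loop looks roughly like this (only a sketch; the
exact arguments of the split helpers may not match what finally lands):

        block = QLIST_FIRST_RCU(&ram_list.blocks);
        while (block) {
            offset = migration_bitmap_find_dirty(block, offset);
            if (offset >= block->used_length) {
                offset = 0;
                block = QLIST_NEXT_RCU(block, next);
            } else {
                /* clear the dirty bit we just found, then copy that page
                 * from the cache into the SVM's memory */
                migration_bitmap_clear_dirty(block->offset + offset);
                dst_host = block->host + offset;
                src_host = block->host_cache + offset;
                memcpy(dst_host, src_host, TARGET_PAGE_SIZE);
            }
        }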

Thanks,
zhanghailiang

+        if (offset >= block->used_length) {
+            offset = 0;
+            block = QLIST_NEXT_RCU(block, next);
+        } else {
+            dst_host = block->host + offset;
+            src_host = block->host_cache + offset;
+            memcpy(dst_host, src_host, TARGET_PAGE_SIZE);
+        }
+    }
+    rcu_read_unlock();
+    assert(migration_dirty_pages == 0);
+}
+
  static SaveVMHandlers savevm_ram_handlers = {
      .save_live_setup = ram_save_setup,
      .save_live_iterate = ram_save_iterate,
diff --git a/trace-events b/trace-events
index ee4679c..c98bc13 100644
--- a/trace-events
+++ b/trace-events
@@ -1232,6 +1232,7 @@ qemu_file_fclose(void) ""
  migration_bitmap_sync_start(void) ""
  migration_bitmap_sync_end(uint64_t dirty_pages) "dirty_pages %" PRIu64""
  migration_throttle(void) ""
+colo_flush_ram_cache(uint64_t dirty_pages) "dirty_pages %" PRIu64""

  # hw/display/qxl.c
  disable qxl_interface_set_mm_time(int qid, uint32_t mm_time) "%d %d"
--
1.8.3.1


--
Dr. David Alan Gilbert / address@hidden / Manchester, UK
