[Qemu-devel] [PATCH 05/59] ram: Add dirty_rate_high_cnt to RAMState


From: Juan Quintela
Subject: [Qemu-devel] [PATCH 05/59] ram: Add dirty_rate_high_cnt to RAMState
Date: Wed, 19 Apr 2017 22:58:29 +0200

Move dirty_rate_high_cnt from a file-scope global into RAMState. We need to
add a RAMState parameter to several functions to make this work.
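[Editor's note: the following is a minimal standalone sketch of the pattern this
patch applies, not QEMU code; the names bitmap_sync and dirty_rate_too_high are
illustrative stand-ins for migration_bitmap_sync() and its throttle condition.]

/* A file-scope counter becomes a field of a state struct, and the struct
 * is passed explicitly to the functions that touch it. */
#include <stdio.h>

struct RAMState {
    /* How many times we have found too many dirty pages in a row */
    int dirty_rate_high_cnt;
};

/* Hypothetical stand-in for migration_bitmap_sync(): the counter is now
 * reached through the RAMState parameter instead of a global. */
static void bitmap_sync(struct RAMState *rs, int dirty_rate_too_high)
{
    if (dirty_rate_too_high && rs->dirty_rate_high_cnt++ >= 2) {
        printf("throttling guest after high-rate sync\n");
        rs->dirty_rate_high_cnt = 0;
    }
}

int main(void)
{
    struct RAMState rs = { .dirty_rate_high_cnt = 0 };

    /* Three "too dirty" rounds in a row trigger the throttle path */
    for (int i = 0; i < 4; i++) {
        bitmap_sync(&rs, 1);
    }
    return 0;
}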

Signed-off-by: Juan Quintela <address@hidden>
Reviewed-by: Dr. David Alan Gilbert <address@hidden>
Reviewed-by: Peter Xu <address@hidden>
---
 migration/ram.c | 21 +++++++++++----------
 1 file changed, 11 insertions(+), 10 deletions(-)

diff --git a/migration/ram.c b/migration/ram.c
index 3eb4430..a59140b 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -45,8 +45,6 @@
 #include "qemu/rcu_queue.h"
 #include "migration/colo.h"
 
-static int dirty_rate_high_cnt;
-
 static uint64_t bitmap_sync_count;
 
 /***********************************************************/
@@ -154,6 +152,8 @@ struct RAMState {
     uint32_t last_version;
     /* We are in the first round */
     bool ram_bulk_stage;
+    /* How many times we have dirty too many pages */
+    int dirty_rate_high_cnt;
 };
 typedef struct RAMState RAMState;
 
@@ -651,7 +651,7 @@ uint64_t ram_pagesize_summary(void)
     return summary;
 }
 
-static void migration_bitmap_sync(void)
+static void migration_bitmap_sync(RAMState *rs)
 {
     RAMBlock *block;
     MigrationState *s = migrate_get_current();
@@ -696,9 +696,9 @@ static void migration_bitmap_sync(void)
             if (s->dirty_pages_rate &&
                (num_dirty_pages_period * TARGET_PAGE_SIZE >
                    (bytes_xfer_now - bytes_xfer_prev)/2) &&
-               (dirty_rate_high_cnt++ >= 2)) {
+               (rs->dirty_rate_high_cnt++ >= 2)) {
                     trace_migration_throttle();
-                    dirty_rate_high_cnt = 0;
+                    rs->dirty_rate_high_cnt = 0;
                     mig_throttle_guest_down();
              }
              bytes_xfer_prev = bytes_xfer_now;
@@ -1920,7 +1920,7 @@ int ram_postcopy_send_discard_bitmap(MigrationState *ms)
     rcu_read_lock();
 
     /* This should be our last sync, the src is now paused */
-    migration_bitmap_sync();
+    migration_bitmap_sync(&ram_state);
 
     unsentmap = atomic_rcu_read(&migration_bitmap_rcu)->unsentmap;
     if (!unsentmap) {
@@ -1998,7 +1998,7 @@ static int ram_save_init_globals(RAMState *rs)
 {
     int64_t ram_bitmap_pages; /* Size of bitmap in pages, including gaps */
 
-    dirty_rate_high_cnt = 0;
+    rs->dirty_rate_high_cnt = 0;
     bitmap_sync_count = 0;
     migration_bitmap_sync_init();
     qemu_mutex_init(&migration_bitmap_mutex);
@@ -2062,7 +2062,7 @@ static int ram_save_init_globals(RAMState *rs)
     migration_dirty_pages = ram_bytes_total() >> TARGET_PAGE_BITS;
 
     memory_global_dirty_log_start();
-    migration_bitmap_sync();
+    migration_bitmap_sync(rs);
     qemu_mutex_unlock_ramlist();
     qemu_mutex_unlock_iothread();
     rcu_read_unlock();
@@ -2210,7 +2210,7 @@ static int ram_save_complete(QEMUFile *f, void *opaque)
     rcu_read_lock();
 
     if (!migration_in_postcopy(migrate_get_current())) {
-        migration_bitmap_sync();
+        migration_bitmap_sync(rs);
     }
 
     ram_control_before_iterate(f, RAM_CONTROL_FINISH);
@@ -2243,6 +2243,7 @@ static void ram_save_pending(QEMUFile *f, void *opaque, uint64_t max_size,
                              uint64_t *non_postcopiable_pending,
                              uint64_t *postcopiable_pending)
 {
+    RAMState *rs = opaque;
     uint64_t remaining_size;
 
     remaining_size = ram_save_remaining() * TARGET_PAGE_SIZE;
@@ -2251,7 +2252,7 @@ static void ram_save_pending(QEMUFile *f, void *opaque, uint64_t max_size,
         remaining_size < max_size) {
         qemu_mutex_lock_iothread();
         rcu_read_lock();
-        migration_bitmap_sync();
+        migration_bitmap_sync(rs);
         rcu_read_unlock();
         qemu_mutex_unlock_iothread();
         remaining_size = ram_save_remaining() * TARGET_PAGE_SIZE;
-- 
2.9.3



