
From: Juan Quintela
Subject: [Qemu-devel] [PATCH 07/13] migration: create ram_multifd_page
Date: Fri, 21 Oct 2016 21:42:09 +0200

The function does not use multifd yet, but we have simplified
ram_save_page: the xbzrle and RDMA code is gone.  We have also added a
new counter and a new flag for this type of page.

Signed-off-by: Juan Quintela <address@hidden>
---
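Not part of the patch itself, but for context: the new
RAM_SAVE_FLAG_MULTIFD_PAGE travels in the same header word as the page
offset.  Offsets are target-page aligned, so the low bits are free for
save flags (0x200 is simply the next free bit above
RAM_SAVE_FLAG_COMPRESS_PAGE), and ram_load() splits address and flags
apart again with TARGET_PAGE_MASK.  A minimal standalone C sketch of
that encoding, assuming a 4 KiB TARGET_PAGE_SIZE and constants that
mirror migration/ram.c:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Constants mirror migration/ram.c; the 4 KiB page size is only an
     * assumption for this example. */
    #define TARGET_PAGE_SIZE            4096
    #define TARGET_PAGE_MASK            (~(TARGET_PAGE_SIZE - 1))
    #define RAM_SAVE_FLAG_CONTINUE      0x20
    #define RAM_SAVE_FLAG_MULTIFD_PAGE  0x200

    int main(void)
    {
        /* Sender side: OR the flags into the page-aligned offset. */
        uint64_t offset  = 42 * TARGET_PAGE_SIZE;
        uint64_t on_wire = offset | RAM_SAVE_FLAG_CONTINUE
                                  | RAM_SAVE_FLAG_MULTIFD_PAGE;

        /* Receiver side (as in ram_load): recover address and flags. */
        uint64_t addr  = on_wire & TARGET_PAGE_MASK;
        uint64_t flags = on_wire & ~TARGET_PAGE_MASK;

        printf("addr=0x%" PRIx64 " multifd=%d continue=%d\n", addr,
               !!(flags & RAM_SAVE_FLAG_MULTIFD_PAGE),
               !!(flags & RAM_SAVE_FLAG_CONTINUE));
        return 0;
    }

On the wire a multifd page is therefore just the usual header word
followed by the raw TARGET_PAGE_SIZE bytes written by qemu_put_buffer(),
which is exactly what the new RAM_SAVE_FLAG_MULTIFD_PAGE case in
ram_load() reads back.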
 hmp.c                         |  2 ++
 include/migration/migration.h |  1 +
 migration/migration.c         |  1 +
 migration/ram.c               | 44 ++++++++++++++++++++++++++++++++++++++++++-
 qapi-schema.json              |  4 +++-
 5 files changed, 50 insertions(+), 2 deletions(-)

diff --git a/hmp.c b/hmp.c
index 54f9f03..17a0ee2 100644
--- a/hmp.c
+++ b/hmp.c
@@ -222,6 +222,8 @@ void hmp_info_migrate(Monitor *mon, const QDict *qdict)
             monitor_printf(mon, "postcopy request count: %" PRIu64 "\n",
                            info->ram->postcopy_requests);
         }
+        monitor_printf(mon, "multifd: %" PRIu64 " pages\n",
+                       info->ram->multifd);
     }

     if (info->has_disk) {
diff --git a/include/migration/migration.h b/include/migration/migration.h
index 0b455d6..afdc7ec 100644
--- a/include/migration/migration.h
+++ b/include/migration/migration.h
@@ -274,6 +274,7 @@ uint64_t xbzrle_mig_pages_transferred(void);
 uint64_t xbzrle_mig_pages_overflow(void);
 uint64_t xbzrle_mig_pages_cache_miss(void);
 double xbzrle_mig_cache_miss_rate(void);
+uint64_t multifd_mig_pages_transferred(void);

 void ram_handle_compressed(void *host, uint8_t ch, uint64_t size);
 void ram_debug_dump_bitmap(unsigned long *todump, bool expected);
diff --git a/migration/migration.c b/migration/migration.c
index a4615f5..407e0c3 100644
--- a/migration/migration.c
+++ b/migration/migration.c
@@ -625,6 +625,7 @@ static void populate_ram_info(MigrationInfo *info, MigrationState *s)
     info->ram->mbps = s->mbps;
     info->ram->dirty_sync_count = s->dirty_sync_count;
     info->ram->postcopy_requests = s->postcopy_requests;
+    info->ram->multifd = multifd_mig_pages_transferred();

     if (s->state != MIGRATION_STATUS_COMPLETED) {
         info->ram->remaining = ram_bytes_remaining();
diff --git a/migration/ram.c b/migration/ram.c
index 0ea40eb..44b9380 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -68,6 +68,7 @@ static uint64_t bitmap_sync_count;
 #define RAM_SAVE_FLAG_XBZRLE   0x40
 /* 0x80 is reserved in migration.h start with 0x100 next */
 #define RAM_SAVE_FLAG_COMPRESS_PAGE    0x100
+#define RAM_SAVE_FLAG_MULTIFD_PAGE     0x200

 static const uint8_t ZERO_TARGET_PAGE[TARGET_PAGE_SIZE];

@@ -148,6 +149,7 @@ typedef struct AccountingInfo {
     uint64_t dup_pages;
     uint64_t skipped_pages;
     uint64_t norm_pages;
+    uint64_t multifd_pages;
     uint64_t iterations;
     uint64_t xbzrle_bytes;
     uint64_t xbzrle_pages;
@@ -218,6 +220,11 @@ uint64_t xbzrle_mig_pages_overflow(void)
     return acct_info.xbzrle_overflows;
 }

+uint64_t multifd_mig_pages_transferred(void)
+{
+    return acct_info.multifd_pages;
+}
+
 /* This is the last block that we have visited serching for dirty pages
  */
 static RAMBlock *last_seen_block;
@@ -995,6 +1002,33 @@ static int ram_save_page(QEMUFile *f, PageSearchStatus *pss,
     return pages;
 }

+static int ram_multifd_page(QEMUFile *f, PageSearchStatus *pss,
+                            bool last_stage, uint64_t *bytes_transferred)
+{
+    int pages;
+    uint8_t *p;
+    RAMBlock *block = pss->block;
+    ram_addr_t offset = pss->offset;
+
+    p = block->host + offset;
+
+    if (block == last_sent_block) {
+        offset |= RAM_SAVE_FLAG_CONTINUE;
+    }
+    pages = save_zero_page(f, block, offset, p, bytes_transferred);
+    if (pages == -1) {
+        *bytes_transferred +=
+            save_page_header(f, block, offset | RAM_SAVE_FLAG_MULTIFD_PAGE);
+        qemu_put_buffer(f, p, TARGET_PAGE_SIZE);
+        *bytes_transferred += TARGET_PAGE_SIZE;
+        pages = 1;
+        acct_info.norm_pages++;
+        acct_info.multifd_pages++;
+    }
+
+    return pages;
+}
+
 static int do_compress_ram_page(QEMUFile *f, RAMBlock *block,
                                 ram_addr_t offset)
 {
@@ -1432,6 +1466,8 @@ static int ram_save_target_page(MigrationState *ms, QEMUFile *f,
             res = ram_save_compressed_page(f, pss,
                                            last_stage,
                                            bytes_transferred);
+        } else if (migrate_multifd()) {
+            res = ram_multifd_page(f, pss, last_stage, bytes_transferred);
         } else {
             res = ram_save_page(f, pss, last_stage,
                                 bytes_transferred);
@@ -2678,7 +2714,8 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
         addr &= TARGET_PAGE_MASK;

         if (flags & (RAM_SAVE_FLAG_COMPRESS | RAM_SAVE_FLAG_PAGE |
-                     RAM_SAVE_FLAG_COMPRESS_PAGE | RAM_SAVE_FLAG_XBZRLE)) {
+                     RAM_SAVE_FLAG_COMPRESS_PAGE | RAM_SAVE_FLAG_XBZRLE |
+                     RAM_SAVE_FLAG_MULTIFD_PAGE)) {
             RAMBlock *block = ram_block_from_stream(f, flags);

             host = host_from_ram_block_offset(block, addr);
@@ -2753,6 +2790,11 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
                 break;
             }
             break;
+
+        case RAM_SAVE_FLAG_MULTIFD_PAGE:
+            qemu_get_buffer(f, host, TARGET_PAGE_SIZE);
+            break;
+
         case RAM_SAVE_FLAG_EOS:
             /* normal exit */
             break;
diff --git a/qapi-schema.json b/qapi-schema.json
index b5c9a06..e5b3a84 100644
--- a/qapi-schema.json
+++ b/qapi-schema.json
@@ -405,6 +405,7 @@
 #
 # @postcopy-requests: The number of page requests received from the destination
 #        (since 2.7)
+# @multifd: number of pages sent with multifd (since 2.8)
 #
 # Since: 0.14.0
 ##
@@ -413,7 +414,8 @@
            'duplicate': 'int', 'skipped': 'int', 'normal': 'int',
            'normal-bytes': 'int', 'dirty-pages-rate' : 'int',
            'mbps' : 'number', 'dirty-sync-count' : 'int',
-           'postcopy-requests' : 'int' } }
+           'postcopy-requests' : 'int',
+           'multifd' : 'int'} }

 ##
 # @XBZRLECacheStats
-- 
2.7.4



