From: Stefan Hajnoczi
Subject: [Qemu-devel] [PATCH 08/11] block-migration: efficiently encode zero blocks
Date: Fri, 19 Jul 2013 16:38:49 +0800

From: Peter Lieven <address@hidden>

this patch adds an efficient encoding for zero blocks by
adding a new flag indicating that a block is completely zero.

additionally, bdrv_write_zeroes() is used at the destination
to efficiently write these zeroes. depending on the implementation,
this avoids fully provisioning the destination target.

Signed-off-by: Peter Lieven <address@hidden>
Signed-off-by: Stefan Hajnoczi <address@hidden>
---
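
Usage sketch (assuming the existing migrate-set-capabilities QMP command
and its HMP counterpart migrate_set_capability; the exact invocation is
illustrative, not part of this patch): the zero-blocks capability added
below is enabled on the source VM before starting the migration, e.g.

    { "execute": "migrate-set-capabilities",
      "arguments": { "capabilities": [
          { "capability": "zero-blocks", "state": true } ] } }

or from the HMP monitor:

    (qemu) migrate_set_capability zero-blocks on
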
 block-migration.c             | 32 ++++++++++++++++++++++++++------
 include/migration/migration.h |  1 +
 migration.c                   |  9 +++++++++
 qapi-schema.json              |  8 +++++++-
 4 files changed, 43 insertions(+), 7 deletions(-)
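
As background for the buffer_is_zero() check added to blk_send() below,
a naive equivalent of that helper (illustration only; QEMU's actual
routine is optimized to compare whole words at a time) would be:

    #include <stdbool.h>
    #include <stddef.h>

    static bool buffer_is_zero_naive(const void *buf, size_t len)
    {
        const unsigned char *p = buf;
        size_t i;

        /* scan byte by byte; blk_send() passes BLOCK_SIZE (1 MB) as len */
        for (i = 0; i < len; i++) {
            if (p[i] != 0) {
                return false;
            }
        }
        return true;
    }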

diff --git a/block-migration.c b/block-migration.c
index 2fd7699..f803f20 100644
--- a/block-migration.c
+++ b/block-migration.c
@@ -29,6 +29,7 @@
 #define BLK_MIG_FLAG_DEVICE_BLOCK       0x01
 #define BLK_MIG_FLAG_EOS                0x02
 #define BLK_MIG_FLAG_PROGRESS           0x04
+#define BLK_MIG_FLAG_ZERO_BLOCK         0x08
 
 #define MAX_IS_ALLOCATED_SEARCH 65536
 
@@ -80,6 +81,7 @@ typedef struct BlkMigState {
     int shared_base;
     QSIMPLEQ_HEAD(bmds_list, BlkMigDevState) bmds_list;
     int64_t total_sector_sum;
+    bool zero_blocks;
 
     /* Protected by lock.  */
     QSIMPLEQ_HEAD(blk_list, BlkMigBlock) blk_list;
@@ -114,16 +116,30 @@ static void blk_mig_unlock(void)
 static void blk_send(QEMUFile *f, BlkMigBlock * blk)
 {
     int len;
+    uint64_t flags = BLK_MIG_FLAG_DEVICE_BLOCK;
+
+    if (block_mig_state.zero_blocks &&
+        buffer_is_zero(blk->buf, BLOCK_SIZE)) {
+        flags |= BLK_MIG_FLAG_ZERO_BLOCK;
+    }
 
     /* sector number and flags */
     qemu_put_be64(f, (blk->sector << BDRV_SECTOR_BITS)
-                     | BLK_MIG_FLAG_DEVICE_BLOCK);
+                     | flags);
 
     /* device name */
     len = strlen(blk->bmds->bs->device_name);
     qemu_put_byte(f, len);
     qemu_put_buffer(f, (uint8_t *)blk->bmds->bs->device_name, len);
 
+    /* if a block is zero we need to flush here since the network
+     * bandwidth is now a lot higher than the storage device bandwidth.
+     * thus if we queue zero blocks we slow down the migration */
+    if (flags & BLK_MIG_FLAG_ZERO_BLOCK) {
+        qemu_fflush(f);
+        return;
+    }
+
     qemu_put_buffer(f, blk->buf, BLOCK_SIZE);
 }
 
@@ -344,6 +360,7 @@ static void init_blk_migration(QEMUFile *f)
     block_mig_state.total_sector_sum = 0;
     block_mig_state.prev_progress = -1;
     block_mig_state.bulk_completed = 0;
+    block_mig_state.zero_blocks = migrate_zero_blocks();
 
     bdrv_iterate(init_blk_migration_it, NULL);
 }
@@ -762,12 +779,15 @@ static int block_load(QEMUFile *f, void *opaque, int version_id)
                 nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;
             }
 
-            buf = g_malloc(BLOCK_SIZE);
-
-            qemu_get_buffer(f, buf, BLOCK_SIZE);
-            ret = bdrv_write(bs, addr, buf, nr_sectors);
+            if (flags & BLK_MIG_FLAG_ZERO_BLOCK) {
+                ret = bdrv_write_zeroes(bs, addr, nr_sectors);
+            } else {
+                buf = g_malloc(BLOCK_SIZE);
+                qemu_get_buffer(f, buf, BLOCK_SIZE);
+                ret = bdrv_write(bs, addr, buf, nr_sectors);
+                g_free(buf);
+            }
 
-            g_free(buf);
             if (ret < 0) {
                 return ret;
             }
diff --git a/include/migration/migration.h b/include/migration/migration.h
index bc9fde0..701709a 100644
--- a/include/migration/migration.h
+++ b/include/migration/migration.h
@@ -124,6 +124,7 @@ void migrate_add_blocker(Error *reason);
 void migrate_del_blocker(Error *reason);
 
 bool migrate_rdma_pin_all(void);
+bool migrate_zero_blocks(void);
 
 bool migrate_auto_converge(void);
 
diff --git a/migration.c b/migration.c
index 9f5a423..a9c0421 100644
--- a/migration.c
+++ b/migration.c
@@ -493,6 +493,15 @@ bool migrate_auto_converge(void)
     return s->enabled_capabilities[MIGRATION_CAPABILITY_AUTO_CONVERGE];
 }
 
+bool migrate_zero_blocks(void)
+{
+    MigrationState *s;
+
+    s = migrate_get_current();
+
+    return s->enabled_capabilities[MIGRATION_CAPABILITY_ZERO_BLOCKS];
+}
+
 int migrate_use_xbzrle(void)
 {
     MigrationState *s;
diff --git a/qapi-schema.json b/qapi-schema.json
index 8d33d52..592bb9c 100644
--- a/qapi-schema.json
+++ b/qapi-schema.json
@@ -613,10 +613,16 @@
 #          Disabled by default. Experimental: may (or may not) be renamed after
 #          further testing is complete. (since 1.6)
 #
+# @zero-blocks: During storage migration encode blocks of zeroes efficiently. This
+#          essentially saves 1MB of zeroes per block on the wire. Enabling requires
+#          source and target VM to support this feature. To enable it is sufficient
+#          to enable the capability on the source VM. The feature is disabled by
+#          default. (since 1.6)
+#
 # Since: 1.2
 ##
 { 'enum': 'MigrationCapability',
-  'data': ['xbzrle', 'x-rdma-pin-all', 'auto-converge'] }
+  'data': ['xbzrle', 'x-rdma-pin-all', 'auto-converge', 'zero-blocks'] }
 
 ##
 # @MigrationCapabilityStatus
-- 
1.8.1.4