From: Vladimir Sementsov-Ogievskiy
Subject: [Qemu-block] [PATCH 03/17] migration: split common postcopy out of ram postcopy
Date: Tue, 7 Feb 2017 18:05:26 +0300
Split the common postcopy code out of the RAM-specific postcopy code.
Signed-off-by: Vladimir Sementsov-Ogievskiy <address@hidden>
Reviewed-by: Juan Quintela <address@hidden>
---
include/migration/migration.h | 1 +
migration/migration.c | 39 ++++++++++++++++++++++++-----------
migration/postcopy-ram.c | 4 +++-
migration/savevm.c | 48 +++++++++++++++++++++++++++++++++++--------
4 files changed, 70 insertions(+), 22 deletions(-)
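
Note for reviewers skimming the hunks below: migrate_postcopy() is meant to become the "is any postcopy entity enabled?" predicate, while migrate_postcopy_ram() keeps gating the RAM-only steps (discard bitmap, debug pings, userfault setup). A minimal sketch of the intended end state, assuming the later patch in this series that folds dirty-bitmap postcopy into the new helper (migrate_dirty_bitmaps() is that future helper, not something this patch introduces):

    bool migrate_postcopy(void)
    {
        /* True if any postcopy entity is enabled; each entity still checks
         * its own capability for its entity-specific steps. */
        return migrate_postcopy_ram() || migrate_dirty_bitmaps();
    }

In this patch the helper simply forwards to migrate_postcopy_ram(), so behaviour is unchanged; only the call sites common to all postcopy entities are switched over to it.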
diff --git a/include/migration/migration.h b/include/migration/migration.h
index af9135f..3aa228c 100644
--- a/include/migration/migration.h
+++ b/include/migration/migration.h
@@ -305,6 +305,7 @@ int migrate_add_blocker(Error *reason, Error **errp);
*/
void migrate_del_blocker(Error *reason);
+bool migrate_postcopy(void);
bool migrate_postcopy_ram(void);
bool migrate_zero_blocks(void);
diff --git a/migration/migration.c b/migration/migration.c
index 2766d2f..0a5fd38 100644
--- a/migration/migration.c
+++ b/migration/migration.c
@@ -1310,6 +1310,11 @@ bool migrate_postcopy_ram(void)
return s->enabled_capabilities[MIGRATION_CAPABILITY_POSTCOPY_RAM];
}
+bool migrate_postcopy(void)
+{
+ return migrate_postcopy_ram();
+}
+
bool migrate_auto_converge(void)
{
MigrationState *s;
@@ -1637,9 +1642,11 @@ static int postcopy_start(MigrationState *ms, bool *old_vm_running)
* need to tell the destination to throw any pages it's already received
* that are dirty
*/
- if (ram_postcopy_send_discard_bitmap(ms)) {
- error_report("postcopy send discard bitmap failed");
- goto fail;
+ if (migrate_postcopy_ram()) {
+ if (ram_postcopy_send_discard_bitmap(ms)) {
+ error_report("postcopy send discard bitmap failed");
+ goto fail;
+ }
}
/*
@@ -1648,8 +1655,10 @@ static int postcopy_start(MigrationState *ms, bool *old_vm_running)
* wrap their state up here
*/
qemu_file_set_rate_limit(ms->to_dst_file, INT64_MAX);
- /* Ping just for debugging, helps line traces up */
- qemu_savevm_send_ping(ms->to_dst_file, 2);
+ if (migrate_postcopy_ram()) {
+ /* Ping just for debugging, helps line traces up */
+ qemu_savevm_send_ping(ms->to_dst_file, 2);
+ }
/*
* While loading the device state we may trigger page transfer
@@ -1674,7 +1683,9 @@ static int postcopy_start(MigrationState *ms, bool *old_vm_running)
qemu_savevm_send_postcopy_listen(fb);
qemu_savevm_state_complete_precopy(fb, false);
- qemu_savevm_send_ping(fb, 3);
+ if (migrate_postcopy_ram()) {
+ qemu_savevm_send_ping(fb, 3);
+ }
qemu_savevm_send_postcopy_run(fb);
@@ -1697,11 +1708,13 @@ static int postcopy_start(MigrationState *ms, bool *old_vm_running)
qemu_mutex_unlock_iothread();
- /*
- * Although this ping is just for debug, it could potentially be
- * used for getting a better measurement of downtime at the source.
- */
- qemu_savevm_send_ping(ms->to_dst_file, 4);
+ if (migrate_postcopy_ram()) {
+ /*
+ * Although this ping is just for debug, it could potentially be
+ * used for getting a better measurement of downtime at the source.
+ */
+ qemu_savevm_send_ping(ms->to_dst_file, 4);
+ }
ret = qemu_file_get_error(ms->to_dst_file);
if (ret) {
@@ -1857,7 +1870,9 @@ static void *migration_thread(void *opaque)
/* And do a ping that will make stuff easier to debug */
qemu_savevm_send_ping(s->to_dst_file, 1);
+ }
+ if (migrate_postcopy()) {
/*
* Tell the destination that we *might* want to do postcopy later;
* if the other end can't do postcopy it should fail now, nice and
@@ -1891,7 +1906,7 @@ static void *migration_thread(void *opaque)
if (pending_size && pending_size >= max_size) {
/* Still a significant amount to transfer */
- if (migrate_postcopy_ram() &&
+ if (migrate_postcopy() &&
s->state != MIGRATION_STATUS_POSTCOPY_ACTIVE &&
pend_nonpost <= max_size &&
atomic_read(&s->start_postcopy)) {
diff --git a/migration/postcopy-ram.c b/migration/postcopy-ram.c
index a40dddb..aef5690 100644
--- a/migration/postcopy-ram.c
+++ b/migration/postcopy-ram.c
@@ -339,7 +339,9 @@ int postcopy_ram_incoming_cleanup(MigrationIncomingState *mis)
}
postcopy_state_set(POSTCOPY_INCOMING_END);
- migrate_send_rp_shut(mis, qemu_file_get_error(mis->from_src_file) != 0);
+ if (migrate_postcopy_ram()) {
+ migrate_send_rp_shut(mis, qemu_file_get_error(mis->from_src_file) != 0);
+ }
if (mis->postcopy_tmp_page) {
munmap(mis->postcopy_tmp_page, getpagesize());
diff --git a/migration/savevm.c b/migration/savevm.c
index 5235833..965a58c 100644
--- a/migration/savevm.c
+++ b/migration/savevm.c
@@ -73,7 +73,7 @@ static struct mig_cmd_args {
[MIG_CMD_INVALID] = { .len = -1, .name = "INVALID" },
[MIG_CMD_OPEN_RETURN_PATH] = { .len = 0, .name = "OPEN_RETURN_PATH" },
[MIG_CMD_PING] = { .len = sizeof(uint32_t), .name = "PING" },
- [MIG_CMD_POSTCOPY_ADVISE] = { .len = 16, .name = "POSTCOPY_ADVISE" },
+ [MIG_CMD_POSTCOPY_ADVISE] = { .len = -1, .name = "POSTCOPY_ADVISE" },
[MIG_CMD_POSTCOPY_LISTEN] = { .len = 0, .name = "POSTCOPY_LISTEN" },
[MIG_CMD_POSTCOPY_RUN] = { .len = 0, .name = "POSTCOPY_RUN" },
[MIG_CMD_POSTCOPY_RAM_DISCARD] = {
@@ -82,6 +82,23 @@ static struct mig_cmd_args {
[MIG_CMD_MAX] = { .len = -1, .name = "MAX" },
};
+/* Note for MIG_CMD_POSTCOPY_ADVISE:
+ * The format of the arguments depends on the postcopy mode:
+ * - postcopy RAM only
+ * uint64_t host page size
+ * uint64_t target page size
+ *
+ * - postcopy RAM and postcopy dirty bitmaps
+ * format is the same as for postcopy RAM only
+ *
+ * - postcopy dirty bitmaps only
+ * Nothing. Command length field is 0.
+ *
+ * Be careful: adding a new postcopy entity with other parameters must not
+ * break the format's ability to describe itself. A good way is to introduce
+ * a generic extensible format, with an exception for the two old entities.
+ */
+
static int announce_self_create(uint8_t *buf,
uint8_t *mac_addr)
{
@@ -856,12 +873,17 @@ int qemu_savevm_send_packaged(QEMUFile *f, const uint8_t *buf, size_t len)
/* Send prior to any postcopy transfer */
void qemu_savevm_send_postcopy_advise(QEMUFile *f)
{
- uint64_t tmp[2];
- tmp[0] = cpu_to_be64(getpagesize());
- tmp[1] = cpu_to_be64(1ul << qemu_target_page_bits());
+ if (migrate_postcopy_ram()) {
+ uint64_t tmp[2];
+ tmp[0] = cpu_to_be64(getpagesize());
+ tmp[1] = cpu_to_be64(1ul << qemu_target_page_bits());
- trace_qemu_savevm_send_postcopy_advise();
- qemu_savevm_command_send(f, MIG_CMD_POSTCOPY_ADVISE, 16, (uint8_t *)tmp);
+ trace_qemu_savevm_send_postcopy_advise();
+ qemu_savevm_command_send(f, MIG_CMD_POSTCOPY_ADVISE,
+ 16, (uint8_t *)tmp);
+ } else {
+ qemu_savevm_command_send(f, MIG_CMD_POSTCOPY_ADVISE, 0, NULL);
+ }
}
/* Sent prior to starting the destination running in postcopy, discard pages
@@ -1344,6 +1366,10 @@ static int loadvm_postcopy_handle_advise(MigrationIncomingState *mis)
return -1;
}
+ if (!migrate_postcopy_ram()) {
+ return 0;
+ }
+
if (!postcopy_ram_supported_by_host()) {
return -1;
}
@@ -1544,7 +1570,9 @@ static int loadvm_postcopy_handle_listen(MigrationIncomingState *mis)
* A rare case, we entered listen without having to do any discards,
* so do the setup that's normally done at the time of the 1st discard.
*/
- postcopy_ram_prepare_discard(mis);
+ if (migrate_postcopy_ram()) {
+ postcopy_ram_prepare_discard(mis);
+ }
}
/*
@@ -1552,8 +1580,10 @@ static int loadvm_postcopy_handle_listen(MigrationIncomingState *mis)
* However, at this point the CPU shouldn't be running, and the IO
* shouldn't be doing anything yet so don't actually expect requests
*/
- if (postcopy_ram_enable_notify(mis)) {
- return -1;
+ if (migrate_postcopy_ram()) {
+ if (postcopy_ram_enable_notify(mis)) {
+ return -1;
+ }
}
if (mis->have_listen_thread) {
--
1.8.3.1
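
The MIG_CMD_POSTCOPY_ADVISE note added in savevm.c relies on the command's length field to make the payload self-describing: 16 bytes of page-size parameters when RAM postcopy is enabled, an empty payload when only other entities (e.g. dirty bitmaps) are. A simplified, self-contained sketch of how a destination could dispatch on that length; the struct and helper names are illustrative only, and a plain big-endian buffer reader stands in for QEMU's qemu_get_be64():

    #include <stdint.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Read a big-endian 64-bit value from a raw payload buffer. */
    static uint64_t get_be64(const uint8_t *p)
    {
        uint64_t v = 0;
        for (int i = 0; i < 8; i++) {
            v = (v << 8) | p[i];
        }
        return v;
    }

    struct advise_params {
        bool have_ram_params;            /* was the 16-byte RAM payload present? */
        uint64_t remote_host_page_size;
        uint64_t remote_target_page_size;
    };

    static int handle_postcopy_advise(const uint8_t *payload, uint32_t cmd_len,
                                      struct advise_params *out)
    {
        switch (cmd_len) {
        case 0:
            /* Only non-RAM postcopy entities advised: nothing to parse. */
            out->have_ram_params = false;
            return 0;
        case 16:
            /* RAM postcopy advised: host page size, then target page size. */
            out->have_ram_params = true;
            out->remote_host_page_size = get_be64(payload);
            out->remote_target_page_size = get_be64(payload + 8);
            return 0;
        default:
            fprintf(stderr, "POSTCOPY_ADVISE: unexpected length %u\n",
                    (unsigned int)cmd_len);
            return -1;
        }
    }

The real loadvm_postcopy_handle_advise() goes on to validate host support (postcopy_ram_supported_by_host()) only when RAM postcopy is in use, which is exactly what the early "return 0" added in that hunk achieves.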