[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[Qemu-devel] [PATCH COLO-Frame v9 29/32] COLO: Separate the process of s
From: |
zhanghailiang |
Subject: |
[Qemu-devel] [PATCH COLO-Frame v9 29/32] COLO: Separate the process of saving/loading ram and device state |
Date: |
Wed, 2 Sep 2015 16:23:16 +0800 |
We separate the process of saving/loading ram and device state when doing a
checkpoint;
we add new helpers to save/load ram/device state. With this change, we can directly
transfer ram from master to slave without using a QEMUSizedBuffer as an intermediary,
which also reduces the amount of extra memory used during checkpoint.
Besides, we move colo_flush_ram_cache to the proper position after the
above change.
Signed-off-by: zhanghailiang <address@hidden>
Signed-off-by: Li Zhijian <address@hidden>
---
include/sysemu/sysemu.h | 5 ++
migration/colo.c | 43 +++++++++++----
migration/ram.c | 8 ---
migration/savevm.c | 142 +++++++++++++++++++++++++++++++++++++++++++++++-
4 files changed, 177 insertions(+), 21 deletions(-)
diff --git a/include/sysemu/sysemu.h b/include/sysemu/sysemu.h
index e576f65..637959f 100644
--- a/include/sysemu/sysemu.h
+++ b/include/sysemu/sysemu.h
@@ -93,7 +93,12 @@ int qemu_savevm_state_iterate(QEMUFile *f);
void qemu_savevm_state_complete(QEMUFile *f);
void qemu_savevm_state_cancel(void);
uint64_t qemu_savevm_state_pending(QEMUFile *f, uint64_t max_size);
+int qemu_save_ram_state(QEMUFile *f);
+int qemu_save_device_state(QEMUFile *f);
int qemu_loadvm_state(QEMUFile *f);
+int qemu_loadvm_state_begin(QEMUFile *f);
+int qemu_load_ram_state(QEMUFile *f);
+int qemu_load_device_state(QEMUFile *f);
typedef enum DisplayType
{
diff --git a/migration/colo.c b/migration/colo.c
index e7d733e..22ca7e5 100644
--- a/migration/colo.c
+++ b/migration/colo.c
@@ -252,21 +252,32 @@ static int colo_do_checkpoint_transaction(MigrationState
*s,
goto out;
}
+ ret = colo_ctl_put(s->to_dst_file, COLO_CMD_VMSTATE_SEND, 0);
+ if (ret < 0) {
+ goto out;
+ }
/* Disable block migration */
s->params.blk = 0;
s->params.shared = 0;
- qemu_savevm_state_header(trans);
- qemu_savevm_state_begin(trans, &s->params);
- qemu_mutex_lock_iothread();
- qemu_savevm_state_complete(trans);
- qemu_mutex_unlock_iothread();
-
- qemu_fflush(trans);
+ qemu_savevm_state_begin(s->to_dst_file, &s->params);
+ ret = qemu_file_get_error(s->to_dst_file);
+ if (ret < 0) {
+ error_report("save vm state begin error\n");
+ goto out;
+ }
- ret = colo_ctl_put(s->to_dst_file, COLO_CMD_VMSTATE_SEND, 0);
+ qemu_mutex_lock_iothread();
+ /* Note: device state is saved into buffer */
+ ret = qemu_save_device_state(trans);
if (ret < 0) {
+ error_report("save device state error\n");
+ qemu_mutex_unlock_iothread();
goto out;
}
+ qemu_fflush(trans);
+ qemu_save_ram_state(s->to_dst_file);
+ qemu_mutex_unlock_iothread();
+
/* we send the total size of the vmstate first */
size = qsb_get_length(buffer);
ret = colo_ctl_put(s->to_dst_file, COLO_CMD_VMSTATE_SIZE, size);
@@ -566,6 +577,16 @@ void *colo_process_incoming_thread(void *opaque)
goto out;
}
+ ret = qemu_loadvm_state_begin(mis->from_src_file);
+ if (ret < 0) {
+ error_report("load vm state begin error, ret=%d", ret);
+ goto out;
+ }
+ ret = qemu_load_ram_state(mis->from_src_file);
+ if (ret < 0) {
+ error_report("load ram state error");
+ goto out;
+ }
/* read the VM state total size first */
total_size = colo_ctl_get(mis->from_src_file, COLO_CMD_VMSTATE_SIZE);
if (total_size <= 0) {
@@ -594,8 +615,10 @@ void *colo_process_incoming_thread(void *opaque)
qemu_mutex_lock_iothread();
qemu_system_reset(VMRESET_SILENT);
vmstate_loading = true;
- if (qemu_loadvm_state(fb) < 0) {
- error_report("COLO: loadvm failed");
+ colo_flush_ram_cache();
+ ret = qemu_load_device_state(fb);
+ if (ret < 0) {
+ error_report("COLO: load device state failed\n");
vmstate_loading = false;
qemu_mutex_unlock_iothread();
goto out;
diff --git a/migration/ram.c b/migration/ram.c
index 782febd..8476783 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -1522,7 +1522,6 @@ static int ram_load(QEMUFile *f, void *opaque, int
version_id)
int flags = 0, ret = 0;
static uint64_t seq_iter;
int len = 0;
- bool need_flush = false;
seq_iter++;
@@ -1592,7 +1591,6 @@ static int ram_load(QEMUFile *f, void *opaque, int
version_id)
break;
}
- need_flush = true;
ch = qemu_get_byte(f);
ram_handle_compressed(host, ch, TARGET_PAGE_SIZE);
break;
@@ -1604,7 +1602,6 @@ static int ram_load(QEMUFile *f, void *opaque, int
version_id)
break;
}
- need_flush = true;
qemu_get_buffer(f, host, TARGET_PAGE_SIZE);
break;
case RAM_SAVE_FLAG_COMPRESS_PAGE:
@@ -1637,7 +1634,6 @@ static int ram_load(QEMUFile *f, void *opaque, int
version_id)
ret = -EINVAL;
break;
}
- need_flush = true;
break;
case RAM_SAVE_FLAG_EOS:
/* normal exit */
@@ -1658,10 +1654,6 @@ static int ram_load(QEMUFile *f, void *opaque, int
version_id)
rcu_read_unlock();
- if (!ret && ram_cache_enable && need_flush) {
- DPRINTF("Flush ram_cache\n");
- colo_flush_ram_cache();
- }
DPRINTF("Completed load of VM with exit code %d seq iteration "
"%" PRIu64 "\n", ret, seq_iter);
return ret;
diff --git a/migration/savevm.c b/migration/savevm.c
index 069645b..64e4bae 100644
--- a/migration/savevm.c
+++ b/migration/savevm.c
@@ -755,6 +755,10 @@ void qemu_savevm_state_begin(QEMUFile *f,
break;
}
}
+ if (migration_in_colo_state()) {
+ qemu_put_byte(f, QEMU_VM_EOF);
+ qemu_fflush(f);
+ }
}
/*
@@ -952,13 +956,44 @@ static int qemu_savevm_state(QEMUFile *f, Error **errp)
return ret;
}
-static int qemu_save_device_state(QEMUFile *f)
+int qemu_save_ram_state(QEMUFile *f)
{
SaveStateEntry *se;
+ int ret = 0;
- qemu_put_be32(f, QEMU_VM_FILE_MAGIC);
- qemu_put_be32(f, QEMU_VM_FILE_VERSION);
+ QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
+ if (!se->ops || !se->ops->save_live_complete) {
+ continue;
+ }
+ if (se->ops && se->ops->is_active) {
+ if (!se->ops->is_active(se->opaque)) {
+ continue;
+ }
+ }
+ trace_savevm_section_start(se->idstr, se->section_id);
+
+ save_section_header(f, se, QEMU_VM_SECTION_END);
+
+ ret = se->ops->save_live_complete(f, se->opaque);
+ trace_savevm_section_end(se->idstr, se->section_id, ret);
+ save_section_footer(f, se);
+ if (ret < 0) {
+ qemu_file_set_error(f, ret);
+ return ret;
+ }
+ }
+ qemu_put_byte(f, QEMU_VM_EOF);
+ return 0;
+}
+
+int qemu_save_device_state(QEMUFile *f)
+{
+ SaveStateEntry *se;
+
+ if (!migration_in_colo_state()) {
+ qemu_savevm_state_header(f);
+ }
cpu_synchronize_all_states();
QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
@@ -1267,6 +1302,107 @@ out:
return ret;
}
+int qemu_loadvm_state_begin(QEMUFile *f)
+{
+ uint8_t section_type;
+ int ret = -1;
+ MigrationIncomingState *mis = migration_incoming_get_current();
+
+ if (!mis) {
+ error_report("qemu_loadvm_state_begin");
+ return -EINVAL;
+ }
+ /* CleanUp */
+ loadvm_free_handlers(mis);
+
+ if (qemu_savevm_state_blocked(NULL)) {
+ return -EINVAL;
+ }
+
+ if (!savevm_state.skip_configuration) {
+ if (qemu_get_byte(f) != QEMU_VM_CONFIGURATION) {
+ error_report("Configuration section missing");
+ return -EINVAL;
+ }
+ ret = vmstate_load_state(f, &vmstate_configuration, &savevm_state, 0);
+
+ if (ret) {
+ return ret;
+ }
+ }
+
+ while ((section_type = qemu_get_byte(f)) != QEMU_VM_EOF) {
+ if (section_type != QEMU_VM_SECTION_START) {
+ error_report("QEMU_VM_SECTION_START");
+ ret = -EINVAL;
+ goto out;
+ }
+ ret = qemu_loadvm_section_start_full(f, mis);
+ if (ret < 0) {
+ goto out;
+ }
+ }
+ ret = qemu_file_get_error(f);
+ if (ret == 0) {
+ return 0;
+ }
+out:
+ return ret;
+}
+
+int qemu_load_ram_state(QEMUFile *f)
+{
+ uint8_t section_type;
+ MigrationIncomingState *mis = migration_incoming_get_current();
+ int ret = -1;
+
+ while ((section_type = qemu_get_byte(f)) != QEMU_VM_EOF) {
+ if (section_type != QEMU_VM_SECTION_PART &&
+ section_type != QEMU_VM_SECTION_END) {
+ error_report("load ram state, not get "
+ "QEMU_VM_SECTION_FULL or QEMU_VM_SECTION_END");
+ return -EINVAL;
+ }
+ ret = qemu_loadvm_section_part_end(f, mis);
+ if (ret < 0) {
+ goto out;
+ }
+ }
+ ret = qemu_file_get_error(f);
+ if (ret == 0) {
+ return 0;
+ }
+out:
+ return ret;
+}
+
+int qemu_load_device_state(QEMUFile *f)
+{
+ uint8_t section_type;
+ MigrationIncomingState *mis = migration_incoming_get_current();
+ int ret = -1;
+
+ while ((section_type = qemu_get_byte(f)) != QEMU_VM_EOF) {
+ if (section_type != QEMU_VM_SECTION_FULL) {
+ error_report("load device state error: "
+ "Not get QEMU_VM_SECTION_FULL");
+ return -EINVAL;
+ }
+ ret = qemu_loadvm_section_start_full(f, mis);
+ if (ret < 0) {
+ goto out;
+ }
+ }
+
+ ret = qemu_file_get_error(f);
+
+ cpu_synchronize_all_post_init();
+ if (ret == 0) {
+ return 0;
+ }
+out:
+ return ret;
+}
static BlockDriverState *find_vmstate_bs(void)
{
BlockDriverState *bs = NULL;
--
1.8.3.1
- [Qemu-devel] [PATCH COLO-Frame v9 17/32] COLO failover: Introduce a new command to trigger a failover, (continued)
- [Qemu-devel] [PATCH COLO-Frame v9 17/32] COLO failover: Introduce a new command to trigger a failover, zhanghailiang, 2015/09/02
- [Qemu-devel] [PATCH COLO-Frame v9 21/32] COLO: implement default failover treatment, zhanghailiang, 2015/09/02
- [Qemu-devel] [PATCH COLO-Frame v9 10/32] COLO: Add a new RunState RUN_STATE_COLO, zhanghailiang, 2015/09/02
- [Qemu-devel] [PATCH COLO-Frame v9 23/32] COLO failover: Shutdown related socket fd when do failover, zhanghailiang, 2015/09/02
- [Qemu-devel] [PATCH COLO-Frame v9 18/32] COLO failover: Introduce state to record failover process, zhanghailiang, 2015/09/02
- [Qemu-devel] [PATCH COLO-Frame v9 24/32] COLO failover: Don't do failover during loading VM's state, zhanghailiang, 2015/09/02
- [Qemu-devel] [PATCH COLO-Frame v9 22/32] qmp event: Add event notification for COLO error, zhanghailiang, 2015/09/02
- [Qemu-devel] [PATCH COLO-Frame v9 26/32] COLO: Implement shutdown checkpoint, zhanghailiang, 2015/09/02
- [Qemu-devel] [PATCH COLO-Frame v9 30/32] COLO: Split qemu_savevm_state_begin out of checkpoint process, zhanghailiang, 2015/09/02
- [Qemu-devel] [PATCH COLO-Frame v9 25/32] COLO: Control the checkpoint delay time by migrate-set-parameters command, zhanghailiang, 2015/09/02
- [Qemu-devel] [PATCH COLO-Frame v9 29/32] COLO: Separate the process of saving/loading ram and device state,
zhanghailiang <=
- [Qemu-devel] [PATCH COLO-Frame v9 28/32] savevm: Split load vm state function qemu_loadvm_state, zhanghailiang, 2015/09/02
- [Qemu-devel] [PATCH COLO-Frame v9 32/32] COLO: Add net packets treatment into COLO, zhanghailiang, 2015/09/02
- [Qemu-devel] [PATCH COLO-Frame v9 31/32] COLO: Add block replication into colo process, zhanghailiang, 2015/09/02
- [Qemu-devel] [PATCH COLO-Frame v9 27/32] COLO: Update the global runstate after going into colo state, zhanghailiang, 2015/09/02
- Re: [Qemu-devel] [PATCH COLO-Frame v9 00/32] COarse-grain LOck-stepping(COLO) Virtual Machines for Non-stop Service (FT), Yang Hongyang, 2015/09/02
- Re: [Qemu-devel] [PATCH COLO-Frame v9 00/32] COarse-grain LOck-stepping(COLO) Virtual Machines for Non-stop Service (FT), zhanghailiang, 2015/09/08
- Re: [Qemu-devel] [PATCH COLO-Frame v9 00/32] COarse-grain LOck-stepping(COLO) Virtual Machines for Non-stop Service (FT), zhanghailiang, 2015/09/15