[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[Qemu-devel] [PULL 07/25] COLO: Load dirty pages into SVM's RAM cache firstly
From: |
Jason Wang |
Subject: |
[Qemu-devel] [PULL 07/25] COLO: Load dirty pages into SVM's RAM cache firstly |
Date: |
Wed, 26 Sep 2018 11:16:32 +0800 |
From: Zhang Chen <address@hidden>
We should not load PVM's state directly into SVM, because errors may
happen while SVM is receiving data, which would break SVM.
We need to ensure we have received all data before loading the state into SVM. We use
an extra memory to cache these data (PVM's ram). The ram cache in secondary side
is initially the same as SVM/PVM's memory. And in the process of checkpoint,
we cache the dirty pages of PVM into this ram cache firstly, so this ram cache
is always the same as PVM's memory at every checkpoint; then we flush this cached
ram
to SVM after we receive all PVM's state.
Signed-off-by: zhanghailiang <address@hidden>
Signed-off-by: Li Zhijian <address@hidden>
Signed-off-by: Zhang Chen <address@hidden>
Signed-off-by: Zhang Chen <address@hidden>
Reviewed-by: Dr. David Alan Gilbert <address@hidden>
Signed-off-by: Jason Wang <address@hidden>
---
include/exec/ram_addr.h | 1 +
migration/migration.c | 7 ++++
migration/ram.c | 83 ++++++++++++++++++++++++++++++++++++++++-
migration/ram.h | 4 ++
migration/savevm.c | 2 +-
5 files changed, 94 insertions(+), 3 deletions(-)
diff --git a/include/exec/ram_addr.h b/include/exec/ram_addr.h
index 3abb639056..9ecd911c3e 100644
--- a/include/exec/ram_addr.h
+++ b/include/exec/ram_addr.h
@@ -27,6 +27,7 @@ struct RAMBlock {
struct rcu_head rcu;
struct MemoryRegion *mr;
uint8_t *host;
+ uint8_t *colo_cache; /* For colo, VM's ram cache */
ram_addr_t offset;
ram_addr_t used_length;
ram_addr_t max_length;
diff --git a/migration/migration.c b/migration/migration.c
index 3f147e5a74..9b6f7a9fc3 100644
--- a/migration/migration.c
+++ b/migration/migration.c
@@ -444,6 +444,11 @@ static void process_incoming_migration_co(void *opaque)
exit(EXIT_FAILURE);
}
+ if (colo_init_ram_cache() < 0) {
+ error_report("Init ram cache failed");
+ exit(EXIT_FAILURE);
+ }
+
qemu_thread_create(&mis->colo_incoming_thread, "COLO incoming",
colo_process_incoming_thread, mis, QEMU_THREAD_JOINABLE);
mis->have_colo_incoming_thread = true;
@@ -451,6 +456,8 @@ static void process_incoming_migration_co(void *opaque)
/* Wait checkpoint incoming thread exit before free resource */
qemu_thread_join(&mis->colo_incoming_thread);
+ /* We hold the global iothread lock, so it is safe here */
+ colo_release_ram_cache();
}
if (ret < 0) {
diff --git a/migration/ram.c b/migration/ram.c
index f6fd8e5e09..5bc8765b70 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -3398,6 +3398,20 @@ static inline void *host_from_ram_block_offset(RAMBlock
*block,
return block->host + offset;
}
+/*
+ * Translate (block, offset) into an address inside the secondary VM's
+ * COLO ram cache instead of into guest RAM itself.  Returns NULL when
+ * the offset is outside the block or the cache was never allocated.
+ */
+static inline void *colo_cache_from_block_offset(RAMBlock *block,
+ ram_addr_t offset)
+{
+ if (!offset_in_ramblock(block, offset)) {
+ return NULL;
+ }
+ if (!block->colo_cache) {
+ error_report("%s: colo_cache is NULL in block :%s",
+ __func__, block->idstr);
+ return NULL;
+ }
+ return block->colo_cache + offset;
+}
+
/**
* ram_handle_compressed: handle the zero page case
*
@@ -3602,6 +3616,58 @@ static void decompress_data_with_multi_threads(QEMUFile
*f,
qemu_mutex_unlock(&decomp_done_lock);
}
+/*
+ * colo cache: this is for the secondary VM; we cache the whole memory
+ * of the secondary VM.  The global (iothread) lock must be held when
+ * calling this helper.
+ *
+ * Returns 0 on success, a negative value on allocation failure.
+ */
+int colo_init_ram_cache(void)
+{
+ RAMBlock *block;
+
+ rcu_read_lock();
+ QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
+ block->colo_cache = qemu_anon_ram_alloc(block->used_length,
+ NULL,
+ false);
+ if (!block->colo_cache) {
+ error_report("%s: Can't alloc memory for COLO cache of block %s,"
+ "size 0x" RAM_ADDR_FMT, __func__, block->idstr,
+ block->used_length);
+ goto out_locked;
+ }
+ /* Seed the cache with the current contents of guest RAM. */
+ memcpy(block->colo_cache, block->host, block->used_length);
+ }
+ rcu_read_unlock();
+ return 0;
+
+out_locked:
+ /* Partial failure: free every cache allocated so far. */
+ QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
+ if (block->colo_cache) {
+ qemu_anon_ram_free(block->colo_cache, block->used_length);
+ block->colo_cache = NULL;
+ }
+ }
+
+ rcu_read_unlock();
+ /*
+ * NOTE(review): errno may have been clobbered by the cleanup calls
+ * above; consider saving it right after the failed allocation.
+ */
+ return -errno;
+}
+
+/*
+ * Free every per-block COLO ram cache and clear the pointers.  The
+ * global (iothread) lock must be held when calling this helper.
+ */
+void colo_release_ram_cache(void)
+{
+ RAMBlock *block;
+
+ rcu_read_lock();
+ QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
+ if (block->colo_cache) {
+ qemu_anon_ram_free(block->colo_cache, block->used_length);
+ block->colo_cache = NULL;
+ }
+ }
+ rcu_read_unlock();
+}
+
/**
* ram_load_setup: Setup RAM for migration incoming side
*
@@ -3618,6 +3684,7 @@ static int ram_load_setup(QEMUFile *f, void *opaque)
xbzrle_load_setup();
ramblock_recv_map_init();
+
return 0;
}
@@ -3638,6 +3705,7 @@ static int ram_load_cleanup(void *opaque)
g_free(rb->receivedmap);
rb->receivedmap = NULL;
}
+
return 0;
}
@@ -3875,13 +3943,24 @@ static int ram_load(QEMUFile *f, void *opaque, int
version_id)
RAM_SAVE_FLAG_COMPRESS_PAGE | RAM_SAVE_FLAG_XBZRLE)) {
RAMBlock *block = ram_block_from_stream(f, flags);
- host = host_from_ram_block_offset(block, addr);
+ /*
+ * After going into COLO, we should load the Page into colo_cache.
+ */
+ if (migration_incoming_in_colo_state()) {
+ host = colo_cache_from_block_offset(block, addr);
+ } else {
+ host = host_from_ram_block_offset(block, addr);
+ }
if (!host) {
error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
ret = -EINVAL;
break;
}
- ramblock_recv_bitmap_set(block, host);
+
+ if (!migration_incoming_in_colo_state()) {
+ ramblock_recv_bitmap_set(block, host);
+ }
+
trace_ram_load_loop(block->idstr, (uint64_t)addr, flags, host);
}
diff --git a/migration/ram.h b/migration/ram.h
index 457bf54b8c..d009480494 100644
--- a/migration/ram.h
+++ b/migration/ram.h
@@ -70,4 +70,8 @@ int64_t ramblock_recv_bitmap_send(QEMUFile *file,
const char *block_name);
int ram_dirty_bitmap_reload(MigrationState *s, RAMBlock *rb);
+/* ram cache */
+int colo_init_ram_cache(void);
+void colo_release_ram_cache(void);
+
#endif
diff --git a/migration/savevm.c b/migration/savevm.c
index 0376e723b6..96db539064 100644
--- a/migration/savevm.c
+++ b/migration/savevm.c
@@ -1932,7 +1932,7 @@ static int
loadvm_handle_recv_bitmap(MigrationIncomingState *mis,
static int loadvm_process_enable_colo(MigrationIncomingState *mis)
{
migration_incoming_enable_colo();
- return 0;
+ return colo_init_ram_cache();
}
/*
--
2.17.1
- [Qemu-devel] [PULL 19/25] docs: Add COLO status diagram to COLO-FT.txt, (continued)
- [Qemu-devel] [PULL 19/25] docs: Add COLO status diagram to COLO-FT.txt, Jason Wang, 2018/09/25
- [Qemu-devel] [PULL 20/25] clean up callback when del virtqueue, Jason Wang, 2018/09/25
- [Qemu-devel] [PULL 14/25] COLO: flush host dirty ram from cache, Jason Wang, 2018/09/25
- [Qemu-devel] [PULL 21/25] ne2000: fix possible out of bound access in ne2000_receive, Jason Wang, 2018/09/25
- [Qemu-devel] [PULL 17/25] COLO: notify net filters about checkpoint/failover event, Jason Wang, 2018/09/25
- [Qemu-devel] [PULL 18/25] COLO: quick failover process by kick COLO thread, Jason Wang, 2018/09/25
- [Qemu-devel] [PULL 13/25] savevm: split the process of different stages for loadvm/savevm, Jason Wang, 2018/09/25
- [Qemu-devel] [PULL 15/25] filter: Add handle_event method for NetFilterClass, Jason Wang, 2018/09/25
- [Qemu-devel] [PULL 11/25] qapi/migration.json: Rename COLO unknown mode to none mode., Jason Wang, 2018/09/25
- [Qemu-devel] [PULL 05/25] COLO: Add block replication into colo process, Jason Wang, 2018/09/25
- [Qemu-devel] [PULL 07/25] COLO: Load dirty pages into SVM's RAM cache firstly,
Jason Wang <=
- [Qemu-devel] [PULL 16/25] filter-rewriter: handle checkpoint and failover event, Jason Wang, 2018/09/25
- [Qemu-devel] [PULL 12/25] qapi: Add new command to query colo status, Jason Wang, 2018/09/25
- [Qemu-devel] [PULL 10/25] qmp event: Add COLO_EXIT event to notify users while exited COLO, Jason Wang, 2018/09/25
- [Qemu-devel] [PULL 09/25] COLO: Flush memory data from ram cache, Jason Wang, 2018/09/25
- [Qemu-devel] [PULL 08/25] ram/COLO: Record the dirty pages that SVM received, Jason Wang, 2018/09/25
- [Qemu-devel] [PULL 01/25] filter-rewriter: Add TCP state machine and fix memory leak in connection_track_table, Jason Wang, 2018/09/25
- [Qemu-devel] [PULL 04/25] COLO: integrate colo compare with colo frame, Jason Wang, 2018/09/25
- [Qemu-devel] [PULL 03/25] colo-compare: use notifier to notify packets comparing result, Jason Wang, 2018/09/25
- [Qemu-devel] [PULL 02/25] colo-compare: implement the process of checkpoint, Jason Wang, 2018/09/25
- [Qemu-devel] [PULL 06/25] COLO: Remove colo_state migration struct, Jason Wang, 2018/09/25