[Top][All Lists]
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
Re: [Qemu-devel] [PATCH 03/14] protect the ramlist with a separate mutex
From: Paolo Bonzini
Subject: Re: [Qemu-devel] [PATCH 03/14] protect the ramlist with a separate mutex
Date: Fri, 21 Sep 2012 16:44:15 +0200
User-agent: Mozilla/5.0 (X11; Linux x86_64; rv:15.0) Gecko/20120911 Thunderbird/15.0.1
Il 21/09/2012 16:08, Juan Quintela ha scritto:
> From: Umesh Deshpande <address@hidden>
>
> Add the new mutex that protects shared state between ram_save_live
> and the iothread. If the iothread mutex has to be taken together
> with the ramlist mutex, the iothread shall always be _outside_.
>
> Signed-off-by: Paolo Bonzini <address@hidden>
> Signed-off-by: Umesh Deshpande <address@hidden>
> Signed-off-by: Juan Quintela <address@hidden>
> ---
> arch_init.c | 9 ++++++++-
> cpu-all.h | 8 ++++++++
> exec.c | 23 +++++++++++++++++++++--
> 3 files changed, 37 insertions(+), 3 deletions(-)
>
> diff --git a/arch_init.c b/arch_init.c
> index eb33fdd..0d963b4 100644
> --- a/arch_init.c
> +++ b/arch_init.c
> @@ -553,7 +553,6 @@ static void ram_migration_cancel(void *opaque)
> migration_end();
> }
>
> -
> static void reset_ram_globals(void)
> {
> last_block = NULL;
> @@ -573,6 +572,7 @@ static int ram_save_setup(QEMUFile *f, void *opaque)
> bitmap_set(migration_bitmap, 1, ram_pages);
> migration_dirty_pages = ram_pages;
>
> + qemu_mutex_lock_ramlist();
> migration_bitmap_sync();
> bytes_transferred = 0;
> reset_ram_globals();
> @@ -600,6 +600,7 @@ static int ram_save_setup(QEMUFile *f, void *opaque)
> qemu_put_be64(f, block->length);
> }
>
> + qemu_mutex_unlock_ramlist();
> qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
>
> return 0;
> @@ -614,6 +615,8 @@ static int ram_save_iterate(QEMUFile *f, void *opaque)
> uint64_t expected_downtime;
> MigrationState *s = migrate_get_current();
>
> + qemu_mutex_lock_ramlist();
> +
> if (ram_list.version != last_version) {
> reset_ram_globals();
> }
> @@ -662,6 +665,7 @@ static int ram_save_iterate(QEMUFile *f, void *opaque)
> bwidth = 0.000001;
> }
>
> + qemu_mutex_unlock_ramlist();
> qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
>
> expected_downtime = ram_save_remaining() * TARGET_PAGE_SIZE / bwidth;
> @@ -682,6 +686,8 @@ static int ram_save_complete(QEMUFile *f, void *opaque)
> {
> migration_bitmap_sync();
>
> + qemu_mutex_lock_ramlist();
> +
> /* try transferring iterative blocks of memory */
>
> /* flush all remaining blocks regardless of rate limiting */
> @@ -697,6 +703,7 @@ static int ram_save_complete(QEMUFile *f, void *opaque)
> }
> memory_global_dirty_log_stop();
>
> + qemu_mutex_unlock_ramlist();
> qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
>
> g_free(migration_bitmap);
> diff --git a/cpu-all.h b/cpu-all.h
> index 6576229..2b0a640 100644
> --- a/cpu-all.h
> +++ b/cpu-all.h
> @@ -22,6 +22,7 @@
> #include "qemu-common.h"
> #include "qemu-tls.h"
> #include "cpu-common.h"
> +#include "qemu-thread.h"
>
> /* some important defines:
> *
> @@ -487,7 +488,9 @@ typedef struct RAMBlock {
> ram_addr_t offset;
> ram_addr_t length;
> uint32_t flags;
> + /* Protected by the iothread lock. */
> QLIST_ENTRY(RAMBlock) next_mru;
> + /* Protected by the ramlist lock. */
> QLIST_ENTRY(RAMBlock) next;
> char idstr[256];
> #if defined(__linux__) && !defined(TARGET_S390X)
> @@ -496,9 +499,12 @@ typedef struct RAMBlock {
> } RAMBlock;
>
> typedef struct RAMList {
> + QemuMutex mutex;
> + /* Protected by the iothread lock. */
> uint8_t *phys_dirty;
> uint32_t version;
> QLIST_HEAD(, RAMBlock) blocks_mru;
> + /* Protected by the ramlist lock. */
> QLIST_HEAD(, RAMBlock) blocks;
> } RAMList;
> extern RAMList ram_list;
> @@ -520,6 +526,8 @@ void dump_exec_info(FILE *f, fprintf_function
> cpu_fprintf);
> #endif /* !CONFIG_USER_ONLY */
>
> ram_addr_t last_ram_offset(void);
> +void qemu_mutex_lock_ramlist(void);
> +void qemu_mutex_unlock_ramlist(void);
>
> int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
> uint8_t *buf, int len, int is_write);
> diff --git a/exec.c b/exec.c
> index e9d1509..3a8a4dd 100644
> --- a/exec.c
> +++ b/exec.c
> @@ -637,6 +637,7 @@ bool tcg_enabled(void)
>
> void cpu_exec_init_all(void)
> {
> + qemu_mutex_init(&ram_list.mutex);
> #if !defined(CONFIG_USER_ONLY)
> memory_map_init();
> io_mem_init();
> @@ -2364,6 +2365,16 @@ static long gethugepagesize(const char *path)
> return fs.f_bsize;
> }
>
> +void qemu_mutex_lock_ramlist(void)
> +{
> + qemu_mutex_lock(&ram_list.mutex);
> +}
> +
> +void qemu_mutex_unlock_ramlist(void)
> +{
> + qemu_mutex_unlock(&ram_list.mutex);
> +}
> +
> static void *file_ram_alloc(RAMBlock *block,
> ram_addr_t memory,
> const char *path)
> @@ -2519,6 +2530,7 @@ void qemu_ram_set_idstr(ram_addr_t addr, const char
> *name, DeviceState *dev)
> }
> pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
>
> + qemu_mutex_lock_ramlist();
> QLIST_FOREACH(block, &ram_list.blocks, next) {
> if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
> fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
> @@ -2526,6 +2538,7 @@ void qemu_ram_set_idstr(ram_addr_t addr, const char
> *name, DeviceState *dev)
> abort();
> }
> }
> + qemu_mutex_unlock_ramlist();
> }
>
> static int memory_try_enable_merging(void *addr, size_t len)
> @@ -2549,6 +2562,7 @@ ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size,
> void *host,
> size = TARGET_PAGE_ALIGN(size);
> new_block = g_malloc0(sizeof(*new_block));
>
> + qemu_mutex_lock_ramlist();
> new_block->mr = mr;
> new_block->offset = find_ram_offset(size);
> if (host) {
> @@ -2584,6 +2598,7 @@ ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size,
> void *host,
> QLIST_INSERT_HEAD(&ram_list.blocks_mru, new_block, next_mru);
>
> ram_list.version++;
> + qemu_mutex_unlock_ramlist();
>
> ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
> last_ram_offset() >>
> TARGET_PAGE_BITS);
> @@ -2608,21 +2623,24 @@ void qemu_ram_free_from_ptr(ram_addr_t addr)
> {
> RAMBlock *block;
>
> + qemu_mutex_lock_ramlist();
> QLIST_FOREACH(block, &ram_list.blocks, next) {
> if (addr == block->offset) {
> QLIST_REMOVE(block, next);
> QLIST_REMOVE(block, next_mru);
> ram_list.version++;
> g_free(block);
> - return;
> + break;
> }
> }
> + qemu_mutex_unlock_ramlist();
> }
>
> void qemu_ram_free(ram_addr_t addr)
> {
> RAMBlock *block;
>
> + qemu_mutex_lock_ramlist();
> QLIST_FOREACH(block, &ram_list.blocks, next) {
> if (addr == block->offset) {
> QLIST_REMOVE(block, next);
> @@ -2653,9 +2671,10 @@ void qemu_ram_free(ram_addr_t addr)
> #endif
> }
> g_free(block);
> - return;
> + break;
> }
> }
> + qemu_mutex_unlock_ramlist();
>
> }
>
All reads and writes of ram_list.version, and the "next" list are
protected by the new mutex. The next_mru list is still protected by the
BQL. The BQL is always taken outside the ram_list mutex.
Reviewed-by: Paolo Bonzini <address@hidden>
- [Qemu-devel] [RFC 00/14] Migration thread, Juan Quintela, 2012/09/21
- [Qemu-devel] [PATCH 01/14] split MRU ram list, Juan Quintela, 2012/09/21
- [Qemu-devel] [PATCH 03/14] protect the ramlist with a separate mutex, Juan Quintela, 2012/09/21
- Re: [Qemu-devel] [PATCH 03/14] protect the ramlist with a separate mutex, Paolo Bonzini <=
- [Qemu-devel] [PATCH 02/14] add a version number to ram_list, Juan Quintela, 2012/09/21
- [Qemu-devel] [PATCH 04/14] buffered_file: Move from using a timer to use a thread, Juan Quintela, 2012/09/21
- [Qemu-devel] [PATCH 05/14] migration: make qemu_fopen_ops_buffered() return void, Juan Quintela, 2012/09/21
- [Qemu-devel] [PATCH 09/14] migration: take finer locking, Juan Quintela, 2012/09/21
- [Qemu-devel] [PATCH 08/14] migration: remove unfreeze logic, Juan Quintela, 2012/09/21