qemu-devel
[Top][All Lists]
Advanced

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Qemu-devel] [PATCH 03/30] protect the ramlist with a separate mutex


From: Orit Wasserman
Subject: Re: [Qemu-devel] [PATCH 03/30] protect the ramlist with a separate mutex
Date: Sun, 21 Oct 2012 14:05:24 +0200
User-agent: Mozilla/5.0 (X11; Linux x86_64; rv:15.0) Gecko/20120911 Thunderbird/15.0.1

On 10/18/2012 09:29 AM, Juan Quintela wrote:
> From: Umesh Deshpande <address@hidden>
> 
> Add the new mutex that protects shared state between ram_save_live
> and the iothread.  If the iothread mutex has to be taken together
> with the ramlist mutex, the iothread shall always be _outside_.
> 
> Signed-off-by: Paolo Bonzini <address@hidden>
> Signed-off-by: Umesh Deshpande <address@hidden>
> Signed-off-by: Juan Quintela <address@hidden>
> ---
>  arch_init.c |  9 ++++++++-
>  cpu-all.h   |  8 ++++++++
>  exec.c      | 23 +++++++++++++++++++++--
>  3 files changed, 37 insertions(+), 3 deletions(-)
> 
> diff --git a/arch_init.c b/arch_init.c
> index b47313d..2d29828 100644
> --- a/arch_init.c
> +++ b/arch_init.c
> @@ -553,7 +553,6 @@ static void ram_migration_cancel(void *opaque)
>      migration_end();
>  }
> 
> -
>  static void reset_ram_globals(void)
>  {
>      last_block = NULL;
> @@ -573,6 +572,7 @@ static int ram_save_setup(QEMUFile *f, void *opaque)
>      bitmap_set(migration_bitmap, 1, ram_pages);
>      migration_dirty_pages = ram_pages;
> 
> +    qemu_mutex_lock_ramlist();
>      bytes_transferred = 0;
>      reset_ram_globals();
> 
> @@ -600,6 +600,7 @@ static int ram_save_setup(QEMUFile *f, void *opaque)
>          qemu_put_be64(f, block->length);
>      }
> 
> +    qemu_mutex_unlock_ramlist();
>      qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
> 
>      return 0;
> @@ -614,6 +615,8 @@ static int ram_save_iterate(QEMUFile *f, void *opaque)
>      uint64_t expected_downtime;
>      MigrationState *s = migrate_get_current();
> 
> +    qemu_mutex_lock_ramlist();
> +
>      if (ram_list.version != last_version) {
>          reset_ram_globals();
>      }
> @@ -662,6 +665,7 @@ static int ram_save_iterate(QEMUFile *f, void *opaque)
>          bwidth = 0.000001;
>      }
> 
> +    qemu_mutex_unlock_ramlist();
>      qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
> 
>      expected_downtime = ram_save_remaining() * TARGET_PAGE_SIZE / bwidth;
> @@ -682,6 +686,8 @@ static int ram_save_complete(QEMUFile *f, void *opaque)
>  {
>      migration_bitmap_sync();
> 
> +    qemu_mutex_lock_ramlist();
> +
>      /* try transferring iterative blocks of memory */
> 
>      /* flush all remaining blocks regardless of rate limiting */
> @@ -697,6 +703,7 @@ static int ram_save_complete(QEMUFile *f, void *opaque)
>      }
>      memory_global_dirty_log_stop();
> 
> +    qemu_mutex_unlock_ramlist();
>      qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
> 
>      g_free(migration_bitmap);
> diff --git a/cpu-all.h b/cpu-all.h
> index e07c91c..2bafcdd 100644
> --- a/cpu-all.h
> +++ b/cpu-all.h
> @@ -22,6 +22,7 @@
>  #include "qemu-common.h"
>  #include "qemu-tls.h"
>  #include "cpu-common.h"
> +#include "qemu-thread.h"
> 
>  /* some important defines:
>   *
> @@ -490,7 +491,9 @@ typedef struct RAMBlock {
>      ram_addr_t offset;
>      ram_addr_t length;
>      uint32_t flags;
> +    /* Protected by the iothread lock.  */
>      QLIST_ENTRY(RAMBlock) next_mru;
> +    /* Protected by the ramlist lock.  */
>      QLIST_ENTRY(RAMBlock) next;
>      char idstr[256];
>  #if defined(__linux__) && !defined(TARGET_S390X)
> @@ -499,9 +502,12 @@ typedef struct RAMBlock {
>  } RAMBlock;
> 
>  typedef struct RAMList {
> +    QemuMutex mutex;
> +    /* Protected by the iothread lock.  */
>      uint8_t *phys_dirty;
>      uint32_t version;
>      QLIST_HEAD(, RAMBlock) blocks_mru;
> +    /* Protected by the ramlist lock.  */
>      QLIST_HEAD(, RAMBlock) blocks;
>  } RAMList;
>  extern RAMList ram_list;
> @@ -521,6 +527,8 @@ extern int mem_prealloc;
> 
>  void dump_exec_info(FILE *f, fprintf_function cpu_fprintf);
>  ram_addr_t last_ram_offset(void);
> +void qemu_mutex_lock_ramlist(void);
> +void qemu_mutex_unlock_ramlist(void);
>  #endif /* !CONFIG_USER_ONLY */
> 
>  int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
> diff --git a/exec.c b/exec.c
> index 1e04711..cf9de92 100644
> --- a/exec.c
> +++ b/exec.c
> @@ -637,6 +637,7 @@ bool tcg_enabled(void)
>  void cpu_exec_init_all(void)
>  {
>  #if !defined(CONFIG_USER_ONLY)
> +    qemu_mutex_init(&ram_list.mutex);
>      memory_map_init();
>      io_mem_init();
>  #endif
> @@ -2329,6 +2330,16 @@ void qemu_flush_coalesced_mmio_buffer(void)
>          kvm_flush_coalesced_mmio_buffer();
>  }
> 
> +void qemu_mutex_lock_ramlist(void)
> +{
> +    qemu_mutex_lock(&ram_list.mutex);
> +}
> +
> +void qemu_mutex_unlock_ramlist(void)
> +{
> +    qemu_mutex_unlock(&ram_list.mutex);
> +}
> +
>  #if defined(__linux__) && !defined(TARGET_S390X)
> 
>  #include <sys/vfs.h>
> @@ -2510,6 +2521,7 @@ void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
>      }
>      pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
> 
> +    qemu_mutex_lock_ramlist();
>      QLIST_FOREACH(block, &ram_list.blocks, next) {
>          if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
>              fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
> @@ -2517,6 +2529,7 @@ void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
>              abort();
>          }
>      }
> +    qemu_mutex_unlock_ramlist();
>  }
> 
>  static int memory_try_enable_merging(void *addr, size_t len)
> @@ -2540,6 +2553,7 @@ ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
>      size = TARGET_PAGE_ALIGN(size);
>      new_block = g_malloc0(sizeof(*new_block));
> 
> +    qemu_mutex_lock_ramlist();
>      new_block->mr = mr;
>      new_block->offset = find_ram_offset(size);
>      if (host) {
> @@ -2575,6 +2589,7 @@ ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
>      QLIST_INSERT_HEAD(&ram_list.blocks_mru, new_block, next_mru);
> 
>      ram_list.version++;
> +    qemu_mutex_unlock_ramlist();
> 
>      ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
>                                         last_ram_offset() >> TARGET_PAGE_BITS);
> @@ -2599,21 +2614,24 @@ void qemu_ram_free_from_ptr(ram_addr_t addr)
>  {
>      RAMBlock *block;
> 
> +    qemu_mutex_lock_ramlist();
>      QLIST_FOREACH(block, &ram_list.blocks, next) {
>          if (addr == block->offset) {
>              QLIST_REMOVE(block, next);
>              QLIST_REMOVE(block, next_mru);
>              ram_list.version++;
>              g_free(block);
> -            return;
> +            break;
>          }
>      }
> +    qemu_mutex_unlock_ramlist();
>  }
> 
>  void qemu_ram_free(ram_addr_t addr)
>  {
>      RAMBlock *block;
> 
> +    qemu_mutex_lock_ramlist();
>      QLIST_FOREACH(block, &ram_list.blocks, next) {
>          if (addr == block->offset) {
>              QLIST_REMOVE(block, next);
> @@ -2644,9 +2662,10 @@ void qemu_ram_free(ram_addr_t addr)
>  #endif
>              }
>              g_free(block);
> -            return;
> +            break;
>          }
>      }
> +    qemu_mutex_unlock_ramlist();
> 
>  }
> 
Reviewed-by: Orit Wasserman <address@hidden>



reply via email to

[Prev in Thread] Current Thread [Next in Thread]