
From: Juan Quintela
Subject: [Qemu-devel] [PATCH 10/34] protect the ramlist with a separate mutex
Date: Wed, 19 Dec 2012 13:33:33 +0100

From: Umesh Deshpande <address@hidden>

Add a new mutex that protects the shared state between ram_save_live
and the iothread.  If the iothread mutex has to be taken together
with the ramlist mutex, the iothread mutex shall always be taken
_outside_ (i.e. acquired first).
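
As a rough sketch (not part of this patch), a caller that needs both locks
would follow the ordering above like this.  qemu_mutex_lock_iothread() is the
existing global-lock helper, qemu_mutex_lock_ramlist() is added by this patch,
and the loop body is only a placeholder; the snippet assumes the declarations
from cpu-all.h:

    /* Sketch only: the iothread (global) mutex is the outer lock,
     * the new ramlist mutex is the inner one.
     */
    static void walk_ram_blocks_example(void)
    {
        RAMBlock *block;

        qemu_mutex_lock_iothread();    /* outer lock */
        qemu_mutex_lock_ramlist();     /* inner lock, added by this patch */

        QTAILQ_FOREACH(block, &ram_list.blocks, next) {
            /* ... read or modify the block list ... */
        }

        qemu_mutex_unlock_ramlist();   /* release in reverse order */
        qemu_mutex_unlock_iothread();
    }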

Signed-off-by: Paolo Bonzini <address@hidden>
Signed-off-by: Umesh Deshpande <address@hidden>
Signed-off-by: Juan Quintela <address@hidden>

Reviewed-by: Orit Wasserman <address@hidden>
---
 arch_init.c |  9 ++++++++-
 cpu-all.h   |  9 +++++++++
 exec.c      | 29 +++++++++++++++++++++++++++--
 3 files changed, 44 insertions(+), 3 deletions(-)

diff --git a/arch_init.c b/arch_init.c
index 136d0e8..9cee58a 100644
--- a/arch_init.c
+++ b/arch_init.c
@@ -528,7 +528,6 @@ static void ram_migration_cancel(void *opaque)
     migration_end();
 }

-
 static void reset_ram_globals(void)
 {
     last_block = NULL;
@@ -547,6 +546,7 @@ static int ram_save_setup(QEMUFile *f, void *opaque)
     bitmap_set(migration_bitmap, 0, ram_pages);
     migration_dirty_pages = ram_pages;

+    qemu_mutex_lock_ramlist();
     bytes_transferred = 0;
     reset_ram_globals();

@@ -574,6 +574,7 @@ static int ram_save_setup(QEMUFile *f, void *opaque)
         qemu_put_be64(f, block->length);
     }

+    qemu_mutex_unlock_ramlist();
     qemu_put_be64(f, RAM_SAVE_FLAG_EOS);

     return 0;
@@ -588,6 +589,8 @@ static int ram_save_iterate(QEMUFile *f, void *opaque)
     uint64_t expected_downtime;
     MigrationState *s = migrate_get_current();

+    qemu_mutex_lock_ramlist();
+
     if (ram_list.version != last_version) {
         reset_ram_globals();
     }
@@ -636,6 +639,7 @@ static int ram_save_iterate(QEMUFile *f, void *opaque)
         bwidth = 0.000001;
     }

+    qemu_mutex_unlock_ramlist();
     qemu_put_be64(f, RAM_SAVE_FLAG_EOS);

     expected_downtime = ram_save_remaining() * TARGET_PAGE_SIZE / bwidth;
@@ -656,6 +660,8 @@ static int ram_save_complete(QEMUFile *f, void *opaque)
 {
     migration_bitmap_sync();

+    qemu_mutex_lock_ramlist();
+
     /* try transferring iterative blocks of memory */

     /* flush all remaining blocks regardless of rate limiting */
@@ -671,6 +677,7 @@ static int ram_save_complete(QEMUFile *f, void *opaque)
     }
     migration_end();

+    qemu_mutex_unlock_ramlist();
     qemu_put_be64(f, RAM_SAVE_FLAG_EOS);

     return 0;
diff --git a/cpu-all.h b/cpu-all.h
index c69d602..db79894 100644
--- a/cpu-all.h
+++ b/cpu-all.h
@@ -22,6 +22,7 @@
 #include "qemu-common.h"
 #include "qemu-tls.h"
 #include "cpu-common.h"
+#include "qemu-thread.h"

 /* some important defines:
  *
@@ -487,6 +488,9 @@ typedef struct RAMBlock {
     ram_addr_t length;
     uint32_t flags;
     char idstr[256];
+    /* Reads can take either the iothread or the ramlist lock.
+     * Writes must take both locks.
+     */
     QTAILQ_ENTRY(RAMBlock) next;
 #if defined(__linux__) && !defined(TARGET_S390X)
     int fd;
@@ -494,8 +498,11 @@ typedef struct RAMBlock {
 } RAMBlock;

 typedef struct RAMList {
+    QemuMutex mutex;
+    /* Protected by the iothread lock.  */
     uint8_t *phys_dirty;
     RAMBlock *mru_block;
+    /* Protected by the ramlist lock.  */
     QTAILQ_HEAD(, RAMBlock) blocks;
     uint32_t version;
 } RAMList;
@@ -516,6 +523,8 @@ extern int mem_prealloc;

 void dump_exec_info(FILE *f, fprintf_function cpu_fprintf);
 ram_addr_t last_ram_offset(void);
+void qemu_mutex_lock_ramlist(void);
+void qemu_mutex_unlock_ramlist(void);
 #endif /* !CONFIG_USER_ONLY */

 int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
diff --git a/exec.c b/exec.c
index 4474d60..a43659b 100644
--- a/exec.c
+++ b/exec.c
@@ -212,6 +212,7 @@ bool memory_region_is_unassigned(MemoryRegion *mr)
 void cpu_exec_init_all(void)
 {
 #if !defined(CONFIG_USER_ONLY)
+    qemu_mutex_init(&ram_list.mutex);
     memory_map_init();
     io_mem_init();
 #endif
@@ -800,6 +801,16 @@ void qemu_flush_coalesced_mmio_buffer(void)
         kvm_flush_coalesced_mmio_buffer();
 }

+void qemu_mutex_lock_ramlist(void)
+{
+    qemu_mutex_lock(&ram_list.mutex);
+}
+
+void qemu_mutex_unlock_ramlist(void)
+{
+    qemu_mutex_unlock(&ram_list.mutex);
+}
+
 #if defined(__linux__) && !defined(TARGET_S390X)

 #include <sys/vfs.h>
@@ -981,6 +992,8 @@ void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
     }
     pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

+    /* This assumes the iothread lock is taken here too.  */
+    qemu_mutex_lock_ramlist();
     QTAILQ_FOREACH(block, &ram_list.blocks, next) {
         if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
             fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
@@ -988,6 +1001,7 @@ void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
             abort();
         }
     }
+    qemu_mutex_unlock_ramlist();
 }

 static int memory_try_enable_merging(void *addr, size_t len)
@@ -1011,6 +1025,8 @@ ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
     size = TARGET_PAGE_ALIGN(size);
     new_block = g_malloc0(sizeof(*new_block));

+    /* This assumes the iothread lock is taken here too.  */
+    qemu_mutex_lock_ramlist();
     new_block->mr = mr;
     new_block->offset = find_ram_offset(size);
     if (host) {
@@ -1056,6 +1072,7 @@ ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
     ram_list.mru_block = NULL;

     ram_list.version++;
+    qemu_mutex_unlock_ramlist();

     ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
                                        last_ram_offset() >> TARGET_PAGE_BITS);
@@ -1081,21 +1098,26 @@ void qemu_ram_free_from_ptr(ram_addr_t addr)
 {
     RAMBlock *block;

+    /* This assumes the iothread lock is taken here too.  */
+    qemu_mutex_lock_ramlist();
     QTAILQ_FOREACH(block, &ram_list.blocks, next) {
         if (addr == block->offset) {
             QTAILQ_REMOVE(&ram_list.blocks, block, next);
             ram_list.mru_block = NULL;
             ram_list.version++;
             g_free(block);
-            return;
+            break;
         }
     }
+    qemu_mutex_unlock_ramlist();
 }

 void qemu_ram_free(ram_addr_t addr)
 {
     RAMBlock *block;

+    /* This assumes the iothread lock is taken here too.  */
+    qemu_mutex_lock_ramlist();
     QTAILQ_FOREACH(block, &ram_list.blocks, next) {
         if (addr == block->offset) {
             QTAILQ_REMOVE(&ram_list.blocks, block, next);
@@ -1126,9 +1148,10 @@ void qemu_ram_free(ram_addr_t addr)
 #endif
             }
             g_free(block);
-            return;
+            break;
         }
     }
+    qemu_mutex_unlock_ramlist();

 }

@@ -1206,6 +1229,7 @@ void *qemu_get_ram_ptr(ram_addr_t addr)
 {
     RAMBlock *block;

+    /* The list is protected by the iothread lock here.  */
     block = ram_list.mru_block;
     if (block && addr - block->offset < block->length) {
         goto found;
@@ -1245,6 +1269,7 @@ static void *qemu_safe_ram_ptr(ram_addr_t addr)
 {
     RAMBlock *block;

+    /* The list is protected by the iothread lock here.  */
     QTAILQ_FOREACH(block, &ram_list.blocks, next) {
         if (addr - block->offset < block->length) {
             if (xen_enabled()) {
-- 
1.7.11.7
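
As a closing aside (not part of the patch), the comment added to RAMBlock
above says that readers may hold either the iothread lock or the new ramlist
lock.  A minimal sketch of a reader that relies only on the ramlist mutex,
using the helpers this patch introduces; the function name is hypothetical:

    static uint64_t ram_blocks_total_bytes(void)   /* hypothetical helper */
    {
        RAMBlock *block;
        uint64_t total = 0;

        /* The blocks list may be walked under the ramlist lock alone;
         * ram_list.phys_dirty is NOT touched here, since that field is
         * still protected by the iothread lock.
         */
        qemu_mutex_lock_ramlist();
        QTAILQ_FOREACH(block, &ram_list.blocks, next) {
            total += block->length;
        }
        qemu_mutex_unlock_ramlist();

        return total;
    }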
