From: Paolo Bonzini
Subject: [Qemu-devel] [PULL 30/62] memory: use atomic ops for setting dirty memory bits
Date: Fri, 5 Jun 2015 17:15:31 +0200

From: Stefan Hajnoczi <address@hidden>

Use set_bit_atomic() and bitmap_set_atomic() so that multiple threads
can dirty memory without race conditions.
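
As a rough illustration of the race this avoids (a standalone sketch, not QEMU code: set_bit_plain(), set_bit_atomic_sketch() and the GCC/Clang __atomic_fetch_or() builtin are illustrative stand-ins, not the helpers used in this patch): a plain |= is a read-modify-write, so two threads setting bits in the same bitmap word can lose each other's update, whereas an atomic OR cannot.

#include <limits.h>
#include <stdio.h>

#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

/* Non-atomic read-modify-write: concurrent callers can lose bits. */
static void set_bit_plain(unsigned long nr, unsigned long *map)
{
    map[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
}

/* Atomic OR (GCC/Clang builtin): concurrent callers never clobber bits. */
static void set_bit_atomic_sketch(unsigned long nr, unsigned long *map)
{
    __atomic_fetch_or(&map[nr / BITS_PER_LONG],
                      1UL << (nr % BITS_PER_LONG), __ATOMIC_RELAXED);
}

int main(void)
{
    unsigned long bitmap[2] = { 0, 0 };

    set_bit_plain(3, bitmap);          /* fine only while single-threaded */
    set_bit_atomic_sketch(65, bitmap); /* safe with concurrent writers */

    printf("%#lx %#lx\n", bitmap[0], bitmap[1]);
    return 0;
}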

Signed-off-by: Stefan Hajnoczi <address@hidden>
Message-Id: <address@hidden>
Reviewed-by: Fam Zheng <address@hidden>
Signed-off-by: Paolo Bonzini <address@hidden>
---
 include/exec/ram_addr.h | 16 +++++++++-------
 1 file changed, 9 insertions(+), 7 deletions(-)

diff --git a/include/exec/ram_addr.h b/include/exec/ram_addr.h
index 9811010..9f73076 100644
--- a/include/exec/ram_addr.h
+++ b/include/exec/ram_addr.h
@@ -114,7 +114,7 @@ static inline void cpu_physical_memory_set_dirty_flag(ram_addr_t addr,
                                                       unsigned client)
 {
     assert(client < DIRTY_MEMORY_NUM);
-    set_bit(addr >> TARGET_PAGE_BITS, ram_list.dirty_memory[client]);
+    set_bit_atomic(addr >> TARGET_PAGE_BITS, ram_list.dirty_memory[client]);
 }
 
 static inline void cpu_physical_memory_set_dirty_range(ram_addr_t start,
@@ -122,17 +122,18 @@ static inline void cpu_physical_memory_set_dirty_range(ram_addr_t start,
                                                        uint8_t mask)
 {
     unsigned long end, page;
+    unsigned long **d = ram_list.dirty_memory;
 
     end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
     page = start >> TARGET_PAGE_BITS;
     if (likely(mask & (1 << DIRTY_MEMORY_MIGRATION))) {
-        bitmap_set(ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION], page, end - page);
+        bitmap_set_atomic(d[DIRTY_MEMORY_MIGRATION], page, end - page);
     }
     if (unlikely(mask & (1 << DIRTY_MEMORY_VGA))) {
-        bitmap_set(ram_list.dirty_memory[DIRTY_MEMORY_VGA], page, end - page);
+        bitmap_set_atomic(d[DIRTY_MEMORY_VGA], page, end - page);
     }
     if (unlikely(mask & (1 << DIRTY_MEMORY_CODE))) {
-        bitmap_set(ram_list.dirty_memory[DIRTY_MEMORY_CODE], page, end - page);
+        bitmap_set_atomic(d[DIRTY_MEMORY_CODE], page, end - page);
     }
     xen_modified_memory(start, length);
 }
@@ -159,11 +160,12 @@ static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
         for (k = 0; k < nr; k++) {
             if (bitmap[k]) {
                 unsigned long temp = leul_to_cpu(bitmap[k]);
+                unsigned long **d = ram_list.dirty_memory;
 
-                ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION][page + k] |= temp;
-                ram_list.dirty_memory[DIRTY_MEMORY_VGA][page + k] |= temp;
+                atomic_or(&d[DIRTY_MEMORY_MIGRATION][page + k], temp);
+                atomic_or(&d[DIRTY_MEMORY_VGA][page + k], temp);
                 if (tcg_enabled()) {
-                    ram_list.dirty_memory[DIRTY_MEMORY_CODE][page + k] |= temp;
+                    atomic_or(&d[DIRTY_MEMORY_CODE][page + k], temp);
                 }
             }
         }
-- 
2.4.1