[Qemu-devel] [RFC v6 14/14] softmmu: Protect MMIO exclusive range


From: Alvise Rigo
Subject: [Qemu-devel] [RFC v6 14/14] softmmu: Protect MMIO exclusive range
Date: Mon, 14 Dec 2015 09:41:38 +0100

As in the RAM case, MMIO exclusive ranges also have to be protected
from other CPUs' accesses. In order to do that, we flag the accessed
MemoryRegion to mark that an exclusive access has been performed and
has not concluded yet.
This flag forces the other CPUs to invalidate the exclusive range in
case of collision, as modelled in the sketch below.
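
The protocol can be summarised with a small self-contained model (an
illustrative sketch only: the stand-in CPUState/MemoryRegion types, the
mmio_ldlink/mmio_write helpers and the main() driver are made up for the
example, and the writing vCPU is passed explicitly instead of using
current_cpu; only the field and helper names mirror this series):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t hwaddr;
#define EXCLUSIVE_RESET_ADDR ((hwaddr)-1)
#define NR_CPUS 2

typedef struct { hwaddr begin, end; } ExclRange;
typedef struct { ExclRange excl_protected_range; } CPUState;
typedef struct { bool pending_excl_access; } MemoryRegion;

static CPUState cpus[NR_CPUS];

static bool ranges_overlap(hwaddr a, hwaddr asize, hwaddr b, hwaddr bsize)
{
    /* Half-open ranges [a, a+asize) and [b, b+bsize) intersect. */
    return a < b + bsize && b < a + asize;
}

/* As in cputlb.c: reset every *other* vCPU's overlapping exclusive
 * range and report whether any was hit. */
static bool lookup_and_reset_cpus_ll_addr(CPUState *me, hwaddr addr,
                                          hwaddr size)
{
    bool hit = false;
    int i;

    for (i = 0; i < NR_CPUS; i++) {
        CPUState *cpu = &cpus[i];
        if (cpu != me &&
            cpu->excl_protected_range.begin != EXCLUSIVE_RESET_ADDR &&
            ranges_overlap(cpu->excl_protected_range.begin,
                           cpu->excl_protected_range.end -
                           cpu->excl_protected_range.begin,
                           addr, size)) {
            cpu->excl_protected_range.begin = EXCLUSIVE_RESET_ADDR;
            hit = true;
        }
    }
    return hit;
}

/* LoadLink on an MMIO page: record the range and flag the region. */
static void mmio_ldlink(CPUState *cpu, MemoryRegion *mr,
                        hwaddr addr, hwaddr size)
{
    cpu->excl_protected_range.begin = addr;
    cpu->excl_protected_range.end = addr + size;
    mr->pending_excl_access = true;
}

/* Any MMIO write: a colliding write invalidates the other vCPUs'
 * exclusive ranges, so their StoreCond will fail. */
static void mmio_write(CPUState *cpu, MemoryRegion *mr,
                       hwaddr addr, hwaddr size)
{
    if (mr->pending_excl_access &&
        lookup_and_reset_cpus_ll_addr(cpu, addr, size)) {
        mr->pending_excl_access = false;
    }
    /* ...the actual device write would happen here... */
}

int main(void)
{
    MemoryRegion mr = { .pending_excl_access = false };
    int i;

    for (i = 0; i < NR_CPUS; i++) {
        cpus[i].excl_protected_range.begin = EXCLUSIVE_RESET_ADDR;
    }
    mmio_ldlink(&cpus[0], &mr, 0x1000, 4);  /* vCPU0: LoadLink        */
    mmio_write(&cpus[1], &mr, 0x1000, 4);   /* vCPU1: colliding write */
    printf("SC would %s\n",
           cpus[0].excl_protected_range.begin == EXCLUSIVE_RESET_ADDR
           ? "fail" : "succeed");
    return 0;
}

Running the model prints "SC would fail": vCPU1's colliding write reset
vCPU0's exclusive range, so vCPU0's subsequent StoreCond is doomed.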

Suggested-by: Jani Kokkonen <address@hidden>
Suggested-by: Claudio Fontana <address@hidden>
Signed-off-by: Alvise Rigo <address@hidden>
---
 cputlb.c                | 20 +++++++++++++-------
 include/exec/memory.h   |  1 +
 softmmu_llsc_template.h | 11 ++++++++---
 softmmu_template.h      | 22 ++++++++++++++++++++++
 4 files changed, 44 insertions(+), 10 deletions(-)

diff --git a/cputlb.c b/cputlb.c
index 7c2669c..7348c5f 100644
--- a/cputlb.c
+++ b/cputlb.c
@@ -484,19 +484,25 @@ tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
 /* For every vCPU compare the exclusive address and reset it in case of a
  * match. Since only one vCPU is running at once, no lock has to be held to
  * guard this operation. */
-static inline void lookup_and_reset_cpus_ll_addr(hwaddr addr, hwaddr size)
+static inline bool lookup_and_reset_cpus_ll_addr(hwaddr addr, hwaddr size)
 {
     CPUState *cpu;
+    bool ret = false;
 
     CPU_FOREACH(cpu) {
-        if (cpu->excl_protected_range.begin != EXCLUSIVE_RESET_ADDR &&
-            ranges_overlap(cpu->excl_protected_range.begin,
-                           cpu->excl_protected_range.end -
-                           cpu->excl_protected_range.begin,
-                           addr, size)) {
-            cpu->excl_protected_range.begin = EXCLUSIVE_RESET_ADDR;
+        if (current_cpu != cpu) {
+            if (cpu->excl_protected_range.begin != EXCLUSIVE_RESET_ADDR &&
+                ranges_overlap(cpu->excl_protected_range.begin,
+                               cpu->excl_protected_range.end -
+                               cpu->excl_protected_range.begin,
+                               addr, size)) {
+                cpu->excl_protected_range.begin = EXCLUSIVE_RESET_ADDR;
+                ret = true;
+            }
         }
     }
+
+    return ret;
 }
 
 static inline void excl_history_put_addr(CPUState *cpu, hwaddr addr)
diff --git a/include/exec/memory.h b/include/exec/memory.h
index 2782c77..80961c2 100644
--- a/include/exec/memory.h
+++ b/include/exec/memory.h
@@ -181,6 +181,7 @@ struct MemoryRegion {
     bool warning_printed; /* For reservations */
     bool flush_coalesced_mmio;
     bool global_locking;
+    bool pending_excl_access; /* A vCPU issued an exclusive access */
     uint8_t vga_logging_count;
     MemoryRegion *alias;
     hwaddr alias_offset;
diff --git a/softmmu_llsc_template.h b/softmmu_llsc_template.h
index bbc820e..07e18ce 100644
--- a/softmmu_llsc_template.h
+++ b/softmmu_llsc_template.h
@@ -88,13 +88,17 @@ WORD_TYPE helper_ldlink_name(CPUArchState *env, target_ulong addr,
                 }
             }
         }
+        /* For this vCPU, just update the TLB entry, no need to flush. */
+        env->tlb_table[mmu_idx][index].addr_write |= TLB_EXCL;
+    } else {
+        /* Set a pending exclusive access in the MemoryRegion */
+        MemoryRegion *mr = iotlb_to_region(this,
+                                           env->iotlb[mmu_idx][index].addr);
+        mr->pending_excl_access = true;
     }
 
     cc->cpu_set_excl_protected_range(this, hw_addr, DATA_SIZE);
 
-    /* For this vCPU, just update the TLB entry, no need to flush. */
-    env->tlb_table[mmu_idx][index].addr_write |= TLB_EXCL;
-
     /* From now on we are in LL/SC context */
     this->ll_sc_context = 1;
 
@@ -128,6 +132,7 @@ WORD_TYPE helper_stcond_name(CPUArchState *env, target_ulong addr,
 
         /* Unset LL/SC context */
         cpu->ll_sc_context = 0;
+        cpu->excl_protected_range.begin = EXCLUSIVE_RESET_ADDR;
     }
 
     return ret;
diff --git a/softmmu_template.h b/softmmu_template.h
index 196beec..65cce0a 100644
--- a/softmmu_template.h
+++ b/softmmu_template.h
@@ -360,6 +360,14 @@ static inline void glue(io_write, SUFFIX)(CPUArchState *env,
     MemoryRegion *mr = iotlb_to_region(cpu, physaddr);
 
     physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
+
+    /* Invalidate the exclusive range that overlaps this access */
+    if (mr->pending_excl_access) {
+        if (lookup_and_reset_cpus_ll_addr(physaddr, 1 << SHIFT)) {
+            mr->pending_excl_access = false;
+        }
+    }
+
     if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
         cpu_io_recompile(cpu, retaddr);
     }
@@ -503,6 +511,13 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
                 glue(helper_le_st_name, _do_mmio_access)(env, val, addr, oi,
                                                          mmu_idx, index,
                                                          retaddr);
+                /* N.B.: Here excl_succeeded == 1 means that this access comes
+                 * from an exclusive instruction. */
+                if (cpu->excl_succeeded) {
+                    MemoryRegion *mr = iotlb_to_region(cpu,
+                                            env->iotlb[mmu_idx][index].addr);
+                    mr->pending_excl_access = false;
+                }
             } else {
                 glue(helper_le_st_name, _do_ram_access)(env, val, addr, oi,
                                                        mmu_idx, index,
                                                        retaddr);
@@ -652,6 +667,13 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
                 glue(helper_be_st_name, _do_mmio_access)(env, val, addr, oi,
                                                          mmu_idx, index,
                                                          retaddr);
+                /* N.B.: Here excl_succeeded == 1 means that this access comes
+                 * from an exclusive instruction. */
+                if (cpu->excl_succeeded) {
+                    MemoryRegion *mr = iotlb_to_region(cpu,
+                                            env->iotlb[mmu_idx][index].addr);
+                    mr->pending_excl_access = false;
+                }
             } else {
                 glue(helper_be_st_name, _do_ram_access)(env, val, addr, oi,
                                                        mmu_idx, index,
                                                        retaddr);
-- 
2.6.4