
[Qemu-devel] [RFC v8 10/14] softmmu: Support MMIO exclusive accesses


From: Alvise Rigo
Subject: [Qemu-devel] [RFC v8 10/14] softmmu: Support MMIO exclusive accesses
Date: Tue, 19 Apr 2016 15:39:27 +0200

Enable exclusive accesses when the MMIO flag is set in the TLB entry.

When an LL access is performed to MMIO memory, we treat it differently from
a RAM access in that we do not rely on the EXCL bitmap to flag the page
as exclusive. In fact, we do not even need the TLB_EXCL flag to force the
slow path, since for MMIO the slow path is always taken anyway.
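
A side note, illustrative only and not part of this patch: a minimal,
standalone sketch of why an MMIO TLB entry always forces the slow path.
The flag bit values and the address comparison below are simplified
assumptions made for the sketch, not QEMU's actual definitions.

#include <stdbool.h>
#include <stdint.h>

#define TARGET_PAGE_MASK   (~(uint64_t)0xfff)
#define TLB_MMIO           (1u << 5)    /* illustrative bit positions only */
#define TLB_EXCL           (1u << 6)

/* The softmmu store fast path hits only when the page-aligned guest address
 * matches addr_write exactly; any flag bit kept in the low bits of
 * addr_write (TLB_MMIO here) spoils the match, so the slow-path helper runs
 * whether or not TLB_EXCL is also set. */
bool store_takes_fast_path(uint64_t vaddr, uint64_t tlb_addr_write)
{
    return (vaddr & TARGET_PAGE_MASK) == tlb_addr_write;
}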

As in the RAM case, MMIO exclusive ranges also have to be protected from
other CPUs' accesses. To do that, we flag the accessed MemoryRegion to
mark that an exclusive access has been performed and has not yet
concluded. This flag forces the other CPUs to invalidate the exclusive
range in case of collision: essentially, it serves the same purpose that
TLB_EXCL serves for TLB entries referring to exclusive memory.
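
To make the protocol concrete (again illustrative only, not part of the
patch): a self-contained C sketch of the flag/invalidate logic. ToyCPU,
ToyMemoryRegion and the helpers below are simplified stand-ins rather than
QEMU types; only the pending_excl_access flag and the reset-on-collision
behaviour mirror what the hunks below implement.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define EXCLUSIVE_RESET_ADDR  UINT64_MAX
#define N_CPUS                4

typedef struct {
    uint64_t excl_begin;      /* start of the LL-protected range */
    uint64_t excl_size;
    bool     excl_succeeded;  /* set while an SC from this vCPU is running */
} ToyCPU;

typedef struct {
    bool pending_excl_access; /* a vCPU issued an LL to this region */
} ToyMemoryRegion;

static ToyCPU cpus[N_CPUS];

static bool ranges_overlap(uint64_t a, uint64_t a_size,
                           uint64_t b, uint64_t b_size)
{
    return a < b + b_size && b < a + a_size;
}

/* LL to MMIO: record the protected range and flag the region.  Unlike the
 * RAM case, no TLB_EXCL bit is needed here. */
void ll_mmio(ToyCPU *cpu, ToyMemoryRegion *mr, uint64_t addr, uint64_t size)
{
    cpu->excl_begin = addr;
    cpu->excl_size = size;
    mr->pending_excl_access = true;
}

/* Any MMIO write: if the region has a pending exclusive access and this is
 * not the concluding SC, invalidate every colliding protected range so the
 * owning vCPU's SC will fail; once a collision has been handled, the flag
 * can be dropped. */
void mmio_write(ToyCPU *cpu, ToyMemoryRegion *mr, uint64_t addr, uint64_t size)
{
    if (mr->pending_excl_access) {
        if (cpu->excl_succeeded) {
            /* Concluding SC of an LL/SC pair: no pending access anymore. */
            mr->pending_excl_access = false;
        } else {
            bool collided = false;
            for (size_t i = 0; i < N_CPUS; i++) {
                ToyCPU *other = &cpus[i];
                if (other != cpu &&
                    other->excl_begin != EXCLUSIVE_RESET_ADDR &&
                    ranges_overlap(other->excl_begin, other->excl_size,
                                   addr, size)) {
                    other->excl_begin = EXCLUSIVE_RESET_ADDR;
                    collided = true;
                }
            }
            if (collided) {
                mr->pending_excl_access = false;
            }
        }
    }
    /* ... the actual device write would happen here ... */
}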

Suggested-by: Jani Kokkonen <address@hidden>
Suggested-by: Claudio Fontana <address@hidden>
Signed-off-by: Alvise Rigo <address@hidden>
---
 cputlb.c                |  7 +++++--
 include/exec/memory.h   |  1 +
 softmmu_llsc_template.h | 11 +++++++----
 softmmu_template.h      | 22 ++++++++++++++++++++++
 4 files changed, 35 insertions(+), 6 deletions(-)

diff --git a/cputlb.c b/cputlb.c
index e5df3a5..3cf40a3 100644
--- a/cputlb.c
+++ b/cputlb.c
@@ -29,7 +29,6 @@
 #include "exec/memory-internal.h"
 #include "exec/ram_addr.h"
 #include "tcg/tcg.h"
-#include "hw/hw.h"
 
 //#define DEBUG_TLB
 //#define DEBUG_TLB_CHECK
@@ -508,9 +507,10 @@ static inline void excl_history_put_addr(hwaddr addr)
 /* For every vCPU compare the exclusive address and reset it in case of a
  * match. Since only one vCPU is running at once, no lock has to be held to
  * guard this operation. */
-static inline void reset_other_cpus_colliding_ll_addr(hwaddr addr, hwaddr size)
+static inline bool reset_other_cpus_colliding_ll_addr(hwaddr addr, hwaddr size)
 {
     CPUState *cpu;
+    bool ret = false;
 
     CPU_FOREACH(cpu) {
         if (current_cpu != cpu &&
@@ -520,8 +520,11 @@ static inline void reset_other_cpus_colliding_ll_addr(hwaddr addr, hwaddr size)
                            cpu->excl_protected_range.begin,
                            addr, size)) {
             cpu->excl_protected_range.begin = EXCLUSIVE_RESET_ADDR;
+            ret = true;
         }
     }
+
+    return ret;
 }
 
 #define MMUSUFFIX _mmu
diff --git a/include/exec/memory.h b/include/exec/memory.h
index 71e0480..bacb3ad 100644
--- a/include/exec/memory.h
+++ b/include/exec/memory.h
@@ -171,6 +171,7 @@ struct MemoryRegion {
     bool rom_device;
     bool flush_coalesced_mmio;
     bool global_locking;
+    bool pending_excl_access; /* A vCPU issued an exclusive access */
     uint8_t dirty_log_mask;
     ram_addr_t ram_addr;
     Object *owner;
diff --git a/softmmu_llsc_template.h b/softmmu_llsc_template.h
index 1e24fec..ca55502 100644
--- a/softmmu_llsc_template.h
+++ b/softmmu_llsc_template.h
@@ -84,15 +84,18 @@ WORD_TYPE helper_ldlink_name(CPUArchState *env, target_ulong addr,
                 }
             }
         }
+        /* For this vCPU, just update the TLB entry, no need to flush. */
+        env->tlb_table[mmu_idx][index].addr_write |= TLB_EXCL;
     } else {
-        hw_error("EXCL accesses to MMIO regions not supported yet.");
+        /* Set a pending exclusive access in the MemoryRegion */
+        MemoryRegion *mr = iotlb_to_region(this_cpu,
+                                           env->iotlb[mmu_idx][index].addr,
+                                           env->iotlb[mmu_idx][index].attrs);
+        mr->pending_excl_access = true;
     }
 
     cc->cpu_set_excl_protected_range(this_cpu, hw_addr, DATA_SIZE);
 
-    /* For this vCPU, just update the TLB entry, no need to flush. */
-    env->tlb_table[mmu_idx][index].addr_write |= TLB_EXCL;
-
     /* From now on we are in LL/SC context */
     this_cpu->ll_sc_context = true;
 
diff --git a/softmmu_template.h b/softmmu_template.h
index 2934a0c..2dc5e01 100644
--- a/softmmu_template.h
+++ b/softmmu_template.h
@@ -360,6 +360,28 @@ static inline void glue(io_write, SUFFIX)(CPUArchState *env,
     MemoryRegion *mr = iotlb_to_region(cpu, physaddr, iotlbentry->attrs);
 
     physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
+
+    /* While for normal RAM accesses we define exclusive memory at TLBEntry
+     * granularity, for MMIO memory we use MemoryRegion granularity.
+     * The pending_excl_access flag is the analogue of TLB_EXCL. */
+    if (unlikely(mr->pending_excl_access)) {
+        if (cpu->excl_succeeded) {
+            /* This SC access finalizes the LL/SC pair, thus the MemoryRegion
+             * has no pending exclusive access anymore.
+             * N.B.: Here excl_succeeded == true means that this access
+             * comes from an exclusive instruction. */
+            MemoryRegion *mr = iotlb_to_region(cpu, iotlbentry->addr,
+                                               iotlbentry->attrs);
+            mr->pending_excl_access = false;
+        } else {
+            /* This is a normal MMIO write access. Check if it collides
+             * with an existing exclusive range. */
+            if (reset_other_cpus_colliding_ll_addr(physaddr, 1 << SHIFT)) {
+                mr->pending_excl_access = false;
+            }
+        }
+    }
+
     if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
         cpu_io_recompile(cpu, retaddr);
     }
-- 
2.8.0



