[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[Qemu-devel] [PATCH 1/4] cputlb: Add tlb_set_asid_for_mmuidx
From: |
Richard Henderson |
Subject: |
[Qemu-devel] [PATCH 1/4] cputlb: Add tlb_set_asid_for_mmuidx |
Date: |
Mon, 29 Oct 2018 15:53:36 +0000 |
Although we can't do much with ASIDs except remember them, this
will allow cleanups within target/ that should make things clearer.
Signed-off-by: Richard Henderson <address@hidden>
---
include/exec/cpu-defs.h | 2 ++
include/exec/exec-all.h | 19 +++++++++++++++++++
accel/tcg/cputlb.c | 23 +++++++++++++++++++++++
3 files changed, 44 insertions(+)
diff --git a/include/exec/cpu-defs.h b/include/exec/cpu-defs.h
index 6a60f94a41..8fbfe8c8e2 100644
--- a/include/exec/cpu-defs.h
+++ b/include/exec/cpu-defs.h
@@ -152,6 +152,8 @@ typedef struct CPUTLBDesc {
target_ulong large_page_mask;
/* The next index to use in the tlb victim table. */
size_t vindex;
+ /* The current ASID for this tlb. */
+ uint32_t asid;
} CPUTLBDesc;
/*
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index 815e5b1e83..478f488704 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -226,6 +226,21 @@ void tlb_flush_by_mmuidx_all_cpus(CPUState *cpu, uint16_t
idxmap);
* depend on when the guests translation ends the TB.
*/
void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu, uint16_t idxmap);
+/**
+ * tlb_set_asid_for_mmuidx:
+ * @cpu: Originating cpu
+ * @asid: Address Space Identifier
+ * @idxmap: bitmap of MMU indices to set to @asid
+ * @depmap: bitmap of dependent MMU indices
+ *
+ * Set an ASID for all of @idxmap. If any previous ASID was different,
+ * then we may flush the mmu idx. If a flush is required, then also flush
+ * all dependent mmu indices in @depmap. This latter is typically used
+ * for secondary page resolution, for implementing virtualization within
+ * the guest.
+ */
+void tlb_set_asid_for_mmuidx(CPUState *cpu, uint32_t asid,
+ uint16_t idxmap, uint16_t depmap);
/**
* tlb_set_page_with_attrs:
* @cpu: CPU to add this TLB entry for
@@ -311,6 +326,10 @@ static inline void
tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu,
uint16_t idxmap)
{
}
+static inline void tlb_set_asid_for_mmuidx(CPUState *cpu, uint32_t asid,
+ uint16_t idxmap, uint16_t depmap)
+{
+}
#endif
#define CODE_GEN_ALIGN 16 /* must be >= of the size of a icache line
*/
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index af6bd8ccf9..60b3dc2de3 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -360,6 +360,29 @@ void tlb_flush_page_all_cpus_synced(CPUState *src,
target_ulong addr)
tlb_flush_page_by_mmuidx_all_cpus_synced(src, addr, ALL_MMUIDX_BITS);
}
+void tlb_set_asid_for_mmuidx(CPUState *cpu, uint32_t asid, uint16_t idxmap,
+ uint16_t depmap)
+{
+ CPUArchState *env = cpu->env_ptr;
+ uint16_t work, to_flush = 0;
+
+ /*
+ * We don't support ASIDs except trivially.
+ * If there is any change, then we must flush the TLB.
+ */
+ for (work = idxmap; work != 0; work &= work - 1) {
+ int mmu_idx = ctz32(work);
+ if (env->tlb_d[mmu_idx].asid != asid) {
+ env->tlb_d[mmu_idx].asid = asid;
+ to_flush = idxmap;
+ }
+ }
+ if (to_flush) {
+ to_flush |= depmap;
+ tlb_flush_by_mmuidx_async_work(cpu, RUN_ON_CPU_HOST_INT(to_flush));
+ }
+}
+
/* update the TLBs so that writes to code in the virtual page 'addr'
can be detected */
void tlb_protect_code(ram_addr_t ram_addr)
--
2.17.2