[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[PATCH v5 11/15] target/riscv: mmu changes for zicfiss shadow stack prot
From: |
Deepak Gupta |
Subject: |
[PATCH v5 11/15] target/riscv: mmu changes for zicfiss shadow stack protection |
Date: |
Mon, 19 Aug 2024 17:01:25 -0700 |
zicfiss protects shadow stack using new page table encoding PTE.R=0,
PTE.W=1 and PTE.X=0. This encoding is reserved if zicfiss is not
implemented or if shadow stacks are not enabled.
Loads on shadow stack memory are allowed while stores to shadow stack
memory lead to access faults. Shadow stack accesses to RO memory
lead to store page faults.
To implement the special nature of shadow stack memory, where only
selected stores (shadow stack stores from sspush) are allowed while
regular stores are disallowed, a new MMU TLB index is created for the
shadow stack.
Signed-off-by: Deepak Gupta <debug@rivosinc.com>
Suggested-by: Richard Henderson <richard.henderson@linaro.org>
---
target/riscv/cpu_helper.c | 42 +++++++++++++++++++++++++++++++++------
target/riscv/internals.h | 3 +++
2 files changed, 39 insertions(+), 6 deletions(-)
diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
index d3115da28d..f74a1216b1 100644
--- a/target/riscv/cpu_helper.c
+++ b/target/riscv/cpu_helper.c
@@ -894,6 +894,8 @@ static int get_physical_address(CPURISCVState *env, hwaddr
*physical,
hwaddr ppn;
int napot_bits = 0;
target_ulong napot_mask;
+ bool is_sstack_idx = ((mmu_idx & MMU_IDX_SS_WRITE) == MMU_IDX_SS_WRITE);
+ bool sstack_page = false;
/*
* Check if we should use the background registers for the two
@@ -1102,21 +1104,36 @@ restart:
return TRANSLATE_FAIL;
}
+ target_ulong rwx = pte & (PTE_R | PTE_W | PTE_X);
/* Check for reserved combinations of RWX flags. */
- switch (pte & (PTE_R | PTE_W | PTE_X)) {
- case PTE_W:
+ switch (rwx) {
case PTE_W | PTE_X:
return TRANSLATE_FAIL;
+ case PTE_W:
+ /* if bcfi enabled, PTE_W is not reserved and shadow stack page */
+ if (cpu_get_bcfien(env) && first_stage) {
+ sstack_page = true;
+ /* if ss index, read and write allowed. else only read allowed */
+ rwx = is_sstack_idx ? PTE_R | PTE_W : PTE_R;
+ break;
+ }
+ return TRANSLATE_FAIL;
+ case PTE_R:
+ /* shadow stack writes to readonly memory are page faults */
+ if (is_sstack_idx && access_type == MMU_DATA_STORE) {
+ return TRANSLATE_FAIL;
+ }
+ break;
}
int prot = 0;
- if (pte & PTE_R) {
+ if (rwx & PTE_R) {
prot |= PAGE_READ;
}
- if (pte & PTE_W) {
+ if (rwx & PTE_W) {
prot |= PAGE_WRITE;
}
- if (pte & PTE_X) {
+ if (rwx & PTE_X) {
bool mxr = false;
/*
@@ -1161,7 +1178,7 @@ restart:
if (!((prot >> access_type) & 1)) {
/* Access check failed */
- return TRANSLATE_FAIL;
+ return sstack_page ? TRANSLATE_PMP_FAIL : TRANSLATE_FAIL;
}
target_ulong updated_pte = pte;
@@ -1348,9 +1365,17 @@ void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr
addr,
break;
case MMU_DATA_LOAD:
cs->exception_index = RISCV_EXCP_LOAD_ADDR_MIS;
+ /* shadow stack mis aligned accesses are access faults */
+ if (mmu_idx & MMU_IDX_SS_WRITE) {
+ cs->exception_index = RISCV_EXCP_LOAD_ACCESS_FAULT;
+ }
break;
case MMU_DATA_STORE:
cs->exception_index = RISCV_EXCP_STORE_AMO_ADDR_MIS;
+ /* shadow stack mis aligned accesses are access faults */
+ if (mmu_idx & MMU_IDX_SS_WRITE) {
+ cs->exception_index = RISCV_EXCP_STORE_AMO_ACCESS_FAULT;
+ }
break;
default:
g_assert_not_reached();
@@ -1406,6 +1431,11 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int
size,
qemu_log_mask(CPU_LOG_MMU, "%s ad %" VADDR_PRIx " rw %d mmu_idx %d\n",
__func__, address, access_type, mmu_idx);
+ /* If shadow stack instruction initiated this access, treat it as store */
+ if (mmu_idx & MMU_IDX_SS_WRITE) {
+ access_type = MMU_DATA_STORE;
+ }
+
pmu_tlb_fill_incr_ctr(cpu, access_type);
if (two_stage_lookup) {
/* Two stage lookup */
diff --git a/target/riscv/internals.h b/target/riscv/internals.h
index 0ac17bc5ad..ddbdee885b 100644
--- a/target/riscv/internals.h
+++ b/target/riscv/internals.h
@@ -30,12 +30,15 @@
* - U+2STAGE 0b100
* - S+2STAGE 0b101
* - S+SUM+2STAGE 0b110
+ * - Shadow stack+U 0b1000
+ * - Shadow stack+S 0b1001
*/
#define MMUIdx_U 0
#define MMUIdx_S 1
#define MMUIdx_S_SUM 2
#define MMUIdx_M 3
#define MMU_2STAGE_BIT (1 << 2)
+#define MMU_IDX_SS_WRITE (1 << 3)
static inline int mmuidx_priv(int mmu_idx)
{
--
2.44.0
- [PATCH v5 03/15] target/riscv: save and restore elp state on priv transitions, (continued)
- [PATCH v5 03/15] target/riscv: save and restore elp state on priv transitions, Deepak Gupta, 2024/08/19
- [PATCH v5 08/15] target/riscv: Add zicfiss extension, Deepak Gupta, 2024/08/19
- [PATCH v5 10/15] target/riscv: tb flag for shadow stack instructions, Deepak Gupta, 2024/08/19
- [PATCH v5 12/15] target/riscv: implement zicfiss instructions, Deepak Gupta, 2024/08/19
- [PATCH v5 06/15] target/riscv: zicfilp `lpad` impl and branch tracking, Deepak Gupta, 2024/08/19
- [PATCH v5 05/15] target/riscv: tracking indirect branches (fcfi) for zicfilp, Deepak Gupta, 2024/08/19
- [PATCH v5 07/15] disas/riscv: enable `lpad` disassembly, Deepak Gupta, 2024/08/19
- [PATCH v5 13/15] target/riscv: compressed encodings for sspush and sspopchk, Deepak Gupta, 2024/08/19
- [PATCH v5 15/15] disas/riscv: enable disassembly for compressed sspush/sspopchk, Deepak Gupta, 2024/08/19
- [PATCH v5 11/15] target/riscv: mmu changes for zicfiss shadow stack protection,
Deepak Gupta <=
[PATCH v5 14/15] disas/riscv: enable disassembly for zicfiss instructions, Deepak Gupta, 2024/08/19
[PATCH v5 09/15] target/riscv: introduce ssp and enabling controls for zicfiss, Deepak Gupta, 2024/08/19