From: Atish Patra
Subject: [PATCH v4 08/11] target/riscv: Add counter delegation/configuration support
Date: Tue, 03 Dec 2024 15:14:46 -0800
From: Kaiwen Xue <kaiwenx@rivosinc.com>
The Smcdeleg/Ssccfg extensions add support for counter delegation via
the S*indcsr interface and Ssccfg. They also add a new shadow CSR,
scountinhibit, and a menvcfg enable bit (CDE) to enable this extension
and the virtualization of scountovf.
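
For example, once menvcfg.CDE is set and mcounteren delegates the
counter, S-mode reaches hpmcounter3 and its event selector through the
indirect window. Illustrative sketch only; csr_write()/csr_read() are
placeholder accessors, not part of this patch:

    csr_write(CSR_SISELECT, ISELECT_CD_FIRST + 3); /* pick hpmcounter3 */
    cnt = csr_read(CSR_SIREG);   /* delegated mhpmcounter3 */
    evt = csr_read(CSR_SIREG2);  /* delegated mhpmevent3, MINH masked */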
Signed-off-by: Kaiwen Xue <kaiwenx@rivosinc.com>
Co-developed-by: Atish Patra <atishp@rivosinc.com>
Signed-off-by: Atish Patra <atishp@rivosinc.com>
---
target/riscv/csr.c | 304 ++++++++++++++++++++++++++++++++++++++++++++++++++---
1 file changed, 292 insertions(+), 12 deletions(-)
diff --git a/target/riscv/csr.c b/target/riscv/csr.c
index 0985dbdca76d..a77b6ed4c9f3 100644
--- a/target/riscv/csr.c
+++ b/target/riscv/csr.c
@@ -383,6 +383,21 @@ static RISCVException aia_smode32(CPURISCVState *env, int csrno)
return smode32(env, csrno);
}
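+/*
+ * Predicate for scountinhibit: the CSR is only present when both Ssccfg
+ * and Smcdeleg are available, and any VS/VU-mode access raises a virtual
+ * instruction exception.
+ */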
+static RISCVException scountinhibit_pred(CPURISCVState *env, int csrno)
+{
+ RISCVCPU *cpu = env_archcpu(env);
+
+ if (!cpu->cfg.ext_ssccfg || !cpu->cfg.ext_smcdeleg) {
+ return RISCV_EXCP_ILLEGAL_INST;
+ }
+
+ if (env->virt_enabled) {
+ return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
+ }
+
+ return smode(env, csrno);
+}
+
static bool csrind_extensions_present(CPURISCVState *env)
{
return riscv_cpu_cfg(env)->ext_smcsrind ||
riscv_cpu_cfg(env)->ext_sscsrind;
@@ -1220,10 +1235,9 @@ done:
return result;
}
-static RISCVException write_mhpmcounter(CPURISCVState *env, int csrno,
- target_ulong val)
+static RISCVException riscv_pmu_write_ctr(CPURISCVState *env, target_ulong val,
+ uint32_t ctr_idx)
{
- int ctr_idx = csrno - CSR_MCYCLE;
PMUCTRState *counter = &env->pmu_ctrs[ctr_idx];
uint64_t mhpmctr_val = val;
@@ -1248,10 +1262,9 @@ static RISCVException write_mhpmcounter(CPURISCVState *env, int csrno,
return RISCV_EXCP_NONE;
}
-static RISCVException write_mhpmcounterh(CPURISCVState *env, int csrno,
- target_ulong val)
+static RISCVException riscv_pmu_write_ctrh(CPURISCVState *env, target_ulong
val,
+ uint32_t ctr_idx)
{
- int ctr_idx = csrno - CSR_MCYCLEH;
PMUCTRState *counter = &env->pmu_ctrs[ctr_idx];
uint64_t mhpmctr_val = counter->mhpmcounter_val;
uint64_t mhpmctrh_val = val;
@@ -1273,6 +1286,20 @@ static RISCVException write_mhpmcounterh(CPURISCVState *env, int csrno,
return RISCV_EXCP_NONE;
}
+static int write_mhpmcounter(CPURISCVState *env, int csrno, target_ulong val)
+{
+ int ctr_idx = csrno - CSR_MCYCLE;
+
+ return riscv_pmu_write_ctr(env, val, ctr_idx);
+}
+
+static int write_mhpmcounterh(CPURISCVState *env, int csrno, target_ulong val)
+{
+ int ctr_idx = csrno - CSR_MCYCLEH;
+
+ return riscv_pmu_write_ctrh(env, val, ctr_idx);
+}
+
RISCVException riscv_pmu_read_ctr(CPURISCVState *env, target_ulong *val,
bool upper_half, uint32_t ctr_idx)
{
@@ -1338,6 +1365,167 @@ static RISCVException read_hpmcounterh(CPURISCVState *env, int csrno,
return riscv_pmu_read_ctr(env, val, true, ctr_index);
}
+static int rmw_cd_mhpmcounter(CPURISCVState *env, int ctr_idx,
+ target_ulong *val, target_ulong new_val,
+ target_ulong wr_mask)
+{
+ if (wr_mask != 0 && wr_mask != -1) {
+ return -EINVAL;
+ }
+
+ if (!wr_mask && val) {
+ riscv_pmu_read_ctr(env, val, false, ctr_idx);
+ } else if (wr_mask) {
+ riscv_pmu_write_ctr(env, new_val, ctr_idx);
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rmw_cd_mhpmcounterh(CPURISCVState *env, int ctr_idx,
+ target_ulong *val, target_ulong new_val,
+ target_ulong wr_mask)
+{
+ if (wr_mask != 0 && wr_mask != -1) {
+ return -EINVAL;
+ }
+
+ if (!wr_mask && val) {
+ riscv_pmu_read_ctr(env, val, true, ctr_idx);
+ } else if (wr_mask) {
+ riscv_pmu_write_ctrh(env, new_val, ctr_idx);
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
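+/*
+ * Reads of mhpmevent/mhpmeventh through the delegation window hide the
+ * M-mode inhibit (MINH) bit when Sscofpmf is implemented, and writes can
+ * never set it, so S-mode cannot observe or control M-mode filtering.
+ */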
+static int rmw_cd_mhpmevent(CPURISCVState *env, int evt_index,
+ target_ulong *val, target_ulong new_val,
+ target_ulong wr_mask)
+{
+ uint64_t mhpmevt_val = new_val;
+
+ if (wr_mask != 0 && wr_mask != -1) {
+ return -EINVAL;
+ }
+
+ if (!wr_mask && val) {
+ *val = env->mhpmevent_val[evt_index];
+ if (riscv_cpu_cfg(env)->ext_sscofpmf) {
+ *val &= ~MHPMEVENT_BIT_MINH;
+ }
+ } else if (wr_mask) {
+ wr_mask &= ~MHPMEVENT_BIT_MINH;
+ mhpmevt_val = (new_val & wr_mask) |
+ (env->mhpmevent_val[evt_index] & ~wr_mask);
+ if (riscv_cpu_mxl(env) == MXL_RV32) {
+ mhpmevt_val = mhpmevt_val |
+ ((uint64_t)env->mhpmeventh_val[evt_index] << 32);
+ }
+ env->mhpmevent_val[evt_index] = mhpmevt_val;
+ riscv_pmu_update_event_map(env, mhpmevt_val, evt_index);
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rmw_cd_mhpmeventh(CPURISCVState *env, int evt_index,
+ target_ulong *val, target_ulong new_val,
+ target_ulong wr_mask)
+{
+ uint64_t mhpmevth_val;
+ uint64_t mhpmevt_val = env->mhpmevent_val[evt_index];
+
+ if (wr_mask != 0 && wr_mask != -1) {
+ return -EINVAL;
+ }
+
+ if (!wr_mask && val) {
+ *val = env->mhpmeventh_val[evt_index];
+ if (riscv_cpu_cfg(env)->ext_sscofpmf) {
+ *val &= ~MHPMEVENTH_BIT_MINH;
+ }
+ } else if (wr_mask) {
+ wr_mask &= ~MHPMEVENTH_BIT_MINH;
+ env->mhpmeventh_val[evt_index] =
+ (new_val & wr_mask) | (env->mhpmeventh_val[evt_index] & ~wr_mask);
+ mhpmevth_val = env->mhpmeventh_val[evt_index];
+ mhpmevt_val = mhpmevt_val | (mhpmevth_val << 32);
+ riscv_pmu_update_event_map(env, mhpmevt_val, evt_index);
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
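+/*
+ * cfg_index mirrors the siselect offset: 0 selects CYCLECFG, 2 selects
+ * INSTRETCFG; offset 1 (time) is reserved and filtered out by the caller.
+ */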
+static int rmw_cd_ctr_cfg(CPURISCVState *env, int cfg_index, target_ulong *val,
+ target_ulong new_val, target_ulong wr_mask)
+{
+ switch (cfg_index) {
+ case 0: /* CYCLECFG */
+ if (wr_mask) {
+ wr_mask &= ~MCYCLECFG_BIT_MINH;
+ env->mcyclecfg = (new_val & wr_mask) | (env->mcyclecfg & ~wr_mask);
+ } else {
+ *val = env->mcyclecfg & ~MCYCLECFG_BIT_MINH;
+ }
+ break;
+ case 2: /* INSTRETCFG */
+ if (wr_mask) {
+ wr_mask &= ~MINSTRETCFG_BIT_MINH;
+ env->minstretcfg = (new_val & wr_mask) |
+ (env->minstretcfg & ~wr_mask);
+ } else {
+ *val = env->minstretcfg & ~MINSTRETCFG_BIT_MINH;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int rmw_cd_ctr_cfgh(CPURISCVState *env, int cfg_index, target_ulong *val,
+ target_ulong new_val, target_ulong wr_mask)
+{
+ if (riscv_cpu_mxl(env) != MXL_RV32) {
+ return RISCV_EXCP_ILLEGAL_INST;
+ }
+
+ switch (cfg_index) {
+ case 0: /* CYCLECFGH */
+ if (wr_mask) {
+ wr_mask &= ~MCYCLECFGH_BIT_MINH;
+ env->mcyclecfgh = (new_val & wr_mask) |
+ (env->mcyclecfgh & ~wr_mask);
+ } else {
+ *val = env->mcyclecfgh;
+ }
+ break;
+ case 2: /* INSTRETCFGH */
+ if (wr_mask) {
+ wr_mask &= ~MINSTRETCFGH_BIT_MINH;
+ env->minstretcfgh = (new_val & wr_mask) |
+ (env->minstretcfgh & ~wr_mask);
+ } else {
+ *val = env->minstretcfgh;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
static RISCVException read_scountovf(CPURISCVState *env, int csrno,
target_ulong *val)
{
@@ -1347,6 +1535,14 @@ static RISCVException read_scountovf(CPURISCVState *env, int csrno,
target_ulong *mhpm_evt_val;
uint64_t of_bit_mask;
+ /* Virtualize scountovf for counter delegation */
+ if (riscv_cpu_cfg(env)->ext_sscofpmf &&
+ riscv_cpu_cfg(env)->ext_ssccfg &&
+ get_field(env->menvcfg, MENVCFG_CDE) &&
+ env->virt_enabled) {
+ return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
+ }
+
if (riscv_cpu_mxl(env) == MXL_RV32) {
mhpm_evt_val = env->mhpmeventh_val;
of_bit_mask = MHPMEVENTH_BIT_OF;
@@ -2290,11 +2486,72 @@ static int rmw_xireg_cd(CPURISCVState *env, int csrno,
target_ulong isel, target_ulong *val,
target_ulong new_val, target_ulong wr_mask)
{
- if (!riscv_cpu_cfg(env)->ext_smcdeleg) {
- return RISCV_EXCP_ILLEGAL_INST;
+ int ret = -EINVAL;
+ int ctr_index = isel - ISELECT_CD_FIRST;
+ int isel_hpm_start = ISELECT_CD_FIRST + 3;
+
+ if (!riscv_cpu_cfg(env)->ext_smcdeleg || !riscv_cpu_cfg(env)->ext_ssccfg) {
+ ret = RISCV_EXCP_ILLEGAL_INST;
+ goto done;
}
- /* TODO: Implement the functionality later */
- return RISCV_EXCP_NONE;
+
+ /* siselect value 1 (time) is reserved */
+ if (ctr_index == 1) {
+ goto done;
+ }
+
+ /* sireg4 and sireg5 provide access to RV32-only CSRs */
+ if (((csrno == CSR_SIREG5) || (csrno == CSR_SIREG4)) &&
+ (riscv_cpu_mxl(env) != MXL_RV32)) {
+ ret = RISCV_EXCP_ILLEGAL_INST;
+ goto done;
+ }
+
+ /* Check the Sscofpmf dependency */
+ if (!riscv_cpu_cfg(env)->ext_sscofpmf && csrno == CSR_SIREG5 &&
+ (isel_hpm_start <= isel && isel <= ISELECT_CD_LAST)) {
+ goto done;
+ }
+
+ /* Check the Smcntrpmf dependency */
+ if (!riscv_cpu_cfg(env)->ext_smcntrpmf &&
+ (csrno == CSR_SIREG2 || csrno == CSR_SIREG5) &&
+ (ISELECT_CD_FIRST <= isel && isel < isel_hpm_start)) {
+ goto done;
+ }
+
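+ /* The counter must be delegated via mcounteren and menvcfg.CDE set */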
+ if (!get_field(env->mcounteren, BIT(ctr_index)) ||
+ !get_field(env->menvcfg, MENVCFG_CDE)) {
+ goto done;
+ }
+
+ switch (csrno) {
+ case CSR_SIREG:
+ ret = rmw_cd_mhpmcounter(env, ctr_index, val, new_val, wr_mask);
+ break;
+ case CSR_SIREG4:
+ ret = rmw_cd_mhpmcounterh(env, ctr_index, val, new_val, wr_mask);
+ break;
+ case CSR_SIREG2:
+ if (ctr_index <= 2) {
+ ret = rmw_cd_ctr_cfg(env, ctr_index, val, new_val, wr_mask);
+ } else {
+ ret = rmw_cd_mhpmevent(env, ctr_index, val, new_val, wr_mask);
+ }
+ break;
+ case CSR_SIREG5:
+ if (ctr_index <= 2) {
+ ret = rmw_cd_ctr_cfgh(env, ctr_index, val, new_val, wr_mask);
+ } else {
+ ret = rmw_cd_mhpmeventh(env, ctr_index, val, new_val, wr_mask);
+ }
+ break;
+ default:
+ goto done;
+ }
+
+done:
+ return ret;
}
/*
@@ -2573,6 +2830,21 @@ static RISCVException write_mcountinhibit(CPURISCVState *env, int csrno,
return RISCV_EXCP_NONE;
}
+static RISCVException read_scountinhibit(CPURISCVState *env, int csrno,
+ target_ulong *val)
+{
+ /* S-mode can only access the bits delegated by M-mode */
+ *val = env->mcountinhibit & env->mcounteren;
+ return RISCV_EXCP_NONE;
+}
+
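+/*
+ * Bits not delegated through mcounteren are masked from the written
+ * value, so S-mode can only inhibit its own counters.
+ */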
+static RISCVException write_scountinhibit(CPURISCVState *env, int csrno,
+ target_ulong val)
+{
+ write_mcountinhibit(env, csrno, val & env->mcounteren);
+ return RISCV_EXCP_NONE;
+}
+
static RISCVException read_mcounteren(CPURISCVState *env, int csrno,
target_ulong *val)
{
@@ -2675,11 +2947,13 @@ static RISCVException write_menvcfg(CPURISCVState *env, int csrno,
target_ulong val)
{
const RISCVCPUConfig *cfg = riscv_cpu_cfg(env);
uint64_t mask = MENVCFG_FIOM | MENVCFG_CBIE | MENVCFG_CBCFE | MENVCFG_CBZE;
if (riscv_cpu_mxl(env) == MXL_RV64) {
mask |= (cfg->ext_svpbmt ? MENVCFG_PBMTE : 0) |
(cfg->ext_sstc ? MENVCFG_STCE : 0) |
+ (cfg->ext_smcdeleg ? MENVCFG_CDE : 0) |
(cfg->ext_svadu ? MENVCFG_ADUE : 0);
if (env_archcpu(env)->cfg.ext_zicfilp) {
@@ -2708,7 +2982,8 @@ static RISCVException write_menvcfgh(CPURISCVState *env, int csrno,
const RISCVCPUConfig *cfg = riscv_cpu_cfg(env);
uint64_t mask = (cfg->ext_svpbmt ? MENVCFG_PBMTE : 0) |
(cfg->ext_sstc ? MENVCFG_STCE : 0) |
- (cfg->ext_svadu ? MENVCFG_ADUE : 0);
+ (cfg->ext_svadu ? MENVCFG_ADUE : 0) |
+ (cfg->ext_smcdeleg ? MENVCFG_CDE : 0);
uint64_t valh = (uint64_t)val << 32;
env->menvcfg = (env->menvcfg & ~mask) | (valh & mask);
@@ -5493,6 +5768,11 @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = {
write_sstateen_1_3,
.min_priv_ver = PRIV_VERSION_1_12_0 },
+ /* Supervisor Counter Delegation */
+ [CSR_SCOUNTINHIBIT] = {"scountinhibit", scountinhibit_pred,
+ read_scountinhibit, write_scountinhibit,
+ .min_priv_ver = PRIV_VERSION_1_12_0 },
+
/* Supervisor Trap Setup */
[CSR_SSTATUS] = { "sstatus", smode, read_sstatus, write_sstatus,
NULL, read_sstatus_i128 },
--
2.34.1