From: Atish Patra
Subject: [PATCH v8 07/13] target/riscv: Implement privilege mode filtering for cycle/instret
Date: Thu, 11 Jul 2024 15:31:10 -0700

Privilege mode filtering can also be emulated for cycle/instret by
tracking host ticks/icount on each privilege mode switch. This patch
implements that for both cycle/instret and the mhpmcounters. The former
requires Smcntrpmf while the latter requires Sscofpmf to be enabled.

cycle/instret are still computed from host ticks when icount is not
enabled. Otherwise, they are computed from raw icount, which is more
accurate in icount mode.
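
(Illustration, not part of the patch: the accounting described above boils
down to crediting the outgoing privilege mode with the ticks elapsed since
its last snapshot and re-snapshotting for the incoming mode. The sketch
below uses hypothetical names, not the QEMU identifiers introduced in the
diff.)

    /* Sketch only: per-mode tick accounting on a privilege switch. */
    #include <stdint.h>

    enum { NUM_PRV = 4 };

    static uint64_t mode_total[NUM_PRV];    /* ticks accumulated per mode    */
    static uint64_t mode_snapshot[NUM_PRV]; /* tick value at last mode entry */

    /* 'now' is the current cycle/instret source (host ticks or icount). */
    static void account_mode_switch(uint64_t now, int old_prv, int new_prv)
    {
        mode_total[old_prv] += now - mode_snapshot[old_prv];
        mode_snapshot[new_prv] = now;
    }
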
Co-Developed-by: Rajnesh Kanwal <rkanwal@rivosinc.com>
Signed-off-by: Rajnesh Kanwal <rkanwal@rivosinc.com>
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
Acked-by: Alistair Francis <alistair.francis@wdc.com>
Signed-off-by: Atish Patra <atishp@rivosinc.com>
---
target/riscv/cpu.h | 11 +++++
target/riscv/cpu_helper.c | 9 +++-
target/riscv/csr.c | 117 ++++++++++++++++++++++++++++++++--------------
target/riscv/pmu.c | 92 ++++++++++++++++++++++++++++++++++++
target/riscv/pmu.h | 2 +
5 files changed, 194 insertions(+), 37 deletions(-)
diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
index c5d289e5f4b9..d56d640b06be 100644
--- a/target/riscv/cpu.h
+++ b/target/riscv/cpu.h
@@ -158,6 +158,15 @@ typedef struct PMUCTRState {
target_ulong irq_overflow_left;
} PMUCTRState;
+typedef struct PMUFixedCtrState {
+ /* Track cycle and icount for each privilege mode */
+ uint64_t counter[4];
+ uint64_t counter_prev[4];
+ /* Track cycle and icount for each privilege mode when V = 1*/
+ uint64_t counter_virt[2];
+ uint64_t counter_virt_prev[2];
+} PMUFixedCtrState;
+
struct CPUArchState {
target_ulong gpr[32];
target_ulong gprh[32]; /* 64 top bits of the 128-bit registers */
@@ -354,6 +363,8 @@ struct CPUArchState {
/* PMU event selector configured values for RV32 */
target_ulong mhpmeventh_val[RV_MAX_MHPMEVENTS];
+ PMUFixedCtrState pmu_fixed_ctrs[2];
+
target_ulong sscratch;
target_ulong mscratch;
diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
index 10d3fdaed376..395a1d914061 100644
--- a/target/riscv/cpu_helper.c
+++ b/target/riscv/cpu_helper.c
@@ -695,9 +695,14 @@ void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv, bool virt_en)
{
g_assert(newpriv <= PRV_M && newpriv != PRV_RESERVED);
- if (icount_enabled() && newpriv != env->priv) {
- riscv_itrigger_update_priv(env);
+ if (newpriv != env->priv || env->virt_enabled != virt_en) {
+ if (icount_enabled()) {
+ riscv_itrigger_update_priv(env);
+ }
+
+ riscv_pmu_update_fixed_ctrs(env, newpriv, virt_en);
}
+
/* tlb_flush is unnecessary as mode is contained in mmu_idx */
env->priv = newpriv;
env->xl = cpu_recompute_xl(env);
diff --git a/target/riscv/csr.c b/target/riscv/csr.c
index 121996edab4b..53f1a7ff00db 100644
--- a/target/riscv/csr.c
+++ b/target/riscv/csr.c
@@ -787,36 +787,16 @@ static RISCVException write_vcsr(CPURISCVState *env, int csrno,
return RISCV_EXCP_NONE;
}
+#if defined(CONFIG_USER_ONLY)
/* User Timers and Counters */
-static target_ulong get_ticks(bool shift, bool instructions)
+static target_ulong get_ticks(bool shift)
{
- int64_t val;
- target_ulong result;
-
-#if !defined(CONFIG_USER_ONLY)
- if (icount_enabled()) {
- if (instructions) {
- val = icount_get_raw();
- } else {
- val = icount_get();
- }
- } else {
- val = cpu_get_host_ticks();
- }
-#else
- val = cpu_get_host_ticks();
-#endif
-
- if (shift) {
- result = val >> 32;
- } else {
- result = val;
- }
+ int64_t val = cpu_get_host_ticks();
+ target_ulong result = shift ? val >> 32 : val;
return result;
}
-#if defined(CONFIG_USER_ONLY)
static RISCVException read_time(CPURISCVState *env, int csrno,
target_ulong *val)
{
@@ -834,14 +814,14 @@ static RISCVException read_timeh(CPURISCVState *env, int csrno,
static RISCVException read_hpmcounter(CPURISCVState *env, int csrno,
target_ulong *val)
{
- *val = get_ticks(false, (csrno == CSR_INSTRET));
+ *val = get_ticks(false);
return RISCV_EXCP_NONE;
}
static RISCVException read_hpmcounterh(CPURISCVState *env, int csrno,
target_ulong *val)
{
- *val = get_ticks(true, (csrno == CSR_INSTRETH));
+ *val = get_ticks(true);
return RISCV_EXCP_NONE;
}
@@ -1025,17 +1005,82 @@ static RISCVException write_mhpmeventh(CPURISCVState *env, int csrno,
return RISCV_EXCP_NONE;
}
+static target_ulong riscv_pmu_ctr_get_fixed_counters_val(CPURISCVState *env,
+ int counter_idx,
+ bool upper_half)
+{
+ int inst = riscv_pmu_ctr_monitor_instructions(env, counter_idx);
+ uint64_t *counter_arr_virt = env->pmu_fixed_ctrs[inst].counter_virt;
+ uint64_t *counter_arr = env->pmu_fixed_ctrs[inst].counter;
+ target_ulong result = 0;
+ uint64_t curr_val = 0;
+ uint64_t cfg_val = 0;
+
+ if (counter_idx == 0) {
+ cfg_val = upper_half ? ((uint64_t)env->mcyclecfgh << 32) :
+ env->mcyclecfg;
+ } else if (counter_idx == 2) {
+ cfg_val = upper_half ? ((uint64_t)env->minstretcfgh << 32) :
+ env->minstretcfg;
+ } else {
+ cfg_val = upper_half ?
+ ((uint64_t)env->mhpmeventh_val[counter_idx] << 32) :
+ env->mhpmevent_val[counter_idx];
+ cfg_val &= MHPMEVENT_FILTER_MASK;
+ }
+
+ if (!cfg_val) {
+ if (icount_enabled()) {
+ curr_val = inst ? icount_get_raw() : icount_get();
+ } else {
+ curr_val = cpu_get_host_ticks();
+ }
+
+ goto done;
+ }
+
+ if (!(cfg_val & MCYCLECFG_BIT_MINH)) {
+ curr_val += counter_arr[PRV_M];
+ }
+
+ if (!(cfg_val & MCYCLECFG_BIT_SINH)) {
+ curr_val += counter_arr[PRV_S];
+ }
+
+ if (!(cfg_val & MCYCLECFG_BIT_UINH)) {
+ curr_val += counter_arr[PRV_U];
+ }
+
+ if (!(cfg_val & MCYCLECFG_BIT_VSINH)) {
+ curr_val += counter_arr_virt[PRV_S];
+ }
+
+ if (!(cfg_val & MCYCLECFG_BIT_VUINH)) {
+ curr_val += counter_arr_virt[PRV_U];
+ }
+
+done:
+ if (riscv_cpu_mxl(env) == MXL_RV32) {
+ result = upper_half ? curr_val >> 32 : curr_val;
+ } else {
+ result = curr_val;
+ }
+
+ return result;
+}
+
static RISCVException write_mhpmcounter(CPURISCVState *env, int csrno,
target_ulong val)
{
int ctr_idx = csrno - CSR_MCYCLE;
PMUCTRState *counter = &env->pmu_ctrs[ctr_idx];
uint64_t mhpmctr_val = val;
- bool instr = riscv_pmu_ctr_monitor_instructions(env, ctr_idx);
counter->mhpmcounter_val = val;
- if (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) || instr) {
- counter->mhpmcounter_prev = get_ticks(false, instr);
+ if (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) ||
+ riscv_pmu_ctr_monitor_instructions(env, ctr_idx)) {
+ counter->mhpmcounter_prev = riscv_pmu_ctr_get_fixed_counters_val(env,
+ ctr_idx, false);
if (ctr_idx > 2) {
if (riscv_cpu_mxl(env) == MXL_RV32) {
mhpmctr_val = mhpmctr_val |
@@ -1058,12 +1103,13 @@ static RISCVException write_mhpmcounterh(CPURISCVState *env, int csrno,
PMUCTRState *counter = &env->pmu_ctrs[ctr_idx];
uint64_t mhpmctr_val = counter->mhpmcounter_val;
uint64_t mhpmctrh_val = val;
- bool instr = riscv_pmu_ctr_monitor_instructions(env, ctr_idx);
counter->mhpmcounterh_val = val;
mhpmctr_val = mhpmctr_val | (mhpmctrh_val << 32);
- if (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) || instr) {
- counter->mhpmcounterh_prev = get_ticks(true, instr);
+ if (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) ||
+ riscv_pmu_ctr_monitor_instructions(env, ctr_idx)) {
+ counter->mhpmcounterh_prev = riscv_pmu_ctr_get_fixed_counters_val(env,
+ ctr_idx, true);
if (ctr_idx > 2) {
riscv_pmu_setup_timer(env, mhpmctr_val, ctr_idx);
}
@@ -1082,7 +1128,6 @@ static RISCVException riscv_pmu_read_ctr(CPURISCVState *env, target_ulong *val,
counter->mhpmcounter_prev;
target_ulong ctr_val = upper_half ? counter->mhpmcounterh_val :
counter->mhpmcounter_val;
- bool instr = riscv_pmu_ctr_monitor_instructions(env, ctr_idx);
if (get_field(env->mcountinhibit, BIT(ctr_idx))) {
/*
@@ -1103,8 +1148,10 @@ static RISCVException riscv_pmu_read_ctr(CPURISCVState *env, target_ulong *val,
* The kernel computes the perf delta by subtracting the current value from
* the value it initialized previously (ctr_val).
*/
- if (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) || instr) {
- *val = get_ticks(upper_half, instr) - ctr_prev + ctr_val;
+ if (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) ||
+ riscv_pmu_ctr_monitor_instructions(env, ctr_idx)) {
+ *val = riscv_pmu_ctr_get_fixed_counters_val(env, ctr_idx, upper_half) -
+ ctr_prev + ctr_val;
} else {
*val = ctr_val;
}
diff --git a/target/riscv/pmu.c b/target/riscv/pmu.c
index 0e7d58b8a5c2..ac648cff8d7c 100644
--- a/target/riscv/pmu.c
+++ b/target/riscv/pmu.c
@@ -19,6 +19,7 @@
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/error-report.h"
+#include "qemu/timer.h"
#include "cpu.h"
#include "pmu.h"
#include "sysemu/cpu-timers.h"
@@ -176,6 +177,97 @@ static int riscv_pmu_incr_ctr_rv64(RISCVCPU *cpu, uint32_t ctr_idx)
return 0;
}
+/*
+ * Information needed to update counters:
+ * new_priv, new_virt: To correctly save starting snapshot for the newly
+ * started mode. Look at array being indexed with newprv.
+ * old_priv, old_virt: To correctly select previous snapshot for old priv
+ * and compute delta. Also to select correct counter
+ * to inc. Look at arrays being indexed with env->priv.
+ *
+ * To avoid the complexity of calling this function, we assume that
+ * env->priv and env->virt_enabled contain old priv and old virt and
+ * new priv and new virt values are passed in as arguments.
+ */
+static void riscv_pmu_icount_update_priv(CPURISCVState *env,
+ target_ulong newpriv, bool new_virt)
+{
+ uint64_t *snapshot_prev, *snapshot_new;
+ uint64_t current_icount;
+ uint64_t *counter_arr;
+ uint64_t delta;
+
+ if (icount_enabled()) {
+ current_icount = icount_get_raw();
+ } else {
+ current_icount = cpu_get_host_ticks();
+ }
+
+ if (env->virt_enabled) {
+ counter_arr = env->pmu_fixed_ctrs[1].counter_virt;
+ snapshot_prev = env->pmu_fixed_ctrs[1].counter_virt_prev;
+ } else {
+ counter_arr = env->pmu_fixed_ctrs[1].counter;
+ snapshot_prev = env->pmu_fixed_ctrs[1].counter_prev;
+ }
+
+ if (new_virt) {
+ snapshot_new = env->pmu_fixed_ctrs[1].counter_virt_prev;
+ } else {
+ snapshot_new = env->pmu_fixed_ctrs[1].counter_prev;
+ }
+
+ /*
+ * new_priv can be same as env->priv. So we need to calculate
+ * delta first before updating snapshot_new[new_priv].
+ */
+ delta = current_icount - snapshot_prev[env->priv];
+ snapshot_new[newpriv] = current_icount;
+
+ counter_arr[env->priv] += delta;
+}
+
+static void riscv_pmu_cycle_update_priv(CPURISCVState *env,
+ target_ulong newpriv, bool new_virt)
+{
+ uint64_t *snapshot_prev, *snapshot_new;
+ uint64_t current_ticks;
+ uint64_t *counter_arr;
+ uint64_t delta;
+
+ if (icount_enabled()) {
+ current_ticks = icount_get();
+ } else {
+ current_ticks = cpu_get_host_ticks();
+ }
+
+ if (env->virt_enabled) {
+ counter_arr = env->pmu_fixed_ctrs[0].counter_virt;
+ snapshot_prev = env->pmu_fixed_ctrs[0].counter_virt_prev;
+ } else {
+ counter_arr = env->pmu_fixed_ctrs[0].counter;
+ snapshot_prev = env->pmu_fixed_ctrs[0].counter_prev;
+ }
+
+ if (new_virt) {
+ snapshot_new = env->pmu_fixed_ctrs[0].counter_virt_prev;
+ } else {
+ snapshot_new = env->pmu_fixed_ctrs[0].counter_prev;
+ }
+
+ delta = current_ticks - snapshot_prev[env->priv];
+ snapshot_new[newpriv] = current_ticks;
+
+ counter_arr[env->priv] += delta;
+}
+
+void riscv_pmu_update_fixed_ctrs(CPURISCVState *env, target_ulong newpriv,
+ bool new_virt)
+{
+ riscv_pmu_cycle_update_priv(env, newpriv, new_virt);
+ riscv_pmu_icount_update_priv(env, newpriv, new_virt);
+}
+
int riscv_pmu_incr_ctr(RISCVCPU *cpu, enum riscv_pmu_event_idx event_idx)
{
uint32_t ctr_idx;
diff --git a/target/riscv/pmu.h b/target/riscv/pmu.h
index 7c0ad661e050..ca40cfeed647 100644
--- a/target/riscv/pmu.h
+++ b/target/riscv/pmu.h
@@ -34,5 +34,7 @@ int riscv_pmu_incr_ctr(RISCVCPU *cpu, enum riscv_pmu_event_idx event_idx);
void riscv_pmu_generate_fdt_node(void *fdt, uint32_t cmask, char *pmu_name);
int riscv_pmu_setup_timer(CPURISCVState *env, uint64_t value,
uint32_t ctr_idx);
+void riscv_pmu_update_fixed_ctrs(CPURISCVState *env, target_ulong newpriv,
+ bool new_virt);
#endif /* RISCV_PMU_H */
--
2.34.1
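
(Illustration, not part of the patch: the virtualization handling in
riscv_pmu_icount_update_priv()/riscv_pmu_cycle_update_priv() above credits
the delta against the old (env->priv, env->virt_enabled) state while
storing the fresh snapshot for the new (newpriv, new_virt) state, so the
V=1 and V=0 bucket/snapshot arrays are selected independently for each
side. A sketch with hypothetical names, not the QEMU structures:)

    /* Sketch only: the old state picks the bucket to credit, the new
     * state picks where the fresh snapshot is stored. */
    #include <stdbool.h>
    #include <stdint.h>

    struct buckets {
        uint64_t total[4];     /* per-mode totals,    V = 0 */
        uint64_t snap[4];      /* per-mode snapshots, V = 0 */
        uint64_t total_v[4];   /* per-mode totals,    V = 1 */
        uint64_t snap_v[4];    /* per-mode snapshots, V = 1 */
    };

    static void update_on_switch(struct buckets *b, uint64_t now,
                                 int old_prv, bool old_virt,
                                 int new_prv, bool new_virt)
    {
        uint64_t *total    = old_virt ? b->total_v : b->total;
        uint64_t *snap_old = old_virt ? b->snap_v  : b->snap;
        uint64_t *snap_new = new_virt ? b->snap_v  : b->snap;

        /* Compute the delta before writing the new snapshot, since
         * new_prv may equal old_prv. */
        uint64_t delta = now - snap_old[old_prv];

        snap_new[new_prv] = now;
        total[old_prv] += delta;
    }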