From: Alexander Graf
Subject: [PATCH v12 09/10] hvf: arm: Add rudimentary PMC support
Date: Thu, 16 Sep 2021 17:54:03 +0200
We can expose cycle counters on the PMU easily. To be as compatible as
possible, let's do so, but make sure we don't expose any other architectural
counters that we cannot model yet.

This allows OSs that require PMU support to work.
Signed-off-by: Alexander Graf <agraf@csgraf.de>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
---
target/arm/hvf/hvf.c | 179 +++++++++++++++++++++++++++++++++++++++++++
1 file changed, 179 insertions(+)
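
[Editor's note] As a quick illustration of what the commit message means in
practice: with PMCEID0/PMCEID1 reading as zero, the cycle counter is the only
counter a guest can usefully program, and the register accesses it needs for
that are all handled by the hunks below. The guest-side fragment that follows
is a minimal sketch, not part of the patch, and the helper names are made up
for illustration. It shows the usual sequence: enable the PMU and reset the
cycle counter via PMCR_EL0, clear the cycle counter filter via PMCCFILTR_EL0,
enable PMCCNTR via PMCNTENSET_EL0, then read PMCCNTR_EL0. It assumes it runs
at EL1, or at EL0 with PMUSERENR_EL0 configured to permit the accesses.

#include <stdint.h>

/* Hypothetical guest-side helpers; names are illustrative only. */

static inline void pmu_enable_cycle_counter(void)
{
    uint64_t pmcr;

    /* PMCR_EL0: set E (bit 0) to enable the PMU, C (bit 2) to reset PMCCNTR */
    __asm__ volatile("mrs %0, pmcr_el0" : "=r"(pmcr));
    pmcr |= (1 << 0) | (1 << 2);
    __asm__ volatile("msr pmcr_el0, %0" : : "r"(pmcr));

    /* Clear the EL filter bits so cycles are counted at EL0 and EL1 */
    __asm__ volatile("msr pmccfiltr_el0, xzr");

    /* Bit 31 of PMCNTENSET_EL0 enables the cycle counter */
    __asm__ volatile("msr pmcntenset_el0, %0" : : "r"((uint64_t)1 << 31));
    __asm__ volatile("isb");
}

static inline uint64_t pmu_read_cycles(void)
{
    uint64_t cycles;

    __asm__ volatile("isb");
    __asm__ volatile("mrs %0, pmccntr_el0" : "=r"(cycles));
    return cycles;
}

With this patch applied, each of these MSR/MRS accesses traps to QEMU and is
served by hvf_sysreg_read()/hvf_sysreg_write() below from the emulated cp15
state, so the guest sees a consistent, if minimal, PMU.
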
diff --git a/target/arm/hvf/hvf.c b/target/arm/hvf/hvf.c
index 996f93a69d..bff3e0cde7 100644
--- a/target/arm/hvf/hvf.c
+++ b/target/arm/hvf/hvf.c
@@ -42,6 +42,18 @@
#define SYSREG_OSLSR_EL1 SYSREG(2, 0, 1, 1, 4)
#define SYSREG_OSDLR_EL1 SYSREG(2, 0, 1, 3, 4)
#define SYSREG_CNTPCT_EL0 SYSREG(3, 3, 14, 0, 1)
+#define SYSREG_PMCR_EL0 SYSREG(3, 3, 9, 12, 0)
+#define SYSREG_PMUSERENR_EL0 SYSREG(3, 3, 9, 14, 0)
+#define SYSREG_PMCNTENSET_EL0 SYSREG(3, 3, 9, 12, 1)
+#define SYSREG_PMCNTENCLR_EL0 SYSREG(3, 3, 9, 12, 2)
+#define SYSREG_PMINTENCLR_EL1 SYSREG(3, 0, 9, 14, 2)
+#define SYSREG_PMOVSCLR_EL0 SYSREG(3, 3, 9, 12, 3)
+#define SYSREG_PMSWINC_EL0 SYSREG(3, 3, 9, 12, 4)
+#define SYSREG_PMSELR_EL0 SYSREG(3, 3, 9, 12, 5)
+#define SYSREG_PMCEID0_EL0 SYSREG(3, 3, 9, 12, 6)
+#define SYSREG_PMCEID1_EL0 SYSREG(3, 3, 9, 12, 7)
+#define SYSREG_PMCCNTR_EL0 SYSREG(3, 3, 9, 13, 0)
+#define SYSREG_PMCCFILTR_EL0 SYSREG(3, 3, 14, 15, 7)
#define WFX_IS_WFE (1 << 0)
@@ -728,6 +740,40 @@ static int hvf_sysreg_read(CPUState *cpu, uint32_t reg, uint32_t rt)
val = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) /
gt_cntfrq_period_ns(arm_cpu);
break;
+ case SYSREG_PMCR_EL0:
+ val = env->cp15.c9_pmcr;
+ break;
+ case SYSREG_PMCCNTR_EL0:
+ pmu_op_start(env);
+ val = env->cp15.c15_ccnt;
+ pmu_op_finish(env);
+ break;
+ case SYSREG_PMCNTENCLR_EL0:
+ val = env->cp15.c9_pmcnten;
+ break;
+ case SYSREG_PMOVSCLR_EL0:
+ val = env->cp15.c9_pmovsr;
+ break;
+ case SYSREG_PMSELR_EL0:
+ val = env->cp15.c9_pmselr;
+ break;
+ case SYSREG_PMINTENCLR_EL1:
+ val = env->cp15.c9_pminten;
+ break;
+ case SYSREG_PMCCFILTR_EL0:
+ val = env->cp15.pmccfiltr_el0;
+ break;
+ case SYSREG_PMCNTENSET_EL0:
+ val = env->cp15.c9_pmcnten;
+ break;
+ case SYSREG_PMUSERENR_EL0:
+ val = env->cp15.c9_pmuserenr;
+ break;
+ case SYSREG_PMCEID0_EL0:
+ case SYSREG_PMCEID1_EL0:
+ /* We can't really count anything yet, declare all events invalid */
+ val = 0;
+ break;
case SYSREG_OSLSR_EL1:
val = env->cp15.oslsr_el1;
break;
@@ -758,6 +804,82 @@ static int hvf_sysreg_read(CPUState *cpu, uint32_t reg, uint32_t rt)
return 0;
}
+static void pmu_update_irq(CPUARMState *env)
+{
+ ARMCPU *cpu = env_archcpu(env);
+ qemu_set_irq(cpu->pmu_interrupt, (env->cp15.c9_pmcr & PMCRE) &&
+ (env->cp15.c9_pminten & env->cp15.c9_pmovsr));
+}
+
+static bool pmu_event_supported(uint16_t number)
+{
+ return false;
+}
+
+/* Returns true if the counter (pass 31 for PMCCNTR) should count events using
+ * the current EL, security state, and register configuration.
+ */
+static bool pmu_counter_enabled(CPUARMState *env, uint8_t counter)
+{
+ uint64_t filter;
+ bool enabled, filtered = true;
+ int el = arm_current_el(env);
+
+ enabled = (env->cp15.c9_pmcr & PMCRE) &&
+ (env->cp15.c9_pmcnten & (1 << counter));
+
+ if (counter == 31) {
+ filter = env->cp15.pmccfiltr_el0;
+ } else {
+ filter = env->cp15.c14_pmevtyper[counter];
+ }
+
+ if (el == 0) {
+ filtered = filter & PMXEVTYPER_U;
+ } else if (el == 1) {
+ filtered = filter & PMXEVTYPER_P;
+ }
+
+ if (counter != 31) {
+ /*
+ * If not checking PMCCNTR, ensure the counter is setup to an event we
+ * support
+ */
+ uint16_t event = filter & PMXEVTYPER_EVTCOUNT;
+ if (!pmu_event_supported(event)) {
+ return false;
+ }
+ }
+
+ return enabled && !filtered;
+}
+
+static void pmswinc_write(CPUARMState *env, uint64_t value)
+{
+ unsigned int i;
+ for (i = 0; i < pmu_num_counters(env); i++) {
+ /* Increment a counter's count iff: */
+ if ((value & (1 << i)) && /* counter's bit is set */
+ /* counter is enabled and not filtered */
+ pmu_counter_enabled(env, i) &&
+ /* counter is SW_INCR */
+ (env->cp15.c14_pmevtyper[i] & PMXEVTYPER_EVTCOUNT) == 0x0) {
+ /*
+ * Detect if this write causes an overflow since we can't predict
+ * PMSWINC overflows like we can for other events
+ */
+ uint32_t new_pmswinc = env->cp15.c14_pmevcntr[i] + 1;
+
+ if (env->cp15.c14_pmevcntr[i] & ~new_pmswinc & INT32_MIN) {
+ env->cp15.c9_pmovsr |= (1 << i);
+ pmu_update_irq(env);
+ }
+
+ env->cp15.c14_pmevcntr[i] = new_pmswinc;
+ }
+ }
+}
+
static int hvf_sysreg_write(CPUState *cpu, uint32_t reg, uint64_t val)
{
ARMCPU *arm_cpu = ARM_CPU(cpu);
@@ -772,6 +894,63 @@ static int hvf_sysreg_write(CPUState *cpu, uint32_t reg, uint64_t val)
val);
switch (reg) {
+ case SYSREG_PMCCNTR_EL0:
+ pmu_op_start(env);
+ env->cp15.c15_ccnt = val;
+ pmu_op_finish(env);
+ break;
+ case SYSREG_PMCR_EL0:
+ pmu_op_start(env);
+
+ if (val & PMCRC) {
+ /* The counter has been reset */
+ env->cp15.c15_ccnt = 0;
+ }
+
+ if (val & PMCRP) {
+ unsigned int i;
+ for (i = 0; i < pmu_num_counters(env); i++) {
+ env->cp15.c14_pmevcntr[i] = 0;
+ }
+ }
+
+ env->cp15.c9_pmcr &= ~PMCR_WRITEABLE_MASK;
+ env->cp15.c9_pmcr |= (val & PMCR_WRITEABLE_MASK);
+
+ pmu_op_finish(env);
+ break;
+ case SYSREG_PMUSERENR_EL0:
+ env->cp15.c9_pmuserenr = val & 0xf;
+ break;
+ case SYSREG_PMCNTENSET_EL0:
+ env->cp15.c9_pmcnten |= (val & pmu_counter_mask(env));
+ break;
+ case SYSREG_PMCNTENCLR_EL0:
+ env->cp15.c9_pmcnten &= ~(val & pmu_counter_mask(env));
+ break;
+ case SYSREG_PMINTENCLR_EL1:
+ pmu_op_start(env);
+ env->cp15.c9_pminten |= val;
+ pmu_op_finish(env);
+ break;
+ case SYSREG_PMOVSCLR_EL0:
+ pmu_op_start(env);
+ env->cp15.c9_pmovsr &= ~val;
+ pmu_op_finish(env);
+ break;
+ case SYSREG_PMSWINC_EL0:
+ pmu_op_start(env);
+ pmswinc_write(env, val);
+ pmu_op_finish(env);
+ break;
+ case SYSREG_PMSELR_EL0:
+ env->cp15.c9_pmselr = val & 0x1f;
+ break;
+ case SYSREG_PMCCFILTR_EL0:
+ pmu_op_start(env);
+ env->cp15.pmccfiltr_el0 = val & PMCCFILTR_EL0;
+ pmu_op_finish(env);
+ break;
case SYSREG_OSLAR_EL1:
env->cp15.oslsr_el1 = val & 1;
break;
--
2.30.1 (Apple Git-130)
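
[Editor's note] On the SYSREG_PM* constants added near the top of the patch:
hvf.c dispatches trapped MSR/MRS accesses by comparing the exit syndrome
against these SYSREG() values, so each PMU register is identified by its
architectural op0/op1/CRn/CRm/op2 tuple. The SYSREG() macro itself is defined
earlier in hvf.c and is not visible in this hunk; the stand-alone sketch below
is a hypothetical re-derivation that assumes it packs the tuple the way
ESR_EL2's ISS field lays it out for system register traps (Op0 in bits
[21:20], Op2 in [19:17], Op1 in [16:14], CRn in [13:10], CRm in [4:1]), with
the Rt and direction bits left out so one constant matches both reads and
writes.

/*
 * Hypothetical stand-in for hvf.c's SYSREG() macro, assuming the ESR_EL2
 * ISS layout described above; compile and run on any host to print the
 * values the new PMU #defines would expand to.
 */
#include <stdio.h>

#define SYSREG(op0, op1, crn, crm, op2) \
    (((op0) << 20) | ((op2) << 17) | ((op1) << 14) | ((crn) << 10) | ((crm) << 1))

int main(void)
{
    /* PMCR_EL0 is op0=3, op1=3, CRn=9, CRm=12, op2=0 */
    printf("SYSREG_PMCR_EL0    = 0x%06x\n", (unsigned)SYSREG(3, 3, 9, 12, 0));
    /* PMCCNTR_EL0 is op0=3, op1=3, CRn=9, CRm=13, op2=0 */
    printf("SYSREG_PMCCNTR_EL0 = 0x%06x\n", (unsigned)SYSREG(3, 3, 9, 13, 0));
    return 0;
}

The switch statements in hvf_sysreg_read()/hvf_sysreg_write() only need these
constants to match however hvf.c derives reg from the exit syndrome, so the
exact bit layout is an implementation detail of that macro.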