From: Daniel Henrique Barboza
Subject: [PATCH for-10.0 04/11] hw/riscv/riscv-iommu: add riscv_iommu_hpm_incr_ctr()
Date: Thu, 5 Dec 2024 10:29:56 -0300
From: Tomasz Jeznach <tjeznach@rivosinc.com>
This function increments a specific HPM counter, generating an interrupt
when the counter overflows (and its overflow bit isn't already set).
Some extra changes in riscv-iommu.c were required to add this new
helper in riscv-iommu-hpm.c:
- RISCVIOMMUContext was moved to riscv-iommu.h, making it visible in
riscv-iommu-hpm.c;
- riscv_iommu_notify() is now public.
No behavior change is made since HPM support is not being advertised
yet.
Signed-off-by: Tomasz Jeznach <tjeznach@rivosinc.com>
Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
---
hw/riscv/riscv-iommu-hpm.c | 114 +++++++++++++++++++++++++++++++++++++
hw/riscv/riscv-iommu-hpm.h | 2 +
hw/riscv/riscv-iommu.c | 43 +++++++++-----
hw/riscv/riscv-iommu.h | 18 ++++++
4 files changed, 162 insertions(+), 15 deletions(-)
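
As a standalone illustration of the overflow handling implemented below:
the pre-increment value is compared against UINT64_MAX to detect
wrap-around, and the interrupt fires only if the sticky overflow flag was
previously clear. A minimal sketch of that pattern, using hypothetical
names rather than the QEMU helpers:

    #include <stdint.h>
    #include <stdbool.h>

    /* Hypothetical standalone model of one HPM counter. */
    struct hpm_counter {
        uint64_t value;
        bool ovf;               /* sticky overflow flag, as in IOCOUNTOVF */
    };

    /* Increment; return true when an overflow interrupt should be raised. */
    static bool hpm_counter_incr(struct hpm_counter *c)
    {
        uint64_t prev = c->value++;

        if (prev != UINT64_MAX) {
            return false;       /* no wrap-around */
        }
        if (c->ovf) {
            return false;       /* OF already set: suppress the interrupt */
        }
        c->ovf = true;          /* set OF, then notify, as hpm_incr_ctr() does */
        return true;
    }

    int main(void)
    {
        struct hpm_counter c = { .value = UINT64_MAX, .ovf = false };
        return hpm_counter_incr(&c) ? 0 : 1;   /* first wrap raises the IRQ */
    }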
diff --git a/hw/riscv/riscv-iommu-hpm.c b/hw/riscv/riscv-iommu-hpm.c
index 5833ab8956..8eca5ee17e 100644
--- a/hw/riscv/riscv-iommu-hpm.c
+++ b/hw/riscv/riscv-iommu-hpm.c
@@ -52,3 +52,117 @@ uint64_t riscv_iommu_hpmcycle_read(RISCVIOMMUState *s)
return (ctr_val + get_cycles() - ctr_prev) |
(cycle & RISCV_IOMMU_IOHPMCYCLES_OVF);
}
+
+static void hpm_incr_ctr(RISCVIOMMUState *s, uint32_t ctr_idx)
+{
+ const uint32_t off = ctr_idx << 3;
+ uint64_t cntr_val;
+
+ cntr_val = ldq_le_p(&s->regs_rw[RISCV_IOMMU_REG_IOHPMCTR_BASE + off]);
+ stq_le_p(&s->regs_rw[RISCV_IOMMU_REG_IOHPMCTR_BASE + off], cntr_val + 1);
+
+ /* Handle the overflow scenario. */
+ if (cntr_val == UINT64_MAX) {
+ /*
+ * Generate interrupt only if OF bit is clear. +1 to offset the cycle
+ * register OF bit.
+ */
+ const uint32_t ovf =
+ riscv_iommu_reg_mod32(s, RISCV_IOMMU_REG_IOCOUNTOVF,
+ BIT(ctr_idx + 1), 0);
+ if (!get_field(ovf, BIT(ctr_idx + 1))) {
+ riscv_iommu_reg_mod64(s,
+ RISCV_IOMMU_REG_IOHPMEVT_BASE + off,
+ RISCV_IOMMU_IOHPMEVT_OF,
+ 0);
+ riscv_iommu_notify(s, RISCV_IOMMU_INTR_PM);
+ }
+ }
+}
+
+void riscv_iommu_hpm_incr_ctr(RISCVIOMMUState *s, RISCVIOMMUContext *ctx,
+ unsigned event_id)
+{
+ const uint32_t inhibit = riscv_iommu_reg_get32(
+ s, RISCV_IOMMU_REG_IOCOUNTINH);
+ uint32_t did_gscid;
+ uint32_t pid_pscid;
+ uint32_t ctr_idx;
+ gpointer value;
+ uint32_t ctrs;
+ uint64_t evt;
+
+ if (!(s->cap & RISCV_IOMMU_CAP_HPM)) {
+ return;
+ }
+
+ value = g_hash_table_lookup(s->hpm_event_ctr_map,
+ GUINT_TO_POINTER(event_id));
+ if (value == NULL) {
+ return;
+ }
+
+ for (ctrs = GPOINTER_TO_UINT(value); ctrs != 0; ctrs &= ctrs - 1) {
+ ctr_idx = ctz32(ctrs);
+ if (get_field(inhibit, BIT(ctr_idx + 1))) {
+ continue;
+ }
+
+ evt = riscv_iommu_reg_get64(s,
+ RISCV_IOMMU_REG_IOHPMEVT_BASE + (ctr_idx << 3));
+
+ /*
+ * The event ID programmed into the counter may have changed while
+ * the hash table hasn't been updated yet to reflect it. Don't
+ * increment the counter for the stale event ID.
+ */
+ if (event_id != get_field(evt, RISCV_IOMMU_IOHPMEVT_EVENT_ID)) {
+ continue;
+ }
+
+ if (get_field(evt, RISCV_IOMMU_IOHPMEVT_IDT)) {
+ did_gscid = get_field(ctx->gatp, RISCV_IOMMU_DC_IOHGATP_GSCID);
+ pid_pscid = get_field(ctx->ta, RISCV_IOMMU_DC_TA_PSCID);
+ } else {
+ did_gscid = ctx->devid;
+ pid_pscid = ctx->process_id;
+ }
+
+ if (get_field(evt, RISCV_IOMMU_IOHPMEVT_PV_PSCV)) {
+ /*
+ * If the transaction does not have a valid process_id, counter
+ * increments if device_id matches DID_GSCID. If the transaction
+ * has a valid process_id, counter increments if device_id
+ * matches DID_GSCID and process_id matches PID_PSCID. See
+ * IOMMU Specification, Chapter 5.23. Performance-monitoring
+ * event selector.
+ */
+ if (ctx->process_id &&
+ get_field(evt, RISCV_IOMMU_IOHPMEVT_PID_PSCID) != pid_pscid) {
+ continue;
+ }
+ }
+
+ if (get_field(evt, RISCV_IOMMU_IOHPMEVT_DV_GSCV)) {
+ uint32_t mask = ~0;
+
+ if (get_field(evt, RISCV_IOMMU_IOHPMEVT_DMASK)) {
+ /*
+ * 1001 1011 mask = GSCID
+ * 0000 0111 mask = mask ^ (mask + 1)
+ * 1111 1000 mask = ~mask;
+ */
+ mask = get_field(evt, RISCV_IOMMU_IOHPMEVT_DID_GSCID);
+ mask = mask ^ (mask + 1);
+ mask = ~mask;
+ }
+
+ if ((get_field(evt, RISCV_IOMMU_IOHPMEVT_DID_GSCID) & mask) !=
+ (did_gscid & mask)) {
+ continue;
+ }
+ }
+
+ hpm_incr_ctr(s, ctr_idx);
+ }
+}
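
To make the DMASK comment above concrete: the trailing ones in DID_GSCID
select how many low-order device-ID bits are wildcarded in the comparison.
A small standalone program working through the 1001 1011 example from the
code comment:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t gscid = 0x9b;               /* 1001 1011 */
        uint32_t mask = gscid ^ (gscid + 1); /* 0000 0111 */
        mask = ~mask;                        /* 1111 1000 */

        /* device IDs 0x98..0x9f all match under this mask */
        printf("mask = 0x%08x\n", mask);
        printf("0x98 matches: %d\n", (0x98 & mask) == (gscid & mask));
        printf("0x90 matches: %d\n", (0x90 & mask) == (gscid & mask));
        return 0;
    }

The two trailing ones in 0x9b wildcard the low three bits, so 0x98 matches
and 0x90 does not.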
diff --git a/hw/riscv/riscv-iommu-hpm.h b/hw/riscv/riscv-iommu-hpm.h
index 231c110ff2..411d869dce 100644
--- a/hw/riscv/riscv-iommu-hpm.h
+++ b/hw/riscv/riscv-iommu-hpm.h
@@ -23,5 +23,7 @@
#include "hw/riscv/riscv-iommu.h"
uint64_t riscv_iommu_hpmcycle_read(RISCVIOMMUState *s);
+void riscv_iommu_hpm_incr_ctr(RISCVIOMMUState *s, RISCVIOMMUContext *ctx,
+ unsigned event_id);
#endif
diff --git a/hw/riscv/riscv-iommu.c b/hw/riscv/riscv-iommu.c
index 01df25418c..5ce0d24359 100644
--- a/hw/riscv/riscv-iommu.c
+++ b/hw/riscv/riscv-iommu.c
@@ -39,7 +39,6 @@
#define PPN_PHYS(ppn) ((ppn) << TARGET_PAGE_BITS)
#define PPN_DOWN(phy) ((phy) >> TARGET_PAGE_BITS)
-typedef struct RISCVIOMMUContext RISCVIOMMUContext;
typedef struct RISCVIOMMUEntry RISCVIOMMUEntry;
/* Device assigned I/O address space */
@@ -52,19 +51,6 @@ struct RISCVIOMMUSpace {
QLIST_ENTRY(RISCVIOMMUSpace) list;
};
-/* Device translation context state. */
-struct RISCVIOMMUContext {
- uint64_t devid:24; /* Requester Id, AKA device_id */
- uint64_t process_id:20; /* Process ID. PASID for PCIe */
- uint64_t tc; /* Translation Control */
- uint64_t ta; /* Translation Attributes */
- uint64_t satp; /* S-Stage address translation and protection */
- uint64_t gatp; /* G-Stage address translation and protection */
- uint64_t msi_addr_mask; /* MSI filtering - address mask */
- uint64_t msi_addr_pattern; /* MSI filtering - address pattern */
- uint64_t msiptp; /* MSI redirection page table pointer */
-};
-
/* Address translation cache entry */
struct RISCVIOMMUEntry {
uint64_t iova:44; /* IOVA Page Number */
@@ -93,7 +79,7 @@ static uint8_t riscv_iommu_get_icvec_vector(uint32_t icvec, uint32_t vec_type)
}
}
-static void riscv_iommu_notify(RISCVIOMMUState *s, int vec_type)
+void riscv_iommu_notify(RISCVIOMMUState *s, int vec_type)
{
uint32_t ipsr, icvec, vector;
@@ -415,6 +401,13 @@ static int riscv_iommu_spa_fetch(RISCVIOMMUState *s, RISCVIOMMUContext *ctx,
}
}
+
+ if (pass == S_STAGE) {
+ riscv_iommu_hpm_incr_ctr(s, ctx, RISCV_IOMMU_HPMEVENT_S_VS_WALKS);
+ } else {
+ riscv_iommu_hpm_incr_ctr(s, ctx, RISCV_IOMMU_HPMEVENT_G_WALKS);
+ }
+
/* Read page table entry */
if (sc[pass].ptesize == 4) {
uint32_t pte32 = 0;
@@ -933,6 +926,7 @@ static int riscv_iommu_ctx_fetch(RISCVIOMMUState *s, RISCVIOMMUContext *ctx)
/* Device directory tree walk */
for (; depth-- > 0; ) {
+ riscv_iommu_hpm_incr_ctr(s, ctx, RISCV_IOMMU_HPMEVENT_DD_WALK);
/*
* Select device id index bits based on device directory tree level
* and device context format.
@@ -960,6 +954,8 @@ static int riscv_iommu_ctx_fetch(RISCVIOMMUState *s, RISCVIOMMUContext *ctx)
addr = PPN_PHYS(get_field(de, RISCV_IOMMU_DDTE_PPN));
}
+ riscv_iommu_hpm_incr_ctr(s, ctx, RISCV_IOMMU_HPMEVENT_DD_WALK);
+
/* index into device context entry page */
addr |= (ctx->devid * dc_len) & ~TARGET_PAGE_MASK;
@@ -1025,6 +1021,8 @@ static int riscv_iommu_ctx_fetch(RISCVIOMMUState *s, RISCVIOMMUContext *ctx)
}
for (depth = mode - RISCV_IOMMU_DC_FSC_PDTP_MODE_PD8; depth-- > 0; ) {
+ riscv_iommu_hpm_incr_ctr(s, ctx, RISCV_IOMMU_HPMEVENT_PD_WALK);
+
/*
* Select process id index bits based on process directory tree
* level. See IOMMU Specification, 2.2. Process-Directory-Table.
@@ -1042,6 +1040,8 @@ static int riscv_iommu_ctx_fetch(RISCVIOMMUState *s, RISCVIOMMUContext *ctx)
addr = PPN_PHYS(get_field(de, RISCV_IOMMU_PC_FSC_PPN));
}
+ riscv_iommu_hpm_incr_ctr(s, ctx, RISCV_IOMMU_HPMEVENT_PD_WALK);
+
/* Leaf entry in PDT */
addr |= (ctx->process_id << 4) & ~TARGET_PAGE_MASK;
if (dma_memory_read(s->target_as, addr, &dc.ta, sizeof(uint64_t) * 2,
@@ -1347,6 +1347,8 @@ static int riscv_iommu_translate(RISCVIOMMUState *s, RISCVIOMMUContext *ctx,
GHashTable *iot_cache;
int fault;
+ riscv_iommu_hpm_incr_ctr(s, ctx, RISCV_IOMMU_HPMEVENT_URQ);
+
iot_cache = g_hash_table_ref(s->iot_cache);
/*
* TC[32] is reserved for custom extensions, used here to temporarily
@@ -1357,6 +1359,7 @@ static int riscv_iommu_translate(RISCVIOMMUState *s, RISCVIOMMUContext *ctx,
/* Check for ATS request. */
if (iotlb->perm == IOMMU_NONE) {
+ riscv_iommu_hpm_incr_ctr(s, ctx, RISCV_IOMMU_HPMEVENT_ATS_RQ);
/* Check if ATS is disabled. */
if (!(ctx->tc & RISCV_IOMMU_DC_TC_EN_ATS)) {
enable_pri = false;
@@ -1375,6 +1378,8 @@ static int riscv_iommu_translate(RISCVIOMMUState *s, RISCVIOMMUContext *ctx,
goto done;
}
+ riscv_iommu_hpm_incr_ctr(s, ctx, RISCV_IOMMU_HPMEVENT_TLB_MISS);
+
/* Translate using device directory / page table information. */
fault = riscv_iommu_spa_fetch(s, ctx, iotlb);
@@ -2274,6 +2279,10 @@ static void riscv_iommu_realize(DeviceState *dev, Error **errp)
memory_region_init_io(&s->trap_mr, OBJECT(dev), &riscv_iommu_trap_ops, s,
"riscv-iommu-trap", ~0ULL);
address_space_init(&s->trap_as, &s->trap_mr, "riscv-iommu-trap-as");
+
+ if (s->cap & RISCV_IOMMU_CAP_HPM) {
+ s->hpm_event_ctr_map = g_hash_table_new(g_direct_hash, g_direct_equal);
+ }
}
static void riscv_iommu_unrealize(DeviceState *dev)
@@ -2282,6 +2291,10 @@ static void riscv_iommu_unrealize(DeviceState *dev)
g_hash_table_unref(s->iot_cache);
g_hash_table_unref(s->ctx_cache);
+
+ if (s->cap & RISCV_IOMMU_CAP_HPM) {
+ g_hash_table_unref(s->hpm_event_ctr_map);
+ }
}
void riscv_iommu_reset(RISCVIOMMUState *s)
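
The hpm_event_ctr_map created in realize maps an event ID to a bitmap of
the counters programmed with that event, both packed directly into the
key/value pointers. A rough sketch of the intended usage, with made-up
event and counter numbers (the code that actually populates the table
lands later in this series):

    #include <glib.h>
    #include <stdio.h>

    int main(void)
    {
        GHashTable *map = g_hash_table_new(g_direct_hash, g_direct_equal);
        guint event_id = 5;                  /* arbitrary example event */
        guint ctrs = (1u << 0) | (1u << 3);  /* counters 0 and 3 count it */

        /* Key and value live in the pointer itself; no allocation needed. */
        g_hash_table_insert(map, GUINT_TO_POINTER(event_id),
                            GUINT_TO_POINTER(ctrs));

        gpointer value = g_hash_table_lookup(map, GUINT_TO_POINTER(event_id));
        if (value != NULL) {
            printf("event %u -> counter bitmap 0x%x\n",
                   event_id, GPOINTER_TO_UINT(value));
        }

        g_hash_table_unref(map);
        return 0;
    }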
diff --git a/hw/riscv/riscv-iommu.h b/hw/riscv/riscv-iommu.h
index 380f7e81d1..a21ab51491 100644
--- a/hw/riscv/riscv-iommu.h
+++ b/hw/riscv/riscv-iommu.h
@@ -90,12 +90,30 @@ struct RISCVIOMMUState {
/* HPM cycle counter */
uint64_t hpmcycle_val; /* Current value of cycle register */
uint64_t hpmcycle_prev; /* Saved value of QEMU_CLOCK_VIRTUAL clock */
+
+ /* HPM event counters */
+ GHashTable *hpm_event_ctr_map; /* Mapping of events to counters */
};
void riscv_iommu_pci_setup_iommu(RISCVIOMMUState *iommu, PCIBus *bus,
Error **errp);
void riscv_iommu_set_cap_igs(RISCVIOMMUState *s, riscv_iommu_igs_mode mode);
void riscv_iommu_reset(RISCVIOMMUState *s);
+void riscv_iommu_notify(RISCVIOMMUState *s, int vec_type);
+
+typedef struct RISCVIOMMUContext RISCVIOMMUContext;
+/* Device translation context state. */
+struct RISCVIOMMUContext {
+ uint64_t devid:24; /* Requester Id, AKA device_id */
+ uint64_t process_id:20; /* Process ID. PASID for PCIe */
+ uint64_t tc; /* Translation Control */
+ uint64_t ta; /* Translation Attributes */
+ uint64_t satp; /* S-Stage address translation and protection */
+ uint64_t gatp; /* G-Stage address translation and protection */
+ uint64_t msi_addr_mask; /* MSI filtering - address mask */
+ uint64_t msi_addr_pattern; /* MSI filtering - address pattern */
+ uint64_t msiptp; /* MSI redirection page table pointer */
+};
/* private helpers */
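
A final note on the counter-selection loop in riscv_iommu_hpm_incr_ctr()
above: it iterates over the set bits of the counter bitmap, using ctz32()
to find the lowest set bit and "ctrs &= ctrs - 1" to clear it. The same
idiom in plain C, with the GCC/Clang builtin standing in for QEMU's
ctz32():

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t ctrs = 0x2c;               /* bits 2, 3 and 5 set */

        for (; ctrs != 0; ctrs &= ctrs - 1) {
            /* __builtin_ctz() plays the role of QEMU's ctz32() here. */
            printf("counter index %d\n", __builtin_ctz(ctrs));
        }
        return 0;                           /* prints 2, 3, 5 */
    }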
--
2.47.1