[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
Re: [PATCH v4 12/19] hw/arm/smmu: Support nesting in smmuv3_range_inval(
From: |
Jean-Philippe Brucker |
Subject: |
Re: [PATCH v4 12/19] hw/arm/smmu: Support nesting in smmuv3_range_inval() |
Date: |
Thu, 4 Jul 2024 19:32:36 +0100 |
On Mon, Jul 01, 2024 at 11:02:34AM +0000, Mostafa Saleh wrote:
> With nesting, we would need to invalidate IPAs without
> over-invalidating stage-1 IOVAs. This can be done by
> distinguishing IPAs in the TLBs by having ASID=-1.
> To achieve that, rework the invalidation for IPAs to have a
> separate function, while for IOVA invalidation ASID=-1 means
> invalidate for all ASIDs.
>
> Reviewed-by: Eric Auger <eric.auger@redhat.com>
> Signed-off-by: Mostafa Saleh <smostafa@google.com>
> ---
> hw/arm/smmu-common.c | 47 ++++++++++++++++++++++++++++++++++++
> hw/arm/smmuv3.c | 23 ++++++++++++------
> hw/arm/trace-events | 2 +-
> include/hw/arm/smmu-common.h | 3 ++-
> 4 files changed, 66 insertions(+), 9 deletions(-)
>
> diff --git a/hw/arm/smmu-common.c b/hw/arm/smmu-common.c
> index 71afd486ba..5bf9eadeff 100644
> --- a/hw/arm/smmu-common.c
> +++ b/hw/arm/smmu-common.c
> @@ -195,6 +195,25 @@ static gboolean
> smmu_hash_remove_by_asid_vmid_iova(gpointer key, gpointer value,
> ((entry->iova & ~info->mask) == info->iova);
> }
>
> +static gboolean smmu_hash_remove_by_vmid_ipa(gpointer key, gpointer value,
> + gpointer user_data)
> +{
> + SMMUTLBEntry *iter = (SMMUTLBEntry *)value;
> + IOMMUTLBEntry *entry = &iter->entry;
> + SMMUIOTLBPageInvInfo *info = (SMMUIOTLBPageInvInfo *)user_data;
> + SMMUIOTLBKey iotlb_key = *(SMMUIOTLBKey *)key;
> +
> + if (info->asid >= 0) {
Should this test SMMU_IOTLB_ASID(iotlb_key) instead?
> + /* This is a stage-1 address. */
> + return false;
> + }
> + if (info->vmid != SMMU_IOTLB_VMID(iotlb_key)) {
> + return false;
> + }
> + return ((info->iova & ~entry->addr_mask) == entry->iova) ||
> + ((entry->iova & ~info->mask) == info->iova);
> +}
> +
> void smmu_iotlb_inv_iova(SMMUState *s, int asid, int vmid, dma_addr_t iova,
> uint8_t tg, uint64_t num_pages, uint8_t ttl)
> {
> @@ -223,6 +242,34 @@ void smmu_iotlb_inv_iova(SMMUState *s, int asid, int
> vmid, dma_addr_t iova,
> &info);
> }
>
> +/*
> + * Similar to smmu_iotlb_inv_iova(), but for Stage-2, ASID is always -1,
> + * in Stage-1 invalidation ASID = -1, means don't care.
> + */
> +void smmu_iotlb_inv_ipa(SMMUState *s, int vmid, dma_addr_t ipa, uint8_t tg,
> + uint64_t num_pages, uint8_t ttl)
> +{
> + uint8_t granule = tg ? tg * 2 + 10 : 12;
> + int asid = -1;
> +
> + if (ttl && (num_pages == 1)) {
> + SMMUIOTLBKey key = smmu_get_iotlb_key(asid, vmid, ipa, tg, ttl);
> +
> + if (g_hash_table_remove(s->iotlb, &key)) {
> + return;
> + }
> + }
> +
> + SMMUIOTLBPageInvInfo info = {
> + .iova = ipa,
> + .vmid = vmid,
> + .mask = (num_pages * 1 << granule) - 1};
Since multiplication takes precedence over shift, this looks strange.
We could just remove the "* 1" here and in smmu_iotlb_inv_iova() to avoid
the confusion?
Thanks,
Jean
> +
> + g_hash_table_foreach_remove(s->iotlb,
> + smmu_hash_remove_by_vmid_ipa,
> + &info);
> +}
> +
> void smmu_iotlb_inv_asid(SMMUState *s, int asid)
> {
> trace_smmu_iotlb_inv_asid(asid);
> diff --git a/hw/arm/smmuv3.c b/hw/arm/smmuv3.c
> index 86f95c1e40..e5ecd93258 100644
> --- a/hw/arm/smmuv3.c
> +++ b/hw/arm/smmuv3.c
> @@ -1136,7 +1136,7 @@ static void smmuv3_inv_notifiers_iova(SMMUState *s, int
> asid, int vmid,
> }
> }
>
> -static void smmuv3_range_inval(SMMUState *s, Cmd *cmd)
> +static void smmuv3_range_inval(SMMUState *s, Cmd *cmd, SMMUStage stage)
> {
> dma_addr_t end, addr = CMD_ADDR(cmd);
> uint8_t type = CMD_TYPE(cmd);
> @@ -1161,9 +1161,13 @@ static void smmuv3_range_inval(SMMUState *s, Cmd *cmd)
> }
>
> if (!tg) {
> - trace_smmuv3_range_inval(vmid, asid, addr, tg, 1, ttl, leaf);
> + trace_smmuv3_range_inval(vmid, asid, addr, tg, 1, ttl, leaf, stage);
> smmuv3_inv_notifiers_iova(s, asid, vmid, addr, tg, 1);
> - smmu_iotlb_inv_iova(s, asid, vmid, addr, tg, 1, ttl);
> + if (stage == SMMU_STAGE_1) {
> + smmu_iotlb_inv_iova(s, asid, vmid, addr, tg, 1, ttl);
> + } else {
> + smmu_iotlb_inv_ipa(s, vmid, addr, tg, 1, ttl);
> + }
> return;
> }
>
> @@ -1179,9 +1183,14 @@ static void smmuv3_range_inval(SMMUState *s, Cmd *cmd)
> uint64_t mask = dma_aligned_pow2_mask(addr, end, 64);
>
> num_pages = (mask + 1) >> granule;
> - trace_smmuv3_range_inval(vmid, asid, addr, tg, num_pages, ttl, leaf);
> + trace_smmuv3_range_inval(vmid, asid, addr, tg, num_pages,
> + ttl, leaf, stage);
> smmuv3_inv_notifiers_iova(s, asid, vmid, addr, tg, num_pages);
> - smmu_iotlb_inv_iova(s, asid, vmid, addr, tg, num_pages, ttl);
> + if (stage == SMMU_STAGE_1) {
> + smmu_iotlb_inv_iova(s, asid, vmid, addr, tg, num_pages, ttl);
> + } else {
> + smmu_iotlb_inv_ipa(s, vmid, addr, tg, num_pages, ttl);
> + }
> addr += mask + 1;
> }
> }
> @@ -1340,7 +1349,7 @@ static int smmuv3_cmdq_consume(SMMUv3State *s)
> cmd_error = SMMU_CERROR_ILL;
> break;
> }
> - smmuv3_range_inval(bs, &cmd);
> + smmuv3_range_inval(bs, &cmd, SMMU_STAGE_1);
> break;
> case SMMU_CMD_TLBI_S12_VMALL:
> {
> @@ -1365,7 +1374,7 @@ static int smmuv3_cmdq_consume(SMMUv3State *s)
> * As currently only either s1 or s2 are supported
> * we can reuse same function for s2.
> */
> - smmuv3_range_inval(bs, &cmd);
> + smmuv3_range_inval(bs, &cmd, SMMU_STAGE_2);
> break;
> case SMMU_CMD_TLBI_EL3_ALL:
> case SMMU_CMD_TLBI_EL3_VA:
> diff --git a/hw/arm/trace-events b/hw/arm/trace-events
> index 09ccd39548..7d9c1703da 100644
> --- a/hw/arm/trace-events
> +++ b/hw/arm/trace-events
> @@ -46,7 +46,7 @@ smmuv3_cmdq_cfgi_ste_range(int start, int end) "start=0x%x
> - end=0x%x"
> smmuv3_cmdq_cfgi_cd(uint32_t sid) "sid=0x%x"
> smmuv3_config_cache_hit(uint32_t sid, uint32_t hits, uint32_t misses,
> uint32_t perc) "Config cache HIT for sid=0x%x (hits=%d, misses=%d, hit
> rate=%d)"
> smmuv3_config_cache_miss(uint32_t sid, uint32_t hits, uint32_t misses,
> uint32_t perc) "Config cache MISS for sid=0x%x (hits=%d, misses=%d, hit
> rate=%d)"
> -smmuv3_range_inval(int vmid, int asid, uint64_t addr, uint8_t tg, uint64_t
> num_pages, uint8_t ttl, bool leaf) "vmid=%d asid=%d addr=0x%"PRIx64" tg=%d
> num_pages=0x%"PRIx64" ttl=%d leaf=%d"
> +smmuv3_range_inval(int vmid, int asid, uint64_t addr, uint8_t tg, uint64_t
> num_pages, uint8_t ttl, bool leaf, int stage) "vmid=%d asid=%d
> addr=0x%"PRIx64" tg=%d num_pages=0x%"PRIx64" ttl=%d leaf=%d stage=%d"
> smmuv3_cmdq_tlbi_nh(void) ""
> smmuv3_cmdq_tlbi_nh_asid(int asid) "asid=%d"
> smmuv3_cmdq_tlbi_s12_vmid(int vmid) "vmid=%d"
> diff --git a/include/hw/arm/smmu-common.h b/include/hw/arm/smmu-common.h
> index cf0fd3ec74..de032fdfd1 100644
> --- a/include/hw/arm/smmu-common.h
> +++ b/include/hw/arm/smmu-common.h
> @@ -216,7 +216,8 @@ void smmu_iotlb_inv_asid(SMMUState *s, int asid);
> void smmu_iotlb_inv_vmid(SMMUState *s, int vmid);
> void smmu_iotlb_inv_iova(SMMUState *s, int asid, int vmid, dma_addr_t iova,
> uint8_t tg, uint64_t num_pages, uint8_t ttl);
> -
> +void smmu_iotlb_inv_ipa(SMMUState *s, int vmid, dma_addr_t ipa, uint8_t tg,
> + uint64_t num_pages, uint8_t ttl);
> /* Unmap the range of all the notifiers registered to any IOMMU mr */
> void smmu_inv_notifiers_all(SMMUState *s);
>
> --
> 2.45.2.803.g4e1b14247a-goog
>
- Re: [PATCH v4 07/19] hw/arm/smmu: Introduce CACHED_ENTRY_TO_ADDR, (continued)
[PATCH v4 12/19] hw/arm/smmu: Support nesting in smmuv3_range_inval(), Mostafa Saleh, 2024/07/01
- Re: [PATCH v4 12/19] hw/arm/smmu: Support nesting in smmuv3_range_inval(),
Jean-Philippe Brucker <=
[PATCH v4 13/19] hw/arm/smmu: Introduce smmu_iotlb_inv_asid_vmid, Mostafa Saleh, 2024/07/01
[PATCH v4 11/19] hw/arm/smmu-common: Support nested translation, Mostafa Saleh, 2024/07/01