Subject: RE: [PATCH v1 04/17] intel_iommu: Flush stage-2 cache in PASID-selective PASID-based iotlb invalidation
From: Duan, Zhenzhong
Date: Wed, 24 Jul 2024 05:19:54 +0000
>-----Original Message-----
>From: CLEMENT MATHIEU--DRIF <clement.mathieu--drif@eviden.com>
>Subject: Re: [PATCH v1 04/17] intel_iommu: Flush stage-2 cache in PASID-selective PASID-based iotlb invalidation
>
>
>
>On 24/07/2024 04:59, Duan, Zhenzhong wrote:
>>
>>
>>> -----Original Message-----
>>> From: CLEMENT MATHIEU--DRIF <clement.mathieu--drif@eviden.com>
>>> Subject: Re: [PATCH v1 04/17] intel_iommu: Flush stage-2 cache in PASID-selective PASID-based iotlb invalidation
>>>
>>>
>>>
>>> On 18/07/2024 10:16, Zhenzhong Duan wrote:
>>>>
>>>> Per spec 6.5.2.4, PASID-selective PASID-based iotlb invalidation will
>>>> flush stage-2 iotlb entries with matching domain id and pasid.
>>>>
>>>> With scalable modern mode introduced, guest could send PASID-selective
>>>> PASID-based iotlb invalidation to flush both stage-1 and stage-2 entries.
>>>>
>>>> Signed-off-by: Zhenzhong Duan <zhenzhong.duan@intel.com>
>>>> ---
>>>> hw/i386/intel_iommu_internal.h | 10 +++++
>>>> hw/i386/intel_iommu.c | 78 ++++++++++++++++++++++++++++++++++
>>>> 2 files changed, 88 insertions(+)
>>>>
>>>> diff --git a/hw/i386/intel_iommu_internal.h b/hw/i386/intel_iommu_internal.h
>>>> index 4e0331caba..f71fc91234 100644
>>>> --- a/hw/i386/intel_iommu_internal.h
>>>> +++ b/hw/i386/intel_iommu_internal.h
>>>> @@ -440,6 +440,16 @@ typedef union VTDInvDesc VTDInvDesc;
>>>> (0x3ffff800ULL | ~(VTD_HAW_MASK(aw) | VTD_SL_IGN_COM | VTD_SL_TM)) : \
>>>> (0x3ffff800ULL | ~(VTD_HAW_MASK(aw) | VTD_SL_IGN_COM))
>>>>
>>>> +#define VTD_INV_DESC_PIOTLB_ALL_IN_PASID (2ULL << 4)
>>>> +#define VTD_INV_DESC_PIOTLB_PSI_IN_PASID (3ULL << 4)
>>>> +
>>>> +#define VTD_INV_DESC_PIOTLB_RSVD_VAL0 0xfff000000000ffc0ULL
>>>> +#define VTD_INV_DESC_PIOTLB_RSVD_VAL1 0xf80ULL
>>>> +
>>>> +#define VTD_INV_DESC_PIOTLB_PASID(val) (((val) >> 32) & 0xfffffULL)
>>>> +#define VTD_INV_DESC_PIOTLB_DID(val) (((val) >> 16) & \
>>>> + VTD_DOMAIN_ID_MASK)
>>>> +
>>>> /* Information about page-selective IOTLB invalidate */
>>>> struct VTDIOTLBPageInvInfo {
>>>> uint16_t domain_id;
>>>> diff --git a/hw/i386/intel_iommu.c b/hw/i386/intel_iommu.c
>>>> index 40cbd4a0f4..075a27adac 100644
>>>> --- a/hw/i386/intel_iommu.c
>>>> +++ b/hw/i386/intel_iommu.c
>>>> @@ -2659,6 +2659,80 @@ static bool vtd_process_iotlb_desc(IntelIOMMUState *s, VTDInvDesc *inv_desc)
>>>> return true;
>>>> }
>>>>
>>>> +static gboolean vtd_hash_remove_by_pasid(gpointer key, gpointer value,
>>>> + gpointer user_data)
>>>> +{
>>>> + VTDIOTLBEntry *entry = (VTDIOTLBEntry *)value;
>>>> + VTDIOTLBPageInvInfo *info = (VTDIOTLBPageInvInfo *)user_data;
>>>> +
>>>> + return ((entry->domain_id == info->domain_id) &&
>>>> + (entry->pasid == info->pasid));
>>>> +}
>>>> +
>>>> +static void vtd_piotlb_pasid_invalidate(IntelIOMMUState *s,
>>>> + uint16_t domain_id, uint32_t pasid)
>>>> +{
>>>> + VTDIOTLBPageInvInfo info;
>>>> + VTDAddressSpace *vtd_as;
>>>> + VTDContextEntry ce;
>>>> +
>>>> + info.domain_id = domain_id;
>>>> + info.pasid = pasid;
>>>> +
>>>> + vtd_iommu_lock(s);
>>>> + g_hash_table_foreach_remove(s->iotlb, vtd_hash_remove_by_pasid,
>>>> + &info);
>>>> + vtd_iommu_unlock(s);
>>>> +
>>>> + QLIST_FOREACH(vtd_as, &s->vtd_as_with_notifiers, next) {
>>>> + if (!vtd_dev_to_context_entry(s, pci_bus_num(vtd_as->bus),
>>>> + vtd_as->devfn, &ce) &&
>>>> + domain_id == vtd_get_domain_id(s, &ce, vtd_as->pasid)) {
>>>> + uint32_t rid2pasid = VTD_CE_GET_RID2PASID(&ce);
>>>> +
>>>> + if ((vtd_as->pasid != PCI_NO_PASID || pasid != rid2pasid) &&
>>>> + vtd_as->pasid != pasid) {
>>>> + continue;
>>>> + }
>>>> +
>>>> + if (!s->scalable_modern) {
>>>> + vtd_address_space_sync(vtd_as);
>>>> + }
>>>> + }
>>>> + }
>>>> +}
>>>> +
>>>> +static bool vtd_process_piotlb_desc(IntelIOMMUState *s,
>>>> + VTDInvDesc *inv_desc)
>>>> +{
>>>> + uint16_t domain_id;
>>>> + uint32_t pasid;
>>>> +
>>>> + if ((inv_desc->val[0] & VTD_INV_DESC_PIOTLB_RSVD_VAL0) ||
>>>> + (inv_desc->val[1] & VTD_INV_DESC_PIOTLB_RSVD_VAL1)) {
>>>> + error_report_once("non-zero-field-in-piotlb_inv_desc hi: 0x%" PRIx64
>>>> + " lo: 0x%" PRIx64, inv_desc->val[1], inv_desc->val[0]);
>>> This error is not formatted as the other similar messages we print when
>>> reserved bits are non-zero.
>>> Here is what we've done in vtd_process_iotlb_desc:
>> Sure, will change as below,
>>
>>> error_report_once("%s: invalid iotlb inv desc: hi=0x%"PRIx64
>>> ", lo=0x%"PRIx64" (reserved bits unzero)",
>>> __func__, inv_desc->hi, inv_desc->lo);
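>> As a rough sketch following that same convention (exact wording to be settled in the next version; note the piotlb descriptor uses val[0]/val[1] rather than lo/hi):
>>
>> error_report_once("%s: invalid piotlb inv desc: hi=0x%"PRIx64
>>                   ", lo=0x%"PRIx64" (reserved bits unzero)",
>>                   __func__, inv_desc->val[1], inv_desc->val[0]);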
>>>> + return false;
>>>> + }
>>>> +
>>>> + domain_id = VTD_INV_DESC_PIOTLB_DID(inv_desc->val[0]);
>>>> + pasid = VTD_INV_DESC_PIOTLB_PASID(inv_desc->val[0]);
>>>> + switch (inv_desc->val[0] & VTD_INV_DESC_IOTLB_G) {
>>> Not critical but why don't we have VTD_INV_DESC_PIOTLB_G?
>> Will add.
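>> i.e., a minimal sketch mirroring the existing VTD_INV_DESC_IOTLB_G definition (name and placement not final):
>>
>> #define VTD_INV_DESC_PIOTLB_G (3ULL << 4)
>>
>> so the switch above becomes:
>>
>> switch (inv_desc->val[0] & VTD_INV_DESC_PIOTLB_G) {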
>>
>>>> + case VTD_INV_DESC_PIOTLB_ALL_IN_PASID:
>>>> + vtd_piotlb_pasid_invalidate(s, domain_id, pasid);
>>>> + break;
>>>> +
>>>> + case VTD_INV_DESC_PIOTLB_PSI_IN_PASID:
>>>> + break;
>>>> +
>>>> + default:
>>>> + error_report_once("Invalid granularity in P-IOTLB desc hi: 0x%" PRIx64
>>>> + " lo: 0x%" PRIx64, inv_desc->val[1], inv_desc->val[0]);
>>> Same comment, I think we should make the messages consistent across
>>> descriptor handlers.
>> What about below:
>>
>> diff --git a/hw/i386/intel_iommu_internal.h b/hw/i386/intel_iommu_internal.h
>> index 3290761595..e76fd9d377 100644
>> --- a/hw/i386/intel_iommu_internal.h
>> +++ b/hw/i386/intel_iommu_internal.h
>> @@ -479,9 +479,10 @@ typedef union VTDInvDesc VTDInvDesc;
>> #define VTD_INV_DESC_PIOTLB_ALL_IN_PASID (2ULL << 4)
>> #define VTD_INV_DESC_PIOTLB_PSI_IN_PASID (3ULL << 4)
>>
>> +/* Masks for IOTLB Invalidate Descriptor */
>> +#define VTD_INV_DESC_IOTLB_G (3ULL << 4)
>This one is already defined
Ah, typo. I meant:
+#define VTD_INV_DESC_PIOTLB_G (3ULL << 4)
Thanks
Zhenzhong