[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
Re: [Qemu-devel] [PATCH RFC v3 03/14] intel_iommu: renaming gpa to iova
From: |
Tian, Kevin |
Subject: |
Re: [Qemu-devel] [PATCH RFC v3 03/14] intel_iommu: renaming gpa to iova where proper |
Date: |
Fri, 20 Jan 2017 08:27:31 +0000 |
> From: Peter Xu [mailto:address@hidden
> Sent: Friday, January 13, 2017 11:06 AM
>
> There are lots of places in current intel_iommu.c codes that named
> "iova" as "gpa". It is really confusing to use a name "gpa" in these
> places (which is very easily to be understood as "Guest Physical
> Address", while it's not). To make the codes (much) easier to be read, I
> decided to do this once and for all.
>
> No functional change is made. Only literal ones.
Looking at the VT-d spec (3.2 Domains and Address Translation):
Remapping hardware treats the address in inbound requests as DMA
Address. Depending on the software usage model, the DMA address
space may be the Guest-Physical Address (GPA) space of the virtual
machine to which the device is assigned, or application Virtual Address
(VA) space defined by the PASID assigned to an application, or some
abstract I/O virtual address (IOVA) space defined by software.
For simplicity, this document refers to address in requests-without-
PASID as GPA, and address in requests-with-PASID as Virtual Address
(VA) (or Guest Virtual Address (GVA), if such request is from a device
assigned to a virtual machine). The translated address is referred to
as HPA.
It would improve readability if a similar comment were added in this file -
you could note that "iova" is chosen to represent the address in
requests-without-PASID.
>
> Signed-off-by: Peter Xu <address@hidden>
> ---
> hw/i386/intel_iommu.c | 36 ++++++++++++++++++------------------
> 1 file changed, 18 insertions(+), 18 deletions(-)
>
> diff --git a/hw/i386/intel_iommu.c b/hw/i386/intel_iommu.c
> index 77d467a..275c3db 100644
> --- a/hw/i386/intel_iommu.c
> +++ b/hw/i386/intel_iommu.c
> @@ -259,7 +259,7 @@ static void vtd_update_iotlb(IntelIOMMUState *s, uint16_t
> source_id,
> uint64_t *key = g_malloc(sizeof(*key));
> uint64_t gfn = vtd_get_iotlb_gfn(addr, level);
>
> - VTD_DPRINTF(CACHE, "update iotlb sid 0x%"PRIx16 " gpa 0x%"PRIx64
> + VTD_DPRINTF(CACHE, "update iotlb sid 0x%"PRIx16 " iova 0x%"PRIx64
> " slpte 0x%"PRIx64 " did 0x%"PRIx16, source_id, addr, slpte,
> domain_id);
> if (g_hash_table_size(s->iotlb) >= VTD_IOTLB_MAX_SIZE) {
> @@ -575,12 +575,12 @@ static uint64_t vtd_get_slpte(dma_addr_t base_addr,
> uint32_t
> index)
> return slpte;
> }
>
> -/* Given a gpa and the level of paging structure, return the offset of
> current
> - * level.
> +/* Given an iova and the level of paging structure, return the offset
> + * of current level.
> */
> -static inline uint32_t vtd_gpa_level_offset(uint64_t gpa, uint32_t level)
> +static inline uint32_t vtd_iova_level_offset(uint64_t iova, uint32_t level)
> {
> - return (gpa >> vtd_slpt_level_shift(level)) &
> + return (iova >> vtd_slpt_level_shift(level)) &
> ((1ULL << VTD_SL_LEVEL_BITS) - 1);
> }
>
> @@ -628,10 +628,10 @@ static bool vtd_slpte_nonzero_rsvd(uint64_t slpte,
> uint32_t
> level)
> }
> }
>
> -/* Given the @gpa, get relevant @slptep. @slpte_level will be the last level
> +/* Given the @iova, get relevant @slptep. @slpte_level will be the last level
> * of the translation, can be used for deciding the size of large page.
> */
> -static int vtd_gpa_to_slpte(VTDContextEntry *ce, uint64_t gpa, bool is_write,
> +static int vtd_gpa_to_slpte(VTDContextEntry *ce, uint64_t iova, bool
> is_write,
> uint64_t *slptep, uint32_t *slpte_level,
> bool *reads, bool *writes)
> {
> @@ -642,11 +642,11 @@ static int vtd_gpa_to_slpte(VTDContextEntry *ce,
> uint64_t gpa,
> bool is_write,
> uint32_t ce_agaw = vtd_get_agaw_from_context_entry(ce);
> uint64_t access_right_check;
>
> - /* Check if @gpa is above 2^X-1, where X is the minimum of MGAW in
> CAP_REG
> - * and AW in context-entry.
> + /* Check if @iova is above 2^X-1, where X is the minimum of MGAW
> + * in CAP_REG and AW in context-entry.
> */
> - if (gpa & ~((1ULL << MIN(ce_agaw, VTD_MGAW)) - 1)) {
> - VTD_DPRINTF(GENERAL, "error: gpa 0x%"PRIx64 " exceeds limits", gpa);
> + if (iova & ~((1ULL << MIN(ce_agaw, VTD_MGAW)) - 1)) {
> + VTD_DPRINTF(GENERAL, "error: iova 0x%"PRIx64 " exceeds limits",
> iova);
> return -VTD_FR_ADDR_BEYOND_MGAW;
> }
>
> @@ -654,13 +654,13 @@ static int vtd_gpa_to_slpte(VTDContextEntry *ce,
> uint64_t gpa,
> bool is_write,
> access_right_check = is_write ? VTD_SL_W : VTD_SL_R;
>
> while (true) {
> - offset = vtd_gpa_level_offset(gpa, level);
> + offset = vtd_iova_level_offset(iova, level);
> slpte = vtd_get_slpte(addr, offset);
>
> if (slpte == (uint64_t)-1) {
> VTD_DPRINTF(GENERAL, "error: fail to access second-level paging "
> - "entry at level %"PRIu32 " for gpa 0x%"PRIx64,
> - level, gpa);
> + "entry at level %"PRIu32 " for iova 0x%"PRIx64,
> + level, iova);
> if (level == vtd_get_level_from_context_entry(ce)) {
> /* Invalid programming of context-entry */
> return -VTD_FR_CONTEXT_ENTRY_INV;
> @@ -672,8 +672,8 @@ static int vtd_gpa_to_slpte(VTDContextEntry *ce, uint64_t
> gpa,
> bool is_write,
> *writes = (*writes) && (slpte & VTD_SL_W);
> if (!(slpte & access_right_check)) {
> VTD_DPRINTF(GENERAL, "error: lack of %s permission for "
> - "gpa 0x%"PRIx64 " slpte 0x%"PRIx64,
> - (is_write ? "write" : "read"), gpa, slpte);
> + "iova 0x%"PRIx64 " slpte 0x%"PRIx64,
> + (is_write ? "write" : "read"), iova, slpte);
> return is_write ? -VTD_FR_WRITE : -VTD_FR_READ;
> }
> if (vtd_slpte_nonzero_rsvd(slpte, level)) {
> @@ -827,7 +827,7 @@ static void vtd_do_iommu_translate(VTDAddressSpace
> *vtd_as,
> PCIBus *bus,
> /* Try to fetch slpte form IOTLB */
> iotlb_entry = vtd_lookup_iotlb(s, source_id, addr);
> if (iotlb_entry) {
> - VTD_DPRINTF(CACHE, "hit iotlb sid 0x%"PRIx16 " gpa 0x%"PRIx64
> + VTD_DPRINTF(CACHE, "hit iotlb sid 0x%"PRIx16 " iova 0x%"PRIx64
> " slpte 0x%"PRIx64 " did 0x%"PRIx16, source_id, addr,
> iotlb_entry->slpte, iotlb_entry->domain_id);
> slpte = iotlb_entry->slpte;
> @@ -2025,7 +2025,7 @@ static IOMMUTLBEntry vtd_iommu_translate(MemoryRegion
> *iommu, hwaddr addr,
> is_write, &ret);
> VTD_DPRINTF(MMU,
> "bus %"PRIu8 " slot %"PRIu8 " func %"PRIu8 " devfn %"PRIu8
> - " gpa 0x%"PRIx64 " hpa 0x%"PRIx64, pci_bus_num(vtd_as->bus),
> + " iova 0x%"PRIx64 " hpa 0x%"PRIx64, pci_bus_num(vtd_as->bus),
> VTD_PCI_SLOT(vtd_as->devfn), VTD_PCI_FUNC(vtd_as->devfn),
> vtd_as->devfn, addr, ret.translated_addr);
> return ret;
> --
> 2.7.4
- Re: [Qemu-devel] [PATCH RFC v3 01/14] IOMMU: add option to enable VTD_CAP_CM to vIOMMU capility exposoed to guest, (continued)
- Re: [Qemu-devel] [PATCH RFC v3 01/14] IOMMU: add option to enable VTD_CAP_CM to vIOMMU capility exposoed to guest, Tian, Kevin, 2017/01/20
- Re: [Qemu-devel] [PATCH RFC v3 01/14] IOMMU: add option to enable VTD_CAP_CM to vIOMMU capility exposoed to guest, Peter Xu, 2017/01/20
- Re: [Qemu-devel] [PATCH RFC v3 01/14] IOMMU: add option to enable VTD_CAP_CM to vIOMMU capility exposoed to guest, Tian, Kevin, 2017/01/20
- Re: [Qemu-devel] [PATCH RFC v3 01/14] IOMMU: add option to enable VTD_CAP_CM to vIOMMU capility exposoed to guest, Peter Xu, 2017/01/20
[Qemu-devel] [PATCH RFC v3 02/14] intel_iommu: simplify irq region translation, Peter Xu, 2017/01/12
[Qemu-devel] [PATCH RFC v3 03/14] intel_iommu: renaming gpa to iova where proper, Peter Xu, 2017/01/12
- Re: [Qemu-devel] [PATCH RFC v3 03/14] intel_iommu: renaming gpa to iova where proper,
Tian, Kevin <=
[Qemu-devel] [PATCH RFC v3 04/14] intel_iommu: fix trace for inv desc handling, Peter Xu, 2017/01/12
[Qemu-devel] [PATCH RFC v3 05/14] intel_iommu: fix trace for addr translation, Peter Xu, 2017/01/12
[Qemu-devel] [PATCH RFC v3 06/14] intel_iommu: vtd_slpt_level_shift check level, Peter Xu, 2017/01/12
[Qemu-devel] [PATCH RFC v3 07/14] memory: add section range info for IOMMU notifier, Peter Xu, 2017/01/12