
Re: [PATCH v3 03/13] vfio: Collect container iova range info


From: Eric Auger
Subject: Re: [PATCH v3 03/13] vfio: Collect container iova range info
Date: Thu, 19 Oct 2023 08:39:32 +0200
User-agent: Mozilla/5.0 (X11; Linux x86_64; rv:102.0) Gecko/20100101 Thunderbird/102.13.0

Hi Alex,

On 10/18/23 21:07, Alex Williamson wrote:
> On Wed, 11 Oct 2023 19:52:19 +0200
> Eric Auger <eric.auger@redhat.com> wrote:
>
>> Collect iova range information if the VFIO_IOMMU_TYPE1_INFO_CAP_IOVA_RANGE
>> capability is supported.
>>
>> This allows the information to be propagated through the IOMMU MR
>> set_iova_ranges() callback so that virtual IOMMUs become aware of
>> those aperture constraints. This is only done if the info is
>> available and the number of iova ranges is greater than 0.
>>
>> A new vfio_get_info_iova_range helper is introduced, matching
>> the coding style of the existing vfio_get_info_dma_avail. The
>> returned boolean value isn't used, though. The code of both
>> helpers is kept aligned.
>>
>> Signed-off-by: Eric Auger <eric.auger@redhat.com>
>>
>> ---
>>
>> v2 -> v3:
>> - Turn nr_iovas into an int initialized to -1
>> - memory_region_iommu_set_iova_ranges is only called if nr_iovas > 0
>> - vfio_get_info_iova_range returns a bool to match
>>   vfio_get_info_dma_avail. Make the code of both consistent by using
>>   !hdr in the check
>> - rebase on top of vfio-next
>> ---
>>  include/hw/vfio/vfio-common.h |  2 ++
>>  hw/vfio/common.c              |  9 +++++++
>>  hw/vfio/container.c           | 44 ++++++++++++++++++++++++++++++++---
>>  3 files changed, 52 insertions(+), 3 deletions(-)
>>
>> diff --git a/include/hw/vfio/vfio-common.h b/include/hw/vfio/vfio-common.h
>> index 7780b9073a..848ff47960 100644
>> --- a/include/hw/vfio/vfio-common.h
>> +++ b/include/hw/vfio/vfio-common.h
>> @@ -99,6 +99,8 @@ typedef struct VFIOContainer {
>>      QLIST_HEAD(, VFIORamDiscardListener) vrdl_list;
>>      QLIST_ENTRY(VFIOContainer) next;
>>      QLIST_HEAD(, VFIODevice) device_list;
>> +    int nr_iovas;
>> +    GList *iova_ranges;
> Nit, nr_iovas seems like it has a pretty weak use case here.  We can
> just test iova_ranges != NULL for calling set_iova_ranges.  In patch 13
> we can again test against NULL, which I think also negates the need to
> assert nr_iovas since the NULL test automatically catches the zero
> case.  Otherwise
>
> Reviewed-by: Alex Williamson <alex.williamson@redhat.com>

Makes sense, all the more so as I am going to drop patch 13. I will
respin, remove nr_iovas, and just rely on testing iova_ranges.
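
A minimal sketch of that simplification in vfio_listener_region_add(),
assuming the rest of the hunk stays as in this patch (not the final
respin):

    /* With nr_iovas dropped, a non-NULL list implies at least one range */
    if (container->iova_ranges) {
        ret = memory_region_iommu_set_iova_ranges(giommu->iommu_mr,
                                                  container->iova_ranges,
                                                  &err);
        if (ret) {
            g_free(giommu);
            goto fail;
        }
    }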

Thanks!

Eric
>
>>  } VFIOContainer;
>>  
>>  typedef struct VFIOGuestIOMMU {
>> diff --git a/hw/vfio/common.c b/hw/vfio/common.c
>> index 5ff5acf1d8..9d804152ba 100644
>> --- a/hw/vfio/common.c
>> +++ b/hw/vfio/common.c
>> @@ -699,6 +699,15 @@ static void vfio_listener_region_add(MemoryListener *listener,
>>              goto fail;
>>          }
>>  
>> +        if (container->nr_iovas > 0) {
>> +            ret = memory_region_iommu_set_iova_ranges(giommu->iommu_mr,
>> +                    container->iova_ranges, &err);
>> +            if (ret) {
>> +                g_free(giommu);
>> +                goto fail;
>> +            }
>> +        }
>> +
>>          ret = memory_region_register_iommu_notifier(section->mr, &giommu->n,
>>                                                      &err);
>>          if (ret) {
>> diff --git a/hw/vfio/container.c b/hw/vfio/container.c
>> index adc467210f..5122ff6d92 100644
>> --- a/hw/vfio/container.c
>> +++ b/hw/vfio/container.c
>> @@ -382,7 +382,7 @@ bool vfio_get_info_dma_avail(struct vfio_iommu_type1_info *info,
>>      /* If the capability cannot be found, assume no DMA limiting */
>>      hdr = vfio_get_iommu_type1_info_cap(info,
>>                                          VFIO_IOMMU_TYPE1_INFO_DMA_AVAIL);
>> -    if (hdr == NULL) {
>> +    if (!hdr) {
>>          return false;
>>      }
>>  
>> @@ -394,6 +394,33 @@ bool vfio_get_info_dma_avail(struct vfio_iommu_type1_info *info,
>>      return true;
>>  }
>>  
>> +static bool vfio_get_info_iova_range(struct vfio_iommu_type1_info *info,
>> +                                     VFIOContainer *container)
>> +{
>> +    struct vfio_info_cap_header *hdr;
>> +    struct vfio_iommu_type1_info_cap_iova_range *cap;
>> +
>> +    hdr = vfio_get_iommu_type1_info_cap(info,
>> +                                        VFIO_IOMMU_TYPE1_INFO_CAP_IOVA_RANGE);
>> +    if (!hdr) {
>> +        return false;
>> +    }
>> +
>> +    cap = (void *)hdr;
>> +
>> +    container->nr_iovas = cap->nr_iovas;
>> +    for (int i = 0; i < cap->nr_iovas; i++) {
>> +        Range *range = g_new(Range, 1);
>> +
>> +        range_set_bounds(range, cap->iova_ranges[i].start,
>> +                         cap->iova_ranges[i].end);
>> +        container->iova_ranges =
>> +            range_list_insert(container->iova_ranges, range);
>> +    }
>> +
>> +    return true;
>> +}
>> +
>>  static void vfio_kvm_device_add_group(VFIOGroup *group)
>>  {
>>      Error *err = NULL;
>> @@ -535,6 +562,12 @@ static void vfio_get_iommu_info_migration(VFIOContainer *container,
>>      }
>>  }
>>  
>> +static void vfio_free_container(VFIOContainer *container)
>> +{
>> +    g_list_free_full(container->iova_ranges, g_free);
>> +    g_free(container);
>> +}
>> +
>>  static int vfio_connect_container(VFIOGroup *group, AddressSpace *as,
>>                                    Error **errp)
>>  {
>> @@ -616,6 +649,8 @@ static int vfio_connect_container(VFIOGroup *group, AddressSpace *as,
>>      container->error = NULL;
>>      container->dirty_pages_supported = false;
>>      container->dma_max_mappings = 0;
>> +    container->nr_iovas = -1;
>> +    container->iova_ranges = NULL;
>>      QLIST_INIT(&container->giommu_list);
>>      QLIST_INIT(&container->hostwin_list);
>>      QLIST_INIT(&container->vrdl_list);
>> @@ -652,6 +687,9 @@ static int vfio_connect_container(VFIOGroup *group, AddressSpace *as,
>>          if (!vfio_get_info_dma_avail(info, &container->dma_max_mappings)) {
>>              container->dma_max_mappings = 65535;
>>          }
>> +
>> +        vfio_get_info_iova_range(info, container);
>> +
>>          vfio_get_iommu_info_migration(container, info);
>>          g_free(info);
>>  
>> @@ -765,7 +803,7 @@ enable_discards_exit:
>>      vfio_ram_block_discard_disable(container, false);
>>  
>>  free_container_exit:
>> -    g_free(container);
>> +    vfio_free_container(container);
>>  
>>  close_fd_exit:
>>      close(fd);
>> @@ -819,7 +857,7 @@ static void vfio_disconnect_container(VFIOGroup *group)
>>  
>>          trace_vfio_disconnect_container(container->fd);
>>          close(container->fd);
>> -        g_free(container);
>> +        vfio_free_container(container);
>>  
>>          vfio_put_address_space(space);
>>      }
