qemu-devel
[Top][All Lists]
Advanced

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Qemu-devel] [PATCH v4 02/12] vfio: Create VFIOAddressSpace objects


From: Alexey Kardashevskiy
Subject: Re: [Qemu-devel] [PATCH v4 02/12] vfio: Create VFIOAddressSpace objects as needed
Date: Tue, 10 Sep 2013 18:09:23 +1000
User-agent: Mozilla/5.0 (X11; Linux x86_64; rv:17.0) Gecko/20130805 Thunderbird/17.0.8

On 09/06/2013 04:24 AM, Alex Williamson wrote:
> On Fri, 2013-08-30 at 20:15 +1000, Alexey Kardashevskiy wrote:
>> From: David Gibson <address@hidden>
>>
>> So far, VFIO has a notion of different logical DMA address spaces, but
>> only ever uses one (system memory).  This patch extends this, creating
>> new VFIOAddressSpace objects as necessary, according to the AddressSpace
>> reported by the PCI subsystem for this device's DMAs.
>>
>> This isn't enough yet to support guest side IOMMUs with VFIO, but it does
>> mean we could now support VFIO devices on, for example, a guest side PCI
>> host bridge which maps system memory at somewhere other than 0 in PCI
>> space.
>>
>> Signed-off-by: David Gibson <address@hidden>
>> Signed-off-by: Alexey Kardashevskiy <address@hidden>
>> ---
>>  hw/misc/vfio.c | 43 +++++++++++++++++++++++++++++++++++--------
>>  1 file changed, 35 insertions(+), 8 deletions(-)
>>
>> diff --git a/hw/misc/vfio.c b/hw/misc/vfio.c
>> index 93a316e..c16f41b 100644
>> --- a/hw/misc/vfio.c
>> +++ b/hw/misc/vfio.c
>> @@ -133,9 +133,10 @@ enum {
>>  typedef struct VFIOAddressSpace {
>>      AddressSpace *as;
>>      QLIST_HEAD(, VFIOContainer) containers;
>> +    QLIST_ENTRY(VFIOAddressSpace) list;
>>  } VFIOAddressSpace;
>>  
>> -static VFIOAddressSpace vfio_address_space_memory;
>> +QLIST_HEAD(, VFIOAddressSpace) vfio_address_spaces;
>>  
>>  struct VFIOGroup;
>>  
>> @@ -2611,10 +2612,34 @@ static int vfio_load_rom(VFIODevice *vdev)
>>      return 0;
>>  }
>>  
>> -static void vfio_address_space_init(VFIOAddressSpace *space, AddressSpace *as)
>> +static VFIOAddressSpace *vfio_get_address_space(AddressSpace *as)
>>  {
>> +    VFIOAddressSpace *space;
>> +
>> +    QLIST_FOREACH(space, &vfio_address_spaces, list) {
>> +        if (space->as == as) {
>> +            return space;
>> +        }
>> +    }
>> +
>> +    /* No suitable VFIOAddressSpace, create a new one */
>> +    space = g_malloc0(sizeof(*space));
>>      space->as = as;
>>      QLIST_INIT(&space->containers);
>> +
>> +    QLIST_INSERT_HEAD(&vfio_address_spaces, space, list);
>> +
>> +    return space;
>> +}
>> +
>> +static void vfio_put_address_space(VFIOAddressSpace *space)
>> +{
>> +    if (!QLIST_EMPTY(&space->containers)) {
>> +        return;
>> +    }
>> +
>> +    QLIST_REMOVE(space, list);
>> +    g_free(space);
>>  }
>>  
>>  static int vfio_connect_container(VFIOGroup *group, VFIOAddressSpace *space)
>> @@ -2699,6 +2724,8 @@ static void vfio_disconnect_container(VFIOGroup *group)
>>      group->container = NULL;
>>  
>>      if (QLIST_EMPTY(&container->group_list)) {
>> +        VFIOAddressSpace *space = container->space;
>> +
>>          if (container->iommu_data.release) {
>>              container->iommu_data.release(container);
>>          }
>> @@ -2706,6 +2733,8 @@ static void vfio_disconnect_container(VFIOGroup *group)
>>          DPRINTF("vfio_disconnect_container: close container->fd\n");
>>          close(container->fd);
>>          g_free(container);
>> +
>> +        vfio_put_address_space(space);
>>      }
>>  }
>>  
>> @@ -3076,6 +3105,7 @@ static int vfio_initfn(PCIDevice *pdev)
>>  {
>>      VFIODevice *pvdev, *vdev = DO_UPCAST(VFIODevice, pdev, pdev);
>>      VFIOGroup *group;
>> +    VFIOAddressSpace *space;
>>      char path[PATH_MAX], iommu_group_path[PATH_MAX], *group_name;
>>      ssize_t len;
>>      struct stat st;
>> @@ -3111,14 +3141,12 @@ static int vfio_initfn(PCIDevice *pdev)
>>      DPRINTF("%s(%04x:%02x:%02x.%x) group %d\n", __func__, vdev->host.domain,
>>              vdev->host.bus, vdev->host.slot, vdev->host.function, groupid);
>>  
>> -    if (pci_device_iommu_address_space(pdev) != &address_space_memory) {
>> -        error_report("vfio: DMA address space must be system memory");
>> -        return -EINVAL;
>> -    }
>> +    space = vfio_get_address_space(pci_device_iommu_address_space(pdev));
>>  
>> -    group = vfio_get_group(groupid, &vfio_address_space_memory);
>> +    group = vfio_get_group(groupid, space);
>>      if (!group) {
>>          error_report("vfio: failed to get group %d", groupid);
>> +        vfio_put_address_space(space);
>>          return -ENOENT;
>>      }
>>  
> 
> Kind of a code flow issue here, on teardown we have:
> 
> vfio_put_group
>   vfio_disconnect_container
>     vfio_put_address_space
> 
> On setup we do:
> 
> vfio_get_address_space
> vfio_get_group
>   vfio_connect_container
> 
> We could easily move vfio_get_address_space into vfio_get_group to make
> things a little more balanced.  It doesn't seem like too much additional
> to pass the address space through vfio_get_group into
> vfio_connect_container so that we could have a completely symmetric flow
> though.

I can do that. I will just need to call vfio_put_address_space() on every
branch which returns NULL. Or rework a bit more. So I ended up with this:

(not a patch, just cut-n-paste).
===

-static VFIOGroup *vfio_get_group(int groupid, VFIOAddressSpace *space)
+static VFIOGroup *vfio_get_group(int groupid, AddressSpace *as)
 {
+    VFIOAddressSpace *space;
     VFIOGroup *group;
     char path[32];
     struct vfio_group_status status = { .argsz = sizeof(status) };

+    space = vfio_get_address_space(as);
+
     QLIST_FOREACH(group, &group_list, next) {
         if (group->groupid == groupid) {
             /* Found it.  Now is it already in the right context? */
@@ -2723,7 +2755,7 @@ static VFIOGroup *vfio_get_group(int groupid, VFIOAddressSpace *space)
             } else {
                 error_report("vfio: group %d used in multiple address spaces",
                              group->groupid);
-                return NULL;
+                goto error_exit;
             }
         }
     }
@@ -2734,24 +2766,19 @@ static VFIOGroup *vfio_get_group(int groupid, VFIOAddressSpace *space)
     group->fd = qemu_open(path, O_RDWR);
     if (group->fd < 0) {
         error_report("vfio: error opening %s: %m", path);
-        g_free(group);
-        return NULL;
+        goto free_group_exit;
     }

     if (ioctl(group->fd, VFIO_GROUP_GET_STATUS, &status)) {
         error_report("vfio: error getting group status: %m");
-        close(group->fd);
-        g_free(group);
-        return NULL;
+        goto close_fd_exit;
     }

     if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE)) {
         error_report("vfio: error, group %d is not viable, please ensure "
                      "all devices within the iommu_group are bound to their "
                      "vfio bus driver.", groupid);
-        close(group->fd);
-        g_free(group);
-        return NULL;
+        goto close_fd_exit;
     }

     group->groupid = groupid;
@@ -2759,14 +2786,23 @@ static VFIOGroup *vfio_get_group(int groupid, VFIOAddressSpace *space)

     if (vfio_connect_container(group, space)) {
         error_report("vfio: failed to setup container for group %d", groupid);
-        close(group->fd);
-        g_free(group);
-        return NULL;
+        goto close_fd_exit;
     }

     QLIST_INSERT_HEAD(&group_list, group, next);

     return group;
+
+close_fd_exit:
+    close(group->fd);
+
+free_group_exit:
+    g_free(group);
+
+error_exit:
+    vfio_put_address_space(space);
+
+    return NULL;
 }

===

Is that ok? Split it into 2 patches for easier review?


> 
> 
>> @@ -3339,7 +3367,6 @@ static const TypeInfo vfio_pci_dev_info = {
>>  
>>  static void register_vfio_pci_dev_type(void)
>>  {
>> -    vfio_address_space_init(&vfio_address_space_memory, &address_space_memory);
>>      type_register_static(&vfio_pci_dev_info);
>>  }
>>  
> 
> 
> 


-- 
Alexey



reply via email to

[Prev in Thread] Current Thread [Next in Thread]