Re: [Qemu-devel] [Qemu-ppc] [PATCH qemu v13 11/16] vfio: spapr: Add SPAPR IOMMU v2 support (DMA memory preregistering)


From: David Gibson
Subject: Re: [Qemu-devel] [Qemu-ppc] [PATCH qemu v13 11/16] vfio: spapr: Add SPAPR IOMMU v2 support (DMA memory preregistering)
Date: Thu, 3 Mar 2016 17:30:13 +1100
User-agent: Mutt/1.5.24 (2015-08-30)

On Tue, Mar 01, 2016 at 08:10:36PM +1100, Alexey Kardashevskiy wrote:
> This makes use of the new "memory registering" feature. The idea is
> to give userspace the ability to notify the host kernel about pages
> which are going to be used for DMA. Having this information, the host
> kernel can pin them all once per user process, do locked-pages
> accounting (once), and not spend time doing that at run time, where
> possible failures cannot be handled nicely in some cases.
> 
> This adds a prereg memory listener which listens on address_space_memory
> and notifies a VFIO container about memory which needs to be
> pinned/unpinned. VFIO MMIO regions (i.e. "skip dump" regions) are skipped.
> 
> As there is no per-IOMMU-type release() callback anymore, this stores
> the IOMMU type in the container so vfio_listener_release() can decide
> if it needs to unregister @prereg_listener.
> 
> The feature is only enabled for SPAPR IOMMU v2. Host kernel changes
> are required. Since v2 does not need/support VFIO_IOMMU_ENABLE, this does
> not call it when v2 is detected and enabled.
> 
> This does not change the guest visible interface.
> 
> Signed-off-by: Alexey Kardashevskiy <address@hidden>
> ---
>  hw/vfio/Makefile.objs         |   1 +
>  hw/vfio/common.c              |  39 +++++++++---
>  hw/vfio/prereg.c              | 138 ++++++++++++++++++++++++++++++++++++++++++
>  include/hw/vfio/vfio-common.h |   4 ++
>  trace-events                  |   2 +
>  5 files changed, 175 insertions(+), 9 deletions(-)
>  create mode 100644 hw/vfio/prereg.c
> 
> diff --git a/hw/vfio/Makefile.objs b/hw/vfio/Makefile.objs
> index ceddbb8..5800e0e 100644
> --- a/hw/vfio/Makefile.objs
> +++ b/hw/vfio/Makefile.objs
> @@ -4,4 +4,5 @@ obj-$(CONFIG_PCI) += pci.o pci-quirks.o
>  obj-$(CONFIG_SOFTMMU) += platform.o
>  obj-$(CONFIG_SOFTMMU) += calxeda-xgmac.o
>  obj-$(CONFIG_SOFTMMU) += amd-xgbe.o
> +obj-$(CONFIG_SOFTMMU) += prereg.o
>  endif
> diff --git a/hw/vfio/common.c b/hw/vfio/common.c
> index 3aaa6b5..f2a03e0 100644
> --- a/hw/vfio/common.c
> +++ b/hw/vfio/common.c
> @@ -531,6 +531,9 @@ static const MemoryListener vfio_iommu_listener = {
>  static void vfio_listener_release(VFIOContainer *container)
>  {
>      memory_listener_unregister(&container->iommu_listener.listener);
> +    if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) {
> +        memory_listener_unregister(&container->prereg_listener.listener);
> +    }
>  }
>  
>  int vfio_mmap_region(Object *obj, VFIORegion *region,
> @@ -722,8 +725,8 @@ static int vfio_connect_container(VFIOGroup *group, AddressSpace *as)
>              goto free_container_exit;
>          }
>  
> -        ret = ioctl(fd, VFIO_SET_IOMMU,
> -                    v2 ? VFIO_TYPE1v2_IOMMU : VFIO_TYPE1_IOMMU);
> +        container->iommu_type = v2 ? VFIO_TYPE1v2_IOMMU : VFIO_TYPE1_IOMMU;
> +        ret = ioctl(fd, VFIO_SET_IOMMU, container->iommu_type);
>          if (ret) {
>              error_report("vfio: failed to set iommu for container: %m");
>              ret = -errno;
> @@ -748,8 +751,10 @@ static int vfio_connect_container(VFIOGroup *group, AddressSpace *as)
>          if ((ret == 0) && (info.flags & VFIO_IOMMU_INFO_PGSIZES)) {
>              container->iova_pgsizes = info.iova_pgsizes;
>          }
> -    } else if (ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_SPAPR_TCE_IOMMU)) {
> +    } else if (ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_SPAPR_TCE_IOMMU) ||
> +               ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_SPAPR_TCE_v2_IOMMU)) {
>          struct vfio_iommu_spapr_tce_info info;
> +        bool v2 = !!ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_SPAPR_TCE_v2_IOMMU);
>  
>          ret = ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &fd);
>          if (ret) {
> @@ -757,7 +762,9 @@ static int vfio_connect_container(VFIOGroup *group, AddressSpace *as)
>              ret = -errno;
>              goto free_container_exit;
>          }
> -        ret = ioctl(fd, VFIO_SET_IOMMU, VFIO_SPAPR_TCE_IOMMU);
> +        container->iommu_type =
> +            v2 ? VFIO_SPAPR_TCE_v2_IOMMU : VFIO_SPAPR_TCE_IOMMU;
> +        ret = ioctl(fd, VFIO_SET_IOMMU, container->iommu_type);

It'd be nice to consolidate the setting of container->iommu_type and
then the SET_IOMMU ioctl() rather than having more or less duplicated
logic for Type1 and SPAPR, but it's not a big deal.
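
For illustration only, the sort of consolidation I have in mind - a rough,
untested sketch using a hypothetical helper, not something this patch has
to do:

    /* Assumes <sys/ioctl.h> and <linux/vfio.h>, which common.c already includes */
    static int vfio_get_iommu_type(int fd)
    {
        int types[] = { VFIO_TYPE1v2_IOMMU, VFIO_TYPE1_IOMMU,
                        VFIO_SPAPR_TCE_v2_IOMMU, VFIO_SPAPR_TCE_IOMMU };
        int i;

        /* Probe from most to least preferred and take the first supported type */
        for (i = 0; i < ARRAY_SIZE(types); i++) {
            if (ioctl(fd, VFIO_CHECK_EXTENSION, types[i])) {
                return types[i];
            }
        }
        return -EINVAL;
    }

    /* ...then, after VFIO_GROUP_SET_CONTAINER, a single common path: */
    container->iommu_type = vfio_get_iommu_type(fd);
    ret = ioctl(fd, VFIO_SET_IOMMU, container->iommu_type);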

>          if (ret) {
>              error_report("vfio: failed to set iommu for container: %m");
>              ret = -errno;
> @@ -769,11 +776,25 @@ static int vfio_connect_container(VFIOGroup *group, AddressSpace *as)
>           * when container fd is closed so we do not call it explicitly
>           * in this file.
>           */
> -        ret = ioctl(fd, VFIO_IOMMU_ENABLE);
> -        if (ret) {
> -            error_report("vfio: failed to enable container: %m");
> -            ret = -errno;
> -            goto free_container_exit;
> +        if (!v2) {
> +            ret = ioctl(fd, VFIO_IOMMU_ENABLE);
> +            if (ret) {
> +                error_report("vfio: failed to enable container: %m");
> +                ret = -errno;
> +                goto free_container_exit;
> +            }
> +        } else {
> +            container->prereg_listener.container = container;
> +            container->prereg_listener.listener = vfio_prereg_listener;
> +
> +            memory_listener_register(&container->prereg_listener.listener,
> +                                     &address_space_memory);

This assumes that the target address space of the (guest) IOMMU is
address_space_memory.  Which is fine - vfio already assumes that - but
it reminds me that it'd be nice to have an explicit check for that (I
guess it would have to go in vfio_iommu_map_notify()).  So that if
someone constructs a machine where that's not the case, it'll at least
be obvious why VFIO isn't working.
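
Something along these lines near the top of vfio_iommu_map_notify() would do
- a sketch only, assuming (as I believe is the case) that the IOMMUTLBEntry
handed to the notifier carries its target_as:

    IOMMUTLBEntry *iotlb = data;

    /* Make the "IOMMU target AS must be system memory" assumption explicit */
    if (iotlb->target_as != &address_space_memory) {
        error_report("vfio: IOMMU target address space is not system memory, "
                     "DMA to this device will not work");
        return;
    }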

> +            if (container->error) {
> +                error_report("vfio: RAM memory listener initialization failed for container");
> +                memory_listener_unregister(
> +                    &container->prereg_listener.listener);
> +                goto free_container_exit;
> +            }
>          }

Looks like you don't have an error path which will handle the case
where the prereg listener is registered, but registering the normal
PCI AS listener fails - I believe you will fail to unregister the
prereg listener in that case.
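
One way to close that hole, as a rough sketch only (names as in this patch,
cleanup label approximate, untested): since the IOMMU type is now stored in
the container, the failure path after registering the main listener could
simply go through vfio_listener_release(), which with this patch also drops
the prereg listener for SPAPR v2:

    memory_listener_register(&container->iommu_listener.listener,
                             container->space->as);
    if (container->error) {
        error_report("vfio: memory listener initialization failed"
                     " for container");
        /* unregisters the main and, for SPAPR v2, the prereg listener */
        vfio_listener_release(container);
        goto free_container_exit;   /* or whatever cleanup label applies here */
    }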

>          /*
> diff --git a/hw/vfio/prereg.c b/hw/vfio/prereg.c
> new file mode 100644
> index 0000000..66cd696
> --- /dev/null
> +++ b/hw/vfio/prereg.c
> @@ -0,0 +1,138 @@
> +/*
> + * DMA memory preregistration
> + *
> + * Authors:
> + *  Alexey Kardashevskiy <address@hidden>
> + *
> + * This work is licensed under the terms of the GNU GPL, version 2.  See
> + * the COPYING file in the top-level directory.
> + */
> +
> +#include "qemu/osdep.h"
> +#include <sys/ioctl.h>
> +#include <linux/vfio.h>
> +
> +#include "hw/vfio/vfio-common.h"
> +#include "hw/vfio/vfio.h"
> +#include "qemu/error-report.h"
> +#include "trace.h"
> +
> +static bool vfio_prereg_listener_skipped_section(MemoryRegionSection *section)
> +{
> +    return (!memory_region_is_ram(section->mr) &&
> +            !memory_region_is_iommu(section->mr)) ||
> +            memory_region_is_skip_dump(section->mr);
> +}
> +
> +static void vfio_prereg_listener_region_add(MemoryListener *listener,
> +                                            MemoryRegionSection *section)
> +{
> +    VFIOMemoryListener *vlistener = container_of(listener, VFIOMemoryListener,
> +                                                 listener);
> +    VFIOContainer *container = vlistener->container;
> +    hwaddr iova;
> +    Int128 llend;
> +    int ret;
> +    hwaddr page_mask = qemu_real_host_page_mask;
> +    struct vfio_iommu_spapr_register_memory reg = {
> +        .argsz = sizeof(reg),
> +        .flags = 0,
> +    };
> +
> +    if (vfio_prereg_listener_skipped_section(section)) {
> +        trace_vfio_listener_region_add_skip(
> +                section->offset_within_address_space,
> +                section->offset_within_address_space +
> +                int128_get64(int128_sub(section->size, int128_one())));
> +        return;
> +    }

You should probably explicitly check for IOMMU regions and abort if
you find one.  An IOMMU AS appearing within address_space_memory would
be bad news.
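
i.e. something along the lines of (sketch):

    /* right after the skipped-section check in vfio_prereg_listener_region_add() */
    if (memory_region_is_iommu(section->mr)) {
        hw_error("Cannot preregister IOMMU memory");
    }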

> +    if (unlikely((section->offset_within_address_space & ~page_mask) !=
> +                 (section->offset_within_region & ~page_mask))) {
> +        error_report("%s received unaligned region", __func__);
> +        return;
> +    }
> +
> +    iova = ROUND_UP(section->offset_within_address_space, ~page_mask + 1);

iova is a terrible name here.  This is *not* an IOVA, but a real
memory address.

> +    llend = int128_make64(section->offset_within_address_space);
> +    llend = int128_add(llend, section->size);
> +    llend = int128_and(llend, int128_exts64(page_mask));
> +
> +    if (int128_ge(int128_make64(iova), llend)) {
> +        return;

IIUC, if we get here something has gone horribly wrong in our machine
setup, and we should probably just abort.  Same goes for the similar
test in the IOVA listener, of course.

> +    }
> +
> +    memory_region_ref(section->mr);
> +
> +    reg.vaddr = (__u64) memory_region_get_ram_ptr(section->mr) +
> +        section->offset_within_region +
> +        (iova - section->offset_within_address_space);
> +    reg.size = int128_get64(llend) - iova;
> +
> +    ret = ioctl(container->fd, VFIO_IOMMU_SPAPR_REGISTER_MEMORY, &reg);
> +    trace_vfio_ram_register(reg.vaddr, reg.size, ret ? -errno : 0);
> +    if (ret) {
> +        /*
> +         * On the initfn path, store the first error in the container so we
> +         * can gracefully fail.  Runtime, there's not much we can do other
> +         * than throw a hardware error.
> +         */
> +        if (!container->initialized) {
> +            if (!container->error) {
> +                container->error = ret;
> +            }
> +        } else {
> +            hw_error("vfio: DMA mapping failed, unable to continue");

Wrong error message - this is a memory registration failure, not a DMA
mapping failure.

> +        }
> +    }
> +}
> +
> +static void vfio_prereg_listener_region_del(MemoryListener *listener,
> +                                            MemoryRegionSection *section)
> +{
> +    VFIOMemoryListener *vlistener = container_of(listener, VFIOMemoryListener,
> +                                                 listener);
> +    VFIOContainer *container = vlistener->container;
> +    hwaddr iova, end;
> +    int ret;
> +    hwaddr page_mask = qemu_real_host_page_mask;
> +    struct vfio_iommu_spapr_register_memory reg = {
> +        .argsz = sizeof(reg),
> +        .flags = 0,
> +    };
> +
> +    if (vfio_prereg_listener_skipped_section(section)) {
> +        trace_vfio_listener_region_del_skip(
> +                section->offset_within_address_space,
> +                section->offset_within_address_space +
> +                int128_get64(int128_sub(section->size, int128_one())));
> +        return;
> +    }
> +
> +    if (unlikely((section->offset_within_address_space & ~page_mask) !=
> +                 (section->offset_within_region & ~page_mask))) {
> +        error_report("%s received unaligned region", __func__);
> +        return;
> +    }
> +
> +    iova = ROUND_UP(section->offset_within_address_space, ~page_mask + 1);
> +    end = (section->offset_within_address_space + int128_get64(section->size)) &
> +        page_mask;
> +
> +    if (iova >= end) {
> +        return;
> +    }
> +
> +    reg.vaddr = (__u64) memory_region_get_ram_ptr(section->mr) +
> +        section->offset_within_region +
> +        (iova - section->offset_within_address_space);
> +    reg.size = end - iova;
> +
> +    ret = ioctl(container->fd, VFIO_IOMMU_SPAPR_UNREGISTER_MEMORY, &reg);
> +    trace_vfio_ram_unregister(reg.vaddr, reg.size, ret ? -errno : 0);
> +}
> +
> +const MemoryListener vfio_prereg_listener = {
> +    .region_add = vfio_prereg_listener_region_add,
> +    .region_del = vfio_prereg_listener_region_del,
> +};
> diff --git a/include/hw/vfio/vfio-common.h b/include/hw/vfio/vfio-common.h
> index b6b736c..bcbc5cb 100644
> --- a/include/hw/vfio/vfio-common.h
> +++ b/include/hw/vfio/vfio-common.h
> @@ -70,6 +70,8 @@ typedef struct VFIOContainer {
>      VFIOAddressSpace *space;
>      int fd; /* /dev/vfio/vfio, empowered by the attached groups */
>      VFIOMemoryListener iommu_listener;
> +    VFIOMemoryListener prereg_listener;
> +    unsigned iommu_type;
>      int error;
>      bool initialized;
>      /*
> @@ -146,4 +148,6 @@ extern const MemoryRegionOps vfio_region_ops;
>  extern QLIST_HEAD(vfio_group_head, VFIOGroup) vfio_group_list;
>  extern QLIST_HEAD(vfio_as_head, VFIOAddressSpace) vfio_address_spaces;
>  
> +extern const MemoryListener vfio_prereg_listener;
> +
>  #endif /* !HW_VFIO_VFIO_COMMON_H */
> diff --git a/trace-events b/trace-events
> index 4b6ea70..f5335ec 100644
> --- a/trace-events
> +++ b/trace-events
> @@ -1725,6 +1725,8 @@ vfio_disconnect_container(int fd) "close container->fd=%d"
>  vfio_put_group(int fd) "close group->fd=%d"
>  vfio_get_device(const char * name, unsigned int flags, unsigned int num_regions, unsigned int num_irqs) "Device %s flags: %u, regions: %u, irqs: %u"
>  vfio_put_base_device(int fd) "close vdev->fd=%d"
> +vfio_ram_register(uint64_t va, uint64_t size, int ret) "va=%"PRIx64" size=%"PRIx64" ret=%d"
> +vfio_ram_unregister(uint64_t va, uint64_t size, int ret) "va=%"PRIx64" size=%"PRIx64" ret=%d"
>  
>  # hw/vfio/platform.c
>  vfio_platform_populate_regions(int region_index, unsigned long flag, unsigned long size, int fd, unsigned long offset) "- region %d flags = 0x%lx, size = 0x%lx, fd= %d, offset = 0x%lx"

-- 
David Gibson                    | I'll have my music baroque, and my code
david AT gibson.dropbear.id.au  | minimalist, thank you.  NOT _the_ _other_
                                | _way_ _around_!
http://www.ozlabs.org/~dgibson
