From: Alex Williamson
Subject: Re: [Qemu-devel] [PATCH v2 4/4] vfio/quirks: Enable ioeventfd quirks to be handled by vfio directly
Date: Thu, 3 May 2018 10:30:34 -0600

On Thu, 3 May 2018 17:20:18 +0200
Auger Eric <address@hidden> wrote:

> Hi Alex,
> 
> On 05/01/2018 06:43 PM, Alex Williamson wrote:
> > With vfio ioeventfd support, we can program vfio-pci to perform a
> > specified BAR write when an eventfd is triggered.  This allows the
> > KVM ioeventfd to be wired directly to vfio-pci, entirely avoiding
> > userspace handling for these events.  On the same micro-benchmark
> > where the ioeventfd got us to almost 90% of performance versus
> > disabling the GeForce quirks, this gets us to within 95%.
> > 
> > Signed-off-by: Alex Williamson <address@hidden>
> > ---
> >  hw/vfio/pci-quirks.c |   50 +++++++++++++++++++++++++++++++++++++++++++-------
> >  hw/vfio/pci.c        |    2 ++
> >  hw/vfio/pci.h        |    2 ++
> >  hw/vfio/trace-events |    2 +-
> >  4 files changed, 48 insertions(+), 8 deletions(-)
> > 
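[Editor's note: as an illustration of the wiring the commit message describes, here is a minimal standalone sketch, not the QEMU code in the patch below. It binds an eventfd to a 4-byte BAR register write via the VFIO_DEVICE_IOEVENTFD ioctl so that the vfio-pci kernel driver, rather than userspace, performs the write whenever the eventfd is signalled. The helper name and its parameters are illustrative placeholders; it assumes a kernel that exposes struct vfio_device_ioeventfd in <linux/vfio.h>.]

#include <stdint.h>
#include <unistd.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

/* Sketch only: ask vfio-pci to perform a 4-byte BAR write each time the
 * returned eventfd is signalled.  device_fd is an open VFIO device fd;
 * region_fd_offset is the BAR region's file offset (as reported by
 * VFIO_DEVICE_GET_REGION_INFO); reg_offset and value are the register
 * within the BAR and the value to write. */
static int wire_bar_write(int device_fd, uint64_t region_fd_offset,
                          uint64_t reg_offset, uint32_t value)
{
    int efd = eventfd(0, EFD_CLOEXEC);
    if (efd < 0) {
        return -1;
    }

    struct vfio_device_ioeventfd vie = {
        .argsz  = sizeof(vie),
        .flags  = VFIO_DEVICE_IOEVENTFD_32,       /* 4-byte write */
        .offset = region_fd_offset + reg_offset,  /* device-fd offset of the register */
        .data   = value,                          /* value the kernel writes on each signal */
        .fd     = efd,                            /* passing -1 here tears the binding down */
    };

    if (ioctl(device_fd, VFIO_DEVICE_IOEVENTFD, &vie) < 0) {
        close(efd);   /* kernel lacks the ioctl or rejected the binding:
                       * the caller falls back to a userspace handler,
                       * as the patch below does */
        return -1;
    }

    return efd;       /* the same eventfd can then be registered with KVM
                       * as an ioeventfd, bypassing userspace entirely */
}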
> > diff --git a/hw/vfio/pci-quirks.c b/hw/vfio/pci-quirks.c
> > index 4cedc733bc0a..94be27dd0a3b 100644
> > --- a/hw/vfio/pci-quirks.c
> > +++ b/hw/vfio/pci-quirks.c
> > @@ -16,6 +16,7 @@
> >  #include "qemu/range.h"
> >  #include "qapi/error.h"
> >  #include "qapi/visitor.h"
> > +#include <sys/ioctl.h>
> >  #include "hw/nvram/fw_cfg.h"
> >  #include "pci.h"
> >  #include "trace.h"
> > @@ -287,13 +288,31 @@ static VFIOQuirk *vfio_quirk_alloc(int nr_mem)
> >      return quirk;
> >  }
> >  
> > -static void vfio_ioeventfd_exit(VFIOIOEventFD *ioeventfd)
> > +static void vfio_ioeventfd_exit(VFIOPCIDevice *vdev, VFIOIOEventFD *ioeventfd)
> >  {
> >      QLIST_REMOVE(ioeventfd, next);
> > +  
> nit: unrelated new line

Fixed

> >      memory_region_del_eventfd(ioeventfd->mr, ioeventfd->addr, ioeventfd->size,
> >                                ioeventfd->match_data, ioeventfd->data,
> >                                &ioeventfd->e);
> > -    qemu_set_fd_handler(event_notifier_get_fd(&ioeventfd->e), NULL, NULL, NULL);
> > +
> > +    if (ioeventfd->vfio) {
> > +        struct vfio_device_ioeventfd vfio_ioeventfd;
> > +
> > +        vfio_ioeventfd.argsz = sizeof(vfio_ioeventfd);
> > +        vfio_ioeventfd.flags = ioeventfd->size;
> > +        vfio_ioeventfd.data = ioeventfd->data;
> > +        vfio_ioeventfd.offset = ioeventfd->region->fd_offset +
> > +                                ioeventfd->region_addr;
> > +        vfio_ioeventfd.fd = -1;
> > +
> > +        ioctl(vdev->vbasedev.fd, VFIO_DEVICE_IOEVENTFD, &vfio_ioeventfd);
> > +
> > +    } else {
> > +        qemu_set_fd_handler(event_notifier_get_fd(&ioeventfd->e),
> > +                            NULL, NULL, NULL);
> > +    }
> > +
> >      event_notifier_cleanup(&ioeventfd->e);
> >      trace_vfio_ioeventfd_exit(memory_region_name(ioeventfd->mr),
> >                                (uint64_t)ioeventfd->addr, ioeventfd->size,
> > @@ -307,7 +326,7 @@ static void vfio_drop_dynamic_eventfds(VFIOPCIDevice *vdev, VFIOQuirk *quirk)
> >  
> >      QLIST_FOREACH_SAFE(ioeventfd, &quirk->ioeventfds, next, tmp) {
> >          if (ioeventfd->dynamic) {
> > -            vfio_ioeventfd_exit(ioeventfd);
> > +            vfio_ioeventfd_exit(vdev, ioeventfd);
> >          }
> >      }
> >  }
> > @@ -361,13 +380,30 @@ static VFIOIOEventFD *vfio_ioeventfd_init(VFIOPCIDevice *vdev,
> >      ioeventfd->region = region;
> >      ioeventfd->region_addr = region_addr;
> >  
> > -    qemu_set_fd_handler(event_notifier_get_fd(&ioeventfd->e),
> > -                        vfio_ioeventfd_handler, NULL, ioeventfd);
> > +    if (!vdev->no_vfio_ioeventfd) {
> > +        struct vfio_device_ioeventfd vfio_ioeventfd;
> > +
> > +        vfio_ioeventfd.argsz = sizeof(vfio_ioeventfd);
> > +        vfio_ioeventfd.flags = ioeventfd->size;
> > +        vfio_ioeventfd.data = ioeventfd->data;
> > +        vfio_ioeventfd.offset = ioeventfd->region->fd_offset +
> > +                                ioeventfd->region_addr;
> > +        vfio_ioeventfd.fd = event_notifier_get_fd(&ioeventfd->e);
> > +
> > +        ioeventfd->vfio = !ioctl(vdev->vbasedev.fd,
> > +                                 VFIO_DEVICE_IOEVENTFD, &vfio_ioeventfd);
> > +    }
> > +
> > +    if (!ioeventfd->vfio) {
> > +        qemu_set_fd_handler(event_notifier_get_fd(&ioeventfd->e),
> > +                            vfio_ioeventfd_handler, NULL, ioeventfd);
> > +    }
> > +
> >      memory_region_add_eventfd(ioeventfd->mr, ioeventfd->addr,
> >                                ioeventfd->size, ioeventfd->match_data,
> >                                ioeventfd->data, &ioeventfd->e);
> >      trace_vfio_ioeventfd_init(memory_region_name(mr), (uint64_t)addr,
> > -                              size, data);
> > +                              size, data, ioeventfd->vfio);
> >  
> >      return ioeventfd;
> >  }
> > @@ -1835,7 +1871,7 @@ void vfio_bar_quirk_exit(VFIOPCIDevice *vdev, int nr)
> >  
> >      QLIST_FOREACH(quirk, &bar->quirks, next) {
> >          while (!QLIST_EMPTY(&quirk->ioeventfds)) {
> > -            vfio_ioeventfd_exit(QLIST_FIRST(&quirk->ioeventfds));
> > +            vfio_ioeventfd_exit(vdev, QLIST_FIRST(&quirk->ioeventfds));
> >          }
> >  
> >          for (i = 0; i < quirk->nr_mem; i++) {
> > diff --git a/hw/vfio/pci.c b/hw/vfio/pci.c
> > index ba1239551115..84e27c7bb2d1 100644
> > --- a/hw/vfio/pci.c
> > +++ b/hw/vfio/pci.c
> > @@ -3177,6 +3177,8 @@ static Property vfio_pci_dev_properties[] = {
> >                       no_geforce_quirks, false),
> >      DEFINE_PROP_BOOL("x-no-kvm-ioeventfd", VFIOPCIDevice, no_kvm_ioeventfd,
> >                       false),
> > +    DEFINE_PROP_BOOL("x-no-vfio-ioeventfd", VFIOPCIDevice, 
> > no_vfio_ioeventfd,
> > +                     false),  
> I tend to agree with Peter about the two options. Is it only the KVM
> acceleration that brings a benefit here?

Consolidated response in reply to Peter.  Thanks,

Alex


