From: Stefan Hajnoczi
Subject: [Qemu-devel] Re: [PATCH v3 2/3] virtio-pci: Use ioeventfd for virtqueue notify
Date: Tue, 16 Nov 2010 16:55:36 +0000

On Tue, Nov 16, 2010 at 4:02 PM, Michael S. Tsirkin <address@hidden> wrote:
> On Fri, Nov 12, 2010 at 01:24:28PM +0000, Stefan Hajnoczi wrote:
>> Virtqueue notify is currently handled synchronously in userspace virtio.
>> This prevents the vcpu from executing guest code while hardware emulation
>> code handles the notify.
>>
>> On systems that support KVM, the ioeventfd mechanism can be used to make
>> virtqueue notify a lightweight exit by deferring hardware emulation to the
>> iothread and allowing the VM to continue execution.  This model is similar to
>> how vhost receives virtqueue notifies.
>>
>> The result of this change is improved performance for userspace virtio
>> devices.  Virtio-blk throughput increases especially for multithreaded
>> scenarios and virtio-net transmit throughput increases substantially.
>>
>> Some virtio devices are known to have guest drivers which expect a notify
>> to be processed synchronously and spin waiting for completion.  Only
>> enable ioeventfd for virtio-blk and virtio-net for now.
>>
>> Care must be taken not to interfere with vhost-net, which already uses
>> ioeventfd host notifiers.  The following list shows the behavior
>> implemented in this patch and is designed to take vhost-net into account:
>>
>>  * VIRTIO_CONFIG_S_DRIVER_OK -> assign host notifiers,
>>    qemu_set_fd_handler(virtio_pci_host_notifier_read)
>>  * !VIRTIO_CONFIG_S_DRIVER_OK -> qemu_set_fd_handler(NULL),
>>    deassign host notifiers
>>  * virtio_pci_set_host_notifier(true) -> qemu_set_fd_handler(NULL)
>>  * virtio_pci_set_host_notifier(false) ->
>>    qemu_set_fd_handler(virtio_pci_host_notifier_read)
>>
>> Signed-off-by: Stefan Hajnoczi <address@hidden>
>> ---
>>  hw/virtio-pci.c |  152 ++++++++++++++++++++++++++++++++++++++++++++++--------
>>  hw/virtio.c     |   14 ++++-
>>  hw/virtio.h     |   13 +++++
>>  3 files changed, 153 insertions(+), 26 deletions(-)
>>
>> Now toggles host notifiers based on VIRTIO_CONFIG_S_DRIVER_OK status changes.
>> The cleanest way I could see was to introduce pre and post set_status()
>> callbacks.  They allow a binding to hook status changes, including the status
>> change from virtio_reset().
>>
>> diff --git a/hw/virtio-pci.c b/hw/virtio-pci.c
>> index 549118d..117e855 100644
>> --- a/hw/virtio-pci.c
>> +++ b/hw/virtio-pci.c
>> @@ -83,6 +83,11 @@
>>  /* Flags track per-device state like workarounds for quirks in older guests. */
>>  #define VIRTIO_PCI_FLAG_BUS_MASTER_BUG  (1 << 0)
>>
>> +/* Performance improves when virtqueue kick processing is decoupled from the
>> + * vcpu thread using ioeventfd for some devices. */
>> +#define VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT 1
>> +#define VIRTIO_PCI_FLAG_USE_IOEVENTFD   (1 << VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT)
>> +
>>  /* QEMU doesn't strictly need write barriers since everything runs in
>>   * lock-step.  We'll leave the calls to wmb() in though to make it obvious for
>>   * KVM or if kqemu gets SMP support.
>> @@ -179,12 +184,125 @@ static int virtio_pci_load_queue(void * opaque, int n, QEMUFile *f)
>>      return 0;
>>  }
>>
>> +static int virtio_pci_set_host_notifier_ioeventfd(VirtIOPCIProxy *proxy,
>> +                                                  int n, bool assign)
>> +{
>> +    VirtQueue *vq = virtio_get_queue(proxy->vdev, n);
>> +    EventNotifier *notifier = virtio_queue_get_host_notifier(vq);
>> +    int r;
>> +    if (assign) {
>> +        r = event_notifier_init(notifier, 1);
>> +        if (r < 0) {
>> +            return r;
>> +        }
>> +        r = kvm_set_ioeventfd_pio_word(event_notifier_get_fd(notifier),
>> +                                       proxy->addr + VIRTIO_PCI_QUEUE_NOTIFY,
>> +                                       n, assign);
>> +        if (r < 0) {
>> +            event_notifier_cleanup(notifier);
>> +        }
>> +    } else {
>> +        r = kvm_set_ioeventfd_pio_word(event_notifier_get_fd(notifier),
>> +                                       proxy->addr + VIRTIO_PCI_QUEUE_NOTIFY,
>> +                                       n, assign);
>> +        if (r < 0) {
>> +            return r;
>> +        }
>> +        event_notifier_cleanup(notifier);
>> +    }
>> +    return r;
>> +}
>> +
>> +static void virtio_pci_host_notifier_read(void *opaque)
>> +{
>> +    VirtQueue *vq = opaque;
>> +    EventNotifier *n = virtio_queue_get_host_notifier(vq);
>> +    if (event_notifier_test_and_clear(n)) {
>> +        virtio_queue_notify_vq(vq);
>> +    }
>> +}
>> +
>> +static void virtio_pci_set_host_notifier_fd_handler(VirtIOPCIProxy *proxy,
>> +                                                    int n, bool assign)
>> +{
>> +    VirtQueue *vq = virtio_get_queue(proxy->vdev, n);
>> +    EventNotifier *notifier = virtio_queue_get_host_notifier(vq);
>> +    if (assign) {
>> +        qemu_set_fd_handler(event_notifier_get_fd(notifier),
>> +                            virtio_pci_host_notifier_read, NULL, vq);
>> +    } else {
>> +        qemu_set_fd_handler(event_notifier_get_fd(notifier),
>> +                            NULL, NULL, NULL);
>> +    }
>> +}
>> +
>> +static int virtio_pci_set_host_notifiers(VirtIOPCIProxy *proxy, bool assign)
>> +{
>> +    int n, r;
>> +
>> +    for (n = 0; n < VIRTIO_PCI_QUEUE_MAX; n++) {
>> +        if (!virtio_queue_get_num(proxy->vdev, n)) {
>> +            continue;
>> +        }
>> +
>> +        if (assign) {
>> +            r = virtio_pci_set_host_notifier_ioeventfd(proxy, n, true);
>> +            if (r < 0) {
>> +                goto assign_error;
>> +            }
>> +
>> +            virtio_pci_set_host_notifier_fd_handler(proxy, n, true);
>> +        } else {
>> +            virtio_pci_set_host_notifier_fd_handler(proxy, n, false);
>> +            virtio_pci_set_host_notifier_ioeventfd(proxy, n, false);
>> +        }
>> +    }
>> +    return 0;
>> +
>> +assign_error:
>> +    while (--n >= 0) {
>> +        virtio_pci_set_host_notifier_fd_handler(proxy, n, false);
>> +        virtio_pci_set_host_notifier_ioeventfd(proxy, n, false);
>> +    }
>> +    return r;
>> +}
>> +
>> +static void virtio_pci_pre_set_status(void *opaque, uint8_t oldval,
>> +                                      uint8_t newval)
>> +{
>> +    VirtIOPCIProxy *proxy = opaque;
>> +    bool changed = (oldval ^ newval) & VIRTIO_CONFIG_S_DRIVER_OK;
>> +    bool driver_ok = newval & VIRTIO_CONFIG_S_DRIVER_OK;
>> +
>> +    /* Bring up host notifiers before virtio device may request them */
>> +    if (changed && driver_ok &&
>> +        (proxy->flags & VIRTIO_PCI_FLAG_USE_IOEVENTFD)) {
>> +        if (virtio_pci_set_host_notifiers(proxy, true) < 0) {
>> +            proxy->flags &= ~VIRTIO_PCI_FLAG_USE_IOEVENTFD;
>> +        }
>> +    }
>> +}
>> +
>> +static void virtio_pci_post_set_status(void *opaque, uint8_t oldval,
>> +                                       uint8_t newval)
>> +{
>> +    VirtIOPCIProxy *proxy = opaque;
>> +    bool changed = (oldval ^ newval) & VIRTIO_CONFIG_S_DRIVER_OK;
>> +    bool driver_ok = newval & VIRTIO_CONFIG_S_DRIVER_OK;
>> +
>> +    /* Take down host notifiers after virtio device releases them */
>> +    if (changed && !driver_ok &&
>> +        (proxy->flags & VIRTIO_PCI_FLAG_USE_IOEVENTFD)) {
>> +        virtio_pci_set_host_notifiers(proxy, false);
>> +    }
>> +}
>> +
>
> This old/new value in the API looks hacky ... how about either
> 1. only invoking callbacks on a real change
> or
> 2. using another flag to save state, making virtio_pci_set_host_notifiers
> idempotent

Okay, will see if there is a better solution.
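
For example, option 2 could look something like the sketch below.  It
assumes a new (hypothetical, not in this patch) bool ioeventfd_started
field in VirtIOPCIProxy that remembers whether host notifiers are
currently assigned, so the start/stop helpers become idempotent and can
be driven from any status write without comparing old and new values:

/* Untested sketch: ioeventfd_started is an assumed new field in
 * VirtIOPCIProxy that tracks whether host notifiers are assigned. */
static void virtio_pci_start_ioeventfd(VirtIOPCIProxy *proxy)
{
    if (proxy->ioeventfd_started ||
        !(proxy->flags & VIRTIO_PCI_FLAG_USE_IOEVENTFD)) {
        return;
    }
    if (virtio_pci_set_host_notifiers(proxy, true) < 0) {
        /* Fall back to the synchronous notify path. */
        proxy->flags &= ~VIRTIO_PCI_FLAG_USE_IOEVENTFD;
        return;
    }
    proxy->ioeventfd_started = true;
}

static void virtio_pci_stop_ioeventfd(VirtIOPCIProxy *proxy)
{
    if (!proxy->ioeventfd_started) {
        return;
    }
    virtio_pci_set_host_notifiers(proxy, false);
    proxy->ioeventfd_started = false;
}

/* Illustrative single status hook (the name is made up): */
static void virtio_pci_status_changed(VirtIOPCIProxy *proxy, uint8_t status)
{
    if (status & VIRTIO_CONFIG_S_DRIVER_OK) {
        virtio_pci_start_ioeventfd(proxy);
    } else {
        virtio_pci_stop_ioeventfd(proxy);
    }
}

That would also keep the virtio_reset() path safe, since stopping when
nothing was started is a no-op.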

>
>>  static void virtio_pci_reset(DeviceState *d)
>>  {
>>      VirtIOPCIProxy *proxy = container_of(d, VirtIOPCIProxy, pci_dev.qdev);
>>      virtio_reset(proxy->vdev);
>>      msix_reset(&proxy->pci_dev);
>> -    proxy->flags = 0;
>> +    proxy->flags &= ~VIRTIO_PCI_FLAG_BUS_MASTER_BUG;
>>  }
>>
>>  static void virtio_ioport_write(void *opaque, uint32_t addr, uint32_t val)
>> @@ -480,30 +598,12 @@ assign_error:
>>  static int virtio_pci_set_host_notifier(void *opaque, int n, bool assign)
>
> Might be easier to open-code this.
>
>>  {
>>      VirtIOPCIProxy *proxy = opaque;
>> -    VirtQueue *vq = virtio_get_queue(proxy->vdev, n);
>> -    EventNotifier *notifier = virtio_queue_get_host_notifier(vq);
>> -    int r;
>> -    if (assign) {
>> -        r = event_notifier_init(notifier, 1);
>> -        if (r < 0) {
>> -            return r;
>> -        }
>> -        r = kvm_set_ioeventfd_pio_word(event_notifier_get_fd(notifier),
>> -                                       proxy->addr + VIRTIO_PCI_QUEUE_NOTIFY,
>> -                                       n, assign);
>> -        if (r < 0) {
>> -            event_notifier_cleanup(notifier);
>> -        }
>> +    if (proxy->flags & VIRTIO_PCI_FLAG_USE_IOEVENTFD) {
>
> tested both here and by callers?

This is the external API that we give to virtio devices.  We don't
call this function ourselves.  Only vhost will call it.

If ioeventfd is being used, we just need to turn off the fd handler so
the caller can do what they please with the fd.  And when they
deassign, we need to register the fd handler again so that we get
virtqueue kick notifications.

If ioeventfd is not being used, we use the old behavior of
assigning/deassigning the ioeventfd on behalf of the caller.
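
In other words, something along these lines (sketch only, since the hunk
is snipped above; it reuses the helpers introduced earlier in the patch):

static int virtio_pci_set_host_notifier(void *opaque, int n, bool assign)
{
    VirtIOPCIProxy *proxy = opaque;

    if (proxy->flags & VIRTIO_PCI_FLAG_USE_IOEVENTFD) {
        /* The ioeventfd stays assigned while VIRTIO_CONFIG_S_DRIVER_OK
         * is set.  On assign, drop our fd handler so the caller (vhost)
         * can poll the eventfd itself; on deassign, reinstall our
         * handler so we receive virtqueue kicks again. */
        virtio_pci_set_host_notifier_fd_handler(proxy, n, !assign);
        return 0;
    }

    /* ioeventfd disabled: keep the old behavior and assign/deassign
     * the ioeventfd on behalf of the caller. */
    return virtio_pci_set_host_notifier_ioeventfd(proxy, n, assign);
}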

Stefan


