qemu-devel
[Top][All Lists]
Advanced

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Qemu-devel] [PATCH v3 1/1] vhost-user interrupt management fixes


From: Michael S. Tsirkin
Subject: Re: [Qemu-devel] [PATCH v3 1/1] vhost-user interrupt management fixes
Date: Wed, 17 Feb 2016 12:04:19 +0200

Looks good overall, some comments:

On Wed, Feb 17, 2016 at 11:35:48AM +0200, Victor Kaplansky wrote:
> From: Didier Pallard <address@hidden>
> 
> Since guest_mask_notifier can not be used in vhost-user mode due
> to buffering implied by unix control socket, force
> use_mask_notifier on virtio devices of vhost-user interfaces, and
> send correct callfd to the guest at vhost start.
> 
> Using guest_notifier_mask function in vhost-user case may
> break interrupt mask paradigm, because mask/unmask is not
> really done when returning from guest_notifier_mask call, instead
> message is posted in a unix socket, and processed later.
> 
> Add an option boolean flag 'use_mask_notifier' to disable the use
> of guest_notifier_mask in virtio pci.

changelog (below) should go after --- so that git am
ignores it. We don't want a permanent record of the changelog.

> v3 changes:
>  In respond to Michael S. Tsirkin comments:
>    - vhost_net.c: removed dependency on virtio-pci.h
>    - vhost.c: simplified the check for vhost-user backend,
>      replaced by checking use_mask_notifier; added comment
>      explaining why vring for vhost-user initialized in
>      unmasked state;
>    - cosmetic fixes.
> 
> v2 changes:
>  - a new boolean field is added to all virtio devices instead
>    of defining a property in some virtio-pci devices.
> 
> 

Avoid double empty lines pls.

> Signed-off-by: Victor Kaplansky <address@hidden>
> ---
>  include/hw/virtio/virtio.h |  1 +
>  hw/net/vhost_net.c         | 16 ++++++++++++++--
>  hw/virtio/vhost.c          | 13 +++++++++++++
>  hw/virtio/virtio-pci.c     | 14 ++++++++------
>  hw/virtio/virtio.c         |  1 +
>  5 files changed, 37 insertions(+), 8 deletions(-)
> 
> diff --git a/include/hw/virtio/virtio.h b/include/hw/virtio/virtio.h
> index 108cdb0f..3acbf999 100644
> --- a/include/hw/virtio/virtio.h
> +++ b/include/hw/virtio/virtio.h
> @@ -90,6 +90,7 @@ struct VirtIODevice
>      VMChangeStateEntry *vmstate;
>      char *bus_name;
>      uint8_t device_endian;
> +    bool use_mask_notifier;


Hmm, it isn't a mask notifier that we use.
This flag tells us whether the device can — and whether we should — mask guest notifiers.

So enable_guest_notifier_mask ?

vhost_virtqueue_mask should also probably assert that this is set
if mask == true.


>      QLIST_HEAD(, VirtQueue) *vector_queues;
>  };
>  
> diff --git a/hw/net/vhost_net.c b/hw/net/vhost_net.c
> index 3940a04b..8c1ccadb 100644
> --- a/hw/net/vhost_net.c
> +++ b/hw/net/vhost_net.c
> @@ -306,13 +306,25 @@ int vhost_net_start(VirtIODevice *dev, NetClientState 
> *ncs,
>      }
>  
>      for (j = 0; j < total_queues; j++) {
> +        struct vhost_net *net;
> +
>          r = vhost_net_set_vnet_endian(dev, ncs[j].peer, true);
>          if (r < 0) {
>              goto err_endian;
>          }
> -        vhost_net_set_vq_index(get_vhost_net(ncs[j].peer), j * 2);
> -    }
>  
> +        net = get_vhost_net(ncs[j].peer);
> +        vhost_net_set_vq_index(net, j * 2);
> +
> +        /* Force use_mask_notifier reset in vhost user case
> +         * Must be done before set_guest_notifier call
> +         */
> +        if (net->nc->info->type == NET_CLIENT_OPTIONS_KIND_VHOST_USER) {
> +                /* Force virtual device not use mask notifier */


A better comment would explain
        - why do we suppress mask notifier for vhost user only
        - what happens otherwise (where is it set to true)

> +                dev->use_mask_notifier = false;
> +        }

Can't the logic be moved to vhost-user.c? That would be cleaner.

> +     }
> + 
>      r = k->set_guest_notifiers(qbus->parent, total_queues * 2, true);
>      if (r < 0) {
>          error_report("Error binding guest notifier: %d", -r);
> diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
> index 7dff7554..f8031fae 100644
> --- a/hw/virtio/vhost.c
> +++ b/hw/virtio/vhost.c
> @@ -855,8 +855,21 @@ static int vhost_virtqueue_start(struct vhost_dev *dev,
>      /* Clear and discard previous events if any. */
>      event_notifier_test_and_clear(&vq->masked_notifier);
>  
> +    /* For vhost user we set vring in unmasked state, since by the
> +     * default it masked, and guest_notifier_mask is not used anymore.
> +     */
> +    if (vdev->use_mask_notifier == false) {

!vdev->use_mask_notifier please.

> +        file.fd = 
> event_notifier_get_fd(virtio_queue_get_guest_notifier(vvq));
> +        r = dev->vhost_ops->vhost_set_vring_call(dev, &file);
> +        if (r) {
> +            r = -errno;
> +            goto fail_call;
> +        }

You can just call vhost_virtqueue_mask with mask == false,
seems a bit cleaner.


> +    }
> +
>      return 0;
>  
> +fail_call:
>  fail_kick:
>  fail_alloc:
>      cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, 
> idx),
> diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c
> index 5494ff4a..64d263ae 100644
> --- a/hw/virtio/virtio-pci.c
> +++ b/hw/virtio/virtio-pci.c
> @@ -806,7 +806,7 @@ static int kvm_virtio_pci_vector_use(VirtIOPCIProxy 
> *proxy, int nvqs)
>          /* If guest supports masking, set up irqfd now.
>           * Otherwise, delay until unmasked in the frontend.
>           */
> -        if (k->guest_notifier_mask) {
> +        if (vdev->use_mask_notifier && k->guest_notifier_mask) {
>              ret = kvm_virtio_pci_irqfd_use(proxy, queue_no, vector);
>              if (ret < 0) {
>                  kvm_virtio_pci_vq_vector_release(proxy, vector);
> @@ -822,7 +822,7 @@ undo:
>          if (vector >= msix_nr_vectors_allocated(dev)) {
>              continue;
>          }
> -        if (k->guest_notifier_mask) {
> +        if (vdev->use_mask_notifier && k->guest_notifier_mask) {
>              kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
>          }
>          kvm_virtio_pci_vq_vector_release(proxy, vector);
> @@ -849,7 +849,7 @@ static void kvm_virtio_pci_vector_release(VirtIOPCIProxy 
> *proxy, int nvqs)
>          /* If guest supports masking, clean up irqfd now.
>           * Otherwise, it was cleaned when masked in the frontend.
>           */
> -        if (k->guest_notifier_mask) {
> +        if (vdev->use_mask_notifier && k->guest_notifier_mask) {
>              kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
>          }
>          kvm_virtio_pci_vq_vector_release(proxy, vector);
> @@ -882,7 +882,7 @@ static int virtio_pci_vq_vector_unmask(VirtIOPCIProxy 
> *proxy,
>      /* If guest supports masking, irqfd is already setup, unmask it.
>       * Otherwise, set it up now.
>       */
> -    if (k->guest_notifier_mask) {
> +    if (vdev->use_mask_notifier && k->guest_notifier_mask) {
>          k->guest_notifier_mask(vdev, queue_no, false);
>          /* Test after unmasking to avoid losing events. */
>          if (k->guest_notifier_pending &&
> @@ -905,7 +905,7 @@ static void virtio_pci_vq_vector_mask(VirtIOPCIProxy 
> *proxy,
>      /* If guest supports masking, keep irqfd but mask it.
>       * Otherwise, clean it up now.
>       */ 
> -    if (k->guest_notifier_mask) {
> +    if (vdev->use_mask_notifier && k->guest_notifier_mask) {
>          k->guest_notifier_mask(vdev, queue_no, true);
>      } else {
>          kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
> @@ -1022,7 +1022,9 @@ static int virtio_pci_set_guest_notifier(DeviceState 
> *d, int n, bool assign,
>          event_notifier_cleanup(notifier);
>      }
>  
> -    if (!msix_enabled(&proxy->pci_dev) && vdc->guest_notifier_mask) {
> +    if (!msix_enabled(&proxy->pci_dev) &&
> +        vdev->use_mask_notifier &&
> +        vdc->guest_notifier_mask) {
>          vdc->guest_notifier_mask(vdev, n, !assign);
>      }
>  
> diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
> index 90f25451..c0238b39 100644
> --- a/hw/virtio/virtio.c
> +++ b/hw/virtio/virtio.c
> @@ -792,6 +792,7 @@ void virtio_reset(void *opaque)
>      vdev->queue_sel = 0;
>      vdev->status = 0;
>      vdev->isr = 0;
> +    vdev->use_mask_notifier = true;
>      vdev->config_vector = VIRTIO_NO_VECTOR;
>      virtio_notify_vector(vdev, vdev->config_vector);

Why isn't this in virtio_init?
Do we need to do this on each reset?

> -- 
> Victor



reply via email to

[Prev in Thread] Current Thread [Next in Thread]