qemu-devel
[Top][All Lists]
Advanced

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Qemu-devel] [PATCH v2 1/1] vhost-user interrupt management fixes


From: Victor Kaplansky
Subject: [Qemu-devel] [PATCH v2 1/1] vhost-user interrupt management fixes
Date: Tue, 16 Feb 2016 15:28:06 +0200

From: Didier Pallard <address@hidden>

Since guest_notifier_mask can not be used in vhost-user mode due
to buffering implied by unix control socket, force
use_mask_notifier on virtio devices of vhost-user interfaces, and
send correct callfd to the guest at vhost start.

Using guest_notifier_mask function in vhost-user case may
break interrupt mask paradigm, because mask/unmask is not
really done when returning from guest_notifier_mask call, instead
message is posted in a unix socket, and processed later.

Add an optional boolean flag 'use_mask_notifier' to disable the use
of guest_notifier_mask in virtio pci.

Signed-off-by: Victor Kaplansky <address@hidden>
---
 include/hw/virtio/virtio.h |  1 +
 hw/net/vhost_net.c         | 24 ++++++++++++++++++++++--
 hw/virtio/vhost.c          | 13 +++++++++++++
 hw/virtio/virtio-pci.c     | 15 +++++++++------
 hw/virtio/virtio.c         |  1 +
 5 files changed, 46 insertions(+), 8 deletions(-)

diff --git a/include/hw/virtio/virtio.h b/include/hw/virtio/virtio.h
index 108cdb0f..3acbf999 100644
--- a/include/hw/virtio/virtio.h
+++ b/include/hw/virtio/virtio.h
@@ -90,6 +90,7 @@ struct VirtIODevice
     VMChangeStateEntry *vmstate;
     char *bus_name;
     uint8_t device_endian;
+    bool use_mask_notifier;
     QLIST_HEAD(, VirtQueue) *vector_queues;
 };
 
diff --git a/hw/net/vhost_net.c b/hw/net/vhost_net.c
index 3940a04b..22ba5571 100644
--- a/hw/net/vhost_net.c
+++ b/hw/net/vhost_net.c
@@ -17,6 +17,7 @@
 #include "net/net.h"
 #include "net/tap.h"
 #include "net/vhost-user.h"
+#include "hw/virtio/virtio-pci.h"
 
 #include "hw/virtio/virtio-net.h"
 #include "net/vhost_net.h"
@@ -306,13 +307,32 @@ int vhost_net_start(VirtIODevice *dev, NetClientState *ncs,
     }
 
     for (j = 0; j < total_queues; j++) {
+        struct vhost_net *net;
+
         r = vhost_net_set_vnet_endian(dev, ncs[j].peer, true);
         if (r < 0) {
             goto err_endian;
         }
-        vhost_net_set_vq_index(get_vhost_net(ncs[j].peer), j * 2);
-    }
 
+        net = get_vhost_net(ncs[j].peer);
+        vhost_net_set_vq_index(net, j * 2);
+
+        /* Force use_mask_notifier reset in vhost user case
+         * Must be done before set_guest_notifier call
+         */
+        if (net->nc->info->type == NET_CLIENT_OPTIONS_KIND_VHOST_USER) {
+            BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(dev)));
+            DeviceState *d = DEVICE(qbus->parent);
+            if (!strcmp(object_get_typename(OBJECT(d)), TYPE_VIRTIO_NET_PCI)) {
+                VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
+                VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
+
+                /* Force virtual device not use mask notifier */
+                vdev->use_mask_notifier = false;
+            }
+        }
+    }
+
     r = k->set_guest_notifiers(qbus->parent, total_queues * 2, true);
     if (r < 0) {
         error_report("Error binding guest notifier: %d", -r);
diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
index 7dff7554..80744386 100644
--- a/hw/virtio/vhost.c
+++ b/hw/virtio/vhost.c
@@ -855,8 +855,21 @@ static int vhost_virtqueue_start(struct vhost_dev *dev,
     /* Clear and discard previous events if any. */
     event_notifier_test_and_clear(&vq->masked_notifier);
 
+    /* If vhost user, register now the call eventfd, guest_notifier_mask
+     * function is not used anymore
+     */
+    if (dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER) {
+        file.fd = event_notifier_get_fd(virtio_queue_get_guest_notifier(vvq));
+        r = dev->vhost_ops->vhost_set_vring_call(dev, &file);
+        if (r) {
+            r = -errno;
+            goto fail_call;
+        }
+    }
+
     return 0;
 
+fail_call:
 fail_kick:
 fail_alloc:
     cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx),
diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c
index 5494ff4a..70f64cf7 100644
--- a/hw/virtio/virtio-pci.c
+++ b/hw/virtio/virtio-pci.c
@@ -806,7 +806,7 @@ static int kvm_virtio_pci_vector_use(VirtIOPCIProxy *proxy, int nvqs)
         /* If guest supports masking, set up irqfd now.
          * Otherwise, delay until unmasked in the frontend.
          */
-        if (k->guest_notifier_mask) {
+        if (vdev->use_mask_notifier && k->guest_notifier_mask) {
             ret = kvm_virtio_pci_irqfd_use(proxy, queue_no, vector);
             if (ret < 0) {
                 kvm_virtio_pci_vq_vector_release(proxy, vector);
@@ -822,7 +822,7 @@ undo:
         if (vector >= msix_nr_vectors_allocated(dev)) {
             continue;
         }
-        if (k->guest_notifier_mask) {
+        if (vdev->use_mask_notifier && k->guest_notifier_mask) {
             kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
         }
         kvm_virtio_pci_vq_vector_release(proxy, vector);
@@ -849,7 +849,7 @@ static void kvm_virtio_pci_vector_release(VirtIOPCIProxy *proxy, int nvqs)
         /* If guest supports masking, clean up irqfd now.
          * Otherwise, it was cleaned when masked in the frontend.
          */
-        if (k->guest_notifier_mask) {
+        if (vdev->use_mask_notifier && k->guest_notifier_mask) {
             kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
         }
         kvm_virtio_pci_vq_vector_release(proxy, vector);
@@ -882,7 +882,7 @@ static int virtio_pci_vq_vector_unmask(VirtIOPCIProxy *proxy,
     /* If guest supports masking, irqfd is already setup, unmask it.
      * Otherwise, set it up now.
      */
-    if (k->guest_notifier_mask) {
+    if (vdev->use_mask_notifier && k->guest_notifier_mask) {
         k->guest_notifier_mask(vdev, queue_no, false);
         /* Test after unmasking to avoid losing events. */
         if (k->guest_notifier_pending &&
@@ -905,7 +905,7 @@ static void virtio_pci_vq_vector_mask(VirtIOPCIProxy *proxy,
     /* If guest supports masking, keep irqfd but mask it.
      * Otherwise, clean it up now.
      */ 
-    if (k->guest_notifier_mask) {
+    if (vdev->use_mask_notifier && k->guest_notifier_mask) {
         k->guest_notifier_mask(vdev, queue_no, true);
     } else {
         kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
@@ -1022,7 +1022,9 @@ static int virtio_pci_set_guest_notifier(DeviceState *d, int n, bool assign,
         event_notifier_cleanup(notifier);
     }
 
-    if (!msix_enabled(&proxy->pci_dev) && vdc->guest_notifier_mask) {
+    if (!msix_enabled(&proxy->pci_dev) &&
+        vdev->use_mask_notifier &&
+        vdc->guest_notifier_mask) {
         vdc->guest_notifier_mask(vdev, n, !assign);
     }
 
@@ -1879,6 +1881,7 @@ static Property virtio_pci_properties[] = {
                     VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY_BIT, false),
     DEFINE_PROP_BIT("x-disable-pcie", VirtIOPCIProxy, flags,
                     VIRTIO_PCI_FLAG_DISABLE_PCIE_BIT, false),
+
     DEFINE_PROP_END_OF_LIST(),
 };
 
diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
index 90f25451..c0238b39 100644
--- a/hw/virtio/virtio.c
+++ b/hw/virtio/virtio.c
@@ -792,6 +792,7 @@ void virtio_reset(void *opaque)
     vdev->queue_sel = 0;
     vdev->status = 0;
     vdev->isr = 0;
+    vdev->use_mask_notifier = true;
     vdev->config_vector = VIRTIO_NO_VECTOR;
     virtio_notify_vector(vdev, vdev->config_vector);
 
-- 
Victor



reply via email to

[Prev in Thread] Current Thread [Next in Thread]