[Qemu-devel] [PATCH] ioeventfd: error handling cleanup


From: Michael S. Tsirkin
Subject: [Qemu-devel] [PATCH] ioeventfd: error handling cleanup
Date: Tue, 11 Jan 2011 13:49:18 +0200
User-agent: Mutt/1.5.21 (2010-09-15)

- Don't return status from start/stop functions where it's ignored
- Report errors to make debugging easier
- Assert on unexpected failures
- Don't disable notifiers on error, so that we retry when the guest
  driver restarts

Signed-off-by: Michael S. Tsirkin <address@hidden>

--

Stefan, could you review/test this and ack/nack appropriately
please?
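
For reference, here is a rough standalone sketch (not part of the patch)
of the error-handling pattern the changelog describes: report failures,
assert on rollback paths that are not expected to fail, and set no
"disabled" flag on error so a guest driver restart can retry the
ioeventfd path. The names fake_assign_notifier(), NUM_QUEUES and
start_ioeventfd() are hypothetical stand-ins for the virtio-pci helpers,
not code from this patch.

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

#define NUM_QUEUES 4

static bool ioeventfd_started;

/* Hypothetical stand-in for the kvm ioeventfd assign/deassign helper;
 * returns a negative errno-style value on failure. */
static int fake_assign_notifier(int queue, bool assign)
{
    (void)queue;
    (void)assign;
    return 0;
}

static void start_ioeventfd(void)
{
    int n, r;

    for (n = 0; n < NUM_QUEUES; n++) {
        r = fake_assign_notifier(n, true);
        if (r < 0) {
            /* Report the error instead of silently giving up. */
            fprintf(stderr, "%s: unable to map ioeventfd: %d\n", __func__, r);
            goto assign_error;
        }
    }
    ioeventfd_started = true;
    return;

assign_error:
    /* Roll back the queues that were assigned; deassign is not expected
     * to fail, so treat a failure there as a programming error. */
    while (--n >= 0) {
        r = fake_assign_notifier(n, false);
        assert(r >= 0);
    }
    /* No "disabled" flag is set here, so a later guest driver restart
     * can retry the ioeventfd path. */
    ioeventfd_started = false;
    fprintf(stderr, "%s: failed, falling back to userspace (slower)\n",
            __func__);
}

int main(void)
{
    start_ioeventfd();
    return ioeventfd_started ? 0 : 1;
}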

diff --git a/hw/virtio-pci.c b/hw/virtio-pci.c
index 70c40ee..d07ff97 100644
--- a/hw/virtio-pci.c
+++ b/hw/virtio-pci.c
@@ -195,12 +195,16 @@ static int virtio_pci_set_host_notifier_internal(VirtIOPCIProxy *proxy,
     if (assign) {
         r = event_notifier_init(notifier, 1);
         if (r < 0) {
+            error_report("%s: unable to init event notifier: %d",
+                         __func__, r);
             return r;
         }
         r = kvm_set_ioeventfd_pio_word(event_notifier_get_fd(notifier),
                                        proxy->addr + VIRTIO_PCI_QUEUE_NOTIFY,
                                        n, assign);
         if (r < 0) {
+            error_report("%s: unable to map ioeventfd: %d",
+                         __func__, r);
             event_notifier_cleanup(notifier);
         }
     } else {
@@ -208,6 +212,8 @@ static int virtio_pci_set_host_notifier_internal(VirtIOPCIProxy *proxy,
                                        proxy->addr + VIRTIO_PCI_QUEUE_NOTIFY,
                                        n, assign);
         if (r < 0) {
+            error_report("%s: unable to unmap ioeventfd: %d",
+                         __func__, r);
             return r;
         }
 
@@ -246,14 +252,14 @@ static void virtio_pci_set_host_notifier_fd_handler(VirtIOPCIProxy *proxy,
     }
 }
 
-static int virtio_pci_start_ioeventfd(VirtIOPCIProxy *proxy)
+static void virtio_pci_start_ioeventfd(VirtIOPCIProxy *proxy)
 {
     int n, r;
 
     if (!(proxy->flags & VIRTIO_PCI_FLAG_USE_IOEVENTFD) ||
         proxy->ioeventfd_disabled ||
         proxy->ioeventfd_started) {
-        return 0;
+        return;
     }
 
     for (n = 0; n < VIRTIO_PCI_QUEUE_MAX; n++) {
@@ -269,7 +275,7 @@ static int virtio_pci_start_ioeventfd(VirtIOPCIProxy *proxy)
         virtio_pci_set_host_notifier_fd_handler(proxy, n, true);
     }
     proxy->ioeventfd_started = true;
-    return 0;
+    return;
 
 assign_error:
     while (--n >= 0) {
@@ -278,19 +284,20 @@ assign_error:
         }
 
         virtio_pci_set_host_notifier_fd_handler(proxy, n, false);
-        virtio_pci_set_host_notifier_internal(proxy, n, false);
+        r = virtio_pci_set_host_notifier_internal(proxy, n, false);
+        assert(r >= 0);
     }
     proxy->ioeventfd_started = false;
-    proxy->ioeventfd_disabled = true;
-    return r;
+    error_report("%s: failed. Fallback to a userspace (slower).", __func__);
 }
 
-static int virtio_pci_stop_ioeventfd(VirtIOPCIProxy *proxy)
+static void virtio_pci_stop_ioeventfd(VirtIOPCIProxy *proxy)
 {
+    int r;
     int n;
 
     if (!proxy->ioeventfd_started) {
-        return 0;
+        return;
     }
 
     for (n = 0; n < VIRTIO_PCI_QUEUE_MAX; n++) {
@@ -299,10 +306,10 @@ static int virtio_pci_stop_ioeventfd(VirtIOPCIProxy *proxy)
         }
 
         virtio_pci_set_host_notifier_fd_handler(proxy, n, false);
-        virtio_pci_set_host_notifier_internal(proxy, n, false);
+        r = virtio_pci_set_host_notifier_internal(proxy, n, false);
+        assert(r >= 0);
     }
     proxy->ioeventfd_started = false;
-    return 0;
 }
 
 static void virtio_pci_reset(DeviceState *d)
@@ -706,7 +713,6 @@ static void virtio_init_pci(VirtIOPCIProxy *proxy, VirtIODevice *vdev,
     proxy->host_features |= 0x1 << VIRTIO_F_NOTIFY_ON_EMPTY;
     proxy->host_features |= 0x1 << VIRTIO_F_BAD_FEATURE;
     proxy->host_features = vdev->get_features(vdev, proxy->host_features);
-
 }
 
 static int virtio_blk_init_pci(PCIDevice *pci_dev)


