From: marcandre.lureau
Subject: [Qemu-devel] [PATCH 07/24] vhost: use error_report() instead of fprintf(stderr, ...)
Date: Tue, 21 Jun 2016 12:02:35 +0200

From: Marc-André Lureau <address@hidden>

Let's use QEMU's proper error reporting API.
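
A minimal sketch of the conversion pattern, assuming error_report() from
include/qemu/error-report.h (QEMU's printf-style reporter, which writes to
the current error destination and appends the newline itself, so the trailing
"\n" and any explicit fflush(stderr) can be dropped):

    #include "qemu/osdep.h"
    #include "qemu/error-report.h"

    /* before */
    fprintf(stderr, "vhost VQ %d ring restore failed: %d\n", idx, r);
    fflush(stderr);

    /* after: same format string minus the trailing newline, no flush needed */
    error_report("vhost VQ %d ring restore failed: %d", idx, r);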

Signed-off-by: Marc-André Lureau <address@hidden>
---
 hw/virtio/vhost.c | 21 +++++++++------------
 1 file changed, 9 insertions(+), 12 deletions(-)

diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
index e2d1614..4e23d73 100644
--- a/hw/virtio/vhost.c
+++ b/hw/virtio/vhost.c
@@ -425,11 +425,11 @@ static int vhost_verify_ring_mappings(struct vhost_dev *dev,
         l = vq->ring_size;
         p = cpu_physical_memory_map(vq->ring_phys, &l, 1);
         if (!p || l != vq->ring_size) {
-            fprintf(stderr, "Unable to map ring buffer for ring %d\n", i);
+            error_report("Unable to map ring buffer for ring %d", i);
             r = -ENOMEM;
         }
         if (p != vq->ring) {
-            fprintf(stderr, "Ring buffer relocated for ring %d\n", i);
+            error_report("Ring buffer relocated for ring %d", i);
             r = -EBUSY;
         }
         cpu_physical_memory_unmap(p, l, 0, 0);
@@ -928,8 +928,7 @@ static void vhost_virtqueue_stop(struct vhost_dev *dev,
 
     r = dev->vhost_ops->vhost_get_vring_base(dev, &state);
     if (r < 0) {
-        fprintf(stderr, "vhost VQ %d ring restore failed: %d\n", idx, r);
-        fflush(stderr);
+        error_report("vhost VQ %d ring restore failed: %d", idx, r);
     }
     virtio_queue_set_last_avail_idx(vdev, idx, state.num);
     virtio_queue_invalidate_signalled_used(vdev, idx);
@@ -1016,8 +1015,8 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
     }
 
     if (used_memslots > hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
-        fprintf(stderr, "vhost backend memory slots limit is less"
-                " than current number of present memory slots\n");
+        error_report("vhost backend memory slots limit is less"
+                " than current number of present memory slots");
         close((uintptr_t)opaque);
         return -1;
     }
@@ -1119,7 +1118,7 @@ int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
     int i, r, e;
     if (!k->set_host_notifier) {
-        fprintf(stderr, "binding does not support host notifiers\n");
+        error_report("binding does not support host notifiers");
         r = -ENOSYS;
         goto fail;
     }
@@ -1127,7 +1126,7 @@ int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
     for (i = 0; i < hdev->nvqs; ++i) {
         r = k->set_host_notifier(qbus->parent, hdev->vq_index + i, true);
         if (r < 0) {
-            fprintf(stderr, "vhost VQ %d notifier binding failed: %d\n", i, 
-r);
+            error_report("vhost VQ %d notifier binding failed: %d", i, -r);
             goto fail_vq;
         }
     }
@@ -1137,8 +1136,7 @@ fail_vq:
     while (--i >= 0) {
         e = k->set_host_notifier(qbus->parent, hdev->vq_index + i, false);
         if (e < 0) {
-            fprintf(stderr, "vhost VQ %d notifier cleanup error: %d\n", i, -r);
-            fflush(stderr);
+            error_report("vhost VQ %d notifier cleanup error: %d", i, -r);
         }
         assert (e >= 0);
     }
@@ -1161,8 +1159,7 @@ void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
     for (i = 0; i < hdev->nvqs; ++i) {
         r = k->set_host_notifier(qbus->parent, hdev->vq_index + i, false);
         if (r < 0) {
-            fprintf(stderr, "vhost VQ %d notifier cleanup failed: %d\n", i, 
-r);
-            fflush(stderr);
+            error_report("vhost VQ %d notifier cleanup failed: %d", i, -r);
         }
         assert (r >= 0);
     }
-- 
2.7.4



