From: Michael S. Tsirkin
Subject: [Qemu-block] [PATCH RFC] virtio blk dataplane: aio virtio handler
Date: Tue, 29 Mar 2016 15:14:33 +0300

So I'm fairly unhappy about the need to rework host notifiers
at this stage in the release cycle.

I thought about the root cause, and I think the main
issue is that dataplane is now reusing the regular
virtio code, but through aio.

The following, then, is an attempt to work around that for 2.6
without making major changes affecting anyone except dataplane -
it's ugly, but not nearly as ugly as I thought
it would turn out to be, and we can rip it out
after 2.6.

The idea is that scsi dataplane can do a similar hack.
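For illustration, here is a rough sketch of what that could look like on
the scsi side - the handler below and the virtio_scsi_handle_cmd_vq helper
are made up for this example, they are not part of this patch:

    /* sketch only: a scsi dataplane handler registered via virtio_set_queue_aio */
    static void virtio_scsi_data_plane_handle_cmd(VirtIODevice *vdev,
                                                  VirtQueue *vq)
    {
        VirtIOSCSI *s = VIRTIO_SCSI(vdev);

        /* only ever called from the iothread once dataplane is running */
        assert(s->dataplane_started);

        /* shared cmd processing, split out of the regular handler
         * (virtio_scsi_handle_cmd_vq is hypothetical here) */
        virtio_scsi_handle_cmd_vq(s, vq);
    }

    /* when starting scsi dataplane, mirror what virtio-blk does below:
     * (ctx being the iothread AioContext) */
    virtio_set_queue_aio(vq, virtio_scsi_data_plane_handle_cmd);
    virtio_queue_aio_set_host_notifier_handler(vq, ctx, true, true);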

Note: compiled only, sending out for early flames/feedback.

--->

Subject: [PATCH RFC] virtio blk dataplane: aio virtio handler

In addition to handling IO in the vcpu thread and
in the io thread, blk dataplane introduces yet another mode:
handling it by aio.

This reuses the same handlers as the previous modes,
which triggers races, as these handlers were not designed to be reentrant.

As a temporary fix, add a separate handler just for aio,
and make regular handlers do nothing when dataplane is active.

Signed-off-by: Michael S. Tsirkin <address@hidden>
---
 include/hw/virtio/virtio-blk.h  |  2 ++
 include/hw/virtio/virtio.h      |  4 ++++
 hw/block/dataplane/virtio-blk.c | 16 ++++++++++++++++
 hw/block/virtio-blk.c           | 22 +++++++++++++++-------
 hw/virtio/virtio.c              | 36 ++++++++++++++++++++++++++++++++----
 5 files changed, 69 insertions(+), 11 deletions(-)

diff --git a/include/hw/virtio/virtio-blk.h b/include/hw/virtio/virtio-blk.h
index ae84d92..df517ff 100644
--- a/include/hw/virtio/virtio-blk.h
+++ b/include/hw/virtio/virtio-blk.h
@@ -85,4 +85,6 @@ void virtio_blk_handle_request(VirtIOBlockReq *req, MultiReqBuffer *mrb);
 
 void virtio_blk_submit_multireq(BlockBackend *blk, MultiReqBuffer *mrb);
 
+void virtio_blk_handle_vq(VirtIOBlock *s, VirtQueue *vq);
+
 #endif
diff --git a/include/hw/virtio/virtio.h b/include/hw/virtio/virtio.h
index 2b5b248..c032067 100644
--- a/include/hw/virtio/virtio.h
+++ b/include/hw/virtio/virtio.h
@@ -142,6 +142,9 @@ VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
                             void (*handle_output)(VirtIODevice *,
                                                   VirtQueue *));
 
+void virtio_set_queue_aio(VirtQueue *vq,
+                          void (*handle_output)(VirtIODevice *, VirtQueue *));
+
 void virtio_del_queue(VirtIODevice *vdev, int n);
 
 void *virtqueue_alloc_element(size_t sz, unsigned out_num, unsigned in_num);
@@ -253,6 +256,7 @@ void virtio_queue_set_host_notifier_fd_handler(VirtQueue *vq, bool assign,
 void virtio_queue_aio_set_host_notifier_handler(VirtQueue *vq, AioContext *ctx,
                                                 bool assign, bool set_handler);
 void virtio_queue_notify_vq(VirtQueue *vq);
+void virtio_queue_notify_aio_vq(VirtQueue *vq);
 void virtio_irq(VirtQueue *vq);
 VirtQueue *virtio_vector_first_queue(VirtIODevice *vdev, uint16_t vector);
 VirtQueue *virtio_vector_next_queue(VirtQueue *vq);
diff --git a/hw/block/dataplane/virtio-blk.c b/hw/block/dataplane/virtio-blk.c
index 36f3d2b..72dbce8 100644
--- a/hw/block/dataplane/virtio-blk.c
+++ b/hw/block/dataplane/virtio-blk.c
@@ -184,6 +184,20 @@ void virtio_blk_data_plane_destroy(VirtIOBlockDataPlane *s)
     g_free(s);
 }
 
+static void virtio_blk_data_plane_handle_output(VirtIODevice *vdev,
+                                                VirtQueue *vq)
+{
+    VirtIOBlock *s = VIRTIO_BLK(vdev);
+
+    if (!s->dataplane) {
+        return;
+    }
+
+    assert(s->dataplane_started);
+
+    virtio_blk_handle_vq(s, vq);
+}
+
 /* Context: QEMU global mutex held */
 void virtio_blk_data_plane_start(VirtIOBlockDataPlane *s)
 {
@@ -226,6 +240,7 @@ void virtio_blk_data_plane_start(VirtIOBlockDataPlane *s)
 
     /* Get this show started by hooking up our callbacks */
     aio_context_acquire(s->ctx);
+    virtio_set_queue_aio(s->vq, virtio_blk_data_plane_handle_output);
     virtio_queue_aio_set_host_notifier_handler(s->vq, s->ctx, true, true);
     aio_context_release(s->ctx);
     return;
@@ -262,6 +277,7 @@ void virtio_blk_data_plane_stop(VirtIOBlockDataPlane *s)
 
     /* Stop notifications for new requests from guest */
     virtio_queue_aio_set_host_notifier_handler(s->vq, s->ctx, false, false);
+    virtio_set_queue_aio(s->vq, NULL);
 
     /* Drain and switch bs back to the QEMU main loop */
     blk_set_aio_context(s->conf->conf.blk, qemu_get_aio_context());
diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c
index cb710f1..5aa884d 100644
--- a/hw/block/virtio-blk.c
+++ b/hw/block/virtio-blk.c
@@ -577,17 +577,18 @@ void virtio_blk_handle_request(VirtIOBlockReq *req, MultiReqBuffer *mrb)
     }
 }
 
-static void virtio_blk_handle_output(VirtIODevice *vdev, VirtQueue *vq)
+void virtio_blk_handle_vq(VirtIOBlock *s, VirtQueue *vq)
 {
-    VirtIOBlock *s = VIRTIO_BLK(vdev);
     VirtIOBlockReq *req;
     MultiReqBuffer mrb = {};
 
-    /* Some guests kick before setting VIRTIO_CONFIG_S_DRIVER_OK so start
-     * dataplane here instead of waiting for .set_status().
-     */
-    if (s->dataplane && !s->dataplane_started) {
-        virtio_blk_data_plane_start(s->dataplane);
+    if (s->dataplane) {
+        /* Some guests kick before setting VIRTIO_CONFIG_S_DRIVER_OK so start
+         * dataplane here instead of waiting for .set_status().
+         */
+        if (!s->dataplane_started) {
+            virtio_blk_data_plane_start(s->dataplane);
+        }
         return;
     }
 
@@ -604,6 +605,13 @@ static void virtio_blk_handle_output(VirtIODevice *vdev, VirtQueue *vq)
     blk_io_unplug(s->blk);
 }
 
+static void virtio_blk_handle_output(VirtIODevice *vdev, VirtQueue *vq)
+{
+    VirtIOBlock *s = VIRTIO_BLK(vdev);
+
+    virtio_blk_handle_vq(s, vq);
+}
+
 static void virtio_blk_dma_restart_bh(void *opaque)
 {
     VirtIOBlock *s = opaque;
diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
index 08275a9..182bc56 100644
--- a/hw/virtio/virtio.c
+++ b/hw/virtio/virtio.c
@@ -94,6 +94,7 @@ struct VirtQueue
 
     uint16_t vector;
     void (*handle_output)(VirtIODevice *vdev, VirtQueue *vq);
+    void (*handle_aio_output)(VirtIODevice *vdev, VirtQueue *vq);
     VirtIODevice *vdev;
     EventNotifier guest_notifier;
     EventNotifier host_notifier;
@@ -1086,6 +1087,16 @@ void virtio_queue_set_align(VirtIODevice *vdev, int n, int align)
     virtio_queue_update_rings(vdev, n);
 }
 
+void virtio_queue_notify_aio_vq(VirtQueue *vq)
+{
+    if (vq->vring.desc && vq->handle_aio_output) {
+        VirtIODevice *vdev = vq->vdev;
+
+        trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
+        vq->handle_aio_output(vdev, vq);
+    }
+}
+
 void virtio_queue_notify_vq(VirtQueue *vq)
 {
     if (vq->vring.desc && vq->handle_output) {
@@ -1141,10 +1152,19 @@ VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
     vdev->vq[i].vring.num_default = queue_size;
     vdev->vq[i].vring.align = VIRTIO_PCI_VRING_ALIGN;
     vdev->vq[i].handle_output = handle_output;
+    vdev->vq[i].handle_aio_output = NULL;
 
     return &vdev->vq[i];
 }
 
+void virtio_set_queue_aio(VirtQueue *vq,
+                          void (*handle_output)(VirtIODevice *, VirtQueue *))
+{
+    assert(vq->handle_output);
+
+    vq->handle_aio_output = handle_output;
+}
+
 void virtio_del_queue(VirtIODevice *vdev, int n)
 {
     if (n < 0 || n >= VIRTIO_QUEUE_MAX) {
@@ -1778,11 +1798,11 @@ EventNotifier *virtio_queue_get_guest_notifier(VirtQueue *vq)
     return &vq->guest_notifier;
 }
 
-static void virtio_queue_host_notifier_read(EventNotifier *n)
+static void virtio_queue_host_notifier_aio_read(EventNotifier *n)
 {
     VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
     if (event_notifier_test_and_clear(n)) {
-        virtio_queue_notify_vq(vq);
+        virtio_queue_notify_aio_vq(vq);
     }
 }
 
@@ -1791,14 +1811,22 @@ void virtio_queue_aio_set_host_notifier_handler(VirtQueue *vq, AioContext *ctx,
 {
     if (assign && set_handler) {
         aio_set_event_notifier(ctx, &vq->host_notifier, true,
-                               virtio_queue_host_notifier_read);
+                               virtio_queue_host_notifier_aio_read);
     } else {
         aio_set_event_notifier(ctx, &vq->host_notifier, true, NULL);
     }
     if (!assign) {
         /* Test and clear notifier before after disabling event,
          * in case poll callback didn't have time to run. */
-        virtio_queue_host_notifier_read(&vq->host_notifier);
+        virtio_queue_host_notifier_aio_read(&vq->host_notifier);
+    }
+}
+
+static void virtio_queue_host_notifier_read(EventNotifier *n)
+{
+    VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
+    if (event_notifier_test_and_clear(n)) {
+        virtio_queue_notify_vq(vq);
     }
 }
 
-- 
MST


