From: Paolo Bonzini
Subject: [Qemu-devel] [PATCH 32/40] block: explicitly acquire aiocontext in callbacks that need it
Date: Tue, 24 Nov 2015 19:01:23 +0100

Signed-off-by: Paolo Bonzini <address@hidden>
---
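Note for reviewers: the mechanical pattern applied to the poll-based drivers
below (curl, iscsi, nfs, nbd) is to split each fd callback into a *_locked
helper plus a thin registered wrapper that takes the AioContext lock.  A
minimal sketch of that pattern — MyDriverState and my_driver_cb are
illustrative names only, not code from this series:

    typedef struct MyDriverState {
        AioContext *aio_context;   /* context this driver's fd lives in */
        /* ... driver state ... */
    } MyDriverState;

    /* Body runs with s->aio_context held; safe to touch driver state. */
    static void my_driver_cb_locked(MyDriverState *s)
    {
        /* ... service the fd ... */
    }

    /* This is what actually gets registered with aio_set_fd_handler(). */
    static void my_driver_cb(void *opaque)
    {
        MyDriverState *s = opaque;

        aio_context_acquire(s->aio_context);
        my_driver_cb_locked(s);
        aio_context_release(s->aio_context);
    }
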
 aio-posix.c                     |  4 ----
 aio-win32.c                     |  6 ------
 block/curl.c                    | 16 +++++++++++---
 block/iscsi.c                   |  4 ++++
 block/nbd-client.c              | 14 ++++++++++--
 block/nfs.c                     |  6 ++++++
 block/sheepdog.c                | 29 +++++++++++++++----------
 block/ssh.c                     | 47 ++++++++++++++++++++---------------------
 block/win32-aio.c               | 10 +++++----
 hw/block/virtio-blk.c           |  5 ++++-
 hw/scsi/virtio-scsi-dataplane.c |  2 ++
 hw/scsi/virtio-scsi.c           |  7 ++++++
 nbd.c                           |  4 ++++
 13 files changed, 99 insertions(+), 55 deletions(-)
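
The coroutine-based drivers (sheepdog, ssh) need a variant of the same idea:
their fd handler re-enters a coroutine, so the opaque pointer grows from a
bare Coroutine * to a small struct carrying both the coroutine and its
AioContext, letting the restart callback lock before entering.  Again a
sketch with illustrative names; the real structs are in the hunks below:

    typedef struct MyRestart {
        Coroutine *co;
        AioContext *ctx;
    } MyRestart;

    static void my_restart_cb(void *opaque)
    {
        MyRestart *restart = opaque;

        aio_context_acquire(restart->ctx);
        qemu_coroutine_enter(restart->co, NULL);
        aio_context_release(restart->ctx);
    }

    /* In the coroutine, before yielding:
     *     MyRestart restart = {
     *         .co  = qemu_coroutine_self(),
     *         .ctx = bdrv_get_aio_context(bs),
     *     };
     *     aio_set_fd_handler(restart.ctx, fd, false,
     *                        my_restart_cb, NULL, &restart);
     *     qemu_coroutine_yield();
     */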

diff --git a/aio-posix.c b/aio-posix.c
index 2b41a02..972f3ff 100644
--- a/aio-posix.c
+++ b/aio-posix.c
@@ -333,9 +333,7 @@ bool aio_dispatch(AioContext *ctx)
         if (!node->deleted &&
             (revents & (G_IO_IN | G_IO_HUP | G_IO_ERR)) &&
             node->io_read) {
-            aio_context_acquire(ctx);
             node->io_read(node->opaque);
-            aio_context_release(ctx);
 
             /* aio_notify() does not count as progress */
             if (node->opaque != &ctx->notifier) {
@@ -345,9 +343,7 @@ bool aio_dispatch(AioContext *ctx)
         if (!node->deleted &&
             (revents & (G_IO_OUT | G_IO_ERR)) &&
             node->io_write) {
-            aio_context_acquire(ctx);
             node->io_write(node->opaque);
-            aio_context_release(ctx);
             progress = true;
         }
 
diff --git a/aio-win32.c b/aio-win32.c
index b025b3d..1b50019 100644
--- a/aio-win32.c
+++ b/aio-win32.c
@@ -244,9 +244,7 @@ static bool aio_dispatch_handlers(AioContext *ctx, HANDLE event)
         if (!node->deleted &&
             (revents || event_notifier_get_handle(node->e) == event) &&
             node->io_notify) {
-            aio_context_acquire(ctx);
             node->io_notify(node->e);
-            aio_context_release(ctx);
 
             /* aio_notify() does not count as progress */
             if (node->e != &ctx->notifier) {
@@ -257,15 +255,11 @@ static bool aio_dispatch_handlers(AioContext *ctx, HANDLE event)
         if (!node->deleted &&
             (node->io_read || node->io_write)) {
             if ((revents & G_IO_IN) && node->io_read) {
-                aio_context_acquire(ctx);
                 node->io_read(node->opaque);
-                aio_context_release(ctx);
                 progress = true;
             }
             if ((revents & G_IO_OUT) && node->io_write) {
-                aio_context_acquire(ctx);
                 node->io_write(node->opaque);
-                aio_context_release(ctx);
                 progress = true;
             }
 
diff --git a/block/curl.c b/block/curl.c
index 8994182..3d7e1cb 100644
--- a/block/curl.c
+++ b/block/curl.c
@@ -332,9 +332,8 @@ static void curl_multi_check_completion(BDRVCURLState *s)
     }
 }
 
-static void curl_multi_do(void *arg)
+static void curl_multi_do_locked(CURLState *s)
 {
-    CURLState *s = (CURLState *)arg;
     int running;
     int r;
 
@@ -348,12 +347,23 @@ static void curl_multi_do(void *arg)
 
 }
 
+static void curl_multi_do(void *arg)
+{
+    CURLState *s = (CURLState *)arg;
+
+    aio_context_acquire(s->s->aio_context);
+    curl_multi_do_locked(s);
+    aio_context_release(s->s->aio_context);
+}
+
 static void curl_multi_read(void *arg)
 {
     CURLState *s = (CURLState *)arg;
 
-    curl_multi_do(arg);
+    aio_context_acquire(s->s->aio_context);
+    curl_multi_do_locked(s);
     curl_multi_check_completion(s->s);
+    aio_context_release(s->s->aio_context);
 }
 
 static void curl_multi_timeout_do(void *arg)
diff --git a/block/iscsi.c b/block/iscsi.c
index bd1f1bf..16c3b44 100644
--- a/block/iscsi.c
+++ b/block/iscsi.c
@@ -380,8 +380,10 @@ iscsi_process_read(void *arg)
     IscsiLun *iscsilun = arg;
     struct iscsi_context *iscsi = iscsilun->iscsi;
 
+    aio_context_acquire(iscsilun->aio_context);
     iscsi_service(iscsi, POLLIN);
     iscsi_set_events(iscsilun);
+    aio_context_release(iscsilun->aio_context);
 }
 
 static void
@@ -390,8 +392,10 @@ iscsi_process_write(void *arg)
     IscsiLun *iscsilun = arg;
     struct iscsi_context *iscsi = iscsilun->iscsi;
 
+    aio_context_acquire(iscsilun->aio_context);
     iscsi_service(iscsi, POLLOUT);
     iscsi_set_events(iscsilun);
+    aio_context_release(iscsilun->aio_context);
 }
 
 static int64_t sector_lun2qemu(int64_t sector, IscsiLun *iscsilun)
diff --git a/block/nbd-client.c b/block/nbd-client.c
index b7fd17a..b0a888d 100644
--- a/block/nbd-client.c
+++ b/block/nbd-client.c
@@ -56,9 +56,8 @@ static void nbd_teardown_connection(BlockDriverState *bs)
     client->sock = -1;
 }
 
-static void nbd_reply_ready(void *opaque)
+static void nbd_reply_ready_locked(BlockDriverState *bs)
 {
-    BlockDriverState *bs = opaque;
     NbdClientSession *s = nbd_get_client_session(bs);
     uint64_t i;
     int ret;
@@ -95,11 +94,22 @@ fail:
     nbd_teardown_connection(bs);
 }
 
+static void nbd_reply_ready(void *opaque)
+{
+    BlockDriverState *bs = opaque;
+
+    aio_context_acquire(bdrv_get_aio_context(bs));
+    nbd_reply_ready_locked(bs);
+    aio_context_release(bdrv_get_aio_context(bs));
+}
+
 static void nbd_restart_write(void *opaque)
 {
     BlockDriverState *bs = opaque;
 
+    aio_context_acquire(bdrv_get_aio_context(bs));
     qemu_coroutine_enter(nbd_get_client_session(bs)->send_coroutine, NULL);
+    aio_context_release(bdrv_get_aio_context(bs));
 }
 
 static int nbd_co_send_request(BlockDriverState *bs,
diff --git a/block/nfs.c b/block/nfs.c
index fd79f89..910a51e 100644
--- a/block/nfs.c
+++ b/block/nfs.c
@@ -75,15 +75,21 @@ static void nfs_set_events(NFSClient *client)
 static void nfs_process_read(void *arg)
 {
     NFSClient *client = arg;
+
+    aio_context_acquire(client->aio_context);
     nfs_service(client->context, POLLIN);
     nfs_set_events(client);
+    aio_context_release(client->aio_context);
 }
 
 static void nfs_process_write(void *arg)
 {
     NFSClient *client = arg;
+
+    aio_context_acquire(client->aio_context);
     nfs_service(client->context, POLLOUT);
     nfs_set_events(client);
+    aio_context_release(client->aio_context);
 }
 
 static void nfs_co_init_task(NFSClient *client, NFSRPC *task)
diff --git a/block/sheepdog.c b/block/sheepdog.c
index d80e4ed..1113043 100644
--- a/block/sheepdog.c
+++ b/block/sheepdog.c
@@ -621,13 +621,6 @@ static coroutine_fn int send_co_req(int sockfd, SheepdogReq *hdr, void *data,
     return ret;
 }
 
-static void restart_co_req(void *opaque)
-{
-    Coroutine *co = opaque;
-
-    qemu_coroutine_enter(co, NULL);
-}
-
 typedef struct SheepdogReqCo {
     int sockfd;
     AioContext *aio_context;
@@ -637,12 +630,21 @@ typedef struct SheepdogReqCo {
     unsigned int *rlen;
     int ret;
     bool finished;
+    Coroutine *co;
 } SheepdogReqCo;
 
+static void restart_co_req(void *opaque)
+{
+    SheepdogReqCo *srco = opaque;
+
+    aio_context_acquire(srco->aio_context);
+    qemu_coroutine_enter(srco->co, NULL);
+    aio_context_release(srco->aio_context);
+}
+
 static coroutine_fn void do_co_req(void *opaque)
 {
     int ret;
-    Coroutine *co;
     SheepdogReqCo *srco = opaque;
     int sockfd = srco->sockfd;
     SheepdogReq *hdr = srco->hdr;
@@ -650,9 +652,9 @@ static coroutine_fn void do_co_req(void *opaque)
     unsigned int *wlen = srco->wlen;
     unsigned int *rlen = srco->rlen;
 
-    co = qemu_coroutine_self();
+    srco->co = qemu_coroutine_self();
     aio_set_fd_handler(srco->aio_context, sockfd, false,
-                       NULL, restart_co_req, co);
+                       NULL, restart_co_req, srco);
 
     ret = send_co_req(sockfd, hdr, data, wlen);
     if (ret < 0) {
@@ -660,7 +662,7 @@ static coroutine_fn void do_co_req(void *opaque)
     }
 
     aio_set_fd_handler(srco->aio_context, sockfd, false,
-                       restart_co_req, NULL, co);
+                       restart_co_req, NULL, srco);
 
     ret = qemu_co_recv(sockfd, hdr, sizeof(*hdr));
     if (ret != sizeof(*hdr)) {
@@ -688,6 +690,7 @@ out:
     aio_set_fd_handler(srco->aio_context, sockfd, false,
                        NULL, NULL, NULL);
 
+    srco->co = NULL;
     srco->ret = ret;
     srco->finished = true;
 }
@@ -917,14 +920,18 @@ static void co_read_response(void *opaque)
         s->co_recv = qemu_coroutine_create(aio_read_response);
     }
 
+    aio_context_acquire(s->aio_context);
     qemu_coroutine_enter(s->co_recv, opaque);
+    aio_context_release(s->aio_context);
 }
 
 static void co_write_request(void *opaque)
 {
     BDRVSheepdogState *s = opaque;
 
+    aio_context_acquire(s->aio_context);
     qemu_coroutine_enter(s->co_send, NULL);
+    aio_context_release(s->aio_context);
 }
 
 /*
diff --git a/block/ssh.c b/block/ssh.c
index af025c0..00cda3f 100644
--- a/block/ssh.c
+++ b/block/ssh.c
@@ -772,20 +772,34 @@ static int ssh_has_zero_init(BlockDriverState *bs)
     return has_zero_init;
 }
 
+typedef struct BDRVSSHRestart {
+    Coroutine *co;
+    AioContext *ctx;
+} BDRVSSHRestart;
+
 static void restart_coroutine(void *opaque)
 {
-    Coroutine *co = opaque;
+    BDRVSSHRestart *restart = opaque;
 
-    DPRINTF("co=%p", co);
+    DPRINTF("ctx=%p co=%p", restart->ctx, restart->co);
 
-    qemu_coroutine_enter(co, NULL);
+    aio_context_acquire(restart->ctx);
+    qemu_coroutine_enter(restart->co, NULL);
+    aio_context_release(restart->ctx);
 }
 
-static coroutine_fn void set_fd_handler(BDRVSSHState *s, BlockDriverState *bs)
+/* A non-blocking call returned EAGAIN, so yield, ensuring the
+ * handlers are set up so that we'll be rescheduled when there is an
+ * interesting event on the socket.
+ */
+static coroutine_fn void co_yield(BDRVSSHState *s, BlockDriverState *bs)
 {
     int r;
     IOHandler *rd_handler = NULL, *wr_handler = NULL;
-    Coroutine *co = qemu_coroutine_self();
+    BDRVSSHRestart restart = {
+        .ctx = bdrv_get_aio_context(bs),
+        .co = qemu_coroutine_self()
+    };
 
     r = libssh2_session_block_directions(s->session);
 
@@ -800,26 +814,11 @@ static coroutine_fn void set_fd_handler(BDRVSSHState *s, BlockDriverState *bs)
             rd_handler, wr_handler);
 
     aio_set_fd_handler(bdrv_get_aio_context(bs), s->sock,
-                       false, rd_handler, wr_handler, co);
-}
-
-static coroutine_fn void clear_fd_handler(BDRVSSHState *s,
-                                          BlockDriverState *bs)
-{
-    DPRINTF("s->sock=%d", s->sock);
-    aio_set_fd_handler(bdrv_get_aio_context(bs), s->sock,
-                       false, NULL, NULL, NULL);
-}
-
-/* A non-blocking call returned EAGAIN, so yield, ensuring the
- * handlers are set up so that we'll be rescheduled when there is an
- * interesting event on the socket.
- */
-static coroutine_fn void co_yield(BDRVSSHState *s, BlockDriverState *bs)
-{
-    set_fd_handler(s, bs);
+                       false, rd_handler, wr_handler, &restart);
     qemu_coroutine_yield();
-    clear_fd_handler(s, bs);
+    DPRINTF("s->sock=%d - back", s->sock);
+    aio_set_fd_handler(bdrv_get_aio_context(bs), s->sock, false,
+                       NULL, NULL, NULL);
 }
 
 /* SFTP has a function `libssh2_sftp_seek64' which seeks to a position
diff --git a/block/win32-aio.c b/block/win32-aio.c
index bbf2f01..85aac85 100644
--- a/block/win32-aio.c
+++ b/block/win32-aio.c
@@ -40,7 +40,7 @@ struct QEMUWin32AIOState {
     HANDLE hIOCP;
     EventNotifier e;
     int count;
-    bool is_aio_context_attached;
+    AioContext *aio_ctx;
 };
 
 typedef struct QEMUWin32AIOCB {
@@ -87,7 +87,9 @@ static void win32_aio_process_completion(QEMUWin32AIOState *s,
     }
 
 
+    aio_context_acquire(s->aio_ctx);
     waiocb->common.cb(waiocb->common.opaque, ret);
+    aio_context_release(s->aio_ctx);
     qemu_aio_unref(waiocb);
 }
 
@@ -175,13 +177,13 @@ void win32_aio_detach_aio_context(QEMUWin32AIOState *aio,
                                   AioContext *old_context)
 {
     aio_set_event_notifier(old_context, &aio->e, false, NULL);
-    aio->is_aio_context_attached = false;
+    aio->aio_ctx = NULL;
 }
 
 void win32_aio_attach_aio_context(QEMUWin32AIOState *aio,
                                   AioContext *new_context)
 {
-    aio->is_aio_context_attached = true;
+    aio->aio_ctx = new_context;
     aio_set_event_notifier(new_context, &aio->e, false,
                            win32_aio_completion_cb);
 }
@@ -211,7 +213,7 @@ out_free_state:
 
 void win32_aio_cleanup(QEMUWin32AIOState *aio)
 {
-    assert(!aio->is_aio_context_attached);
+    assert(!aio->aio_ctx);
     CloseHandle(aio->hIOCP);
     event_notifier_cleanup(&aio->e);
     g_free(aio);
diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c
index e83d823..d72942e 100644
--- a/hw/block/virtio-blk.c
+++ b/hw/block/virtio-blk.c
@@ -147,7 +147,8 @@ static void virtio_blk_ioctl_complete(void *opaque, int status)
 {
     VirtIOBlockIoctlReq *ioctl_req = opaque;
     VirtIOBlockReq *req = ioctl_req->req;
-    VirtIODevice *vdev = VIRTIO_DEVICE(req->dev);
+    VirtIOBlock *s = req->dev;
+    VirtIODevice *vdev = VIRTIO_DEVICE(s);
     struct virtio_scsi_inhdr *scsi;
     struct sg_io_hdr *hdr;
 
@@ -599,6 +600,7 @@ static void virtio_blk_handle_output(VirtIODevice *vdev, VirtQueue *vq)
         return;
     }
 
+    aio_context_acquire(blk_get_aio_context(s->blk));
     blk_io_plug(s->blk);
 
     while ((req = virtio_blk_get_request(s))) {
@@ -610,6 +612,7 @@ static void virtio_blk_handle_output(VirtIODevice *vdev, VirtQueue *vq)
     }
 
     blk_io_unplug(s->blk);
+    aio_context_release(blk_get_aio_context(s->blk));
 }
 
 static void virtio_blk_dma_restart_bh(void *opaque)
diff --git a/hw/scsi/virtio-scsi-dataplane.c b/hw/scsi/virtio-scsi-dataplane.c
index b1745b2..194ce40 100644
--- a/hw/scsi/virtio-scsi-dataplane.c
+++ b/hw/scsi/virtio-scsi-dataplane.c
@@ -59,9 +59,11 @@ static int virtio_scsi_vring_init(VirtIOSCSI *s, VirtQueue *vq, int n)
 
 void virtio_scsi_dataplane_notify(VirtIODevice *vdev, VirtIOSCSIReq *req)
 {
+    VirtIOSCSI *s = VIRTIO_SCSI(vdev);
     if (virtio_should_notify(vdev, req->vq)) {
         event_notifier_set(virtio_queue_get_guest_notifier(req->vq));
     }
+    aio_context_release(s->ctx);
 }
 
 /* assumes s->ctx held */
diff --git a/hw/scsi/virtio-scsi.c b/hw/scsi/virtio-scsi.c
index 4054ce5..8afa489 100644
--- a/hw/scsi/virtio-scsi.c
+++ b/hw/scsi/virtio-scsi.c
@@ -420,9 +420,11 @@ static void virtio_scsi_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
         virtio_scsi_dataplane_start(s);
         return;
     }
+    aio_context_acquire(s->ctx);
     while ((req = virtio_scsi_pop_req(s, vq))) {
         virtio_scsi_handle_ctrl_req(s, req);
     }
+    aio_context_release(s->ctx);
 }
 
 static void virtio_scsi_complete_cmd_req(VirtIOSCSIReq *req)
@@ -570,6 +572,8 @@ static void virtio_scsi_handle_cmd(VirtIODevice *vdev, VirtQueue *vq)
         virtio_scsi_dataplane_start(s);
         return;
     }
+
+    aio_context_acquire(s->ctx);
     while ((req = virtio_scsi_pop_req(s, vq))) {
         if (virtio_scsi_handle_cmd_req_prepare(s, req)) {
             QTAILQ_INSERT_TAIL(&reqs, req, next);
@@ -579,6 +583,7 @@ static void virtio_scsi_handle_cmd(VirtIODevice *vdev, VirtQueue *vq)
     QTAILQ_FOREACH_SAFE(req, &reqs, next, next) {
         virtio_scsi_handle_cmd_req_submit(s, req);
     }
+    aio_context_release(s->ctx);
 }
 
 static void virtio_scsi_get_config(VirtIODevice *vdev,
@@ -732,9 +737,11 @@ static void virtio_scsi_handle_event(VirtIODevice *vdev, VirtQueue *vq)
         virtio_scsi_dataplane_start(s);
         return;
     }
+    aio_context_acquire(s->ctx);
     if (s->events_dropped) {
         virtio_scsi_push_event(s, NULL, VIRTIO_SCSI_T_NO_EVENT, 0);
     }
+    aio_context_release(s->ctx);
 }
 
 static void virtio_scsi_change(SCSIBus *bus, SCSIDevice *dev, SCSISense sense)
diff --git a/nbd.c b/nbd.c
index b3d9654..2867f34 100644
--- a/nbd.c
+++ b/nbd.c
@@ -1445,6 +1445,10 @@ static void nbd_restart_write(void *opaque)
 static void nbd_set_handlers(NBDClient *client)
 {
     if (client->exp && client->exp->ctx) {
+        /* Note that the handlers do not expect any concurrency; qemu-nbd
+         * does not instantiate multiple AioContexts yet, nor does it call
+         * aio_poll/aio_dispatch from multiple threads.
+         */
         aio_set_fd_handler(client->exp->ctx, client->sock,
                            true,
                            client->can_read ? nbd_read : NULL,
-- 
1.8.3.1