From: Paolo Bonzini
Subject: [Qemu-block] [PATCH 17/18] block: explicitly acquire aiocontext in aio callbacks that need it
Date: Thu, 6 Aug 2015 15:36:15 +0200

todo: qed
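
In the diff below, the generic completion paths no longer wrap the caller's
callback in aio_context_acquire()/aio_context_release(): the bottom-half
helpers drop the pair outright, and linux-aio and the thread pool temporarily
release the lock around the callback. Instead, the callbacks that actually
touch shared block or device state (blkverify, quorum, mirror, virtio-blk,
scsi-disk, scsi-generic) acquire the AioContext themselves, and
CoroutineIOCompletion and ThreadPoolCo gain a ctx field so the context is
held while re-entering the coroutine.

As a rough sketch of the resulting callback shape (illustrative only, not
part of the patch; ExampleAIOCB and example_complete() are made-up names):

    /* Illustrative sketch mirroring e.g. blkverify_aio_cb() and
     * quorum_aio_cb() below; ExampleAIOCB and example_complete() are
     * invented names, not part of this series. */
    static void example_aio_cb(void *opaque, int ret)
    {
        ExampleAIOCB *acb = opaque;
        AioContext *ctx = bdrv_get_aio_context(acb->common.bs);

        aio_context_acquire(ctx);
        example_complete(acb, ret);   /* touches shared BDS/device state */
        aio_context_release(ctx);
    }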

Signed-off-by: Paolo Bonzini <address@hidden>
---
 block/archipelago.c    |  3 ---
 block/blkdebug.c       |  4 ----
 block/blkverify.c      |  9 ++++-----
 block/block-backend.c  |  4 ----
 block/curl.c           |  2 +-
 block/io.c             | 13 +++++++------
 block/linux-aio.c      |  7 ++++---
 block/mirror.c         | 12 +++++++++---
 block/null.c           |  8 --------
 block/qed.c            |  3 ---
 block/quorum.c         | 12 ++++++++++++
 block/rbd.c            |  4 ----
 block/win32-aio.c      |  2 --
 hw/block/virtio-blk.c  |  6 ++++++
 hw/scsi/scsi-disk.c    | 18 ++++++++++++++++++
 hw/scsi/scsi-generic.c | 20 +++++++++++++++++---
 thread-pool.c          | 12 +++++++++++-
 17 files changed, 89 insertions(+), 50 deletions(-)

diff --git a/block/archipelago.c b/block/archipelago.c
index 7f69a3f..855655c 100644
--- a/block/archipelago.c
+++ b/block/archipelago.c
@@ -312,12 +312,9 @@ static void qemu_archipelago_complete_aio(void *opaque)
 {
     AIORequestData *reqdata = (AIORequestData *) opaque;
     ArchipelagoAIOCB *aio_cb = (ArchipelagoAIOCB *) reqdata->aio_cb;
-    AioContext *ctx = bdrv_get_aio_context(aio_cb->common.bs);
 
     qemu_bh_delete(aio_cb->bh);
-    aio_context_acquire(ctx);
     aio_cb->common.cb(aio_cb->common.opaque, aio_cb->ret);
-    aio_context_release(ctx);
     aio_cb->status = 0;
 
     qemu_aio_unref(aio_cb);
diff --git a/block/blkdebug.c b/block/blkdebug.c
index 2130811..bc247f4 100644
--- a/block/blkdebug.c
+++ b/block/blkdebug.c
@@ -458,12 +458,8 @@ out:
 static void error_callback_bh(void *opaque)
 {
     struct BlkdebugAIOCB *acb = opaque;
-    AioContext *ctx = bdrv_get_aio_context(acb->common.bs);
-
     qemu_bh_delete(acb->bh);
-    aio_context_acquire(ctx);
     acb->common.cb(acb->common.opaque, acb->ret);
-    aio_context_release(ctx);
     qemu_aio_unref(acb);
 }
 
diff --git a/block/blkverify.c b/block/blkverify.c
index 510c198..e670622 100644
--- a/block/blkverify.c
+++ b/block/blkverify.c
@@ -185,23 +185,22 @@ static BlkverifyAIOCB *blkverify_aio_get(BlockDriverState *bs, bool is_write,
 static void blkverify_aio_bh(void *opaque)
 {
     BlkverifyAIOCB *acb = opaque;
-    AioContext *ctx = bdrv_get_aio_context(acb->common.bs);
 
     qemu_bh_delete(acb->bh);
     if (acb->buf) {
         qemu_iovec_destroy(&acb->raw_qiov);
         qemu_vfree(acb->buf);
     }
-    aio_context_acquire(ctx);
     acb->common.cb(acb->common.opaque, acb->ret);
-    aio_context_release(ctx);
     qemu_aio_unref(acb);
 }
 
 static void blkverify_aio_cb(void *opaque, int ret)
 {
     BlkverifyAIOCB *acb = opaque;
+    AioContext *ctx = bdrv_get_aio_context(acb->common.bs);
 
+    aio_context_acquire(ctx);
     switch (++acb->done) {
     case 1:
         acb->ret = ret;
@@ -216,11 +215,11 @@ static void blkverify_aio_cb(void *opaque, int ret)
             acb->verify(acb);
         }
 
-        acb->bh = aio_bh_new(bdrv_get_aio_context(acb->common.bs),
-                             blkverify_aio_bh, acb);
+        acb->bh = aio_bh_new(ctx, blkverify_aio_bh, acb);
         qemu_bh_schedule(acb->bh);
         break;
     }
+    aio_context_release(ctx);
 }
 
 static void blkverify_verify_readv(BlkverifyAIOCB *acb)
diff --git a/block/block-backend.c b/block/block-backend.c
index 185ba32..aee8a12 100644
--- a/block/block-backend.c
+++ b/block/block-backend.c
@@ -529,12 +529,8 @@ int blk_write_zeroes(BlockBackend *blk, int64_t sector_num,
 static void error_callback_bh(void *opaque)
 {
     struct BlockBackendAIOCB *acb = opaque;
-    AioContext *ctx = bdrv_get_aio_context(acb->common.bs);
-
     qemu_bh_delete(acb->bh);
-    aio_context_acquire(ctx);
     acb->common.cb(acb->common.opaque, acb->ret);
-    aio_context_release(ctx);
     qemu_aio_unref(acb);
 }
 
diff --git a/block/curl.c b/block/curl.c
index fc70b01..0e062eb 100644
--- a/block/curl.c
+++ b/block/curl.c
@@ -713,11 +713,11 @@ static void curl_readv_bh_cb(void *p)
     curl_multi_socket_action(s->multi, CURL_SOCKET_TIMEOUT, 0, &running);
 
 out:
+    aio_context_release(ctx);
     if (ret != -EINPROGRESS) {
         acb->common.cb(acb->common.opaque, ret);
         qemu_aio_unref(acb);
     }
-    aio_context_release(ctx);
 }
 
 static BlockAIOCB *curl_aio_readv(BlockDriverState *bs,
diff --git a/block/io.c b/block/io.c
index 74f5705..7bfe6a1 100644
--- a/block/io.c
+++ b/block/io.c
@@ -2002,15 +2002,12 @@ static const AIOCBInfo bdrv_em_aiocb_info = {
 static void bdrv_aio_bh_cb(void *opaque)
 {
     BlockAIOCBSync *acb = opaque;
-    AioContext *ctx = bdrv_get_aio_context(acb->common.bs);
 
     if (!acb->is_write && acb->ret >= 0) {
         qemu_iovec_from_buf(acb->qiov, 0, acb->bounce, acb->qiov->size);
     }
     qemu_vfree(acb->bounce);
-    aio_context_acquire(ctx);
     acb->common.cb(acb->common.opaque, acb->ret);
-    aio_context_release(ctx);
     qemu_bh_delete(acb->bh);
     acb->bh = NULL;
     qemu_aio_unref(acb);
@@ -2086,13 +2083,10 @@ static void bdrv_co_complete(BlockAIOCBCoroutine *acb)
 static void bdrv_co_em_bh(void *opaque)
 {
     BlockAIOCBCoroutine *acb = opaque;
-    AioContext *ctx = bdrv_get_aio_context(acb->common.bs);
 
     assert(!acb->need_bh);
     qemu_bh_delete(acb->bh);
-    aio_context_acquire(ctx);
     bdrv_co_complete(acb);
-    aio_context_release(ctx);
 }
 
 static void bdrv_co_maybe_schedule_bh(BlockAIOCBCoroutine *acb)
@@ -2243,15 +2237,19 @@ void qemu_aio_unref(void *p)
 
 typedef struct CoroutineIOCompletion {
     Coroutine *coroutine;
+    AioContext *ctx;
     int ret;
 } CoroutineIOCompletion;
 
 static void bdrv_co_io_em_complete(void *opaque, int ret)
 {
     CoroutineIOCompletion *co = opaque;
+    AioContext *ctx = co->ctx;
 
     co->ret = ret;
+    aio_context_acquire(ctx);
     qemu_coroutine_enter(co->coroutine, NULL);
+    aio_context_release(ctx);
 }
 
 static int coroutine_fn bdrv_co_io_em(BlockDriverState *bs, int64_t sector_num,
@@ -2260,6 +2258,7 @@ static int coroutine_fn bdrv_co_io_em(BlockDriverState *bs, int64_t sector_num,
 {
     CoroutineIOCompletion co = {
         .coroutine = qemu_coroutine_self(),
+        .ctx = bdrv_get_aio_context(bs),
     };
     BlockAIOCB *acb;
 
@@ -2331,6 +2330,7 @@ int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
         BlockAIOCB *acb;
         CoroutineIOCompletion co = {
             .coroutine = qemu_coroutine_self(),
+            .ctx = bdrv_get_aio_context(bs),
         };
 
         acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co);
@@ -2455,6 +2455,7 @@ int coroutine_fn bdrv_co_discard(BlockDriverState *bs, int64_t sector_num,
             BlockAIOCB *acb;
             CoroutineIOCompletion co = {
                 .coroutine = qemu_coroutine_self(),
+                .ctx = bdrv_get_aio_context(bs),
             };
 
             acb = bs->drv->bdrv_aio_discard(bs, sector_num, nb_sectors,
diff --git a/block/linux-aio.c b/block/linux-aio.c
index bc83e5c..4f21918 100644
--- a/block/linux-aio.c
+++ b/block/linux-aio.c
@@ -71,8 +71,7 @@ static inline ssize_t io_event_ret(struct io_event *ev)
 /*
  * Completes an AIO request (calls the callback and frees the ACB).
  */
-static void qemu_laio_process_completion(struct qemu_laio_state *s,
-    struct qemu_laiocb *laiocb)
+static void qemu_laio_process_completion(struct qemu_laiocb *laiocb)
 {
     int ret;
 
@@ -138,7 +137,9 @@ static void qemu_laio_completion_bh(void *opaque)
         laiocb->ret = io_event_ret(&s->events[s->event_idx]);
         s->event_idx++;
 
-        qemu_laio_process_completion(s, laiocb);
+        aio_context_release(s->aio_context);
+        qemu_laio_process_completion(laiocb);
+        aio_context_acquire(s->aio_context);
     }
 
     if (!s->io_q.plugged && !QSIMPLEQ_EMPTY(&s->io_q.pending)) {
diff --git a/block/mirror.c b/block/mirror.c
index fc4d8f5..923732c 100644
--- a/block/mirror.c
+++ b/block/mirror.c
@@ -127,6 +127,8 @@ static void mirror_write_complete(void *opaque, int ret)
 {
     MirrorOp *op = opaque;
     MirrorBlockJob *s = op->s;
+
+    aio_context_acquire(bdrv_get_aio_context(s->common.bs));
     if (ret < 0) {
         BlockErrorAction action;
 
@@ -137,12 +139,15 @@ static void mirror_write_complete(void *opaque, int ret)
         }
     }
     mirror_iteration_done(op, ret);
+    aio_context_release(bdrv_get_aio_context(s->common.bs));
 }
 
 static void mirror_read_complete(void *opaque, int ret)
 {
     MirrorOp *op = opaque;
     MirrorBlockJob *s = op->s;
+
+    aio_context_acquire(bdrv_get_aio_context(s->common.bs));
     if (ret < 0) {
         BlockErrorAction action;
 
@@ -153,10 +158,11 @@ static void mirror_read_complete(void *opaque, int ret)
         }
 
         mirror_iteration_done(op, ret);
-        return;
+    } else {
+        bdrv_aio_writev(s->target, op->sector_num, &op->qiov, op->nb_sectors,
+                        mirror_write_complete, op);
     }
-    bdrv_aio_writev(s->target, op->sector_num, &op->qiov, op->nb_sectors,
-                    mirror_write_complete, op);
+    aio_context_release(bdrv_get_aio_context(s->common.bs));
 }
 
 static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s)
diff --git a/block/null.c b/block/null.c
index 9bddc1b..7d08323 100644
--- a/block/null.c
+++ b/block/null.c
@@ -117,11 +117,7 @@ static const AIOCBInfo null_aiocb_info = {
 static void null_bh_cb(void *opaque)
 {
     NullAIOCB *acb = opaque;
-    AioContext *ctx = bdrv_get_aio_context(acb->common.bs);
-
-    aio_context_acquire(ctx);
     acb->common.cb(acb->common.opaque, 0);
-    aio_context_release(ctx);
     qemu_bh_delete(acb->bh);
     qemu_aio_unref(acb);
 }
@@ -129,11 +125,7 @@ static void null_bh_cb(void *opaque)
 static void null_timer_cb(void *opaque)
 {
     NullAIOCB *acb = opaque;
-    AioContext *ctx = bdrv_get_aio_context(acb->common.bs);
-
-    aio_context_acquire(ctx);
     acb->common.cb(acb->common.opaque, 0);
-    aio_context_release(ctx);
     timer_deinit(&acb->timer);
     qemu_aio_unref(acb);
 }
diff --git a/block/qed.c b/block/qed.c
index adc9b73..9b2c786 100644
--- a/block/qed.c
+++ b/block/qed.c
@@ -912,15 +912,12 @@ static void qed_aio_complete_bh(void *opaque)
     BlockCompletionFunc *cb = acb->common.cb;
     void *user_opaque = acb->common.opaque;
     int ret = acb->bh_ret;
-    AioContext *ctx = bdrv_get_aio_context(acb->common.bs);
 
     qemu_bh_delete(acb->bh);
     qemu_aio_unref(acb);
 
     /* Invoke callback */
-    aio_context_acquire(ctx);
     cb(user_opaque, ret);
-    aio_context_release(ctx);
 }
 
 static void qed_aio_complete(QEDAIOCB *acb, int ret)
diff --git a/block/quorum.c b/block/quorum.c
index 026528e..7c06080 100644
--- a/block/quorum.c
+++ b/block/quorum.c
@@ -149,6 +149,7 @@ static AIOCBInfo quorum_aiocb_info = {
     .cancel_async       = quorum_aio_cancel,
 };
 
+/* Called _without_ acquiring AioContext.  */
 static void quorum_aio_finalize(QuorumAIOCB *acb)
 {
     int i, ret = 0;
@@ -276,12 +277,15 @@ static void quorum_fifo_aio_cb(void *opaque, int ret)
     QuorumChildRequest *sacb = opaque;
     QuorumAIOCB *acb = sacb->parent;
     BDRVQuorumState *s = acb->common.bs->opaque;
+    AioContext *ctx = bdrv_get_aio_context(acb->common.bs);
 
+    aio_context_acquire(ctx);
     assert(acb->is_read && s->read_pattern == QUORUM_READ_PATTERN_FIFO);
 
     /* We try to read next child in FIFO order if we fail to read */
     if (ret < 0 && ++acb->child_iter < s->num_children) {
         read_fifo_child(acb);
+        aio_context_release(ctx);
         return;
     }
 
@@ -292,6 +296,7 @@ static void quorum_fifo_aio_cb(void *opaque, int ret)
 
     /* FIXME: rewrite failed children if acb->child_iter > 0? */
 
+    aio_context_release(ctx);
     quorum_aio_finalize(acb);
 }
 
@@ -300,8 +305,11 @@ static void quorum_aio_cb(void *opaque, int ret)
     QuorumChildRequest *sacb = opaque;
     QuorumAIOCB *acb = sacb->parent;
     BDRVQuorumState *s = acb->common.bs->opaque;
+    AioContext *ctx = bdrv_get_aio_context(acb->common.bs);
     bool rewrite = false;
 
+    aio_context_acquire(ctx);
+
     sacb->ret = ret;
     acb->count++;
     if (ret == 0) {
@@ -311,7 +319,9 @@ static void quorum_aio_cb(void *opaque, int ret)
     }
     assert(acb->count <= s->num_children);
     assert(acb->success_count <= s->num_children);
+
     if (acb->count < s->num_children) {
+        aio_context_release(ctx);
         return;
     }
 
@@ -322,6 +332,8 @@ static void quorum_aio_cb(void *opaque, int ret)
         quorum_has_too_much_io_failed(acb);
     }
 
+    aio_context_release(ctx);
+
     /* if no rewrite is done the code will finish right away */
     if (!rewrite) {
         quorum_aio_finalize(acb);
diff --git a/block/rbd.c b/block/rbd.c
index 6206dc3..a60a19d 100644
--- a/block/rbd.c
+++ b/block/rbd.c
@@ -376,7 +376,6 @@ static int qemu_rbd_create(const char *filename, QemuOpts *opts, Error **errp)
 static void qemu_rbd_complete_aio(RADOSCB *rcb)
 {
     RBDAIOCB *acb = rcb->acb;
-    AioContext *ctx = bdrv_get_aio_context(acb->common.bs);
     int64_t r;
 
     r = rcb->ret;
@@ -409,10 +408,7 @@ static void qemu_rbd_complete_aio(RADOSCB *rcb)
         qemu_iovec_from_buf(acb->qiov, 0, acb->bounce, acb->qiov->size);
     }
     qemu_vfree(acb->bounce);
-
-    aio_context_acquire(ctx);
     acb->common.cb(acb->common.opaque, (acb->ret > 0 ? 0 : acb->ret));
-    aio_context_release(ctx);
 
     qemu_aio_unref(acb);
 }
diff --git a/block/win32-aio.c b/block/win32-aio.c
index a78d149..33f7bcf 100644
--- a/block/win32-aio.c
+++ b/block/win32-aio.c
@@ -87,9 +87,7 @@ static void win32_aio_process_completion(QEMUWin32AIOState *s,
     }
 
 
-    aio_context_acquire(s->aio_ctx);
     waiocb->common.cb(waiocb->common.opaque, ret);
-    aio_context_release(s->aio_ctx);
     qemu_aio_unref(waiocb);
 }
 
diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c
index 4462ad2..fd018a3 100644
--- a/hw/block/virtio-blk.c
+++ b/hw/block/virtio-blk.c
@@ -87,7 +87,9 @@ static int virtio_blk_handle_rw_error(VirtIOBlockReq *req, int error,
 static void virtio_blk_rw_complete(void *opaque, int ret)
 {
     VirtIOBlockReq *next = opaque;
+    VirtIOBlock *s = next->dev;
 
+    aio_context_acquire(blk_get_aio_context(s->conf.conf.blk));
     while (next) {
         VirtIOBlockReq *req = next;
         next = req->mr_next;
@@ -120,12 +122,15 @@ static void virtio_blk_rw_complete(void *opaque, int ret)
         block_acct_done(blk_get_stats(req->dev->blk), &req->acct);
         virtio_blk_free_request(req);
     }
+    aio_context_release(blk_get_aio_context(s->conf.conf.blk));
 }
 
 static void virtio_blk_flush_complete(void *opaque, int ret)
 {
     VirtIOBlockReq *req = opaque;
+    VirtIOBlock *s = req->dev;
 
+    aio_context_acquire(blk_get_aio_context(s->conf.conf.blk));
     if (ret) {
         if (virtio_blk_handle_rw_error(req, -ret, 0)) {
             return;
@@ -135,6 +140,7 @@ static void virtio_blk_flush_complete(void *opaque, int ret)
     virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
     block_acct_done(blk_get_stats(req->dev->blk), &req->acct);
     virtio_blk_free_request(req);
+    aio_context_release(blk_get_aio_context(s->conf.conf.blk));
 }
 
 #ifdef __linux__
diff --git a/hw/scsi/scsi-disk.c b/hw/scsi/scsi-disk.c
index bada9a7..fb82d20 100644
--- a/hw/scsi/scsi-disk.c
+++ b/hw/scsi/scsi-disk.c
@@ -169,6 +169,8 @@ static void scsi_aio_complete(void *opaque, int ret)
 
     assert(r->req.aiocb != NULL);
     r->req.aiocb = NULL;
+
+    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
     block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
     if (r->req.io_canceled) {
         scsi_req_cancel_complete(&r->req);
@@ -184,6 +186,7 @@ static void scsi_aio_complete(void *opaque, int ret)
     scsi_req_complete(&r->req, GOOD);
 
 done:
+    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
     scsi_req_unref(&r->req);
 }
 
@@ -273,8 +276,10 @@ static void scsi_dma_complete(void *opaque, int ret)
     assert(r->req.aiocb != NULL);
     r->req.aiocb = NULL;
 
+    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
     block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
     scsi_dma_complete_noio(r, ret);
+    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
 }
 
 static void scsi_read_complete(void * opaque, int ret)
@@ -285,6 +290,8 @@ static void scsi_read_complete(void * opaque, int ret)
 
     assert(r->req.aiocb != NULL);
     r->req.aiocb = NULL;
+
+    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
     block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
     if (r->req.io_canceled) {
         scsi_req_cancel_complete(&r->req);
@@ -306,6 +313,7 @@ static void scsi_read_complete(void * opaque, int ret)
 
 done:
     scsi_req_unref(&r->req);
+    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
 }
 
 /* Actually issue a read to the block device.  */
@@ -355,8 +363,10 @@ static void scsi_do_read_cb(void *opaque, int ret)
     assert (r->req.aiocb != NULL);
     r->req.aiocb = NULL;
 
+    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
     block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
     scsi_do_read(opaque, ret);
+    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
 }
 
 /* Read more data from scsi device into buffer.  */
@@ -481,8 +491,10 @@ static void scsi_write_complete(void * opaque, int ret)
     assert (r->req.aiocb != NULL);
     r->req.aiocb = NULL;
 
+    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
     block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
     scsi_write_complete_noio(r, ret);
+    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
 }
 
 static void scsi_write_data(SCSIRequest *req)
@@ -1625,11 +1637,14 @@ static void scsi_unmap_complete(void *opaque, int ret)
 {
     UnmapCBData *data = opaque;
     SCSIDiskReq *r = data->r;
+    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
 
     assert(r->req.aiocb != NULL);
     r->req.aiocb = NULL;
 
+    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
     scsi_unmap_complete_noio(data, ret);
+    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
 }
 
 static void scsi_disk_emulate_unmap(SCSIDiskReq *r, uint8_t *inbuf)
@@ -1696,6 +1711,8 @@ static void scsi_write_same_complete(void *opaque, int ret)
 
     assert(r->req.aiocb != NULL);
     r->req.aiocb = NULL;
+
+    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
     block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
     if (r->req.io_canceled) {
         scsi_req_cancel_complete(&r->req);
@@ -1730,6 +1747,7 @@ done:
     scsi_req_unref(&r->req);
     qemu_vfree(data->iov.iov_base);
     g_free(data);
+    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
 }
 
 static void scsi_disk_emulate_write_same(SCSIDiskReq *r, uint8_t *inbuf)
diff --git a/hw/scsi/scsi-generic.c b/hw/scsi/scsi-generic.c
index 1b6350b..503ea18 100644
--- a/hw/scsi/scsi-generic.c
+++ b/hw/scsi/scsi-generic.c
@@ -145,10 +145,14 @@ done:
 static void scsi_command_complete(void *opaque, int ret)
 {
     SCSIGenericReq *r = (SCSIGenericReq *)opaque;
+    SCSIDevice *s = r->req.dev;
 
     assert(r->req.aiocb != NULL);
     r->req.aiocb = NULL;
+
+    aio_context_acquire(blk_get_aio_context(s->conf.blk));
     scsi_command_complete_noio(r, ret);
+    aio_context_release(blk_get_aio_context(s->conf.blk));
 }
 
 static int execute_command(BlockBackend *blk,
@@ -184,9 +188,11 @@ static void scsi_read_complete(void * opaque, int ret)
     assert(r->req.aiocb != NULL);
     r->req.aiocb = NULL;
 
+    aio_context_acquire(blk_get_aio_context(s->conf.blk));
+
     if (ret || r->req.io_canceled) {
         scsi_command_complete_noio(r, ret);
-        return;
+        goto done;
     }
 
     len = r->io_header.dxfer_len - r->io_header.resid;
@@ -195,7 +201,7 @@ static void scsi_read_complete(void * opaque, int ret)
     r->len = -1;
     if (len == 0) {
         scsi_command_complete_noio(r, 0);
-        return;
+        goto done;
     }
 
     /* Snoop READ CAPACITY output to set the blocksize.  */
@@ -212,6 +218,9 @@ static void scsi_read_complete(void * opaque, int ret)
 
     scsi_req_data(&r->req, len);
     scsi_req_unref(&r->req);
+
+done:
+    aio_context_release(blk_get_aio_context(s->conf.blk));
 }
 
 /* Read more data from scsi device into buffer.  */
@@ -247,9 +256,11 @@ static void scsi_write_complete(void * opaque, int ret)
     assert(r->req.aiocb != NULL);
     r->req.aiocb = NULL;
 
+    aio_context_acquire(blk_get_aio_context(s->conf.blk));
+
     if (ret || r->req.io_canceled) {
         scsi_command_complete_noio(r, ret);
-        return;
+        goto done;
     }
 
     if (r->req.cmd.buf[0] == MODE_SELECT && r->req.cmd.buf[4] == 12 &&
@@ -259,6 +270,9 @@ static void scsi_write_complete(void * opaque, int ret)
     }
 
     scsi_command_complete_noio(r, ret);
+
+done:
+    aio_context_release(blk_get_aio_context(s->conf.blk));
 }
 
 /* Write data to a scsi device.  Returns nonzero on failure.
diff --git a/thread-pool.c b/thread-pool.c
index 1039188..9e6d5c9 100644
--- a/thread-pool.c
+++ b/thread-pool.c
@@ -185,7 +185,9 @@ restart:
              */
             qemu_bh_schedule(pool->completion_bh);
 
+            aio_context_release(pool->ctx);
             elem->common.cb(elem->common.opaque, elem->ret);
+            aio_context_acquire(pool->ctx);
             qemu_aio_unref(elem);
             goto restart;
         } else {
@@ -261,21 +263,29 @@ BlockAIOCB *thread_pool_submit_aio(ThreadPool *pool,
 
 typedef struct ThreadPoolCo {
     Coroutine *co;
+    AioContext *ctx;
     int ret;
 } ThreadPoolCo;
 
 static void thread_pool_co_cb(void *opaque, int ret)
 {
     ThreadPoolCo *co = opaque;
+    AioContext *ctx = co->ctx;
 
     co->ret = ret;
+    aio_context_acquire(ctx);
     qemu_coroutine_enter(co->co, NULL);
+    aio_context_release(ctx);
 }
 
 int coroutine_fn thread_pool_submit_co(ThreadPool *pool, ThreadPoolFunc *func,
                                        void *arg)
 {
-    ThreadPoolCo tpc = { .co = qemu_coroutine_self(), .ret = -EINPROGRESS };
+    ThreadPoolCo tpc = {
+        .co = qemu_coroutine_self(),
+        .ctx = pool->ctx,
+        .ret = -EINPROGRESS
+    };
     assert(qemu_in_coroutine());
     thread_pool_submit_aio(pool, func, arg, thread_pool_co_cb, &tpc);
     qemu_coroutine_yield();
-- 
2.4.3