From: Ming Lei
Subject: [Qemu-devel] [PATCH 10/13] block/linux-aio.c: prepare for elastic resource allocation
Date: Sun, 9 Nov 2014 15:42:55 +0800

Since we are going to support AioContext-wide batch I/O submission,
I/O resources should be allocated according to how many backends
share the same AioContext.

Signed-off-by: Ming Lei <address@hidden>
---
 block/linux-aio.c |   38 +++++++++++++++++++++++++++++++-------
 1 file changed, 31 insertions(+), 7 deletions(-)
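
To make the sizing policy concrete before the diff, here is a minimal
standalone sketch of it, assuming libaio (link with -laio). MAX_EVENTS,
BACKEND_CAP and setup_elastic() are illustrative stand-ins, not names
taken from this patch:

#include <libaio.h>
#include <stdio.h>

#define MAX_EVENTS     128  /* assumed per-backend event budget */
#define BACKEND_CAP    10   /* mirrors the "nr > 10" clamp below */

/* Reserve MAX_EVENTS per backend; if the kernel refuses (e.g. the
 * fs.aio-max-nr limit is low), degrade one backend at a time. */
static int setup_elastic(unsigned nr_backends, io_context_t *ctx)
{
    unsigned nr = nr_backends ? nr_backends : 1;

    if (nr > BACKEND_CAP) {
        nr = BACKEND_CAP;
    }

    while (nr > 0) {
        if (io_setup(MAX_EVENTS * nr, ctx) == 0) {
            return MAX_EVENTS * nr;  /* events actually reserved */
        }
        nr--;
    }
    return -1;
}

int main(void)
{
    io_context_t ctx = 0;
    int nr_evt = setup_elastic(4, &ctx);

    if (nr_evt < 0) {
        fprintf(stderr, "io_setup failed\n");
        return 1;
    }
    printf("reserved %d events\n", nr_evt);
    io_destroy(ctx);
    return 0;
}

The submission queue and the completion ring are then sized from the
same figure, which is why the patch records it in s->nr_evt instead of
re-deriving it on every completion.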

diff --git a/block/linux-aio.c b/block/linux-aio.c
index e3e0532..e219b80 100644
--- a/block/linux-aio.c
+++ b/block/linux-aio.c
@@ -43,7 +43,7 @@ struct qemu_laiocb {
  * so in future the IO queue should be per AIO context.
  */
 typedef struct {
-    struct iocb *iocbs[MAX_QUEUED_IO];
+    struct iocb **iocbs;
     int plugged;
     unsigned int size;
     unsigned int idx;
@@ -66,6 +66,7 @@ struct qemu_laio_state {
     LaioQueue *io_q;
 
     /* I/O completion processing */
+    int nr_evt;
     QEMUBH *completion_bh;
     struct io_event *events;
     int event_idx;
@@ -73,6 +74,7 @@ struct qemu_laio_state {
 
     /* All BS in the list shared this 'qemu_laio_state' */
     QLIST_HEAD(, LaioTrackedBs) tracked_bs;
+    int nr_bs;
     AioContext *aio_context;
 };
 
@@ -131,7 +133,7 @@ static void qemu_laio_completion_bh(void *opaque)
     if (s->event_idx == s->event_max) {
         do {
             struct timespec ts = { 0 };
-            s->event_max = io_getevents(s->ctx, MAX_EVENTS, MAX_EVENTS,
+            s->event_max = io_getevents(s->ctx, s->nr_evt, s->nr_evt,
                                         s->events, &ts);
         } while (s->event_max == -EINTR);
 
@@ -201,11 +203,13 @@ static const AIOCBInfo laio_aiocb_info = {
     .cancel_async       = laio_cancel,
 };
 
-static void ioq_init(LaioQueue *io_q)
+static void ioq_init(LaioQueue *io_q, unsigned size)
 {
-    io_q->size = MAX_QUEUED_IO;
+    io_q->size = size;
     io_q->idx = 0;
     io_q->plugged = 0;
+
+    io_q->iocbs = g_malloc(sizeof(*io_q->iocbs) * size);
 }
 
 static void abort_queue(struct qemu_laio_state *s)
@@ -385,16 +389,34 @@ static int laio_alloc_resources(AioContext *ctx,
         struct qemu_laio_state *s)
 {
     LaioQueue *ioq;
+    unsigned nr = s->nr_bs ? : 1;
+
+    /* return immediately if resources are allocated already */
+    if (likely(s->io_q)) {
+        return 0;
+    }
+
+    if (nr > 10) {
+        nr = 10;
+    }
+
+    while (nr > 0) {
+        if (io_setup(MAX_EVENTS * nr, &s->ctx) == 0) {
+            break;
+        }
+        nr--;
+    }
 
-    if (io_setup(MAX_EVENTS, &s->ctx) != 0) {
+    if (nr == 0) {
         return -1;
     }
 
+    s->nr_evt = nr * MAX_EVENTS;
     ioq = g_malloc0(sizeof(*s->io_q));
-    ioq_init(ioq);
+    ioq_init(ioq, MAX_QUEUED_IO * nr);
     ioq->retry = aio_bh_new(ctx, ioq_submit_retry, s);
 
-    s->events = g_malloc(sizeof(*s->events) * MAX_EVENTS);
+    s->events = g_malloc(sizeof(*s->events) * s->nr_evt);
     s->io_q = ioq;
 
     s->completion_bh = aio_bh_new(ctx, qemu_laio_completion_bh, s);
@@ -464,6 +486,7 @@ void laio_detach_aio_context(void *s_, BlockDriverState *bs,
         }
     }
 
+    qs->state->nr_bs--;
     if (!aio_detach_aio_bs(old_context, bs)) {
         /* assign new master aio bs for the aio context */
         if (old_context->master_aio_bs == bs) {
@@ -494,6 +517,7 @@ void laio_attach_aio_context(void *s_, BlockDriverState *bs,
 
     tbs->bs = bs;
     QLIST_INSERT_HEAD(&qs->state->tracked_bs, tbs, list);
+    qs->state->nr_bs++;
 }
 
 void *laio_init(void)
-- 
1.7.9.5
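
For completeness, the completion-side counterpart of the
qemu_laio_completion_bh() hunk can be read in isolation as the sketch
below; again libaio is assumed, and drain_events() is an illustrative
name rather than one from this patch:

#include <errno.h>
#include <time.h>
#include <libaio.h>

/* Poll for up to nr_evt completions without blocking (zero timeout),
 * retrying when the syscall is interrupted, as the BH above does. */
static int drain_events(io_context_t ctx, struct io_event *events,
                        int nr_evt)
{
    int n;

    do {
        struct timespec ts = { 0 };
        n = io_getevents(ctx, nr_evt, nr_evt, events, &ts);
    } while (n == -EINTR);

    return n;  /* number of completions, or negative errno */
}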