qemu-devel


From: Zhi Yong Wu
Subject: [Qemu-devel] [PATCH v9 3/4] block: add the block throttling algorithm
Date: Fri, 28 Oct 2011 18:02:22 +0800

Signed-off-by: Zhi Yong Wu <address@hidden>
---
 block.c |  360 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++--
 1 files changed, 348 insertions(+), 12 deletions(-)

diff --git a/block.c b/block.c
index 8f92950..2d85b64 100644
--- a/block.c
+++ b/block.c
@@ -63,9 +63,11 @@ static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs,
                                          int64_t sector_num, int nb_sectors,
                                          QEMUIOVector *iov);
 static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
-    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov);
+    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
+    void *opaque, BlockAPIType co_type);
 static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
-    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov);
+    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
+    void *opaque, BlockAPIType co_type);
 static BlockDriverAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
                                                int64_t sector_num,
                                                QEMUIOVector *qiov,
@@ -75,6 +77,13 @@ static BlockDriverAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
                                                bool is_write);
 static void coroutine_fn bdrv_co_do_rw(void *opaque);
 
+static bool bdrv_exceed_bps_limits(BlockDriverState *bs, int nb_sectors,
+        bool is_write, double elapsed_time, uint64_t *wait);
+static bool bdrv_exceed_iops_limits(BlockDriverState *bs, bool is_write,
+        double elapsed_time, uint64_t *wait);
+static bool bdrv_exceed_io_limits(BlockDriverState *bs, int nb_sectors,
+        bool is_write, int64_t *wait);
+
 static QTAILQ_HEAD(, BlockDriverState) bdrv_states =
     QTAILQ_HEAD_INITIALIZER(bdrv_states);
 
@@ -161,6 +170,26 @@ bool bdrv_io_limits_enabled(BlockDriverState *bs)
          || io_limits->iops[BLOCK_IO_LIMIT_TOTAL];
 }
 
+static BlockQueueAIOCB *bdrv_io_limits_perform(BlockDriverState *bs,
+                       BlockRequestHandler *handler, int64_t sector_num,
+                       QEMUIOVector *qiov, int nb_sectors)
+{
+    BlockQueueAIOCB *ret = NULL;
+    int64_t wait_time = -1;
+
+    if (bdrv_exceed_io_limits(bs, nb_sectors, false, &wait_time)) {
+        ret = qemu_block_queue_enqueue(bs->block_queue, bs, handler,
+                                       sector_num, qiov,
+                                       nb_sectors, NULL, NULL);
+        if (wait_time != -1) {
+            qemu_mod_timer(bs->block_timer,
+                           wait_time + qemu_get_clock_ns(vm_clock));
+        }
+    }
+
+    return ret;
+}
+
 /* check if the path starts with "<protocol>:" */
 static int path_has_protocol(const char *path)
 {
@@ -1112,10 +1141,12 @@ static void coroutine_fn bdrv_rw_co_entry(void *opaque)
 
     if (!rwco->is_write) {
         rwco->ret = bdrv_co_do_readv(rwco->bs, rwco->sector_num,
-                                     rwco->nb_sectors, rwco->qiov);
+                                     rwco->nb_sectors, rwco->qiov,
+                                     rwco, BDRV_API_SYNC);
     } else {
         rwco->ret = bdrv_co_do_writev(rwco->bs, rwco->sector_num,
-                                      rwco->nb_sectors, rwco->qiov);
+                                      rwco->nb_sectors, rwco->qiov,
+                                      rwco, BDRV_API_SYNC);
     }
 }
 
@@ -1331,6 +1362,17 @@ typedef struct BlockDriverAIOCBCoroutine {
     void *blkq_acb;
 } BlockDriverAIOCBCoroutine;
 
+static void bdrv_co_rw_bh(void *opaque)
+{
+    BlockDriverAIOCBCoroutine *acb = opaque;
+
+    acb->common.cb(acb->common.opaque, acb->req.error);
+
+    acb->blkq_acb = NULL;
+    qemu_bh_delete(acb->bh);
+    qemu_aio_release(acb);
+}
+
 /* block I/O throttling */
 typedef struct CoroutineCB {
     BlockDriverState *bs;
@@ -1339,6 +1381,25 @@ typedef struct CoroutineCB {
     QEMUIOVector *qiov;
 } CoroutineCB;
 
+static bool bdrv_io_limits_skip_query(void *opaque,
+                                      BlockAPIType co_type) {
+    bool limit_skip;
+    RwCo *rwco;
+    BlockDriverAIOCBCoroutine *aioco;
+
+    if (co_type == BDRV_API_SYNC) {
+        rwco = opaque;
+        limit_skip = rwco->limit_skip;
+    } else if (co_type == BDRV_API_ASYNC) {
+        aioco = opaque;
+        limit_skip = aioco->limit_skip;
+    } else {
+        abort();
+    }
+
+    return limit_skip;
+}
+
 static void bdrv_io_limits_skip_set(void *opaque,
                                     BlockAPIType co_type,
                                     bool cb_skip,
@@ -1379,13 +1440,52 @@ void bdrv_io_limits_issue_request(void *opaque,
     }
 }
 
+static int bdrv_io_limits_intercept(BlockDriverState *bs,
+    BlockRequestHandler *handler, int64_t sector_num,
+    QEMUIOVector *qiov, int nb_sectors, void *opaque, BlockAPIType co_type)
+{
+    BlockQueueAIOCB *blkq_acb = NULL;
+    BlockQueueRetType ret = BDRV_BLKQ_PASS;
+
+    blkq_acb = bdrv_io_limits_perform(bs, handler,
+                                      sector_num, qiov,
+                                      nb_sectors);
+    if (blkq_acb) {
+        blkq_acb->co_type = co_type;
+        blkq_acb->cocb = opaque;
+        if (co_type == BDRV_API_ASYNC) {
+            BlockDriverAIOCBCoroutine *aioco = opaque;
+            aioco->blkq_acb = blkq_acb;
+            aioco->cb_skip  = true;
+        } else if (co_type == BDRV_API_CO) {
+            blkq_acb->co = qemu_coroutine_self();
+        }
+
+        ret = BDRV_BLKQ_ENQ_FIRST;
+    } else {
+        if (bs->block_queue->flushing) {
+            if (bs->block_queue->limit_exceeded) {
+                ret = BDRV_BLKQ_ENQ_AGAIN;
+            } else {
+                blkq_acb = opaque;
+                bdrv_io_limits_issue_request(blkq_acb, co_type);
+                ret = BDRV_BLKQ_DEQ_PASS;
+            }
+        }
+    }
+
+    return ret;
+}
+
 /*
  * Handle a read request in coroutine context
  */
 static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
-    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
+    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
+    void *opaque, BlockAPIType co_type)
 {
     BlockDriver *drv = bs->drv;
+    int ret;
 
     if (!drv) {
         return -ENOMEDIUM;
@@ -1394,6 +1494,28 @@ static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
         return -EIO;
     }
 
+    /* throttling disk read I/O */
+    if (bs->io_limits_enabled
+        && !bdrv_io_limits_skip_query(opaque, co_type)) {
+        ret = bdrv_io_limits_intercept(bs, bdrv_co_do_readv,
+                                       sector_num, qiov,
+                                       nb_sectors, opaque, co_type);
+        switch (ret) {
+        case BDRV_BLKQ_ENQ_FIRST:
+            if (co_type == BDRV_API_SYNC) {
+                ret = NOT_DONE;
+            } else if (co_type == BDRV_API_ASYNC) {
+                ret = 0;
+            } else if (co_type == BDRV_API_CO) {
+                qemu_coroutine_yield();
+                break;
+            }
+        case BDRV_BLKQ_ENQ_AGAIN:
+        case BDRV_BLKQ_DEQ_PASS:
+            return ret;
+        }
+    }
+
     return drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);
 }
 
@@ -1402,14 +1524,23 @@ int coroutine_fn bdrv_co_readv(BlockDriverState *bs, int64_t sector_num,
 {
     trace_bdrv_co_readv(bs, sector_num, nb_sectors);
 
-    return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov);
+    CoroutineCB cocb = {
+        .bs = bs,
+        .sector_num = sector_num,
+        .nb_sectors = nb_sectors,
+        .qiov = qiov,
+    };
+
+    return bdrv_co_do_readv(bs, sector_num, nb_sectors,
+                            qiov, &cocb, BDRV_API_CO);
 }
 
 /*
  * Handle a write request in coroutine context
  */
 static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
-    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
+    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
+    void *opaque, BlockAPIType co_type)
 {
     BlockDriver *drv = bs->drv;
     int ret;
@@ -1424,6 +1555,28 @@ static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
         return -EIO;
     }
 
+    /* throttling disk write I/O */
+    if (bs->io_limits_enabled
+        && !bdrv_io_limits_skip_query(opaque, co_type)) {
+        ret = bdrv_io_limits_intercept(bs, bdrv_co_do_writev,
+                                       sector_num, qiov,
+                                       nb_sectors, opaque, co_type);
+        switch (ret) {
+        case BDRV_BLKQ_ENQ_FIRST:
+            if (co_type == BDRV_API_SYNC) {
+                ret = NOT_DONE;
+            } else if (co_type == BDRV_API_ASYNC) {
+                ret = 0;
+            } else if (co_type == BDRV_API_CO) {
+                qemu_coroutine_yield();
+                break;
+            }
+        case BDRV_BLKQ_ENQ_AGAIN:
+        case BDRV_BLKQ_DEQ_PASS:
+            return ret;
+        }
+    }
+
     ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov);
 
     if (bs->dirty_bitmap) {
@@ -1442,7 +1595,15 @@ int coroutine_fn bdrv_co_writev(BlockDriverState *bs, int64_t sector_num,
 {
     trace_bdrv_co_writev(bs, sector_num, nb_sectors);
 
-    return bdrv_co_do_writev(bs, sector_num, nb_sectors, qiov);
+    CoroutineCB cocb = {
+        .bs = bs,
+        .sector_num = sector_num,
+        .nb_sectors = nb_sectors,
+        .qiov = qiov,
+    };
+
+    return bdrv_co_do_writev(bs, sector_num, nb_sectors,
+                             qiov, &cocb, BDRV_API_CO);
 }
 
 /**
@@ -2709,6 +2870,170 @@ void bdrv_aio_cancel(BlockDriverAIOCB *acb)
     acb->pool->cancel(acb);
 }
 
+/* block I/O throttling */
+static bool bdrv_exceed_bps_limits(BlockDriverState *bs, int nb_sectors,
+                 bool is_write, double elapsed_time, uint64_t *wait) {
+    uint64_t bps_limit = 0;
+    double   bytes_limit, bytes_disp, bytes_res;
+    double   slice_time, wait_time;
+
+    if (bs->io_limits.bps[BLOCK_IO_LIMIT_TOTAL]) {
+        bps_limit = bs->io_limits.bps[BLOCK_IO_LIMIT_TOTAL];
+    } else if (bs->io_limits.bps[is_write]) {
+        bps_limit = bs->io_limits.bps[is_write];
+    } else {
+        if (wait) {
+            *wait = 0;
+        }
+
+        return false;
+    }
+
+    slice_time = bs->slice_end - bs->slice_start;
+    slice_time /= (NANOSECONDS_PER_SECOND);
+    bytes_limit = bps_limit * slice_time;
+    bytes_disp  = bs->nr_bytes[is_write] - bs->io_disps.bytes[is_write];
+    if (bs->io_limits.bps[BLOCK_IO_LIMIT_TOTAL]) {
+        bytes_disp += bs->nr_bytes[!is_write] - bs->io_disps.bytes[!is_write];
+    }
+
+    bytes_res   = (unsigned) nb_sectors * BDRV_SECTOR_SIZE;
+
+    if (bytes_disp + bytes_res <= bytes_limit) {
+        if (wait) {
+            *wait = 0;
+        }
+
+        return false;
+    }
+
+    /* Calc approx time to dispatch */
+    wait_time = (bytes_disp + bytes_res) / bps_limit - elapsed_time;
+
+    bs->slice_time = wait_time * BLOCK_IO_SLICE_TIME * 10;
+    bs->slice_end += bs->slice_time - 3 * BLOCK_IO_SLICE_TIME;
+    if (wait) {
+        *wait = wait_time * BLOCK_IO_SLICE_TIME * 10;
+    }
+
+    return true;
+}
+
+static bool bdrv_exceed_iops_limits(BlockDriverState *bs, bool is_write,
+                             double elapsed_time, uint64_t *wait) {
+    uint64_t iops_limit = 0;
+    double   ios_limit, ios_disp;
+    double   slice_time, wait_time;
+
+    if (bs->io_limits.iops[BLOCK_IO_LIMIT_TOTAL]) {
+        iops_limit = bs->io_limits.iops[BLOCK_IO_LIMIT_TOTAL];
+    } else if (bs->io_limits.iops[is_write]) {
+        iops_limit = bs->io_limits.iops[is_write];
+    } else {
+        if (wait) {
+            *wait = 0;
+        }
+
+        return false;
+    }
+
+    slice_time = bs->slice_end - bs->slice_start;
+    slice_time /= (NANOSECONDS_PER_SECOND);
+    ios_limit  = iops_limit * slice_time;
+    ios_disp   = bs->nr_ops[is_write] - bs->io_disps.ios[is_write];
+    if (bs->io_limits.iops[BLOCK_IO_LIMIT_TOTAL]) {
+        ios_disp += bs->nr_ops[!is_write] - bs->io_disps.ios[!is_write];
+    }
+
+    if (ios_disp + 1 <= ios_limit) {
+        if (wait) {
+            *wait = 0;
+        }
+
+        return false;
+    }
+
+    /* Calc approx time to dispatch */
+    wait_time = (ios_disp + 1) / iops_limit;
+    if (wait_time > elapsed_time) {
+        wait_time = wait_time - elapsed_time;
+    } else {
+        wait_time = 0;
+    }
+
+    bs->slice_time = wait_time * BLOCK_IO_SLICE_TIME * 10;
+    bs->slice_end += bs->slice_time - 3 * BLOCK_IO_SLICE_TIME;
+    if (wait) {
+        *wait = wait_time * BLOCK_IO_SLICE_TIME * 10;
+    }
+
+    return true;
+}
+
+static bool bdrv_exceed_io_limits(BlockDriverState *bs, int nb_sectors,
+                           bool is_write, int64_t *wait) {
+    int64_t  now, max_wait;
+    uint64_t bps_wait = 0, iops_wait = 0;
+    double   elapsed_time;
+    int      bps_ret, iops_ret;
+
+    now = qemu_get_clock_ns(vm_clock);
+    if ((bs->slice_start < now)
+        && (bs->slice_end > now)) {
+        bs->slice_end = now + bs->slice_time;
+    } else {
+        bs->slice_time  =  5 * BLOCK_IO_SLICE_TIME;
+        bs->slice_start = now;
+        bs->slice_end   = now + bs->slice_time;
+
+        bs->io_disps.bytes[is_write]  = bs->nr_bytes[is_write];
+        bs->io_disps.bytes[!is_write] = bs->nr_bytes[!is_write];
+
+        bs->io_disps.ios[is_write]    = bs->nr_ops[is_write];
+        bs->io_disps.ios[!is_write]   = bs->nr_ops[!is_write];
+    }
+
+    /* If a limit was exceeded, immediately queue this request */
+    if (!bs->block_queue->flushing
+        && qemu_block_queue_has_pending(bs->block_queue)) {
+        if (bs->io_limits.bps[BLOCK_IO_LIMIT_TOTAL]
+            || bs->io_limits.bps[is_write] || bs->io_limits.iops[is_write]
+            || bs->io_limits.iops[BLOCK_IO_LIMIT_TOTAL]) {
+            if (wait) {
+                *wait = -1;
+            }
+
+            return true;
+        }
+    }
+
+    elapsed_time  = now - bs->slice_start;
+    elapsed_time  /= (NANOSECONDS_PER_SECOND);
+
+    bps_ret  = bdrv_exceed_bps_limits(bs, nb_sectors,
+                                      is_write, elapsed_time, &bps_wait);
+    iops_ret = bdrv_exceed_iops_limits(bs, is_write,
+                                      elapsed_time, &iops_wait);
+    if (bps_ret || iops_ret) {
+        max_wait = bps_wait > iops_wait ? bps_wait : iops_wait;
+        if (wait) {
+            *wait = max_wait;
+        }
+
+        now = qemu_get_clock_ns(vm_clock);
+        if (bs->slice_end < now + max_wait) {
+            bs->slice_end = now + max_wait;
+        }
+
+        return true;
+    }
+
+    if (wait) {
+        *wait = 0;
+    }
+
+    return false;
+}
 
 /**************************************************************/
 /* async block device emulation */
@@ -2798,6 +3123,15 @@ static BlockDriverAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
 static void bdrv_aio_co_cancel_em(BlockDriverAIOCB *blockacb)
 {
     qemu_aio_flush();
+
+    BlockDriverAIOCBCoroutine *co_acb =
+            container_of(blockacb, BlockDriverAIOCBCoroutine, common);
+    if (co_acb->blkq_acb) {
+        BlockDriverAIOCB *acb_com = (BlockDriverAIOCB *)co_acb->blkq_acb;
+        assert(acb_com->bs);
+        acb_com->pool->cancel(acb_com);
+        co_acb->blkq_acb = NULL;
+    }
 }
 
 static AIOPool bdrv_em_co_aio_pool = {
@@ -2822,14 +3156,16 @@ static void coroutine_fn bdrv_co_do_rw(void *opaque)
 
     if (!acb->is_write) {
         acb->req.error = bdrv_co_do_readv(bs, acb->req.sector,
-            acb->req.nb_sectors, acb->req.qiov);
+            acb->req.nb_sectors, acb->req.qiov, acb, BDRV_API_ASYNC);
     } else {
         acb->req.error = bdrv_co_do_writev(bs, acb->req.sector,
-            acb->req.nb_sectors, acb->req.qiov);
+            acb->req.nb_sectors, acb->req.qiov, acb, BDRV_API_ASYNC);
     }
 
-    acb->bh = qemu_bh_new(bdrv_co_em_bh, acb);
-    qemu_bh_schedule(acb->bh);
+    if (!acb->cb_skip) {
+        acb->bh = qemu_bh_new(bdrv_co_rw_bh, acb);
+        qemu_bh_schedule(acb->bh);
+    }
 }
 
 static BlockDriverAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
-- 
1.7.6
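
[Editor's note, not part of the patch: the heart of the algorithm is the slice-based
accounting in bdrv_exceed_bps_limits()/bdrv_exceed_iops_limits() above, which compares
what has already been dispatched in the current time slice against the budget
bps_limit * slice_time and turns any overshoot into a wait time. The standalone C
sketch below illustrates only that arithmetic; the ThrottleSketch type, exceeds_bps()
helper, main() values and the 100 ms slice constant are simplified stand-ins, not QEMU
code.]

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NS_PER_SEC       1000000000.0
#define IO_SLICE_TIME_NS 100000000.0   /* assumed 100 ms slice unit */

typedef struct {
    double   slice_start_ns;    /* start of the current accounting slice  */
    double   slice_end_ns;      /* end of the current accounting slice    */
    double   bytes_dispatched;  /* bytes already dispatched in this slice */
    uint64_t bps_limit;         /* configured bytes-per-second limit      */
} ThrottleSketch;

/*
 * Return true (and a wait time in seconds) when dispatching 'bytes' now
 * would push the slice over its byte budget, i.e. when
 * bytes_dispatched + bytes > bps_limit * slice_length.
 */
static bool exceeds_bps(const ThrottleSketch *t, double bytes,
                        double elapsed_s, double *wait_s)
{
    double slice_s     = (t->slice_end_ns - t->slice_start_ns) / NS_PER_SEC;
    double bytes_limit = (double)t->bps_limit * slice_s;

    if (t->bytes_dispatched + bytes <= bytes_limit) {
        *wait_s = 0;
        return false;
    }

    /* Approximate time at which the budget admits this request, minus the
     * time that has already elapsed inside the slice.                     */
    *wait_s = (t->bytes_dispatched + bytes) / (double)t->bps_limit - elapsed_s;
    return true;
}

int main(void)
{
    ThrottleSketch t = {
        .slice_start_ns   = 0,
        .slice_end_ns     = 5 * IO_SLICE_TIME_NS,  /* 500 ms slice */
        .bytes_dispatched = 400 * 1024,
        .bps_limit        = 1024 * 1024,           /* 1 MiB/s */
    };
    double wait_s;

    /* A 256 KiB request arriving 0.3 s into the slice: 400K + 256K exceeds
     * the 512 KiB budget, so it must wait about 656K/1M - 0.3 = 0.34 s.    */
    if (exceeds_bps(&t, 256 * 1024, 0.3, &wait_s)) {
        printf("throttled, retry in %.2f s\n", wait_s);
    } else {
        printf("dispatch immediately\n");
    }
    return 0;
}

[The patch then converts that wait back to nanoseconds (the wait_time *
BLOCK_IO_SLICE_TIME * 10 expressions, assuming the 100 ms BLOCK_IO_SLICE_TIME defined
earlier in the series), extends slice_end accordingly, and bdrv_exceed_io_limits()
takes the larger of the bps and iops waits before arming bs->block_timer so the queued
request is retried when the budget allows.]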



