From: Kevin Wolf
Subject: [Qemu-block] [PULL 05/31] block: Move throttling fields from BDS to BB
Date: Thu, 19 May 2016 17:21:43 +0200

This patch changes where the throttling state is stored (it used to
live in the BlockDriverState, now it lives in the BlockBackend), but it
doesn't actually make throttling a BB-level feature yet. For example,
throttling is still disabled when the BDS is detached from the BB.
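
To illustrate the new access pattern (a sketch for orientation only, not
part of the patch): code that used to test bs->throttle_state now has to
reach the state through the attached BlockBackend. The helper name below
is hypothetical.

    static bool bdrv_throttling_enabled(BlockDriverState *bs)
    {
        /* A BDS with no attached BlockBackend has no throttling state */
        return bs->blk && blk_get_public(bs->blk)->throttle_state != NULL;
    }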

Signed-off-by: Kevin Wolf <address@hidden>
Acked-by: Stefan Hajnoczi <address@hidden>
---
 block.c                         |  22 +++----
 block/block-backend.c           |  13 ++--
 block/io.c                      |  36 +++++++----
 block/qapi.c                    |   2 +-
 block/throttle-groups.c         | 129 +++++++++++++++++++++-------------------
 blockdev.c                      |   4 +-
 include/block/block_int.h       |  13 ----
 include/block/throttle-groups.h |   2 +-
 include/sysemu/block-backend.h  |  11 +++-
 tests/test-throttle.c           |  28 +++++----
 10 files changed, 142 insertions(+), 118 deletions(-)

diff --git a/block.c b/block.c
index a789844..f723060 100644
--- a/block.c
+++ b/block.c
@@ -237,8 +237,6 @@ BlockDriverState *bdrv_new(void)
         QLIST_INIT(&bs->op_blockers[i]);
     }
     notifier_with_return_list_init(&bs->before_write_notifiers);
-    qemu_co_queue_init(&bs->throttled_reqs[0]);
-    qemu_co_queue_init(&bs->throttled_reqs[1]);
     bs->refcnt = 1;
     bs->aio_context = qemu_get_aio_context();
 
@@ -1525,7 +1523,7 @@ static int bdrv_open_inherit(BlockDriverState **pbs, const char *filename,
             return -ENODEV;
         }
 
-        if (bs->throttle_state) {
+        if (bs->blk && blk_get_public(bs->blk)->throttle_state) {
             error_setg(errp, "Cannot reference an existing block device for "
                        "which I/O throttling is enabled");
             return -EINVAL;
@@ -2124,7 +2122,7 @@ static void bdrv_close(BlockDriverState *bs)
     assert(!bs->job);
 
     /* Disable I/O limits and drain all pending throttled requests */
-    if (bs->throttle_state) {
+    if (bs->blk && blk_get_public(bs->blk)->throttle_state) {
         bdrv_io_limits_disable(bs);
     }
 
@@ -2257,8 +2255,8 @@ static void swap_feature_fields(BlockDriverState *bs_top,
     bdrv_move_feature_fields(bs_top, bs_new);
     bdrv_move_feature_fields(bs_new, &tmp);
 
-    assert(!bs_new->throttle_state);
-    if (bs_top->throttle_state) {
+    assert(!bs_new->blk);
+    if (bs_top->blk && blk_get_public(bs_top->blk)->throttle_state) {
         /*
          * FIXME Need to break I/O throttling with graph manipulations
          * temporarily because of conflicting invariants (3. will go away when
@@ -2300,11 +2298,11 @@ void bdrv_append(BlockDriverState *bs_new, BlockDriverState *bs_top)
     assert(!bdrv_requests_pending(bs_new));
 
     bdrv_ref(bs_top);
-    change_parent_backing_link(bs_top, bs_new);
 
     /* Some fields always stay on top of the backing file chain */
     swap_feature_fields(bs_top, bs_new);
 
+    change_parent_backing_link(bs_top, bs_new);
     bdrv_set_backing_hd(bs_new, bs_top);
     bdrv_unref(bs_top);
 
@@ -3676,8 +3674,9 @@ void bdrv_detach_aio_context(BlockDriverState *bs)
         baf->detach_aio_context(baf->opaque);
     }
 
-    if (bs->throttle_state) {
-        throttle_timers_detach_aio_context(&bs->throttle_timers);
+    if (bs->blk && blk_get_public(bs->blk)->throttle_state) {
+        throttle_timers_detach_aio_context(
+            &blk_get_public(bs->blk)->throttle_timers);
     }
     if (bs->drv->bdrv_detach_aio_context) {
         bs->drv->bdrv_detach_aio_context(bs);
@@ -3712,8 +3711,9 @@ void bdrv_attach_aio_context(BlockDriverState *bs,
     if (bs->drv->bdrv_attach_aio_context) {
         bs->drv->bdrv_attach_aio_context(bs, new_context);
     }
-    if (bs->throttle_state) {
-        throttle_timers_attach_aio_context(&bs->throttle_timers, new_context);
+    if (bs->blk && blk_get_public(bs->blk)->throttle_state) {
+        throttle_timers_attach_aio_context(
+            &blk_get_public(bs->blk)->throttle_timers, new_context);
     }
 
     QLIST_FOREACH(ban, &bs->aio_notifiers, list) {
diff --git a/block/block-backend.c b/block/block-backend.c
index 964a205..6880659 100644
--- a/block/block-backend.c
+++ b/block/block-backend.c
@@ -107,8 +107,12 @@ BlockBackend *blk_new(Error **errp)
 
     blk = g_new0(BlockBackend, 1);
     blk->refcnt = 1;
+    qemu_co_queue_init(&blk->public.throttled_reqs[0]);
+    qemu_co_queue_init(&blk->public.throttled_reqs[1]);
+
     notifier_list_init(&blk->remove_bs_notifiers);
     notifier_list_init(&blk->insert_bs_notifiers);
+
     QTAILQ_INSERT_TAIL(&block_backends, blk, link);
     return blk;
 }
@@ -437,7 +441,7 @@ void blk_remove_bs(BlockBackend *blk)
     notifier_list_notify(&blk->remove_bs_notifiers, blk);
 
     blk_update_root_state(blk);
-    if (blk->root->bs->throttle_state) {
+    if (blk->public.throttle_state) {
         bdrv_io_limits_disable(blk->root->bs);
     }
 
@@ -795,7 +799,6 @@ static int blk_prw(BlockBackend *blk, int64_t offset, uint8_t *buf,
 int blk_pread_unthrottled(BlockBackend *blk, int64_t offset, uint8_t *buf,
                           int count)
 {
-    BlockDriverState *bs = blk_bs(blk);
     int ret;
 
     ret = blk_check_byte_request(blk, offset, count);
@@ -803,9 +806,9 @@ int blk_pread_unthrottled(BlockBackend *blk, int64_t offset, uint8_t *buf,
         return ret;
     }
 
-    bdrv_no_throttling_begin(bs);
+    bdrv_no_throttling_begin(blk_bs(blk));
     ret = blk_pread(blk, offset, buf, count);
-    bdrv_no_throttling_end(bs);
+    bdrv_no_throttling_end(blk_bs(blk));
     return ret;
 }
 
@@ -1524,7 +1527,7 @@ void blk_update_root_state(BlockBackend *blk)
         g_free(blk->root_state.throttle_group);
         throttle_group_unref(blk->root_state.throttle_state);
     }
-    if (blk->root->bs->throttle_state) {
+    if (blk->public.throttle_state) {
         const char *name = throttle_group_get_name(blk);
         blk->root_state.throttle_group = g_strdup(name);
         blk->root_state.throttle_state = throttle_group_incref(name);
diff --git a/block/io.c b/block/io.c
index f6fb868..bdbaa1c 100644
--- a/block/io.c
+++ b/block/io.c
@@ -55,20 +55,31 @@ void bdrv_set_io_limits(BlockDriverState *bs,
 
 void bdrv_no_throttling_begin(BlockDriverState *bs)
 {
-    if (bs->io_limits_disabled++ == 0) {
-        throttle_group_restart_bs(bs);
+    if (!bs->blk) {
+        return;
+    }
+
+    if (blk_get_public(bs->blk)->io_limits_disabled++ == 0) {
+        throttle_group_restart_blk(bs->blk);
     }
 }
 
 void bdrv_no_throttling_end(BlockDriverState *bs)
 {
-    assert(bs->io_limits_disabled);
-    --bs->io_limits_disabled;
+    BlockBackendPublic *blkp;
+
+    if (!bs->blk) {
+        return;
+    }
+
+    blkp = blk_get_public(bs->blk);
+    assert(blkp->io_limits_disabled);
+    --blkp->io_limits_disabled;
 }
 
 void bdrv_io_limits_disable(BlockDriverState *bs)
 {
-    assert(bs->throttle_state);
+    assert(blk_get_public(bs->blk)->throttle_state);
     bdrv_no_throttling_begin(bs);
     throttle_group_unregister_blk(bs->blk);
     bdrv_no_throttling_end(bs);
@@ -77,14 +88,16 @@ void bdrv_io_limits_disable(BlockDriverState *bs)
 /* should be called before bdrv_set_io_limits if a limit is set */
 void bdrv_io_limits_enable(BlockDriverState *bs, const char *group)
 {
-    assert(!bs->throttle_state);
+    BlockBackendPublic *blkp = blk_get_public(bs->blk);
+
+    assert(!blkp->throttle_state);
     throttle_group_register_blk(bs->blk, group);
 }
 
 void bdrv_io_limits_update_group(BlockDriverState *bs, const char *group)
 {
     /* this bs is not part of any group */
-    if (!bs->throttle_state) {
+    if (!blk_get_public(bs->blk)->throttle_state) {
         return;
     }
 
@@ -178,14 +191,15 @@ void bdrv_disable_copy_on_read(BlockDriverState *bs)
 bool bdrv_requests_pending(BlockDriverState *bs)
 {
     BdrvChild *child;
+    BlockBackendPublic *blkp = bs->blk ? blk_get_public(bs->blk) : NULL;
 
     if (!QLIST_EMPTY(&bs->tracked_requests)) {
         return true;
     }
-    if (!qemu_co_queue_empty(&bs->throttled_reqs[0])) {
+    if (blkp && !qemu_co_queue_empty(&blkp->throttled_reqs[0])) {
         return true;
     }
-    if (!qemu_co_queue_empty(&bs->throttled_reqs[1])) {
+    if (blkp && !qemu_co_queue_empty(&blkp->throttled_reqs[1])) {
         return true;
     }
 
@@ -1070,7 +1084,7 @@ int coroutine_fn bdrv_co_preadv(BlockDriverState *bs,
     }
 
     /* throttling disk I/O */
-    if (bs->throttle_state) {
+    if (bs->blk && blk_get_public(bs->blk)->throttle_state) {
         throttle_group_co_io_limits_intercept(bs, bytes, false);
     }
 
@@ -1431,7 +1445,7 @@ int coroutine_fn bdrv_co_pwritev(BlockDriverState *bs,
     }
 
     /* throttling disk I/O */
-    if (bs->throttle_state) {
+    if (bs->blk && blk_get_public(bs->blk)->throttle_state) {
         throttle_group_co_io_limits_intercept(bs, bytes, true);
     }
 
diff --git a/block/qapi.c b/block/qapi.c
index a3e514d..1e4bb8a 100644
--- a/block/qapi.c
+++ b/block/qapi.c
@@ -67,7 +67,7 @@ BlockDeviceInfo *bdrv_block_device_info(BlockBackend *blk,
     info->backing_file_depth = bdrv_get_backing_file_depth(bs);
     info->detect_zeroes = bs->detect_zeroes;
 
-    if (bs->throttle_state) {
+    if (bs->blk && blk_get_public(bs->blk)->throttle_state) {
         ThrottleConfig cfg;
 
         throttle_group_get_config(bs, &cfg);
diff --git a/block/throttle-groups.c b/block/throttle-groups.c
index e50ccaa..56dc311 100644
--- a/block/throttle-groups.c
+++ b/block/throttle-groups.c
@@ -30,7 +30,7 @@
 #include "sysemu/qtest.h"
 
 /* The ThrottleGroup structure (with its ThrottleState) is shared
- * among different BlockDriverState and it's independent from
+ * among different BlockBackends and it's independent from
  * AioContext, so in order to use it from different threads it needs
  * its own locking.
  *
@@ -40,18 +40,18 @@
  * The whole ThrottleGroup structure is private and invisible to
  * outside users, that only use it through its ThrottleState.
  *
- * In addition to the ThrottleGroup structure, BlockDriverState has
+ * In addition to the ThrottleGroup structure, BlockBackendPublic has
  * fields that need to be accessed by other members of the group and
- * therefore also need to be protected by this lock. Once a BDS is
- * registered in a group those fields can be accessed by other threads
- * any time.
+ * therefore also need to be protected by this lock. Once a
+ * BlockBackend is registered in a group those fields can be accessed
+ * by other threads any time.
  *
  * Again, all this is handled internally and is mostly transparent to
  * the outside. The 'throttle_timers' field however has an additional
  * constraint because it may be temporarily invalid (see for example
  * bdrv_set_aio_context()). Therefore in this file a thread will
- * access some other BDS's timers only after verifying that that BDS
- * has throttled requests in the queue.
+ * access some other BlockBackend's timers only after verifying that
+ * that BlockBackend has throttled requests in the queue.
  */
 typedef struct ThrottleGroup {
     char *name; /* This is constant during the lifetime of the group */
@@ -141,8 +141,8 @@ void throttle_group_unref(ThrottleState *ts)
  */
 const char *throttle_group_get_name(BlockBackend *blk)
 {
-    ThrottleGroup *tg = container_of(blk_bs(blk)->throttle_state,
-                                     ThrottleGroup, ts);
+    BlockBackendPublic *blkp = blk_get_public(blk);
+    ThrottleGroup *tg = container_of(blkp->throttle_state, ThrottleGroup, ts);
     return tg->name;
 }
 
@@ -156,10 +156,10 @@ const char *throttle_group_get_name(BlockBackend *blk)
  */
 static BlockBackend *throttle_group_next_blk(BlockBackend *blk)
 {
-    BlockDriverState *bs = blk_bs(blk);
-    ThrottleState *ts = bs->throttle_state;
+    BlockBackendPublic *blkp = blk_get_public(blk);
+    ThrottleState *ts = blkp->throttle_state;
     ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
-    BlockBackendPublic *next = QLIST_NEXT(blk_get_public(blk), round_robin);
+    BlockBackendPublic *next = QLIST_NEXT(blkp, round_robin);
 
     if (!next) {
         next = QLIST_FIRST(&tg->head);
@@ -180,15 +180,15 @@ static BlockBackend *throttle_group_next_blk(BlockBackend *blk)
  */
 static BlockBackend *next_throttle_token(BlockBackend *blk, bool is_write)
 {
-    ThrottleGroup *tg = container_of(blk_bs(blk)->throttle_state,
-                                     ThrottleGroup, ts);
+    BlockBackendPublic *blkp = blk_get_public(blk);
+    ThrottleGroup *tg = container_of(blkp->throttle_state, ThrottleGroup, ts);
     BlockBackend *token, *start;
 
     start = token = tg->tokens[is_write];
 
     /* get next bs round in round robin style */
     token = throttle_group_next_blk(token);
-    while (token != start && !blk_bs(token)->pending_reqs[is_write]) {
+    while (token != start && !blkp->pending_reqs[is_write]) {
         token = throttle_group_next_blk(token);
     }
 
@@ -196,7 +196,7 @@ static BlockBackend *next_throttle_token(BlockBackend *blk, bool is_write)
      * then decide the token is the current bs because chances are
      * the current bs get the current request queued.
      */
-    if (token == start && !blk_bs(token)->pending_reqs[is_write]) {
+    if (token == start && !blkp->pending_reqs[is_write]) {
         token = blk;
     }
 
@@ -215,12 +215,13 @@ static BlockBackend *next_throttle_token(BlockBackend *blk, bool is_write)
  */
 static bool throttle_group_schedule_timer(BlockBackend *blk, bool is_write)
 {
-    ThrottleState *ts = blk_bs(blk)->throttle_state;
-    ThrottleTimers *tt = &blk_bs(blk)->throttle_timers;
+    BlockBackendPublic *blkp = blk_get_public(blk);
+    ThrottleState *ts = blkp->throttle_state;
+    ThrottleTimers *tt = &blkp->throttle_timers;
     ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
     bool must_wait;
 
-    if (blk_bs(blk)->io_limits_disabled) {
+    if (blkp->io_limits_disabled) {
         return false;
     }
 
@@ -249,14 +250,14 @@ static bool throttle_group_schedule_timer(BlockBackend *blk, bool is_write)
  */
 static void schedule_next_request(BlockBackend *blk, bool is_write)
 {
-    BlockDriverState *bs = blk_bs(blk);
-    ThrottleGroup *tg = container_of(bs->throttle_state, ThrottleGroup, ts);
+    BlockBackendPublic *blkp = blk_get_public(blk);
+    ThrottleGroup *tg = container_of(blkp->throttle_state, ThrottleGroup, ts);
     bool must_wait;
     BlockBackend *token;
 
     /* Check if there's any pending request to schedule next */
     token = next_throttle_token(blk, is_write);
-    if (!blk_bs(token)->pending_reqs[is_write]) {
+    if (!blkp->pending_reqs[is_write]) {
         return;
     }
 
@@ -265,12 +266,12 @@ static void schedule_next_request(BlockBackend *blk, bool is_write)
 
     /* If it doesn't have to wait, queue it for immediate execution */
     if (!must_wait) {
-        /* Give preference to requests from the current bs */
+        /* Give preference to requests from the current blk */
         if (qemu_in_coroutine() &&
-            qemu_co_queue_next(&bs->throttled_reqs[is_write])) {
+            qemu_co_queue_next(&blkp->throttled_reqs[is_write])) {
             token = blk;
         } else {
-            ThrottleTimers *tt = &blk_bs(token)->throttle_timers;
+            ThrottleTimers *tt = &blkp->throttle_timers;
             int64_t now = qemu_clock_get_ns(tt->clock_type);
             timer_mod(tt->timers[is_write], now + 1);
             tg->any_timer_armed[is_write] = true;
@@ -294,37 +295,40 @@ void coroutine_fn throttle_group_co_io_limits_intercept(BlockDriverState *bs,
     bool must_wait;
     BlockBackend *token;
 
-    ThrottleGroup *tg = container_of(bs->throttle_state, ThrottleGroup, ts);
+    BlockBackend *blk = bs->blk;
+    BlockBackendPublic *blkp = blk_get_public(blk);
+    ThrottleGroup *tg = container_of(blkp->throttle_state, ThrottleGroup, ts);
     qemu_mutex_lock(&tg->lock);
 
     /* First we check if this I/O has to be throttled. */
-    token = next_throttle_token(bs->blk, is_write);
+    token = next_throttle_token(blk, is_write);
     must_wait = throttle_group_schedule_timer(token, is_write);
 
     /* Wait if there's a timer set or queued requests of this type */
-    if (must_wait || bs->pending_reqs[is_write]) {
-        bs->pending_reqs[is_write]++;
+    if (must_wait || blkp->pending_reqs[is_write]) {
+        blkp->pending_reqs[is_write]++;
         qemu_mutex_unlock(&tg->lock);
-        qemu_co_queue_wait(&bs->throttled_reqs[is_write]);
+        qemu_co_queue_wait(&blkp->throttled_reqs[is_write]);
         qemu_mutex_lock(&tg->lock);
-        bs->pending_reqs[is_write]--;
+        blkp->pending_reqs[is_write]--;
     }
 
     /* The I/O will be executed, so do the accounting */
-    throttle_account(bs->throttle_state, is_write, bytes);
+    throttle_account(blkp->throttle_state, is_write, bytes);
 
     /* Schedule the next request */
-    schedule_next_request(bs->blk, is_write);
+    schedule_next_request(blk, is_write);
 
     qemu_mutex_unlock(&tg->lock);
 }
 
-void throttle_group_restart_bs(BlockDriverState *bs)
+void throttle_group_restart_blk(BlockBackend *blk)
 {
+    BlockBackendPublic *blkp = blk_get_public(blk);
     int i;
 
     for (i = 0; i < 2; i++) {
-        while (qemu_co_enter_next(&bs->throttled_reqs[i])) {
+        while (qemu_co_enter_next(&blkp->throttled_reqs[i])) {
             ;
         }
     }
@@ -339,8 +343,9 @@ void throttle_group_restart_bs(BlockDriverState *bs)
  */
 void throttle_group_config(BlockDriverState *bs, ThrottleConfig *cfg)
 {
-    ThrottleTimers *tt = &bs->throttle_timers;
-    ThrottleState *ts = bs->throttle_state;
+    BlockBackendPublic *blkp = blk_get_public(bs->blk);
+    ThrottleTimers *tt = &blkp->throttle_timers;
+    ThrottleState *ts = blkp->throttle_state;
     ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
     qemu_mutex_lock(&tg->lock);
     /* throttle_config() cancels the timers */
@@ -353,8 +358,8 @@ void throttle_group_config(BlockDriverState *bs, ThrottleConfig *cfg)
     throttle_config(ts, tt, cfg);
     qemu_mutex_unlock(&tg->lock);
 
-    qemu_co_enter_next(&bs->throttled_reqs[0]);
-    qemu_co_enter_next(&bs->throttled_reqs[1]);
+    qemu_co_enter_next(&blkp->throttled_reqs[0]);
+    qemu_co_enter_next(&blkp->throttled_reqs[1]);
 }
 
 /* Get the throttle configuration from a particular group. Similar to
@@ -366,7 +371,8 @@ void throttle_group_config(BlockDriverState *bs, ThrottleConfig *cfg)
  */
 void throttle_group_get_config(BlockDriverState *bs, ThrottleConfig *cfg)
 {
-    ThrottleState *ts = bs->throttle_state;
+    BlockBackendPublic *blkp = blk_get_public(bs->blk);
+    ThrottleState *ts = blkp->throttle_state;
     ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
     qemu_mutex_lock(&tg->lock);
     throttle_get_config(ts, cfg);
@@ -376,12 +382,13 @@ void throttle_group_get_config(BlockDriverState *bs, ThrottleConfig *cfg)
 /* ThrottleTimers callback. This wakes up a request that was waiting
  * because it had been throttled.
  *
- * @bs:        the BlockDriverState whose request had been throttled
+ * @blk:       the BlockBackend whose request had been throttled
  * @is_write:  the type of operation (read/write)
  */
-static void timer_cb(BlockDriverState *bs, bool is_write)
+static void timer_cb(BlockBackend *blk, bool is_write)
 {
-    ThrottleState *ts = bs->throttle_state;
+    BlockBackendPublic *blkp = blk_get_public(blk);
+    ThrottleState *ts = blkp->throttle_state;
     ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
     bool empty_queue;
 
@@ -391,13 +398,13 @@ static void timer_cb(BlockDriverState *bs, bool is_write)
     qemu_mutex_unlock(&tg->lock);
 
     /* Run the request that was waiting for this timer */
-    empty_queue = !qemu_co_enter_next(&bs->throttled_reqs[is_write]);
+    empty_queue = !qemu_co_enter_next(&blkp->throttled_reqs[is_write]);
 
     /* If the request queue was empty then we have to take care of
      * scheduling the next one */
     if (empty_queue) {
         qemu_mutex_lock(&tg->lock);
-        schedule_next_request(bs->blk, is_write);
+        schedule_next_request(blk, is_write);
         qemu_mutex_unlock(&tg->lock);
     }
 }
@@ -422,7 +429,7 @@ static void write_timer_cb(void *opaque)
 void throttle_group_register_blk(BlockBackend *blk, const char *groupname)
 {
     int i;
-    BlockDriverState *bs = blk_bs(blk);
+    BlockBackendPublic *blkp = blk_get_public(blk);
     ThrottleState *ts = throttle_group_incref(groupname);
     ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
     int clock_type = QEMU_CLOCK_REALTIME;
@@ -432,7 +439,7 @@ void throttle_group_register_blk(BlockBackend *blk, const char *groupname)
         clock_type = QEMU_CLOCK_VIRTUAL;
     }
 
-    bs->throttle_state = ts;
+    blkp->throttle_state = ts;
 
     qemu_mutex_lock(&tg->lock);
     /* If the ThrottleGroup is new set this BlockBackend as the token */
@@ -442,14 +449,14 @@ void throttle_group_register_blk(BlockBackend *blk, const char *groupname)
         }
     }
 
-    QLIST_INSERT_HEAD(&tg->head, blk_get_public(blk), round_robin);
+    QLIST_INSERT_HEAD(&tg->head, blkp, round_robin);
 
-    throttle_timers_init(&bs->throttle_timers,
-                         bdrv_get_aio_context(bs),
+    throttle_timers_init(&blkp->throttle_timers,
+                         blk_get_aio_context(blk),
                          clock_type,
                          read_timer_cb,
                          write_timer_cb,
-                         bs);
+                         blk);
 
     qemu_mutex_unlock(&tg->lock);
 }
@@ -466,19 +473,19 @@ void throttle_group_register_blk(BlockBackend *blk, const char *groupname)
  */
 void throttle_group_unregister_blk(BlockBackend *blk)
 {
-    BlockDriverState *bs = blk_bs(blk);
-    ThrottleGroup *tg = container_of(bs->throttle_state, ThrottleGroup, ts);
+    BlockBackendPublic *blkp = blk_get_public(blk);
+    ThrottleGroup *tg = container_of(blkp->throttle_state, ThrottleGroup, ts);
     int i;
 
-    assert(bs->pending_reqs[0] == 0 && bs->pending_reqs[1] == 0);
-    assert(qemu_co_queue_empty(&bs->throttled_reqs[0]));
-    assert(qemu_co_queue_empty(&bs->throttled_reqs[1]));
+    assert(blkp->pending_reqs[0] == 0 && blkp->pending_reqs[1] == 0);
+    assert(qemu_co_queue_empty(&blkp->throttled_reqs[0]));
+    assert(qemu_co_queue_empty(&blkp->throttled_reqs[1]));
 
     qemu_mutex_lock(&tg->lock);
     for (i = 0; i < 2; i++) {
         if (tg->tokens[i] == blk) {
             BlockBackend *token = throttle_group_next_blk(blk);
-            /* Take care of the case where this is the last bs in the group */
+            /* Take care of the case where this is the last blk in the group */
             if (token == blk) {
                 token = NULL;
             }
@@ -486,13 +493,13 @@ void throttle_group_unregister_blk(BlockBackend *blk)
         }
     }
 
-    /* remove the current bs from the list */
-    QLIST_REMOVE(blk_get_public(blk), round_robin);
-    throttle_timers_destroy(&bs->throttle_timers);
+    /* remove the current blk from the list */
+    QLIST_REMOVE(blkp, round_robin);
+    throttle_timers_destroy(&blkp->throttle_timers);
     qemu_mutex_unlock(&tg->lock);
 
     throttle_group_unref(&tg->ts);
-    bs->throttle_state = NULL;
+    blkp->throttle_state = NULL;
 }
 
 static void throttle_groups_init(void)
diff --git a/blockdev.c b/blockdev.c
index 8106ca7..3211a40 100644
--- a/blockdev.c
+++ b/blockdev.c
@@ -2726,14 +2726,14 @@ void qmp_block_set_io_throttle(const char *device, int64_t bps, int64_t bps_rd,
     if (throttle_enabled(&cfg)) {
         /* Enable I/O limits if they're not enabled yet, otherwise
          * just update the throttling group. */
-        if (!bs->throttle_state) {
+        if (!blk_get_public(bs->blk)->throttle_state) {
             bdrv_io_limits_enable(bs, has_group ? group : device);
         } else if (has_group) {
             bdrv_io_limits_update_group(bs, group);
         }
         /* Set the new throttling configuration */
         bdrv_set_io_limits(bs, &cfg);
-    } else if (bs->throttle_state) {
+    } else if (blk_get_public(bs->blk)->throttle_state) {
         /* If all throttling settings are set to 0, disable I/O limits */
         bdrv_io_limits_disable(bs);
     }
diff --git a/include/block/block_int.h b/include/block/block_int.h
index 3f5d2b1..2bbc2c0 100644
--- a/include/block/block_int.h
+++ b/include/block/block_int.h
@@ -26,7 +26,6 @@
 
 #include "block/accounting.h"
 #include "block/block.h"
-#include "block/throttle-groups.h"
 #include "qemu/option.h"
 #include "qemu/queue.h"
 #include "qemu/coroutine.h"
@@ -424,18 +423,6 @@ struct BlockDriverState {
     /* number of in-flight serialising requests */
     unsigned int serialising_in_flight;
 
-    /* I/O throttling.
-     * throttle_state tells us if this BDS has I/O limits configured.
-     * io_limits_disabled tells us if they are currently being enforced */
-    CoQueue      throttled_reqs[2];
-    unsigned int io_limits_disabled;
-
-    /* The following fields are protected by the ThrottleGroup lock.
-     * See the ThrottleGroup documentation for details. */
-    ThrottleState *throttle_state;
-    ThrottleTimers throttle_timers;
-    unsigned       pending_reqs[2];
-
     /* Offset after the highest byte written to */
     uint64_t wr_highest_offset;
 
diff --git a/include/block/throttle-groups.h b/include/block/throttle-groups.h
index bd55a34..840ba44 100644
--- a/include/block/throttle-groups.h
+++ b/include/block/throttle-groups.h
@@ -38,7 +38,7 @@ void throttle_group_get_config(BlockDriverState *bs, ThrottleConfig *cfg);
 
 void throttle_group_register_blk(BlockBackend *blk, const char *groupname);
 void throttle_group_unregister_blk(BlockBackend *blk);
-void throttle_group_restart_bs(BlockDriverState *bs);
+void throttle_group_restart_blk(BlockBackend *blk);
 
 void coroutine_fn throttle_group_co_io_limits_intercept(BlockDriverState *bs,
                                                         unsigned int bytes,
diff --git a/include/sysemu/block-backend.h b/include/sysemu/block-backend.h
index 1dcd70e..08d27a8 100644
--- a/include/sysemu/block-backend.h
+++ b/include/sysemu/block-backend.h
@@ -14,6 +14,7 @@
 #define BLOCK_BACKEND_H
 
 #include "qemu/iov.h"
+#include "block/throttle-groups.h"
 
 /*
  * TODO Have to include block/block.h for a bunch of block layer
@@ -63,9 +64,17 @@ typedef struct BlockDevOps {
  * fields that must be public. This is in particular for QLIST_ENTRY() and
 * friends so that BlockBackends can be kept in lists outside block-backend.c */
 typedef struct BlockBackendPublic {
-    /* I/O throttling */
+    /* I/O throttling.
+     * throttle_state tells us if this BlockBackend has I/O limits configured.
+     * io_limits_disabled tells us if they are currently being enforced */
+    CoQueue      throttled_reqs[2];
+    unsigned int io_limits_disabled;
+
     /* The following fields are protected by the ThrottleGroup lock.
      * See the ThrottleGroup documentation for details. */
+    ThrottleState *throttle_state;
+    ThrottleTimers throttle_timers;
+    unsigned       pending_reqs[2];
     QLIST_ENTRY(BlockBackendPublic) round_robin;
 } BlockBackendPublic;
 
diff --git a/tests/test-throttle.c b/tests/test-throttle.c
index 1a322f1..a020068 100644
--- a/tests/test-throttle.c
+++ b/tests/test-throttle.c
@@ -576,31 +576,35 @@ static void test_groups(void)
 {
     ThrottleConfig cfg1, cfg2;
     BlockBackend *blk1, *blk2, *blk3;
-    BlockDriverState *bdrv1, *bdrv2, *bdrv3;
+    BlockBackendPublic *blkp1, *blkp2, *blkp3;
+    BlockDriverState *bdrv1, *bdrv3;
 
     blk1 = blk_new_with_bs(&error_abort);
     blk2 = blk_new_with_bs(&error_abort);
     blk3 = blk_new_with_bs(&error_abort);
 
     bdrv1 = blk_bs(blk1);
-    bdrv2 = blk_bs(blk2);
     bdrv3 = blk_bs(blk3);
 
-    g_assert(bdrv1->throttle_state == NULL);
-    g_assert(bdrv2->throttle_state == NULL);
-    g_assert(bdrv3->throttle_state == NULL);
+    blkp1 = blk_get_public(blk1);
+    blkp2 = blk_get_public(blk2);
+    blkp3 = blk_get_public(blk3);
+
+    g_assert(blkp1->throttle_state == NULL);
+    g_assert(blkp2->throttle_state == NULL);
+    g_assert(blkp3->throttle_state == NULL);
 
     throttle_group_register_blk(blk1, "bar");
     throttle_group_register_blk(blk2, "foo");
     throttle_group_register_blk(blk3, "bar");
 
-    g_assert(bdrv1->throttle_state != NULL);
-    g_assert(bdrv2->throttle_state != NULL);
-    g_assert(bdrv3->throttle_state != NULL);
+    g_assert(blkp1->throttle_state != NULL);
+    g_assert(blkp2->throttle_state != NULL);
+    g_assert(blkp3->throttle_state != NULL);
 
     g_assert(!strcmp(throttle_group_get_name(blk1), "bar"));
     g_assert(!strcmp(throttle_group_get_name(blk2), "foo"));
-    g_assert(bdrv1->throttle_state == bdrv3->throttle_state);
+    g_assert(blkp1->throttle_state == blkp3->throttle_state);
 
     /* Setting the config of a group member affects the whole group */
     throttle_config_init(&cfg1);
@@ -628,9 +632,9 @@ static void test_groups(void)
     throttle_group_unregister_blk(blk2);
     throttle_group_unregister_blk(blk3);
 
-    g_assert(bdrv1->throttle_state == NULL);
-    g_assert(bdrv2->throttle_state == NULL);
-    g_assert(bdrv3->throttle_state == NULL);
+    g_assert(blkp1->throttle_state == NULL);
+    g_assert(blkp2->throttle_state == NULL);
+    g_assert(blkp3->throttle_state == NULL);
 }
 
 int main(int argc, char **argv)
-- 
1.8.3.1