qemu-devel
[Top][All Lists]
Advanced

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Qemu-devel] [2.3 PATCH v7 07/10] qmp: Add support of "dirty-bitmap"


From: Max Reitz
Subject: Re: [Qemu-devel] [2.3 PATCH v7 07/10] qmp: Add support of "dirty-bitmap" sync mode for drive-backup
Date: Wed, 26 Nov 2014 15:19:46 +0100
User-agent: Mozilla/5.0 (X11; Linux x86_64; rv:31.0) Gecko/20100101 Thunderbird/31.2.0

On 2014-11-25 at 20:46, John Snow wrote:
From: Fam Zheng <address@hidden>

For "dirty-bitmap" sync mode, the block job will iterate through the
given dirty bitmap to decide if a sector needs backup (backup all the
dirty clusters and skip clean ones), just as allocation conditions of
"top" sync mode.

There are two bitmap use modes for sync=dirty-bitmap:

  - reset: backup job makes a copy of bitmap and resets the original
    one.
  - consume: backup job makes the original anonymous (invisible to user)
    and releases it after use.

Signed-off-by: Fam Zheng <address@hidden>
Signed-off-by: John Snow <address@hidden>
---
  block.c                   |   5 ++
  block/backup.c            | 128 ++++++++++++++++++++++++++++++++++++++--------
  block/mirror.c            |   4 ++
  blockdev.c                |  18 ++++++-
  hmp.c                     |   4 +-
  include/block/block.h     |   1 +
  include/block/block_int.h |   6 +++
  qapi/block-core.json      |  30 +++++++++--
  qmp-commands.hx           |   7 +--
  9 files changed, 174 insertions(+), 29 deletions(-)

diff --git a/block.c b/block.c
index 7217066..cf93148 100644
--- a/block.c
+++ b/block.c
@@ -5454,6 +5454,11 @@ void bdrv_dirty_iter_init(BlockDriverState *bs,
      hbitmap_iter_init(hbi, bitmap->bitmap, 0);
  }
+void bdrv_dirty_iter_set(HBitmapIter *hbi, int64_t offset)
+{
+    hbitmap_iter_init(hbi, hbi->hb, offset);
+}
+
  void bdrv_set_dirty(BlockDriverState *bs, int64_t cur_sector,
                      int nr_sectors)
  {
diff --git a/block/backup.c b/block/backup.c
index 792e655..8e7d135 100644
--- a/block/backup.c
+++ b/block/backup.c
@@ -37,6 +37,10 @@ typedef struct CowRequest {
  typedef struct BackupBlockJob {
      BlockJob common;
      BlockDriverState *target;
+    /* bitmap for sync=dirty-bitmap */
+    BdrvDirtyBitmap *sync_bitmap;
+    /* dirty bitmap granularity */
+    int64_t sync_bitmap_gran;
      MirrorSyncMode sync_mode;
      RateLimit limit;
      BlockdevOnError on_source_error;
@@ -242,6 +246,31 @@ static void backup_complete(BlockJob *job, void *opaque)
      g_free(data);
  }
+static bool yield_and_check(BackupBlockJob *job)
+{
+    if (block_job_is_cancelled(&job->common)) {
+        return true;
+    }
+
+    /* we need to yield so that qemu_aio_flush() returns.
+     * (without, VM does not reboot)
+     */
+    if (job->common.speed) {
+        uint64_t delay_ns = ratelimit_calculate_delay(&job->limit,
+                                                      job->sectors_read);
+        job->sectors_read = 0;
+        block_job_sleep_ns(&job->common, QEMU_CLOCK_REALTIME, delay_ns);
+    } else {
+        block_job_sleep_ns(&job->common, QEMU_CLOCK_REALTIME, 0);
+    }
+
+    if (block_job_is_cancelled(&job->common)) {
+        return true;
+    }
+
+    return false;
+}
+
  static void coroutine_fn backup_run(void *opaque)
  {
      BackupBlockJob *job = opaque;
@@ -254,13 +283,13 @@ static void coroutine_fn backup_run(void *opaque)
      };
      int64_t start, end;
      int ret = 0;
+    bool error_is_read;
QLIST_INIT(&job->inflight_reqs);
      qemu_co_rwlock_init(&job->flush_rwlock);
start = 0;
-    end = DIV_ROUND_UP(job->common.len / BDRV_SECTOR_SIZE,
-                       BACKUP_SECTORS_PER_CLUSTER);
+    end = DIV_ROUND_UP(job->common.len, BACKUP_CLUSTER_SIZE);
job->bitmap = hbitmap_alloc(end, 0);

@@ -278,28 +307,45 @@ static void coroutine_fn backup_run(void *opaque)
              qemu_coroutine_yield();
              job->common.busy = true;
          }
+    } else if (job->sync_mode == MIRROR_SYNC_MODE_DIRTY_BITMAP) {
+        /* Dirty Bitmap sync has a slightly different iteration method */
+        HBitmapIter hbi;
+        int64_t sector;
+        int64_t cluster;
+        bool polyrhythmic;
+
+        bdrv_dirty_iter_init(bs, job->sync_bitmap, &hbi);
+        /* Does the granularity happen to match our backup cluster size? */
+        polyrhythmic = (job->sync_bitmap_gran != BACKUP_CLUSTER_SIZE);
+
+        /* Find the next dirty /sector/ and copy that /cluster/ */
+        while ((sector = hbitmap_iter_next(&hbi)) != -1) {
+            if (yield_and_check(job)) {
+                goto leave;
+            }
+            cluster = sector / BACKUP_SECTORS_PER_CLUSTER;
+
+            do {
+                ret = backup_do_cow(bs, cluster * BACKUP_SECTORS_PER_CLUSTER,
+                                    BACKUP_SECTORS_PER_CLUSTER, 
&error_is_read);
+                if ((ret < 0) &&
+                    backup_error_action(job, error_is_read, -ret) ==
+                    BLOCK_ERROR_ACTION_REPORT) {
+                    goto leave;
+                }
+            } while (ret < 0);
+
+            /* Advance (or rewind) our iterator if we need to. */
+            if (polyrhythmic) {
+                bdrv_dirty_iter_set(&hbi,
+                                    (cluster + 1) * 
BACKUP_SECTORS_PER_CLUSTER);
+            }
+        }
      } else {
          /* Both FULL and TOP SYNC_MODE's require copying.. */
          for (; start < end; start++) {

There is an empty line remaining after the next line (I'm not writing this directly below or above that empty line because that would look strange); we could remove it.

-            bool error_is_read;
- if (block_job_is_cancelled(&job->common)) {
-                break;
-            }
-
-            /* we need to yield so that qemu_aio_flush() returns.
-             * (without, VM does not reboot)
-             */
-            if (job->common.speed) {
-                uint64_t delay_ns = ratelimit_calculate_delay(
-                        &job->limit, job->sectors_read);
-                job->sectors_read = 0;
-                block_job_sleep_ns(&job->common, QEMU_CLOCK_REALTIME, 
delay_ns);
-            } else {
-                block_job_sleep_ns(&job->common, QEMU_CLOCK_REALTIME, 0);
-            }
-
-            if (block_job_is_cancelled(&job->common)) {
+            if (yield_and_check(job)) {
                  break;
              }
@@ -351,12 +397,16 @@ static void coroutine_fn backup_run(void *opaque)
          }
      }
+leave:
      notifier_with_return_remove(&before_write);
/* wait until pending backup_do_cow() calls have completed */
      qemu_co_rwlock_wrlock(&job->flush_rwlock);
      qemu_co_rwlock_unlock(&job->flush_rwlock);
+ if (job->sync_bitmap) {
+        bdrv_release_dirty_bitmap(bs, job->sync_bitmap);
+    }
      hbitmap_free(job->bitmap);
bdrv_iostatus_disable(target);
@@ -368,12 +418,15 @@ static void coroutine_fn backup_run(void *opaque)
void backup_start(BlockDriverState *bs, BlockDriverState *target,
                    int64_t speed, MirrorSyncMode sync_mode,
+                  BdrvDirtyBitmap *sync_bitmap,
+                  BitmapUseMode bitmap_mode,
                    BlockdevOnError on_source_error,
                    BlockdevOnError on_target_error,
                    BlockCompletionFunc *cb, void *opaque,
                    Error **errp)
  {
      int64_t len;
+    BdrvDirtyBitmap *original;
assert(bs);
      assert(target);
@@ -386,6 +439,35 @@ void backup_start(BlockDriverState *bs, BlockDriverState 
*target,
          return;
      }
+ if (sync_mode == MIRROR_SYNC_MODE_DIRTY_BITMAP) {
+        if (!sync_bitmap) {
+            error_setg(errp, "must provide a valid bitmap name for "
+                             "\"dirty-bitmap\" sync mode");
+            return;
+        }
+
+        switch (bitmap_mode) {
+        case BITMAP_USE_MODE_RESET:
+            original = sync_bitmap;
+            sync_bitmap = bdrv_copy_dirty_bitmap(bs, sync_bitmap, NULL);
+            bdrv_reset_dirty_bitmap(bs, original);
+            break;
+        case BITMAP_USE_MODE_CONSUME:
+            bdrv_dirty_bitmap_make_anon(bs, sync_bitmap);
+            break;
+        default:
+            g_assert_not_reached();
+            error_setg(errp, "Invalid BitmapUseMode (0x%02x) given to 
backup_start.", bitmap_mode);
+            return;

This looks fun. Without NDEBUG, crashing is fine, but with NDEBUG (if someone would ever be crazy enough to define that) it's just a normal error?

I'd drop the error_setg() and the return statement; and I'd also make it an abort(), but that's a different story: g_assert_not_reached() is clearer about what it's for, but abort() always works (although you'd really have to be crazy to define NDEBUG for qemu), so I'm fine with either.

Well, I'm also fine with how it's here (with error_setg() and return), if only because the g_assert_not_reached() should never ever be skipped, but it just looks funny.

+        }
+        bdrv_disable_dirty_bitmap(bs, sync_bitmap);
+    } else if (sync_bitmap) {
+        error_setg(errp,
+                   "sync_bitmap provided to backup_run, but received an 
incompatible sync_mode (0x%02x)",
+                   sync_mode);

We do have BitmapUseMode_lookup, maybe you want to use that.

+        return;
+    }
+
      len = bdrv_getlength(bs);
      if (len < 0) {
          error_setg_errno(errp, -len, "unable to get length for '%s'",
@@ -403,6 +485,12 @@ void backup_start(BlockDriverState *bs, BlockDriverState 
*target,
      job->on_target_error = on_target_error;
      job->target = target;
      job->sync_mode = sync_mode;
+    job->sync_bitmap = sync_mode == MIRROR_SYNC_MODE_DIRTY_BITMAP ?
+                       sync_bitmap : NULL;
+    if (sync_bitmap) {
+        job->sync_bitmap_gran =
+            bdrv_dirty_bitmap_granularity(bs, job->sync_bitmap);
+    }
      job->common.len = len;
      job->common.co = qemu_coroutine_create(backup_run);
      qemu_coroutine_enter(job->common.co, job);
diff --git a/block/mirror.c b/block/mirror.c
index 3633632..af91ae0 100644
--- a/block/mirror.c
+++ b/block/mirror.c
@@ -714,6 +714,10 @@ void mirror_start(BlockDriverState *bs, BlockDriverState 
*target,
      bool is_none_mode;
      BlockDriverState *base;
+ if (mode == MIRROR_SYNC_MODE_DIRTY_BITMAP) {
+        error_setg(errp, "Sync mode 'dirty-bitmap' not supported");
+        return;
+    }
      is_none_mode = mode == MIRROR_SYNC_MODE_NONE;
      base = mode == MIRROR_SYNC_MODE_TOP ? bs->backing_hd : NULL;
      mirror_start_job(bs, target, replaces,
diff --git a/blockdev.c b/blockdev.c
index baaf902..adf841a 100644
--- a/blockdev.c
+++ b/blockdev.c
@@ -1470,6 +1470,8 @@ static void drive_backup_prepare(BlkTransactionState 
*common, Error **errp)
                       backup->sync,
                       backup->has_mode, backup->mode,
                       backup->has_speed, backup->speed,
+                     backup->has_bitmap, backup->bitmap,
+                     backup->has_bitmap_use_mode, backup->bitmap_use_mode,
                       backup->has_on_source_error, backup->on_source_error,
                       backup->has_on_target_error, backup->on_target_error,
                       &local_err);
@@ -2245,6 +2247,8 @@ void qmp_drive_backup(const char *device, const char 
*target,
                        enum MirrorSyncMode sync,
                        bool has_mode, enum NewImageMode mode,
                        bool has_speed, int64_t speed,
+                      bool has_bitmap, const char *bitmap,
+                      bool has_bitmap_use_mode, enum BitmapUseMode bitmap_mode,
                        bool has_on_source_error, BlockdevOnError 
on_source_error,
                        bool has_on_target_error, BlockdevOnError 
on_target_error,
                        Error **errp)
@@ -2252,6 +2256,7 @@ void qmp_drive_backup(const char *device, const char 
*target,
      BlockDriverState *bs;
      BlockDriverState *target_bs;
      BlockDriverState *source = NULL;
+    BdrvDirtyBitmap *bmap = NULL;
      AioContext *aio_context;
      BlockDriver *drv = NULL;
      Error *local_err = NULL;
@@ -2347,7 +2352,18 @@ void qmp_drive_backup(const char *device, const char 
*target,
bdrv_set_aio_context(target_bs, aio_context);

-    backup_start(bs, target_bs, speed, sync, on_source_error, on_target_error,
+    if (has_bitmap) {
+        bmap = bdrv_find_dirty_bitmap(bs, bitmap);
+        if (!bmap) {
+            error_setg(errp, "has_bitmap was set, but bitmap '%s' could not be 
found",

has_bitmap is something internal to qemu. I don't know the exact path until this point, but I think what must happen to trigger this statement is the user just gives a bitmap name which is not available. So the error message should just say "I'm so very sorry, my dearest user, but I have looked far and wide and could not locate a bitmap of that name." Or something like that.

So I'd translate "has_bitmap was set" to "A bitmap name was specified".


Aside from these small issues regarding error messages and an empty line, the code looks correct, so (although bdrv_dirty_iter_set() still looks a bit magic to me, I can't explain it):

Reviewed-by: Max Reitz <address@hidden>



reply via email to

[Prev in Thread] Current Thread [Next in Thread]