Re: [Qemu-devel] [PATCH 5/9] block: BdrvDirtyBitmap store/restore interface


From: Vladimir Sementsov-Ogievskiy
Subject: Re: [Qemu-devel] [PATCH 5/9] block: BdrvDirtyBitmap store/restore interface
Date: Wed, 14 Jan 2015 14:27:35 +0300
User-agent: Mozilla/5.0 (X11; Linux x86_64; rv:24.0) Gecko/20100101 Thunderbird/24.5.0

As in the previous patch, rename store/restore to serialize/deserialize...

Hmm. In this case, wouldn't it be better to include serialization of the granularity, the name, and the name length in these functions?
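
For illustration, a rough sketch of what such a combined serializer could look like. The helper name, the explicit granularity parameter, and the layout (granularity, then name length, then name) are only assumptions for this sketch, not part of the patch:

/* Sketch only: write granularity, name length and the name itself ahead
 * of the bitmap payload, so one call captures everything needed to
 * recreate the bitmap on the destination. This would live next to the
 * other helpers in block.c, where BdrvDirtyBitmap is defined. */
static uint64_t bdrv_dbm_serialize_header(const BdrvDirtyBitmap *bitmap,
                                          uint64_t granularity,
                                          uint8_t *buf)
{
    uint32_t name_len = strlen(bitmap->name);   /* named bitmaps only */
    uint8_t *p = buf;

    memcpy(p, &granularity, sizeof(granularity));
    p += sizeof(granularity);
    memcpy(p, &name_len, sizeof(name_len));
    p += sizeof(name_len);
    memcpy(p, bitmap->name, name_len);
    p += name_len;

    return p - buf;   /* bytes consumed by the header */
}

A matching deserializer would read the same three fields in the same order before handing off to the data restore path.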

Best regards,
Vladimir

On 11.12.2014 17:17, Vladimir Sementsov-Ogievskiy wrote:
Add several functions to provide the necessary access to BdrvDirtyBitmap for
block-migration.c.

Signed-off-by: Vladimir Sementsov-Ogievskiy <address@hidden>
---
  block.c               | 62 +++++++++++++++++++++++++++++++++++++++++++++++++--
  include/block/block.h | 10 +++++++++
2 files changed, 70 insertions(+), 2 deletions(-)

diff --git a/block.c b/block.c
index 6edf1dc..7d42620 100644
--- a/block.c
+++ b/block.c
@@ -5511,8 +5511,66 @@ void bdrv_reset_dirty_bitmap(BlockDriverState *bs, BdrvDirtyBitmap *bitmap,
      hbitmap_reset(bitmap->bitmap, cur_sector, nr_sectors);
  }
-static void bdrv_set_dirty(BlockDriverState *bs, int64_t cur_sector,
-                           int nr_sectors)
+const char *bdrv_dirty_bitmap_name(const BdrvDirtyBitmap *bitmap)
+{
+    return bitmap->name;
+}
+
+uint64_t bdrv_dbm_data_size(const BdrvDirtyBitmap *bitmap, uint64_t count)
+{
+    return hbitmap_data_size(bitmap->bitmap, count);
+}
+
+void bdrv_dbm_store_data(const BdrvDirtyBitmap *bitmap, uint8_t *buf,
+                         uint64_t start, uint64_t count)
+{
+    hbitmap_store_data(bitmap->bitmap, buf, start, count);
+}
+
+void bdrv_dbm_restore_data(BdrvDirtyBitmap *bitmap, uint8_t *buf,
+                           uint64_t start, uint64_t count)
+{
+    hbitmap_restore_data(bitmap->bitmap, buf, start, count);
+}
+
+BdrvDirtyBitmap **bdrv_dbm_find_all_named(BlockDriverState *bs, int *count)
+{
+    BdrvDirtyBitmap *bm, **res, **iter;
+    assert(count);
+    *count = 0;
+
+    QLIST_FOREACH(bm, &bs->dirty_bitmaps, list) {
+        if (bm->name != NULL) {
+            (*count)++;
+        }
+    }
+
+    iter = res = g_malloc(sizeof(*res) * (*count));
+    QLIST_FOREACH(bm, &bs->dirty_bitmaps, list) {
+        if (bm->name != NULL) {
+            *iter++ = bm;
+        }
+    }
+
+    return res;
+}
+
+void bdrv_dbm_restore_finish(void)
+{
+    BlockDriverState *bs;
+    BdrvDirtyBitmap *bm;
+
+    for (bs = bdrv_next(NULL); bs != NULL; bs = bdrv_next(bs)) {
+        QLIST_FOREACH(bm, &bs->dirty_bitmaps, list) {
+            if (bm->name != NULL) {
+                hbitmap_restore_finish(bm->bitmap);
+            }
+        }
+    }
+}
+
+void bdrv_set_dirty(BlockDriverState *bs, int64_t cur_sector,
+                    int nr_sectors)
  {
      BdrvDirtyBitmap *bitmap;
      QLIST_FOREACH(bitmap, &bs->dirty_bitmaps, list) {
diff --git a/include/block/block.h b/include/block/block.h
index b21233c..09eff80 100644
--- a/include/block/block.h
+++ b/include/block/block.h
@@ -459,6 +459,16 @@ void bdrv_dirty_iter_init(BlockDriverState *bs,
  void bdrv_dirty_iter_set(struct HBitmapIter *hbi, int64_t offset);
  int64_t bdrv_get_dirty_count(BlockDriverState *bs, BdrvDirtyBitmap *bitmap);
+uint64_t bdrv_dbm_data_size(const BdrvDirtyBitmap *bitmap, uint64_t count);
+void bdrv_dbm_store_data(const BdrvDirtyBitmap *bitmap, uint8_t *buf,
+                         uint64_t start, uint64_t count);
+void bdrv_dbm_restore_data(BdrvDirtyBitmap *bitmap, uint8_t *buf,
+                           uint64_t start, uint64_t count);
+bool bdrv_dbm_is_named(BdrvDirtyBitmap *bitmap);
+const char *bdrv_dirty_bitmap_name(const BdrvDirtyBitmap *bitmap);
+BdrvDirtyBitmap **bdrv_dbm_find_all_named(BlockDriverState *bs, int *count);
+void bdrv_dbm_restore_finish(void);
+
  void bdrv_enable_copy_on_read(BlockDriverState *bs);
  void bdrv_disable_copy_on_read(BlockDriverState *bs);
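
For reference, a hypothetical block-migration.c caller could drive the interface above roughly as follows. CHUNK_SECTORS, the function name, and the actual send step are placeholders, not taken from this series:

/* Hypothetical usage sketch, not part of the patch: serialize every
 * named bitmap of one device in fixed-size chunks of sectors. */
#define CHUNK_SECTORS 4096

static void send_device_bitmaps(BlockDriverState *bs)
{
    int i, count = 0;
    int64_t start, total;
    BdrvDirtyBitmap **bitmaps = bdrv_dbm_find_all_named(bs, &count);

    total = bdrv_nb_sectors(bs);
    for (i = 0; i < count; i++) {
        for (start = 0; start < total; start += CHUNK_SECTORS) {
            uint64_t nr = MIN(CHUNK_SECTORS, total - start);
            uint64_t size = bdrv_dbm_data_size(bitmaps[i], nr);
            uint8_t *buf = g_malloc(size);

            bdrv_dbm_store_data(bitmaps[i], buf, start, nr);
            /* ... emit (bdrv_dirty_bitmap_name(bitmaps[i]), start, nr, buf)
             * onto the migration stream ... */
            g_free(buf);
        }
    }
    g_free(bitmaps);
}

On the destination, the mirror loop would call bdrv_dbm_restore_data() per received chunk and bdrv_dbm_restore_finish() once after all chunks have arrived.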



