qemu-block
[Top][All Lists]
Advanced

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Qemu-block] [PATCH RFC for-2.6 1/3] HBitmap: Introduce "meta" bitmap to track bit changes


From: Fam Zheng
Subject: [Qemu-block] [PATCH RFC for-2.6 1/3] HBitmap: Introduce "meta" bitmap to track bit changes
Date: Mon, 7 Dec 2015 13:59:53 +0800

The meta bitmap will have the same size and granularity as the tracked
bitmap, and upon each bit toggle, the corresponding bit in the meta
bitmap, at an identical position, will be set.

Signed-off-by: Fam Zheng <address@hidden>
---
 include/qemu/hbitmap.h |  7 +++++++
 util/hbitmap.c         | 22 ++++++++++++++++++++++
 2 files changed, 29 insertions(+)

diff --git a/include/qemu/hbitmap.h b/include/qemu/hbitmap.h
index bb94a00..09a6b06 100644
--- a/include/qemu/hbitmap.h
+++ b/include/qemu/hbitmap.h
@@ -181,6 +181,13 @@ void hbitmap_iter_init(HBitmapIter *hbi, const HBitmap *hb, uint64_t first);
  */
 unsigned long hbitmap_iter_skip_words(HBitmapIter *hbi);
 
+/* hbitmap_create_meta
+ * @hb: The HBitmap to operate on.
+ *
+ * Create a "meta" hbitmap to track dirtiness of the bits in this HBitmap.
+ */
+HBitmap *hbitmap_create_meta(HBitmap *hb);
+
 /**
  * hbitmap_iter_next:
  * @hbi: HBitmapIter to operate on.
diff --git a/util/hbitmap.c b/util/hbitmap.c
index 50b888f..3ad406e 100644
--- a/util/hbitmap.c
+++ b/util/hbitmap.c
@@ -81,6 +81,9 @@ struct HBitmap {
      */
     int granularity;
 
+    /* A meta dirty bitmap to track the dirtiness of bits in this HBitmap. */
+    HBitmap *meta;
+
     /* A number of progressively less coarse bitmaps (i.e. level 0 is the
      * coarsest).  Each bit in level N represents a word in level N+1 that
      * has a set bit, except the last level where each bit represents the
@@ -232,6 +235,7 @@ static inline bool hb_set_elem(unsigned long *elem, uint64_t start, uint64_t las
 /* The recursive workhorse (the depth is limited to HBITMAP_LEVELS)... */
 static void hb_set_between(HBitmap *hb, int level, uint64_t start, uint64_t last)
 {
+    uint64_t save_start = start;
     size_t pos = start >> BITS_PER_LEVEL;
     size_t lastpos = last >> BITS_PER_LEVEL;
     bool changed = false;
@@ -252,6 +256,9 @@ static void hb_set_between(HBitmap *hb, int level, uint64_t start, uint64_t last
         }
     }
     changed |= hb_set_elem(&hb->levels[level][i], start, last);
+    if (hb->meta && level == HBITMAP_LEVELS - 1 && changed) {
+        hbitmap_set(hb->meta, save_start, last - save_start + 1);
+    }
 
     /* If there was any change in this layer, we may have to update
      * the one above.
@@ -298,6 +305,7 @@ static inline bool hb_reset_elem(unsigned long *elem, uint64_t start, uint64_t l
 /* The recursive workhorse (the depth is limited to HBITMAP_LEVELS)... */
 static void hb_reset_between(HBitmap *hb, int level, uint64_t start, uint64_t last)
 {
+    uint64_t save_start = start;
     size_t pos = start >> BITS_PER_LEVEL;
     size_t lastpos = last >> BITS_PER_LEVEL;
     bool changed = false;
@@ -336,6 +344,10 @@ static void hb_reset_between(HBitmap *hb, int level, uint64_t start, uint64_t la
         lastpos--;
     }
 
+    if (hb->meta && level == HBITMAP_LEVELS - 1 && changed) {
+        hbitmap_set(hb->meta, save_start, last - save_start + 1);
+    }
+
     if (level > 0 && changed) {
         hb_reset_between(hb, level - 1, pos, lastpos);
     }
@@ -384,6 +396,9 @@ void hbitmap_free(HBitmap *hb)
     for (i = HBITMAP_LEVELS; i-- > 0; ) {
         g_free(hb->levels[i]);
     }
+    if (hb->meta) {
+        hbitmap_free(hb->meta);
+    }
     g_free(hb);
 }
 
@@ -493,3 +508,10 @@ bool hbitmap_merge(HBitmap *a, const HBitmap *b)
 
     return true;
 }
+
+HBitmap *hbitmap_create_meta(HBitmap *hb)
+{
+    assert(!hb->meta);
+    hb->meta = hbitmap_alloc(hb->size, hb->granularity);
+    return hb->meta;
+}
-- 
2.4.3




reply via email to

[Prev in Thread] Current Thread [Next in Thread]