qemu-devel
[Top][All Lists]
Advanced

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Qemu-devel] [RFC V6 24/33] qcow2: Integrate deduplication in qcow2_co_writev loop.


From: Benoît Canet
Subject: [Qemu-devel] [RFC V6 24/33] qcow2: Integrate deduplication in qcow2_co_writev loop.
Date: Wed, 6 Feb 2013 13:31:57 +0100

Signed-off-by: Benoit Canet <address@hidden>
---
 block/qcow2.c |   87 +++++++++++++++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 85 insertions(+), 2 deletions(-)

diff --git a/block/qcow2.c b/block/qcow2.c
index 9cbb2f0..f39b6d5 100644
--- a/block/qcow2.c
+++ b/block/qcow2.c
@@ -331,6 +331,7 @@ static int qcow2_open(BlockDriverState *bs, int flags)
     QCowHeader header;
     uint64_t ext_end;
 
+    s->has_dedup = false;
     ret = bdrv_pread(bs->file, 0, &header, sizeof(header));
     if (ret < 0) {
         goto fail;
@@ -793,13 +794,18 @@ static coroutine_fn int qcow2_co_writev(BlockDriverState *bs,
     BDRVQcowState *s = bs->opaque;
     int index_in_cluster;
     int n_end;
-    int ret;
+    int ret = 0;
     int cur_nr_sectors; /* number of sectors in current iteration */
     uint64_t cluster_offset;
     QEMUIOVector hd_qiov;
     uint64_t bytes_done = 0;
     uint8_t *cluster_data = NULL;
     QCowL2Meta *l2meta = NULL;
+    uint8_t *dedup_cluster_data = NULL;
+    int dedup_cluster_data_nr;
+    int deduped_sectors_nr;
+    QCowDedupState ds;
+    bool atomic_dedup_is_running;
 
     trace_qcow2_writev_start_req(qemu_coroutine_self(), sector_num,
                                  remaining_sectors);
@@ -810,13 +816,70 @@ static coroutine_fn int qcow2_co_writev(BlockDriverState *bs,
 
     qemu_co_mutex_lock(&s->lock);
 
+    atomic_dedup_is_running = qcow2_dedup_is_running(bs);
+    if (atomic_dedup_is_running) {
+        QTAILQ_INIT(&ds.undedupables);
+        ds.phash.reuse = false;
+        ds.nb_undedupable_sectors = 0;
+        ds.nb_clusters_processed = 0;
+
+        /* if deduplication is on we make sure dedup_cluster_data
+         * contains a multiple of cluster size of data in order
+         * to compute the hashes
+         */
+        ret = qcow2_dedup_read_missing_and_concatenate(bs,
+                                                       qiov,
+                                                       sector_num,
+                                                       remaining_sectors,
+                                                       &dedup_cluster_data,
+                                                       &dedup_cluster_data_nr);
+
+        if (ret < 0) {
+            goto fail;
+        }
+    }
+
     while (remaining_sectors != 0) {
 
         l2meta = NULL;
 
         trace_qcow2_writev_start_part(qemu_coroutine_self());
+
+        if (atomic_dedup_is_running && ds.nb_undedupable_sectors == 0) {
+            /* Try to deduplicate as much clusters as possible */
+            deduped_sectors_nr = qcow2_dedup(bs,
+                                             &ds,
+                                             sector_num,
+                                             dedup_cluster_data,
+                                             dedup_cluster_data_nr);
+
+            if (deduped_sectors_nr < 0) {
+                goto fail;
+            }
+
+            remaining_sectors -= deduped_sectors_nr;
+            sector_num += deduped_sectors_nr;
+            bytes_done += deduped_sectors_nr * 512;
+
+            /* no more data to write -> exit */
+            if (remaining_sectors <= 0) {
+                goto fail;
+            }
+
+            /* if we deduped something trace it */
+            if (deduped_sectors_nr) {
+                trace_qcow2_writev_done_part(qemu_coroutine_self(),
+                                             deduped_sectors_nr);
+                trace_qcow2_writev_start_part(qemu_coroutine_self());
+            }
+        }
+
         index_in_cluster = sector_num & (s->cluster_sectors - 1);
-        n_end = index_in_cluster + remaining_sectors;
+        n_end = atomic_dedup_is_running &&
+                ds.nb_undedupable_sectors < remaining_sectors ?
+                index_in_cluster + ds.nb_undedupable_sectors :
+                index_in_cluster + remaining_sectors;
+
         if (s->crypt_method &&
             n_end > QCOW_MAX_CRYPT_CLUSTERS * s->cluster_sectors) {
             n_end = QCOW_MAX_CRYPT_CLUSTERS * s->cluster_sectors;
@@ -852,6 +915,24 @@ static coroutine_fn int qcow2_co_writev(BlockDriverState *bs,
                 cur_nr_sectors * 512);
         }
 
+        /* Write the non duplicated clusters hashes to disk */
+        if (atomic_dedup_is_running) {
+            int count = cur_nr_sectors / s->cluster_sectors;
+            int has_ending = ((cluster_offset >> 9) + index_in_cluster +
+                             cur_nr_sectors) & (s->cluster_sectors - 1);
+            count = index_in_cluster ? count + 1 : count;
+            count = has_ending ? count + 1 : count;
+            ret = qcow2_dedup_store_new_hashes(bs,
+                                               &ds,
+                                               count,
+                                               sector_num,
+                                               (cluster_offset >> 9));
+            if (ret < 0) {
+                goto fail;
+            }
+        }
+
+        BLKDBG_EVENT(bs->file, BLKDBG_WRITE_AIO);
         qemu_co_mutex_unlock(&s->lock);
         BLKDBG_EVENT(bs->file, BLKDBG_WRITE_AIO);
         trace_qcow2_writev_data(qemu_coroutine_self(),
@@ -883,6 +964,7 @@ static coroutine_fn int qcow2_co_writev(BlockDriverState *bs,
             l2meta = NULL;
         }
 
+        ds.nb_undedupable_sectors -= cur_nr_sectors;
         remaining_sectors -= cur_nr_sectors;
         sector_num += cur_nr_sectors;
         bytes_done += cur_nr_sectors * 512;
@@ -903,6 +985,7 @@ fail:
 
     qemu_iovec_destroy(&hd_qiov);
     qemu_vfree(cluster_data);
+    qemu_vfree(dedup_cluster_data);
     trace_qcow2_writev_done_req(qemu_coroutine_self(), ret);
 
     return ret;
-- 
1.7.10.4




reply via email to

[Prev in Thread] Current Thread [Next in Thread]