[Top][All Lists]
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
Re: [Qemu-devel] [Patch 1/4] [RFC] Zero Cluster Dedup, Offline dedup, q
From: |
Shahar Frank |
Subject: |
Re: [Qemu-devel] [Patch 1/4] [RFC] Zero Cluster Dedup, Offline dedup, qemu-img extensions |
Date: |
Mon, 6 Oct 2008 10:23:18 -0700 |
Basic zero dedup patch.
Signed-off-by: Shahar Frank <address@hidden>
diff --git a/block-qcow2.c b/block-qcow2.c
index 64a94bd..693cd77 100644
--- a/block-qcow2.c
+++ b/block-qcow2.c
@@ -43,8 +43,8 @@
- L2 tables have always a size of one cluster.
*/
-#define DEBUG_ALLOC
-#define DEBUG_ALLOC2
+//#define DEBUG_ALLOC
+//#define DEBUG_ALLOC2
#define QCOW_MAGIC (('Q' << 24) | ('F' << 16) | ('I' << 8) | 0xfb)
#define QCOW_VERSION 2
@@ -61,14 +61,6 @@
#define REFCOUNT_SHIFT 1 /* refcount size is 2 bytes */
-int enable_zero_dedup = 1;
-
-#if defined(DEBUG_ALLOC) || defined(DEBUG_ALLOC2)
-#define DEBUG(msg, args...) fprintf(stderr, "(%d) %s: " msg "\n",
getpid(), __FUNCTION__, ##args)
-#else
-#define DEBUG(msg, args...)
-#endif
-
typedef struct QCowHeader {
uint32_t magic;
uint32_t version;
@@ -146,8 +138,7 @@ typedef struct BDRVQcowState {
uint16_t *refcount_block_cache;
int64_t free_cluster_index;
int64_t free_byte_offset;
- uint64_t zero_cluster; /* TODO: make this persistant to avoid
re-allocation of it each run */
-
+
uint32_t crypt_method; /* current crypt method, 0 if no key yet */
uint32_t crypt_method_header;
AES_KEY aes_encrypt_key;
@@ -176,8 +167,6 @@ static int64_t alloc_clusters(BlockDriverState *bs,
int64_t size);
static int64_t alloc_bytes(BlockDriverState *bs, int size);
static void free_clusters(BlockDriverState *bs,
int64_t offset, int64_t size);
-static int64_t alloc_clusters_noref(BlockDriverState *bs, int64_t
size);
-
#ifdef DEBUG_ALLOC
static void check_refcounts(BlockDriverState *bs);
#endif
@@ -767,7 +756,6 @@ static int get_cluster_table(BlockDriverState *bs,
uint64_t offset,
/* seek the the l2 offset in the l1 table */
l1_index = offset >> (s->l2_bits + s->cluster_bits);
- DEBUG("offset %"PRIx64" l1_index %d", offset, l1_index);
if (l1_index >= s->l1_size) {
ret = grow_l1_table(bs, l1_index + 1);
if (ret < 0)
@@ -875,57 +863,57 @@ static uint64_t
alloc_cluster_offset(BlockDriverState *bs,
int n_start, int n_end,
int *num)
{
- BDRVQcowState *s = bs->opaque;
- int l2_index, ret;
- uint64_t l2_offset, *l2_table, cluster_offset;
- int nb_available, nb_clusters, i, j;
- uint64_t start_sect, current;
-
- ret = get_cluster_table(bs, offset, &l2_table, &l2_offset,
&l2_index);
- if (ret == 0)
- return 0;
-
- nb_clusters = ((n_end << 9) + s->cluster_size - 1) >>
- s->cluster_bits;
- if (nb_clusters > s->l2_size - l2_index)
- nb_clusters = s->l2_size - l2_index;
-
- cluster_offset = be64_to_cpu(l2_table[l2_index]);
-
+ BDRVQcowState *s = bs->opaque;
+ int l2_index, ret;
+ uint64_t l2_offset, *l2_table, cluster_offset;
+ int nb_available, nb_clusters, i, j;
+ uint64_t start_sect, current;
+
+ ret = get_cluster_table(bs, offset, &l2_table, &l2_offset,
&l2_index);
+ if (ret == 0)
+ return 0;
+
+ nb_clusters = ((n_end << 9) + s->cluster_size - 1) >>
+ s->cluster_bits;
+ if (nb_clusters > s->l2_size - l2_index)
+ nb_clusters = s->l2_size - l2_index;
+
+ cluster_offset = be64_to_cpu(l2_table[l2_index]);
+
/* We keep all QCOW_OFLAG_COPIED clusters */
-
- if (cluster_offset & QCOW_OFLAG_COPIED) {
-
- for (i = 1; i < nb_clusters; i++) {
- current = be64_to_cpu(l2_table[l2_index + i]);
- if (cluster_offset + (i << s->cluster_bits) !=
current)
- break;
- }
- nb_clusters = i;
-
- nb_available = nb_clusters << (s->cluster_bits - 9);
- if (nb_available > n_end)
- nb_available = n_end;
-
- cluster_offset &= ~QCOW_OFLAG_COPIED;
-
- goto out;
- }
-
+
+ if (cluster_offset & QCOW_OFLAG_COPIED) {
+
+ for (i = 1; i < nb_clusters; i++) {
+ current = be64_to_cpu(l2_table[l2_index + i]);
+ if (cluster_offset + (i << s->cluster_bits) != current)
+ break;
+ }
+ nb_clusters = i;
+
+ nb_available = nb_clusters << (s->cluster_bits - 9);
+ if (nb_available > n_end)
+ nb_available = n_end;
+
+ cluster_offset &= ~QCOW_OFLAG_COPIED;
+
+ goto out;
+ }
+
/* for the moment, multiple compressed clusters are not managed */
-
- if (cluster_offset & QCOW_OFLAG_COMPRESSED)
- nb_clusters = 1;
-
+
+ if (cluster_offset & QCOW_OFLAG_COMPRESSED)
+ nb_clusters = 1;
+
/* how many available clusters ? */
-
+
i = 0;
- while (i < nb_clusters) {
-
- i++;
-
- if (!cluster_offset) {
-
+ while (i < nb_clusters) {
+
+ i++;
+
+ if (!cluster_offset) {
+
/* how many free clusters ? */
while (i < nb_clusters) {
@@ -942,72 +930,72 @@ static uint64_t
alloc_cluster_offset(BlockDriverState *bs,
} else {
/* how many contiguous clusters ? */
-
- j = 1;
- current = 0;
- while (i < nb_clusters) {
- current = be64_to_cpu(l2_table[l2_index
+ i]);
- if (cluster_offset + (j <<
s->cluster_bits) != current)
- break;
-
- i++;
- j++;
- }
-
- free_any_clusters(bs, cluster_offset, j);
- if (current)
- break;
- cluster_offset = current;
- }
- }
- nb_clusters = i;
-
+
+ j = 1;
+ current = 0;
+ while (i < nb_clusters) {
+ current = be64_to_cpu(l2_table[l2_index + i]);
+ if (cluster_offset + (j << s->cluster_bits) != current)
+ break;
+
+ i++;
+ j++;
+ }
+
+ free_any_clusters(bs, cluster_offset, j);
+ if (current)
+ break;
+ cluster_offset = current;
+ }
+ }
+ nb_clusters = i;
+
/* allocate a new cluster */
-
- cluster_offset = alloc_clusters(bs, nb_clusters *
s->cluster_size);
-
+
+ cluster_offset = alloc_clusters(bs, nb_clusters * s->cluster_size);
+
/* we must initialize the cluster content which won't be
written */
-
- nb_available = nb_clusters << (s->cluster_bits - 9);
- if (nb_available > n_end)
- nb_available = n_end;
-
+
+ nb_available = nb_clusters << (s->cluster_bits - 9);
+ if (nb_available > n_end)
+ nb_available = n_end;
+
/* copy content of unmodified sectors */
-
- start_sect = (offset & ~(s->cluster_size - 1)) >> 9;
- if (n_start) {
- ret = copy_sectors(bs, start_sect, cluster_offset, 0,
n_start);
- if (ret < 0)
- return 0;
- }
-
- if (nb_available & (s->cluster_sectors - 1)) {
- uint64_t end = nb_available &
~(uint64_t)(s->cluster_sectors - 1);
- ret = copy_sectors(bs, start_sect + end,
- cluster_offset + (end << 9),
- nb_available - end,
- s->cluster_sectors);
- if (ret < 0)
- return 0;
- }
-
+
+ start_sect = (offset & ~(s->cluster_size - 1)) >> 9;
+ if (n_start) {
+ ret = copy_sectors(bs, start_sect, cluster_offset, 0, n_start);
+ if (ret < 0)
+ return 0;
+ }
+
+ if (nb_available & (s->cluster_sectors - 1)) {
+ uint64_t end = nb_available & ~(uint64_t)(s->cluster_sectors -
1);
+ ret = copy_sectors(bs, start_sect + end,
+ cluster_offset + (end << 9),
+ nb_available - end,
+ s->cluster_sectors);
+ if (ret < 0)
+ return 0;
+ }
+
/* update L2 table */
-
- for (i = 0; i < nb_clusters; i++)
- l2_table[l2_index + i] = cpu_to_be64((cluster_offset +
- (i <<
s->cluster_bits)) |
- QCOW_OFLAG_COPIED);
-
- if (bdrv_pwrite(s->hd,
- l2_offset + l2_index * sizeof(uint64_t),
- l2_table + l2_index,
- nb_clusters * sizeof(uint64_t)) !=
- nb_clusters * sizeof(uint64_t))
- return 0;
-
+
+ for (i = 0; i < nb_clusters; i++)
+ l2_table[l2_index + i] = cpu_to_be64((cluster_offset +
+ (i << s->cluster_bits)) |
+ QCOW_OFLAG_COPIED);
+
+ if (bdrv_pwrite(s->hd,
+ l2_offset + l2_index * sizeof(uint64_t),
+ l2_table + l2_index,
+ nb_clusters * sizeof(uint64_t)) !=
+ nb_clusters * sizeof(uint64_t))
+ return 0;
+
out:
- *num = nb_available - n_start;
+ *num = nb_available - n_start;
return cluster_offset;
}
@@ -1131,174 +1119,35 @@ static int qcow_read(BlockDriverState *bs,
int64_t sector_num,
return 0;
}
-/*
- * Low level synchronous write clusters: write nb_sectors of data given
in 'buf' at loggical
- * offset 'loffset'.
- * If physical offset 'poffset' is non zero then it is used, otherwise
a logical to
- * physical mapping/allocation is done for loffset.
- */
-static int write_clusters(BlockDriverState *bs, uint64_t loffset,
uint64_t poffset,
+static int qcow_write(BlockDriverState *bs, int64_t sector_num,
const uint8_t *buf, int nb_sectors)
{
BDRVQcowState *s = bs->opaque;
- int index_in_cluster = loffset & (s->cluster_sectors - 1);
- int n_end = index_in_cluster + nb_sectors;
- int n = nb_sectors;
- int ret;
-
- if (s->crypt_method && n_end > QCOW_MAX_CRYPT_CLUSTERS *
s->cluster_sectors)
- n_end = QCOW_MAX_CRYPT_CLUSTERS * s->cluster_sectors;
+ int ret, index_in_cluster, n;
+ uint64_t cluster_offset;
+ int n_end;
- if (!poffset)
- poffset = alloc_cluster_offset(bs, loffset << 9,
+ while (nb_sectors > 0) {
+ index_in_cluster = sector_num & (s->cluster_sectors - 1);
+ n_end = index_in_cluster + nb_sectors;
+ if (s->crypt_method &&
+ n_end > QCOW_MAX_CRYPT_CLUSTERS * s->cluster_sectors)
+ n_end = QCOW_MAX_CRYPT_CLUSTERS * s->cluster_sectors;
+ cluster_offset = alloc_cluster_offset(bs, sector_num << 9,
index_in_cluster,
n_end, &n);
- if (!poffset)
- return -1;
- if (s->crypt_method) {
- encrypt_sectors(s, loffset, s->cluster_data, buf, n, 1,
- &s->aes_encrypt_key);
- ret = bdrv_pwrite(s->hd, poffset + index_in_cluster * 512,
- s->cluster_data, n * 512);
- } else {
- ret = bdrv_pwrite(s->hd, poffset + index_in_cluster * 512, buf,
n * 512);
- }
- if (ret != n * 512)
- return -1;
- return 0;
-}
-
-/*
- * dedup_clusters: remap logical offset(s) starting at 'loffset' up to
'loffset' + 'nclusters'
- * (not including) to physical offset 'poffset' (many to one).
- * The old physical cluster(s) will be freed if it is not the same as
the new one.
- * Returns the number of remapped clusters, or -1 in case of errors.
- * Note that if the given poffset is zero, then the given cluster will
be just deallocated,
- * and a 'hole' is created instead. If a backing file exists the
backing files clusters
- * may be read through the hole.
- */
-static int dedup_clusters(BlockDriverState *bs, uint64_t loffset,
uint64_t poffset, int nclusters)
-{
- uint64_t l2_offset, base_l2_offset = 0, ooffset;
- uint64_t *l2_table, *base_l2_table = 0;
- BDRVQcowState *s = bs->opaque;
- int l2_index, base_l2_index = 0;
- int n, entries = 0, remapped = 0;
-
- DEBUG("%"PRIx64" to %"PRIx64" (%d)", loffset, poffset, nclusters);
- for (remapped = 0; remapped < nclusters; remapped += entries) {
- for (entries = 0, n = 0 ; remapped + entries < nclusters;
entries++, loffset += s->cluster_size) {
- if (get_cluster_table(bs, loffset, &l2_table, &l2_offset,
&l2_index) == 0) {
- nclusters = remapped; /* do not attempt to do more */
- DEBUG("get cluster table failed");
- break; /* goto to l2 table / ref
update path */
- }
-
- if (!entries) {
- base_l2_index = l2_index;
- base_l2_offset = l2_offset;
- base_l2_table = l2_table;
- } else if (l2_offset != base_l2_offset) {
- DEBUG("got new l2offset (%"PRIx64" != %"PRIx64")",
l2_offset, base_l2_offset);
- break;
- }
-
- ooffset = be64_to_cpu(l2_table[l2_index]);
- if (ooffset == poffset) {
- continue; /* nothing to do, already mapped to
poffset */
- }
-
- if (ooffset) {
- DEBUG("free old offset %"PRIx64" (%"PRIx64")", ooffset,
ooffset & ~QCOW_OFLAG_COPIED);
- free_any_clusters(bs, ooffset & ~QCOW_OFLAG_COPIED, 1);
- }
- /* update L2 table */
- base_l2_table[l2_index] = cpu_to_be64(poffset);
- n++; /* target page ref increase count */
- }
-
- if (n == 0)
- continue;
-
- if (poffset)
- update_refcount(bs, poffset, s->cluster_size, n);
-
- /* calculate the number of entries we have to update */
- if (bdrv_pwrite(s->hd,
- base_l2_offset + base_l2_index *
sizeof(uint64_t),
- base_l2_table + base_l2_index, entries *
sizeof(uint64_t)) != entries * sizeof(uint64_t))
+ if (!cluster_offset)
return -1;
-
- }
- DEBUG("remapped %d", remapped);
- return remapped;
-}
-
-/*
- * Attempt to optimize data writing. Currently only zero cluster dedup
is supported.
- * 'sector_num' is the loggical offset of the data in sectors.
- * 'nb_sectors' (in param) is the maximux number of sectors to dedup.
- * 'nb_sectors' (out param) is the number of sectors acctually
handeled.
- * The function returns zero if no dedup can be done, negative number
on errors,
- * and positive number if the data was deduped.
- */
-static int optimize_write(BlockDriverState *bs, int64_t sector_num,
- const uint8_t *buf, int *nb_sectors)
-{
- BDRVQcowState *s = bs->opaque;
- uint64_t loffset = sector_num << 9, poffset;
- int n;
-
- DEBUG("%s 0x%"PRIx64" %d", bs->filename, loffset, *nb_sectors);
- if (!enable_zero_dedup)
- return 0;
-
- if ((sector_num & (s->cluster_sectors - 1))) /* dedup only cluster
aligend IO */
- return 0;
-
- for (n = 0; n < *nb_sectors; n += s->cluster_sectors) {
- if (*nb_sectors - n < s->cluster_sectors ||
/* no support for partial cluster IO */
- qemu_is_not_zero(buf + 512 * n, s->cluster_size)) /* only
zero dedup for now */
- break;
- }
- if (!n)
- return 0;
-
- DEBUG("%s zero dedup 0x%"PRIx64" n %d", bs->filename, loffset, n);
- *nb_sectors = n;
-
- /*
- * Allocate a zero cluster if we don't have one, and we have a
backing file.
- * If no backing file is used, then we can just deallocate the
cluster. This
- * can be achieved by passing poffset=0 to dedup_clusters (see
above).
- */
- if (!s->zero_cluster && bs->backing_hd) {
- /*
- * create new zero page. It is created without any ref
because
- * the remapped cluster will ref it.
- */
- if (!(poffset = alloc_clusters_noref(bs, s->cluster_size)) ||
- write_clusters(bs, loffset, poffset, buf,
s->cluster_sectors) < 0)
- return 0;
- DEBUG("creating new zero page at 0x%"PRIx64"", poffset);
- s->zero_cluster = poffset; /* TODO: save it in persistant
storage */
- }
-
- return dedup_clusters(bs, loffset, s->zero_cluster, n /
s->cluster_sectors);
-}
-
-static int qcow_write(BlockDriverState *bs, int64_t sector_num,
- const uint8_t *buf, int nb_sectors)
-{
- BDRVQcowState *s = bs->opaque;
- int n;
-
- while (nb_sectors > 0) {
- n = nb_sectors;
- if (optimize_write(bs, sector_num, buf, &n) <= 0 &&
- write_clusters(bs, sector_num << 9, 0, buf, n) < 0)
+ if (s->crypt_method) {
+ encrypt_sectors(s, sector_num, s->cluster_data, buf, n, 1,
+ &s->aes_encrypt_key);
+ ret = bdrv_pwrite(s->hd, cluster_offset + index_in_cluster
* 512,
+ s->cluster_data, n * 512);
+ } else {
+ ret = bdrv_pwrite(s->hd, cluster_offset + index_in_cluster
* 512, buf, n * 512);
+ }
+ if (ret != n * 512)
return -1;
-
nb_sectors -= n;
sector_num += n;
buf += n * 512;
@@ -1445,7 +1294,6 @@ static void qcow_aio_write_cb(void *opaque, int
ret)
acb->hd_aiocb = NULL;
- DEBUG("<<");
if (ret < 0) {
fail:
acb->common.cb(acb->common.opaque, ret);
@@ -1453,24 +1301,15 @@ static void qcow_aio_write_cb(void *opaque, int
ret)
return;
}
- do {
- acb->nb_sectors -= acb->n;
- acb->sector_num += acb->n;
- acb->buf += acb->n * 512;
-
- if (acb->nb_sectors == 0) {
- /* request completed */
- acb->common.cb(acb->common.opaque, 0);
- qemu_aio_release(acb);
- return;
- }
- acb->n = acb->nb_sectors;
- DEBUG("! n %d sec %d", acb->n, acb->nb_sectors);
- } while ((ret = optimize_write(bs, acb->sector_num, acb->buf,
&acb->n)) > 0);
+ acb->nb_sectors -= acb->n;
+ acb->sector_num += acb->n;
+ acb->buf += acb->n * 512;
- if (ret < 0) {
- ret = -EIO;
- goto fail;
+ if (acb->nb_sectors == 0) {
+ /* request completed */
+ acb->common.cb(acb->common.opaque, 0);
+ qemu_aio_release(acb);
+ return;
}
index_in_cluster = acb->sector_num & (s->cluster_sectors - 1);
@@ -2329,7 +2168,7 @@ static int64_t
alloc_clusters_noref(BlockDriverState *bs, int64_t size)
{
BDRVQcowState *s = bs->opaque;
int i, nb_clusters;
-
+
nb_clusters = (size + s->cluster_size - 1) >> s->cluster_bits;
for(;;) {
if (get_refcount(bs, s->free_cluster_index) == 0) {
@@ -2340,7 +2179,7 @@ static int64_t
alloc_clusters_noref(BlockDriverState *bs, int64_t size)
s->free_cluster_index++;
}
#ifdef DEBUG_ALLOC2
- printf("alloc_clusters: size=%"PRId64" -> %"PRId64"\n",
+ printf("alloc_clusters: size=%lld -> %lld\n",
size,
(s->free_cluster_index - nb_clusters) <<
s->cluster_bits);
#endif
@@ -2531,11 +2370,8 @@ static int
update_cluster_refcount(BlockDriverState *bs,
refcount += addend;
if (refcount < 0 || refcount > 0xffff)
return -EINVAL;
- if (refcount == 0) {
- if (cluster_index < s->free_cluster_index)
- s->free_cluster_index = cluster_index;
- if (cluster_index == s->zero_cluster)
- s->zero_cluster = 0;
+ if (refcount == 0 && cluster_index < s->free_cluster_index) {
+ s->free_cluster_index = cluster_index;
}
s->refcount_block_cache[block_index] = cpu_to_be16(refcount);
if (bdrv_pwrite(s->hd,
@@ -2553,7 +2389,7 @@ static void update_refcount(BlockDriverState *bs,
int64_t start, last, cluster_offset;
#ifdef DEBUG_ALLOC2
- printf("update_refcount: offset=%"PRId64" size=%"PRId64"
addend=%d\n",
+ printf("update_refcount: offset=%lld size=%lld addend=%d\n",
offset, length, addend);
#endif
if (length <= 0)
@@ -2708,7 +2544,7 @@ static void check_refcounts(BlockDriverState *bs)
sn->l1_table_offset, sn->l1_size, 0);
}
inc_refcounts(bs, refcount_table, nb_clusters,
- s->snapshots_offset, s->snapshots_size);
+ s->snapshots_offset, s->snapshots_size);
/* refcount data */
inc_refcounts(bs, refcount_table, nb_clusters,
diff --git a/block.c b/block.c
index e0383a8..27b39d6 100644
--- a/block.c
+++ b/block.c
@@ -58,17 +58,6 @@ BlockDriverState *bdrv_first;
static BlockDriver *first_drv;
-int qemu_is_not_zero(const uint8_t *buf, int len)
-{
- int i;
- len >>= 2;
- for(i = 0;i < len; i++) {
- if (((uint32_t *)buf)[i] != 0)
- return 1;
- }
- return 0;
-}
-
int path_is_absolute(const char *path)
{
const char *p;
diff --git a/block_int.h b/block_int.h
index 062b95f..7f1a514 100644
--- a/block_int.h
+++ b/block_int.h
@@ -145,8 +145,6 @@ void *qemu_aio_get(BlockDriverState *bs,
BlockDriverCompletionFunc *cb,
void *opaque);
void qemu_aio_release(void *p);
-int qemu_is_not_zero(const uint8_t *buf, int len);
-
extern BlockDriverState *bdrv_first;
#endif /* BLOCK_INT_H */
diff --git a/qemu-img.c b/qemu-img.c
index 727ce91..70c2403 100644
--- a/qemu-img.c
+++ b/qemu-img.c
@@ -74,7 +74,7 @@ static void help(void)
" '-c' indicates that target image must be compressed (qcow
format only)\n"
" '-e' indicates that the target image must be encrypted
(qcow format only)\n"
" '-6' indicates that the target image must use
compatibility level 6 (vmdk format only)\n"
- );
+ );
printf("\nSupported format:");
bdrv_iterate_format(format_print, NULL);
printf("\n");
@@ -343,6 +343,17 @@ static int img_commit(int argc, char **argv)
return 0;
}
+static int is_not_zero(const uint8_t *sector, int len)
+{
+ int i;
+ len >>= 2;
+ for(i = 0;i < len; i++) {
+ if (((uint32_t *)sector)[i] != 0)
+ return 1;
+ }
+ return 0;
+}
+
/*
* Returns true iff the first sector pointed to by 'buf' contains at
least
* a non-NUL byte.
@@ -358,10 +369,10 @@ static int is_allocated_sectors(const uint8_t
*buf, int n, int *pnum)
*pnum = 0;
return 0;
}
- v = qemu_is_not_zero(buf, 512);
+ v = is_not_zero(buf, 512);
for(i = 1; i < n; i++) {
buf += 512;
- if (v != qemu_is_not_zero(buf, 512))
+ if (v != is_not_zero(buf, 512))
break;
}
*pnum = i;
@@ -516,7 +527,7 @@ static int img_convert(int argc, char **argv)
if (n < cluster_sectors)
memset(buf + n * 512, 0, cluster_size - n * 512);
- if (qemu_is_not_zero(buf, cluster_size)) {
+ if (is_not_zero(buf, cluster_size)) {
if (bdrv_write_compressed(out_bs, sector_num, buf,
cluster_sectors) != 0)
error("error while compressing sector %" PRId64,
1-zerodedup.patch
Description: 1-zerodedup.patch
[Prev in Thread] |
Current Thread |
[Next in Thread] |
- Re: [Qemu-devel] [Patch 1/4] [RFC] Zero Cluster Dedup, Offline dedup, qemu-img extensions,
Shahar Frank <=