[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[RFC 15/24] block/block-copy: implement block_copy_async
From: Vladimir Sementsov-Ogievskiy
Subject: [RFC 15/24] block/block-copy: implement block_copy_async
Date: Fri, 15 Nov 2019 17:14:35 +0300
We'll need async block-copy invocation to use in backup directly.
Signed-off-by: Vladimir Sementsov-Ogievskiy <address@hidden>
---
include/block/block-copy.h | 13 +++++++++++
block/block-copy.c | 48 +++++++++++++++++++++++++++++++++++---
2 files changed, 58 insertions(+), 3 deletions(-)
diff --git a/include/block/block-copy.h b/include/block/block-copy.h
index 753fa663ac..407de7e520 100644
--- a/include/block/block-copy.h
+++ b/include/block/block-copy.h
@@ -20,7 +20,10 @@
typedef void (*ProgressBytesCallbackFunc)(int64_t bytes, void *opaque);
typedef void (*ProgressResetCallbackFunc)(void *opaque);
+typedef void (*BlockCopyAsyncCallbackFunc)(int ret, bool error_is_read,
+ void *opaque);
typedef struct BlockCopyState BlockCopyState;
+typedef struct BlockCopyCallState BlockCopyCallState;
BlockCopyState *block_copy_state_new(BdrvChild *source, BdrvChild *target,
int64_t cluster_size,
@@ -41,6 +44,16 @@ int64_t block_copy_reset_unallocated(BlockCopyState *s,
int coroutine_fn block_copy(BlockCopyState *s, int64_t start, uint64_t bytes,
bool *error_is_read);
+/*
+ * Run block-copy in a coroutine, return state pointer. If finished early
+ * returns NULL (@cb is called anyway).
+ */
+BlockCopyCallState *block_copy_async(BlockCopyState *s,
+ int64_t offset, int64_t bytes,
+ bool ratelimit, int max_workers,
+ int64_t max_chunk,
+ BlockCopyAsyncCallbackFunc cb);
+
BdrvDirtyBitmap *block_copy_dirty_bitmap(BlockCopyState *s);
void block_copy_set_skip_unallocated(BlockCopyState *s, bool skip);
diff --git a/block/block-copy.c b/block/block-copy.c
index 4b1a0cecbd..0b41afd30d 100644
--- a/block/block-copy.c
+++ b/block/block-copy.c
@@ -32,9 +32,11 @@ typedef struct BlockCopyCallState {
BlockCopyState *s;
int64_t offset;
int64_t bytes;
+ BlockCopyAsyncCallbackFunc cb;
/* State */
bool failed;
+ bool finished;
/* OUT parameters */
bool error_is_read;
@@ -602,15 +604,17 @@ out:
static int coroutine_fn block_copy_common(BlockCopyCallState *call_state)
{
+ int ret = 0;
+
while (true) {
- int ret = block_copy_dirty_clusters(call_state);
+ ret = block_copy_dirty_clusters(call_state);
if (ret < 0) {
/*
* IO operation failed, which means the whole block_copy request
* failed.
*/
- return ret;
+ break;
}
if (ret) {
/*
@@ -634,7 +638,14 @@ static int coroutine_fn block_copy_common(BlockCopyCallState *call_state)
}
}
- return 0;
+ if (call_state->cb) {
+ call_state->cb(ret, call_state->error_is_read,
+ call_state->s->progress_opaque);
+ }
+
+ call_state->finished = true;
+
+ return ret;
}
int coroutine_fn block_copy(BlockCopyState *s, int64_t start, uint64_t bytes,
@@ -655,6 +666,37 @@ int coroutine_fn block_copy(BlockCopyState *s, int64_t start, uint64_t bytes,
return ret;
}
+static void coroutine_fn block_copy_async_co_entry(void *opaque)
+{
+ block_copy_common(opaque);
+}
+
+BlockCopyCallState *block_copy_async(BlockCopyState *s,
+ int64_t offset, int64_t bytes,
+ bool ratelimit, int max_workers,
+ int64_t max_chunk,
+ BlockCopyAsyncCallbackFunc cb)
+{
+ BlockCopyCallState *call_state = g_new(BlockCopyCallState, 1);
+ Coroutine *co = qemu_coroutine_create(block_copy_async_co_entry,
+ call_state);
+
+ *call_state = (BlockCopyCallState) {
+ .s = s,
+ .offset = offset,
+ .bytes = bytes,
+ .cb = cb,
+ };
+
+ qemu_coroutine_enter(co);
+
+ if (call_state->finished) {
+ g_free(call_state);
+ return NULL;
+ }
+
+ return call_state;
+}
BdrvDirtyBitmap *block_copy_dirty_bitmap(BlockCopyState *s)
{
return s->copy_bitmap;
--
2.21.0
- [RFC 00/24] backup performance: block_status + async, Vladimir Sementsov-Ogievskiy, 2019/11/15
- [RFC 12/24] block/block-copy: move block_copy_task_create down, Vladimir Sementsov-Ogievskiy, 2019/11/15
- [RFC 03/24] block/block-copy: factor out block_copy_find_inflight_req, Vladimir Sementsov-Ogievskiy, 2019/11/15
- [RFC 20/24] job: call job_enter from job_user_pause, Vladimir Sementsov-Ogievskiy, 2019/11/15
- [RFC 19/24] blockjob: add set_speed to BlockJobDriver, Vladimir Sementsov-Ogievskiy, 2019/11/15
- [RFC 16/24] block/block-copy: add max_chunk and max_workers paramters, Vladimir Sementsov-Ogievskiy, 2019/11/15
- [RFC 09/24] block/block-copy: alloc task on each iteration, Vladimir Sementsov-Ogievskiy, 2019/11/15
- [RFC 15/24] block/block-copy: implement block_copy_async, Vladimir Sementsov-Ogievskiy <=
- [RFC 23/24] python: add qemu/bench_block_job.py, Vladimir Sementsov-Ogievskiy, 2019/11/15
- [RFC 17/24] block/block-copy: add ratelimit to block-copy, Vladimir Sementsov-Ogievskiy, 2019/11/15
- [RFC 14/24] block/block-copy: More explicit call_state, Vladimir Sementsov-Ogievskiy, 2019/11/15
- [RFC 18/24] block/block-copy: add block_copy_cancel, Vladimir Sementsov-Ogievskiy, 2019/11/15
- [RFC 01/24] block/block-copy: specialcase first copy_range request, Vladimir Sementsov-Ogievskiy, 2019/11/15
- [RFC 05/24] block/block-copy: rename start to offset in interfaces, Vladimir Sementsov-Ogievskiy, 2019/11/15
- [RFC 24/24] python: benchmark new backup architecture, Vladimir Sementsov-Ogievskiy, 2019/11/15
- [RFC 08/24] block/block-copy: rename in-flight requests to tasks, Vladimir Sementsov-Ogievskiy, 2019/11/15
- [RFC 10/24] block/block-copy: add state pointer to BlockCopyTask, Vladimir Sementsov-Ogievskiy, 2019/11/15
- [RFC 06/24] block/block-copy: reduce intersecting request lock, Vladimir Sementsov-Ogievskiy, 2019/11/15