From: Fam Zheng
Subject: [Qemu-block] [PATCH v2 02/13] block: Introduce bdrv_lock and bdrv_unlock API
Date: Tue, 2 Jun 2015 11:21:51 +0800

For various purposes, BDS users call bdrv_drain or bdrv_drain_all to make sure
there are no pending requests during a series of operations on the BDS. But in
the middle of operations, the caller may 1) yield from a coroutine (mirror_run);
2) defer the next part of work to a BH (mirror_run); 3) call nested aio_poll
(qmp_transaction); etc.
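
(Illustration, not part of the patch: the pattern that undermines the
bdrv_drain() guarantee. The function name some_job_run is made up for the
example; bdrv_drain() and qemu_coroutine_yield() are existing QEMU APIs.)

    /* bdrv_drain() only guarantees quiescence at the moment it returns;
     * once the coroutine yields, other users may submit new requests
     * before the next step of the operation runs. */
    static void coroutine_fn some_job_run(BlockDriverState *bs)
    {
        bdrv_drain(bs);          /* no pending requests... for now */
        qemu_coroutine_yield();  /* other code may issue new I/O here */
        /* the "no pending requests" assumption no longer holds */
    }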

This lock/unlock API is introduced to help ensure that the above complications
won't spoil the purpose of bdrv_drain(): bdrv_lock should help quiesce other
readers and writers at the beginning of such operations, and bdrv_unlock should
resume the blocked requests.
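
(Illustration, not part of the patch: how a caller is expected to bracket a
multi-step operation with the new pair once the functions are fleshed out.
The helper name do_locked_operation is hypothetical.)

    static void do_locked_operation(BlockDriverState *bs)
    {
        bdrv_lock(bs);      /* quiesce other readers and writers */

        /* steps that may yield, defer work to a BH, or call nested
         * aio_poll() without new requests sneaking in */

        bdrv_unlock(bs);    /* resume the blocked requests */
    }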

This patch only adds the API, so that some bdrv_drain() callers can
already switch to it without behavior change. Fleshing out of the
functions will follow.

Signed-off-by: Fam Zheng <address@hidden>
---
 block.c                   |  4 ++++
 block/io.c                | 12 ++++++++++++
 include/block/block.h     | 14 ++++++++++++++
 include/block/block_int.h |  2 ++
 4 files changed, 32 insertions(+)

diff --git a/block.c b/block.c
index b589506..0e25dbd 100644
--- a/block.c
+++ b/block.c
@@ -1716,6 +1716,7 @@ void bdrv_close(BlockDriverState *bs)
 {
     BdrvAioNotifier *ban, *ban_next;
 
+    assert(bs->lock_level == 0);
     if (bs->job) {
         block_job_cancel_sync(bs->job);
     }
@@ -1850,6 +1851,9 @@ static void bdrv_move_feature_fields(BlockDriverState *bs_dest,
     bs_dest->device_list = bs_src->device_list;
     bs_dest->blk = bs_src->blk;
 
+    /* lock */
+    bs_dest->lock_level = bs_src->lock_level;
+
     memcpy(bs_dest->op_blockers, bs_src->op_blockers,
            sizeof(bs_dest->op_blockers));
 }
diff --git a/block/io.c b/block/io.c
index e394d92..49060e5 100644
--- a/block/io.c
+++ b/block/io.c
@@ -2601,3 +2601,15 @@ void bdrv_flush_io_queue(BlockDriverState *bs)
         bdrv_flush_io_queue(bs->file);
     }
 }
+
+void bdrv_lock(BlockDriverState *bs)
+{
+    bs->lock_level++;
+    bdrv_drain(bs);
+}
+
+void bdrv_unlock(BlockDriverState *bs)
+{
+    bs->lock_level--;
+    assert(bs->lock_level >= 0);
+}
diff --git a/include/block/block.h b/include/block/block.h
index f7680b6..b49194d 100644
--- a/include/block/block.h
+++ b/include/block/block.h
@@ -592,6 +592,20 @@ void bdrv_io_plug(BlockDriverState *bs);
 void bdrv_io_unplug(BlockDriverState *bs);
 void bdrv_flush_io_queue(BlockDriverState *bs);
 
+/**
+ * bdrv_lock:
+ *
+ * Begin temporary exclusive access to the BDS by locking it.
+ */
+void bdrv_lock(BlockDriverState *bs);
+
+/**
+ * bdrv_unlock:
+ *
+ * End the temporary exclusive access begun with bdrv_lock.
+ */
+void bdrv_unlock(BlockDriverState *bs);
+
 BlockAcctStats *bdrv_get_stats(BlockDriverState *bs);
 
 #endif
diff --git a/include/block/block_int.h b/include/block/block_int.h
index f004378..9f75d46 100644
--- a/include/block/block_int.h
+++ b/include/block/block_int.h
@@ -433,6 +433,8 @@ struct BlockDriverState {
     /* threshold limit for writes, in bytes. "High water mark". */
     uint64_t write_threshold_offset;
     NotifierWithReturn write_threshold_notifier;
+
+    int lock_level;
 };
 
 
-- 
2.4.1



