[Qemu-devel] [PATCH 14/17] block: optimize access to reqs_lock
From: Paolo Bonzini
Subject: [Qemu-devel] [PATCH 14/17] block: optimize access to reqs_lock
Date: Thu, 20 Apr 2017 14:00:55 +0200
Hot path reqs_lock critical sections are very small; the only large critical
sections happen when a request waits for serialising requests, and these
should never happen in usual circumstances.
We do not want these small critical sections to yield in any case,
which calls for using a spinlock to protect writes to the request list.
The reqs_lock is still used to protect each request's CoQueue; to
serialize removals against concurrent walks of the request list,
removals use qemu_co_mutex_lock_unlock for efficiency and determinism.
The reqs_lock is also used to protect the flush generation counts, but
that's unrelated.
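(qemu_co_mutex_lock_unlock is introduced by patch 13/17 in this series.
Conceptually it is just a lock immediately followed by an unlock, as in the
sketch below, but implemented as a single primitive whose fast path avoids
cacheline bouncing when the mutex has no concurrent holder.)

    /* Semantic sketch only: wait for any current holder of the CoMutex,
     * then release it again.  The real primitive does this as a single
     * operation to keep the uncontended case cheap. */
    #include "qemu/coroutine.h"

    static void coroutine_fn co_mutex_lock_unlock_sketch(CoMutex *mutex)
    {
        qemu_co_mutex_lock(mutex);
        qemu_co_mutex_unlock(mutex);
    }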
Signed-off-by: Paolo Bonzini <address@hidden>
---
block.c | 1 +
block/io.c | 25 ++++++++++++++++++++-----
include/block/block_int.h | 11 ++++++++---
3 files changed, 29 insertions(+), 8 deletions(-)
diff --git a/block.c b/block.c
index 3b2ed29..7ba6afe 100644
--- a/block.c
+++ b/block.c
@@ -234,6 +234,7 @@ BlockDriverState *bdrv_new(void)
QLIST_INIT(&bs->op_blockers[i]);
}
notifier_with_return_list_init(&bs->before_write_notifiers);
+ qemu_spin_init(&bs->reqs_list_write_lock);
qemu_co_mutex_init(&bs->reqs_lock);
bs->refcnt = 1;
bs->aio_context = qemu_get_aio_context();
diff --git a/block/io.c b/block/io.c
index 7af9d47..476807d 100644
--- a/block/io.c
+++ b/block/io.c
@@ -374,14 +374,29 @@ void bdrv_drain_all(void)
*/
static void tracked_request_end(BdrvTrackedRequest *req)
{
+ BlockDriverState *bs = req->bs;
+
if (req->serialising) {
- atomic_dec(&req->bs->serialising_in_flight);
+ atomic_dec(&bs->serialising_in_flight);
}
- qemu_co_mutex_lock(&req->bs->reqs_lock);
+ /* Note that there can be a concurrent walk of the request list while
+ * we remove this request from it, so we need to...
+ */
+ qemu_spin_lock(&bs->reqs_list_write_lock);
QLIST_REMOVE(req, list);
+ qemu_spin_unlock(&bs->reqs_list_write_lock);
+
+ /* ... wait for it to end before we leave. qemu_co_mutex_lock_unlock
+ * avoids cacheline bouncing in the common case of no concurrent
+ * reader.
+ */
+ qemu_co_mutex_lock_unlock(&bs->reqs_lock);
+
+ /* Now no coroutine can add itself to the wait queue, so it is
+ * safe to call qemu_co_queue_restart_all outside the reqs_lock.
+ */
qemu_co_queue_restart_all(&req->wait_queue);
- qemu_co_mutex_unlock(&req->bs->reqs_lock);
}
/**
@@ -406,9 +421,9 @@ static void tracked_request_begin(BdrvTrackedRequest *req,
qemu_co_queue_init(&req->wait_queue);
- qemu_co_mutex_lock(&bs->reqs_lock);
+ qemu_spin_lock(&bs->reqs_list_write_lock);
QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
- qemu_co_mutex_unlock(&bs->reqs_lock);
+ qemu_spin_unlock(&bs->reqs_list_write_lock);
}
static void mark_request_serialising(BdrvTrackedRequest *req, uint64_t align)
diff --git a/include/block/block_int.h b/include/block/block_int.h
index 42b49f5..b298de8 100644
--- a/include/block/block_int.h
+++ b/include/block/block_int.h
@@ -78,9 +78,10 @@ typedef struct BdrvTrackedRequest {
QLIST_ENTRY(BdrvTrackedRequest) list;
Coroutine *co; /* owner, used for deadlock detection */
- CoQueue wait_queue; /* coroutines blocked on this request */
-
struct BdrvTrackedRequest *waiting_for;
+
+ /* Protected by BlockDriverState's reqs_lock. */
+ CoQueue wait_queue; /* coroutines blocked on this request */
} BdrvTrackedRequest;
struct BlockDriver {
@@ -626,11 +627,15 @@ struct BlockDriverState {
int quiesce_counter;
unsigned int write_gen; /* Current data generation */
- /* Protected by reqs_lock. */
+ /* Writes are protected by reqs_list_write_lock. Reads take
+ * reqs_lock so that removals can easily synchronize with walks.
+ */
QLIST_HEAD(, BdrvTrackedRequest) tracked_requests;
CoQueue flush_queue; /* Serializing flush queue */
bool active_flush_req; /* Flush request in flight? */
unsigned int flushed_gen; /* Flushed write generation */
+
+ QemuSpin reqs_list_write_lock;
CoMutex reqs_lock;
};
--
2.9.3
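
For context (not part of the patch itself), here is a rough sketch of how a
reader-side walk of tracked_requests fits the locking rules documented in
block_int.h above.  The helper name is hypothetical; the point is only that
a walk holds reqs_lock for its whole duration, so the
qemu_co_mutex_lock_unlock() in tracked_request_end cannot return until any
walk that might still see the removed request has finished.

    /* Hypothetical reader-side walk: list writers take
     * reqs_list_write_lock, walkers take reqs_lock, and removal in
     * tracked_request_end does lock_unlock on reqs_lock to wait for
     * walkers before restarting the request's wait queue. */
    #include "qemu/osdep.h"
    #include "block/block_int.h"

    static bool coroutine_fn bdrv_has_tracked_request_sketch(BlockDriverState *bs)
    {
        BdrvTrackedRequest *req;
        bool found = false;

        qemu_co_mutex_lock(&bs->reqs_lock);
        QLIST_FOREACH(req, &bs->tracked_requests, list) {
            found = true;
            break;
        }
        qemu_co_mutex_unlock(&bs->reqs_lock);
        return found;
    }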