[PATCH 3/4] KVM: Dynamic sized kvm memslots array
From: Peter Xu
Subject: [PATCH 3/4] KVM: Dynamic sized kvm memslots array
Date: Wed, 4 Sep 2024 15:16:34 -0400
Zhiyi reported an infinite loop issue in a VFIO use case. The root cause of
that is a separate discussion; however, while profiling it I found a
regression in dirty sync performance.
Each KVMMemoryListener maintains an array of kvm memslots. Currently it is
statically allocated to the maximum size supported by the kernel. However,
after Linux commit 4fc096a99e ("KVM: Raise the maximum number of user
memslots"), the maximum reported by the kernel has grown large enough that
always statically allocating the full amount is no longer wise.
What's worse, the QEMU kvm code still walks all allocated memslot entries
for every form of lookup. This can drastically slow down all memslot
operations, because each such loop can run over 32K times on new kernels.
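For scale, a rough illustrative calculation (the two-address-space figure is
an assumption typical of x86 KVM, not a measurement from this report):

    ~32768 slot entries per scan x 2 address spaces = ~65536 slot checks

for a single dirty sync pass, even when only ~10 slots actually hold memory.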
Fix this issue by allocating the memslots dynamically.
The initial size is set to 16 because it should cover basic VM usage; the
hope is that the majority of VMs will not need to grow at all (e.g. a VM
started with ./qemu-system-x86_64 and default options consumes 9 memslots),
while not being so large as to waste memory.
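As an aside for readers, a minimal standalone C sketch of this grow-on-demand
policy (initial allocation of 16 entries, doubling on demand, clamped to the
kernel-reported maximum) might look like the following; the type and function
names here are simplified illustrations, not the actual QEMU code, which
lives in kvm_slots_grow() and kvm_get_free_slot() in the diff below:

    /*
     * Illustrative sketch only (simplified names, plain libc); the real
     * code is kvm_slots_grow()/kvm_get_free_slot() in the diff below.
     */
    #include <assert.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define SLOTS_ALLOC_DEFAULT 16      /* initial allocation, covers typical VMs */

    typedef struct Slot {
        unsigned long memory_size;      /* 0 means the slot is free */
        unsigned int slot;              /* slot id, fixed at allocation time */
    } Slot;

    typedef struct SlotArray {
        Slot *slots;
        unsigned int nr_allocated;      /* current size of slots[] */
        unsigned int nr_max;            /* cap reported by the kernel */
    } SlotArray;

    /* Grow slots[] to nr_new entries (clamped to nr_max); new entries are zeroed. */
    static int slots_grow(SlotArray *a, unsigned int nr_new)
    {
        unsigned int cur = a->nr_allocated;

        if (nr_new > a->nr_max) {
            nr_new = a->nr_max;
        }
        if (cur >= nr_new) {
            return 0;                   /* already big enough, or at the cap */
        }
        a->slots = realloc(a->slots, nr_new * sizeof(Slot));
        assert(a->slots);
        memset(&a->slots[cur], 0, (nr_new - cur) * sizeof(Slot));
        for (unsigned int i = cur; i < nr_new; i++) {
            a->slots[i].slot = i;
        }
        a->nr_allocated = nr_new;
        return 1;
    }

    /* Find a free slot, doubling the array when none is left. */
    static Slot *slots_get_free(SlotArray *a)
    {
        do {
            for (unsigned int i = 0; i < a->nr_allocated; i++) {
                if (a->slots[i].memory_size == 0) {
                    return &a->slots[i];
                }
            }
        } while (slots_grow(a, a->nr_allocated ? a->nr_allocated * 2
                                               : SLOTS_ALLOC_DEFAULT));
        return NULL;                    /* hit the kernel-reported cap */
    }

    int main(void)
    {
        SlotArray a = { .slots = NULL, .nr_allocated = 0, .nr_max = 32768 };
        Slot *s = slots_get_free(&a);   /* first call allocates 16 slots */

        s->memory_size = 4096;          /* mark it used */
        printf("allocated %u slots, got slot id %u\n", a.nr_allocated, s->slot);
        free(a.slots);
        return 0;
    }

The clamp-then-double pattern keeps small VMs at the initial 16 entries while
still supporting guests that genuinely need many slots.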
There may be even better ways to address this, but so far this is the
simplest, and it is already an improvement over the situation before the
kernel raised the maximum number of supported memslots. For example, in the
case of the issue above, with VFIO attached on a 32GB system only ~10
memslots are used, so this should be good enough for now.
In the above VFIO setup, measurements show that the precopy dirty sync time
shrank from ~86ms to ~3ms after applying this patch. The improvement applies
to any KVM-enabled VM, even without VFIO.
Reported-by: Zhiyi Guo <zhguo@redhat.com>
Tested-by: Zhiyi Guo <zhguo@redhat.com>
Signed-off-by: Peter Xu <peterx@redhat.com>
---
include/sysemu/kvm_int.h | 1 +
accel/kvm/kvm-all.c | 87 +++++++++++++++++++++++++++++++++-------
accel/kvm/trace-events | 1 +
3 files changed, 74 insertions(+), 15 deletions(-)
diff --git a/include/sysemu/kvm_int.h b/include/sysemu/kvm_int.h
index e5de43619e..e67b2e5a68 100644
--- a/include/sysemu/kvm_int.h
+++ b/include/sysemu/kvm_int.h
@@ -46,6 +46,7 @@ typedef struct KVMMemoryListener {
MemoryListener listener;
KVMSlot *slots;
unsigned int nr_used_slots;
+ unsigned int nr_slots_allocated;
int as_id;
QSIMPLEQ_HEAD(, KVMMemoryUpdate) transaction_add;
QSIMPLEQ_HEAD(, KVMMemoryUpdate) transaction_del;
diff --git a/accel/kvm/kvm-all.c b/accel/kvm/kvm-all.c
index e408dbb753..0d379606e4 100644
--- a/accel/kvm/kvm-all.c
+++ b/accel/kvm/kvm-all.c
@@ -71,6 +71,8 @@
/* Default max allowed memslots if kernel reported nothing */
#define KVM_MEMSLOTS_NUM_MAX_DEFAULT 32
+/* Default num of memslots to be allocated when VM starts */
+#define KVM_MEMSLOTS_NUM_ALLOC_DEFAULT 16
struct KVMParkedVcpu {
unsigned long vcpu_id;
@@ -168,6 +170,52 @@ void kvm_resample_fd_notify(int gsi)
}
}
+/**
+ * kvm_slots_grow(): Grow the slots[] array in the KVMMemoryListener
+ *
+ * @kml: The KVMMemoryListener* to grow the slots[] array
+ * @nr_slots_new: The new size of slots[] array
+ *
+ * Returns: True if the array grows larger, false otherwise.
+ */
+static bool kvm_slots_grow(KVMMemoryListener *kml, unsigned int nr_slots_new)
+{
+ unsigned int i, cur = kml->nr_slots_allocated;
+ KVMSlot *slots;
+
+ if (nr_slots_new > kvm_state->nr_slots_max) {
+ nr_slots_new = kvm_state->nr_slots_max;
+ }
+
+ if (cur >= nr_slots_new) {
+ /* Big enough, no need to grow, or we reached max */
+ return false;
+ }
+
+ if (cur == 0) {
+ slots = g_new0(KVMSlot, nr_slots_new);
+ } else {
+ assert(kml->slots);
+ slots = g_renew(KVMSlot, kml->slots, nr_slots_new);
+ /*
+ * g_renew() doesn't initialize extended buffers, however kvm
+ * memslots require fields to be zero-initialized. E.g. pointers,
+ * memory_size field, etc.
+ */
+ memset(&slots[cur], 0x0, sizeof(slots[0]) * (nr_slots_new - cur));
+ }
+
+ for (i = cur; i < nr_slots_new; i++) {
+ slots[i].slot = i;
+ }
+
+ kml->slots = slots;
+ kml->nr_slots_allocated = nr_slots_new;
+ trace_kvm_slots_grow(cur, nr_slots_new);
+
+ return true;
+}
+
unsigned int kvm_get_max_memslots(void)
{
KVMState *s = KVM_STATE(current_accel());
@@ -196,15 +244,20 @@ unsigned int kvm_get_free_memslots(void)
/* Called with KVMMemoryListener.slots_lock held */
static KVMSlot *kvm_get_free_slot(KVMMemoryListener *kml)
{
- KVMState *s = kvm_state;
int i;
- for (i = 0; i < s->nr_slots_max; i++) {
+retry:
+ for (i = 0; i < kml->nr_slots_allocated; i++) {
if (kml->slots[i].memory_size == 0) {
return &kml->slots[i];
}
}
+ /* If no free slots, try to grow first by doubling */
+ if (kvm_slots_grow(kml, kml->nr_slots_allocated * 2)) {
+ goto retry;
+ }
+
return NULL;
}
@@ -225,10 +278,9 @@ static KVMSlot *kvm_lookup_matching_slot(KVMMemoryListener *kml,
hwaddr start_addr,
hwaddr size)
{
- KVMState *s = kvm_state;
int i;
- for (i = 0; i < s->nr_slots_max; i++) {
+ for (i = 0; i < kml->nr_slots_allocated; i++) {
KVMSlot *mem = &kml->slots[i];
if (start_addr == mem->start_addr && size == mem->memory_size) {
@@ -270,7 +322,7 @@ int kvm_physical_memory_addr_from_host(KVMState *s, void *ram,
int i, ret = 0;
kvm_slots_lock();
- for (i = 0; i < s->nr_slots_max; i++) {
+ for (i = 0; i < kml->nr_slots_allocated; i++) {
KVMSlot *mem = &kml->slots[i];
if (ram >= mem->ram && ram < mem->ram + mem->memory_size) {
@@ -1074,7 +1126,7 @@ static int kvm_physical_log_clear(KVMMemoryListener *kml,
kvm_slots_lock();
- for (i = 0; i < s->nr_slots_max; i++) {
+ for (i = 0; i < kml->nr_slots_allocated; i++) {
mem = &kml->slots[i];
/* Discard slots that are empty or do not overlap the section */
if (!mem->memory_size ||
@@ -1722,12 +1774,8 @@ static void kvm_log_sync_global(MemoryListener *l, bool last_stage)
/* Flush all kernel dirty addresses into KVMSlot dirty bitmap */
kvm_dirty_ring_flush();
- /*
- * TODO: make this faster when nr_slots_max is big while there are
- * only a few used slots (small VMs).
- */
kvm_slots_lock();
- for (i = 0; i < s->nr_slots_max; i++) {
+ for (i = 0; i < kml->nr_slots_allocated; i++) {
mem = &kml->slots[i];
if (mem->memory_size && mem->flags & KVM_MEM_LOG_DIRTY_PAGES) {
kvm_slot_sync_dirty_pages(mem);
@@ -1842,12 +1890,9 @@ void kvm_memory_listener_register(KVMState *s, KVMMemoryListener *kml,
{
int i;
- kml->slots = g_new0(KVMSlot, s->nr_slots_max);
kml->as_id = as_id;
- for (i = 0; i < s->nr_slots_max; i++) {
- kml->slots[i].slot = i;
- }
+ kvm_slots_grow(kml, KVM_MEMSLOTS_NUM_ALLOC_DEFAULT);
QSIMPLEQ_INIT(&kml->transaction_add);
QSIMPLEQ_INIT(&kml->transaction_del);
@@ -2464,6 +2509,18 @@ static int kvm_init(MachineState *ms)
s->nr_slots_max = KVM_MEMSLOTS_NUM_MAX_DEFAULT;
}
+ /*
+ * A VM requires at least a few memslots to work, or it may even fail
+ * to boot. Make sure the supported maximum is no smaller than what we
+ * will initially allocate.
+ */
+ if (s->nr_slots_max < KVM_MEMSLOTS_NUM_ALLOC_DEFAULT) {
+ ret = -EINVAL;
+ fprintf(stderr, "KVM max supported number of slots (%d) too small\n",
+ s->nr_slots_max);
+ goto err;
+ }
+
s->nr_as = kvm_check_extension(s, KVM_CAP_MULTI_ADDRESS_SPACE);
if (s->nr_as <= 1) {
s->nr_as = 1;
diff --git a/accel/kvm/trace-events b/accel/kvm/trace-events
index 37626c1ac5..ad2ae6fca5 100644
--- a/accel/kvm/trace-events
+++ b/accel/kvm/trace-events
@@ -36,3 +36,4 @@ kvm_io_window_exit(void) ""
kvm_run_exit_system_event(int cpu_index, uint32_t event_type) "cpu_index %d, system_even_type %"PRIu32
kvm_convert_memory(uint64_t start, uint64_t size, const char *msg) "start 0x%" PRIx64 " size 0x%" PRIx64 " %s"
kvm_memory_fault(uint64_t start, uint64_t size, uint64_t flags) "start 0x%" PRIx64 " size 0x%" PRIx64 " flags 0x%" PRIx64
+kvm_slots_grow(unsigned int old, unsigned int new) "%u -> %u"
--
2.45.0