Subject: Re: [PATCH V6 1/9] accel/kvm: Extract common KVM vCPU {creation,parking} code
From: Igor Mammedov
Date: Fri, 27 Oct 2023 14:56:52 +0200

On Fri, 13 Oct 2023 11:51:21 +0100
Salil Mehta <salil.mehta@huawei.com> wrote:
> KVM vCPU creation is done once during the initialization of the VM when Qemu
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
> thread is spawned. This is common to all the architectures.
is it really true for x86?
>
> Hot-unplug of vCPU results in destruction of the vCPU object in QOM but the
> corresponding KVM vCPU object in the Host KVM is not destroyed and its
^
since KVM doesn't support vCPU removal
> representative KVM vCPU object/context in Qemu is parked.
>
> Refactor common logic so that some APIs could be reused by vCPU Hotplug code.
The 'reused' part doesn't happen within this series, so the reason this
patch exists is not clear; no one can deduce it without the actual user
here. Suggest moving it to the series that will actually use this patch.
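
To make the intended reuse concrete, here is a minimal, hypothetical sketch of
how an architecture's hot-(un)plug path could call the extracted helpers; the
arch_cpu_* functions below are placeholders and are not part of this series or
of current QEMU.

/* Hypothetical glue code, for illustration only. */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "hw/core/cpu.h"
#include "sysemu/kvm.h"

/* On unplug: keep the KVM vCPU context around, since KVM cannot destroy vCPUs. */
static void arch_cpu_unplug(CPUState *cpu)
{
    /* architecture-specific teardown would happen before this point */
    kvm_park_vcpu(cpu);   /* stash cpu->kvm_fd under kvm_arch_vcpu_id(cpu) */
}

/* On (re)plug: kvm_create_vcpu() returns the parked fd if one exists,
 * otherwise it issues KVM_CREATE_VCPU. */
static void arch_cpu_plug(CPUState *cpu, Error **errp)
{
    int ret = kvm_create_vcpu(cpu);

    if (ret < 0) {
        error_setg_errno(errp, -ret, "failed to create/unpark KVM vCPU %lu",
                         kvm_arch_vcpu_id(cpu));
    }
}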
> Update new/old APIs with trace events instead of DTRACE.
>
> Signed-off-by: Salil Mehta <salil.mehta@huawei.com>
> Reviewed-by: Gavin Shan <gshan@redhat.com>
> Tested-by: Vishnu Pajjuri <vishnu@os.amperecomputing.com>
> Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
> Tested-by: Xianglai Li <lixianglai@loongson.cn>
> ---
> accel/kvm/kvm-all.c | 64 ++++++++++++++++++++++++++++++++----------
> accel/kvm/trace-events | 4 +++
> include/sysemu/kvm.h | 16 +++++++++++
> 3 files changed, 69 insertions(+), 15 deletions(-)
>
> diff --git a/accel/kvm/kvm-all.c b/accel/kvm/kvm-all.c
> index 72e1d1141c..bfa7816aaa 100644
> --- a/accel/kvm/kvm-all.c
> +++ b/accel/kvm/kvm-all.c
> @@ -137,6 +137,7 @@ static QemuMutex kml_slots_lock;
> #define kvm_slots_unlock() qemu_mutex_unlock(&kml_slots_lock)
>
> static void kvm_slot_init_dirty_bitmap(KVMSlot *mem);
> +static int kvm_get_vcpu(KVMState *s, unsigned long vcpu_id);
>
> static inline void kvm_resample_fd_remove(int gsi)
> {
> @@ -320,14 +321,53 @@ err:
> return ret;
> }
>
> +void kvm_park_vcpu(CPUState *cpu)
> +{
> + struct KVMParkedVcpu *vcpu;
> +
> + trace_kvm_park_vcpu(cpu->cpu_index, kvm_arch_vcpu_id(cpu));
> +
> + vcpu = g_malloc0(sizeof(*vcpu));
> + vcpu->vcpu_id = kvm_arch_vcpu_id(cpu);
> + vcpu->kvm_fd = cpu->kvm_fd;
> + QLIST_INSERT_HEAD(&kvm_state->kvm_parked_vcpus, vcpu, node);
> +}
> +
> +int kvm_create_vcpu(CPUState *cpu)
> +{
> + unsigned long vcpu_id = kvm_arch_vcpu_id(cpu);
> + KVMState *s = kvm_state;
> + int kvm_fd;
> +
> + trace_kvm_create_vcpu(cpu->cpu_index, kvm_arch_vcpu_id(cpu));
> +
> + /* check if the KVM vCPU already exist but is parked */
> + kvm_fd = kvm_get_vcpu(s, vcpu_id);
> + if (kvm_fd < 0) {
> + /* vCPU not parked: create a new KVM vCPU */
> + kvm_fd = kvm_vm_ioctl(s, KVM_CREATE_VCPU, vcpu_id);
> + if (kvm_fd < 0) {
> + error_report("KVM_CREATE_VCPU IOCTL failed for vCPU %lu",
> vcpu_id);
> + return kvm_fd;
> + }
> + }
> +
> + cpu->kvm_fd = kvm_fd;
> + cpu->kvm_state = s;
> + cpu->vcpu_dirty = true;
> + cpu->dirty_pages = 0;
> + cpu->throttle_us_per_full = 0;
> +
> + return 0;
> +}
> +
> static int do_kvm_destroy_vcpu(CPUState *cpu)
> {
> KVMState *s = kvm_state;
> long mmap_size;
> - struct KVMParkedVcpu *vcpu = NULL;
> int ret = 0;
>
> - DPRINTF("kvm_destroy_vcpu\n");
> + trace_kvm_destroy_vcpu(cpu->cpu_index, kvm_arch_vcpu_id(cpu));
>
> ret = kvm_arch_destroy_vcpu(cpu);
> if (ret < 0) {
> @@ -353,10 +393,7 @@ static int do_kvm_destroy_vcpu(CPUState *cpu)
> }
> }
>
> - vcpu = g_malloc0(sizeof(*vcpu));
> - vcpu->vcpu_id = kvm_arch_vcpu_id(cpu);
> - vcpu->kvm_fd = cpu->kvm_fd;
> - QLIST_INSERT_HEAD(&kvm_state->kvm_parked_vcpus, vcpu, node);
> + kvm_park_vcpu(cpu);
> err:
> return ret;
> }
> @@ -377,6 +414,8 @@ static int kvm_get_vcpu(KVMState *s, unsigned long vcpu_id)
> if (cpu->vcpu_id == vcpu_id) {
> int kvm_fd;
>
> + trace_kvm_get_vcpu(vcpu_id);
> +
> QLIST_REMOVE(cpu, node);
> kvm_fd = cpu->kvm_fd;
> g_free(cpu);
> @@ -384,7 +423,7 @@ static int kvm_get_vcpu(KVMState *s, unsigned long vcpu_id)
> }
> }
>
> - return kvm_vm_ioctl(s, KVM_CREATE_VCPU, (void *)vcpu_id);
> + return -ENOENT;
> }
>
> int kvm_init_vcpu(CPUState *cpu, Error **errp)
> @@ -395,19 +434,14 @@ int kvm_init_vcpu(CPUState *cpu, Error **errp)
>
> trace_kvm_init_vcpu(cpu->cpu_index, kvm_arch_vcpu_id(cpu));
>
> - ret = kvm_get_vcpu(s, kvm_arch_vcpu_id(cpu));
> + ret = kvm_create_vcpu(cpu);
> if (ret < 0) {
> - error_setg_errno(errp, -ret, "kvm_init_vcpu: kvm_get_vcpu failed (%lu)",
> + error_setg_errno(errp, -ret,
> + "kvm_init_vcpu: kvm_create_vcpu failed (%lu)",
> kvm_arch_vcpu_id(cpu));
> goto err;
> }
>
> - cpu->kvm_fd = ret;
> - cpu->kvm_state = s;
> - cpu->vcpu_dirty = true;
> - cpu->dirty_pages = 0;
> - cpu->throttle_us_per_full = 0;
> -
> mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
> if (mmap_size < 0) {
> ret = mmap_size;
> diff --git a/accel/kvm/trace-events b/accel/kvm/trace-events
> index 399aaeb0ec..cdd0c95c09 100644
> --- a/accel/kvm/trace-events
> +++ b/accel/kvm/trace-events
> @@ -9,6 +9,10 @@ kvm_device_ioctl(int fd, int type, void *arg) "dev fd %d, type 0x%x, arg %p"
> kvm_failed_reg_get(uint64_t id, const char *msg) "Warning: Unable to retrieve ONEREG %" PRIu64 " from KVM: %s"
> kvm_failed_reg_set(uint64_t id, const char *msg) "Warning: Unable to set ONEREG %" PRIu64 " to KVM: %s"
> kvm_init_vcpu(int cpu_index, unsigned long arch_cpu_id) "index: %d id: %lu"
> +kvm_create_vcpu(int cpu_index, unsigned long arch_cpu_id) "index: %d id: %lu"
> +kvm_get_vcpu(unsigned long arch_cpu_id) "id: %lu"
> +kvm_destroy_vcpu(int cpu_index, unsigned long arch_cpu_id) "index: %d id: %lu"
> +kvm_park_vcpu(int cpu_index, unsigned long arch_cpu_id) "index: %d id: %lu"
> kvm_irqchip_commit_routes(void) ""
> kvm_irqchip_add_msi_route(char *name, int vector, int virq) "dev %s vector %d virq %d"
> kvm_irqchip_update_msi_route(int virq) "Updating MSI route virq=%d"
> diff --git a/include/sysemu/kvm.h b/include/sysemu/kvm.h
> index ee9025f8e9..8137e6a44c 100644
> --- a/include/sysemu/kvm.h
> +++ b/include/sysemu/kvm.h
> @@ -465,6 +465,22 @@ void kvm_set_sigmask_len(KVMState *s, unsigned int sigmask_len);
> int kvm_physical_memory_addr_from_host(KVMState *s, void *ram_addr,
> hwaddr *phys_addr);
>
> +/**
> + * kvm_create_vcpu - Gets a parked KVM vCPU or creates a KVM vCPU
> + * @cpu: QOM CPUState object for which KVM vCPU has to be fetched/created.
> + *
> + * @returns: 0 when success, errno (<0) when failed.
> + */
> +int kvm_create_vcpu(CPUState *cpu);
> +
> +/**
> + * kvm_park_vcpu - Park QEMU KVM vCPU context
> + * @cpu: QOM CPUState object for which QEMU KVM vCPU context has to be parked.
> + *
> + * @returns: none
> + */
> +void kvm_park_vcpu(CPUState *cpu);
> +
> #endif /* NEED_CPU_H */
>
> void kvm_cpu_synchronize_state(CPUState *cpu);
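
For reference, the park/unpark bookkeeping the patch introduces boils down to a
small id-to-fd list. The standalone toy below (plain C with <sys/queue.h>, not
QEMU code; park_vcpu/get_parked_fd are made-up names) models that contract:
parking stores the fd, a later lookup removes and returns it, and a miss yields
-ENOENT so the caller falls back to KVM_CREATE_VCPU.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/queue.h>

struct parked_vcpu {
    unsigned long vcpu_id;
    int fd;
    LIST_ENTRY(parked_vcpu) node;
};

LIST_HEAD(parked_list, parked_vcpu);
static struct parked_list parked = LIST_HEAD_INITIALIZER(parked);

/* models kvm_park_vcpu(): remember (vcpu_id, fd) */
static void park_vcpu(unsigned long vcpu_id, int fd)
{
    struct parked_vcpu *p = calloc(1, sizeof(*p));

    p->vcpu_id = vcpu_id;
    p->fd = fd;
    LIST_INSERT_HEAD(&parked, p, node);
}

/* models kvm_get_vcpu(): remove and return the parked fd, or -ENOENT */
static int get_parked_fd(unsigned long vcpu_id)
{
    struct parked_vcpu *p;

    LIST_FOREACH(p, &parked, node) {
        if (p->vcpu_id == vcpu_id) {
            int fd = p->fd;

            LIST_REMOVE(p, node);
            free(p);
            return fd;
        }
    }
    return -ENOENT;  /* caller then issues KVM_CREATE_VCPU */
}

int main(void)
{
    park_vcpu(3, 42);                               /* "unplug" vCPU 3  */
    printf("vCPU 3 -> %d\n", get_parked_fd(3));     /* 42: fd reused    */
    printf("vCPU 5 -> %d\n", get_parked_fd(5));     /* -2: never parked */
    return 0;
}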