From: David Woodhouse
Subject: [RFC PATCH v2 19/22] i386/xen: implement HVMOP_set_evtchn_upcall_vector
Date: Fri, 9 Dec 2022 09:56:09 +0000
From: Ankur Arora <ankur.a.arora@oracle.com>
The HVMOP_set_evtchn_upcall_vector hypercall sets the per-vCPU upcall
vector, to be delivered to the local APIC just like an MSI (with an EOI).
This takes precedence over the system-wide delivery method set by the
HVMOP_set_param hypercall with HVM_PARAM_CALLBACK_IRQ. It's used by
Windows and Xen (PV shim) guests but normally not by Linux.
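
(For context, not part of the patch: a rough guest-side sketch of how this
hypercall is typically invoked, assuming the standard Xen public header
definition of struct xen_hvm_evtchn_upcall_vector and the usual
HYPERVISOR_hvm_op() guest hypercall wrapper; the function name and include
path below are illustrative only.)

    /* Illustrative sketch -- not part of this patch. */
    #include <xen/interface/hvm/hvm_op.h>   /* assumed Linux guest path; Xen's
                                               public header is hvm/hvm_op.h */

    static int set_percpu_upcall_vector(uint32_t vcpu_id, uint8_t vector)
    {
        struct xen_hvm_evtchn_upcall_vector op = {
            .vcpu   = vcpu_id, /* Xen vcpu_id of the vCPU being configured */
            .vector = vector,  /* must be >= 0x10; delivered like an MSI,
                                  with an EOI */
        };

        return HYPERVISOR_hvm_op(HVMOP_set_evtchn_upcall_vector, &op);
    }
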
Signed-off-by: Ankur Arora <ankur.a.arora@oracle.com>
Signed-off-by: Joao Martins <joao.m.martins@oracle.com>
[dwmw2: Rework for upstream kernel changes and split from HVMOP_set_param]
Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
---
target/i386/cpu.h | 1 +
target/i386/kvm/kvm.c | 7 ++++
target/i386/machine.c | 4 ++-
target/i386/trace-events | 1 +
target/i386/xen.c | 69 +++++++++++++++++++++++++++++++++++++---
target/i386/xen.h | 1 +
6 files changed, 77 insertions(+), 6 deletions(-)
diff --git a/target/i386/cpu.h b/target/i386/cpu.h
index bf44a87ddb..938a1b9c8b 100644
--- a/target/i386/cpu.h
+++ b/target/i386/cpu.h
@@ -1792,6 +1792,7 @@ typedef struct CPUArchState {
uint64_t xen_vcpu_info_default_gpa;
uint64_t xen_vcpu_time_info_gpa;
uint64_t xen_vcpu_runstate_gpa;
+ uint8_t xen_vcpu_callback_vector;
#endif
#if defined(CONFIG_HVF)
HVFX86LazyFlags hvf_lflags;
diff --git a/target/i386/kvm/kvm.c b/target/i386/kvm/kvm.c
index a5e67a3119..dc1b3fc502 100644
--- a/target/i386/kvm/kvm.c
+++ b/target/i386/kvm/kvm.c
@@ -4762,6 +4762,13 @@ int kvm_arch_put_registers(CPUState *cpu, int level)
return ret;
}
}
+
+ if (x86_cpu->env.xen_vcpu_callback_vector) {
+ ret = kvm_xen_set_vcpu_callback_vector(cpu);
+ if (ret < 0) {
+ return ret;
+ }
+ }
}
#endif
diff --git a/target/i386/machine.c b/target/i386/machine.c
index 6a510e5cbd..09e13cf716 100644
--- a/target/i386/machine.c
+++ b/target/i386/machine.c
@@ -1265,7 +1265,8 @@ static bool xen_vcpu_needed(void *opaque)
return (env->xen_vcpu_info_gpa != UINT64_MAX ||
env->xen_vcpu_info_default_gpa != UINT64_MAX ||
env->xen_vcpu_time_info_gpa != UINT64_MAX ||
- env->xen_vcpu_runstate_gpa != UINT64_MAX);
+ env->xen_vcpu_runstate_gpa != UINT64_MAX ||
+ env->xen_vcpu_callback_vector != 0);
}
static const VMStateDescription vmstate_xen_vcpu = {
@@ -1278,6 +1279,7 @@ static const VMStateDescription vmstate_xen_vcpu = {
VMSTATE_UINT64(env.xen_vcpu_info_default_gpa, X86CPU),
VMSTATE_UINT64(env.xen_vcpu_time_info_gpa, X86CPU),
VMSTATE_UINT64(env.xen_vcpu_runstate_gpa, X86CPU),
+ VMSTATE_UINT8(env.xen_vcpu_callback_vector, X86CPU),
VMSTATE_END_OF_LIST()
}
};
diff --git a/target/i386/trace-events b/target/i386/trace-events
index 7118640697..58e28f1a19 100644
--- a/target/i386/trace-events
+++ b/target/i386/trace-events
@@ -16,3 +16,4 @@ kvm_sev_attestation_report(const char *mnonce, const char *data) "mnonce %s data %s"
kvm_xen_hypercall(int cpu, uint8_t cpl, uint64_t input, uint64_t a0, uint64_t a1, uint64_t a2, uint64_t ret) "xen_hypercall: cpu %d cpl %d input %" PRIu64 " a0 0x%" PRIx64 " a1 0x%" PRIx64 " a2 0x%" PRIx64" ret 0x%" PRIx64
kvm_xen_set_shared_info(uint64_t gfn) "shared info at gfn 0x%" PRIx64
kvm_xen_set_vcpu_attr(int cpu, int type, uint64_t gpa) "vcpu attr cpu %d type %d gpa 0x%" PRIx64
+kvm_xen_set_vcpu_callback(int cpu, int vector) "callback vcpu %d vector %d"
diff --git a/target/i386/xen.c b/target/i386/xen.c
index 97032049e6..2583c00a6b 100644
--- a/target/i386/xen.c
+++ b/target/i386/xen.c
@@ -19,6 +19,7 @@
#include "standard-headers/xen/version.h"
#include "standard-headers/xen/memory.h"
#include "standard-headers/xen/hvm/hvm_op.h"
+#include "standard-headers/xen/hvm/params.h"
#include "standard-headers/xen/vcpu.h"
static bool kvm_gva_to_gpa(CPUState *cs, uint64_t gva, uint64_t *gpa,
@@ -127,7 +128,8 @@ static bool kvm_xen_hcall_xen_version(struct kvm_xen_exit *exit, X86CPU *cpu,
fi.submap |= 1 << XENFEAT_writable_page_tables |
1 << XENFEAT_writable_descriptor_tables |
1 << XENFEAT_auto_translated_physmap |
- 1 << XENFEAT_supervisor_mode_kernel;
+ 1 << XENFEAT_supervisor_mode_kernel |
+ 1 << XENFEAT_hvm_callback_vector;
}
err = kvm_copy_to_gva(CPU(cpu), arg, &fi, sizeof(fi));
@@ -154,6 +156,29 @@ int kvm_xen_set_vcpu_attr(CPUState *cs, uint16_t type, uint64_t gpa)
return kvm_vcpu_ioctl(cs, KVM_XEN_VCPU_SET_ATTR, &xhsi);
}
+int kvm_xen_set_vcpu_callback_vector(CPUState *cs)
+{
+ uint8_t vector = X86_CPU(cs)->env.xen_vcpu_callback_vector;
+ struct kvm_xen_vcpu_attr xva;
+
+ xva.type = KVM_XEN_VCPU_ATTR_TYPE_UPCALL_VECTOR;
+ xva.u.vector = vector;
+
+ trace_kvm_xen_set_vcpu_callback(cs->cpu_index, vector);
+
+ return kvm_vcpu_ioctl(cs, KVM_XEN_HVM_SET_ATTR, &xva);
+}
+
+static void do_set_vcpu_callback_vector(CPUState *cs, run_on_cpu_data data)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+
+ env->xen_vcpu_callback_vector = data.host_int;
+
+ kvm_xen_set_vcpu_callback_vector(cs);
+}
+
static void do_set_vcpu_info_default_gpa(CPUState *cs, run_on_cpu_data data)
{
X86CPU *cpu = X86_CPU(cs);
@@ -262,17 +287,51 @@ static bool kvm_xen_hcall_memory_op(struct kvm_xen_exit *exit,
return true;
}
-static bool kvm_xen_hcall_hvm_op(struct kvm_xen_exit *exit,
+static int kvm_xen_hcall_evtchn_upcall_vector(struct kvm_xen_exit *exit,
+ X86CPU *cpu, uint64_t arg)
+{
+ struct xen_hvm_evtchn_upcall_vector *up;
+ CPUState *target_cs;
+ int vector;
+
+ up = gva_to_hva(CPU(cpu), arg);
+ if (!up) {
+ return -EFAULT;
+ }
+
+ vector = up->vector;
+ if (vector < 0x10) {
+ return -EINVAL;
+ }
+
+ target_cs = qemu_get_cpu(up->vcpu);
+ if (!target_cs) {
+ return -EINVAL;
+ }
+
+    async_run_on_cpu(target_cs, do_set_vcpu_callback_vector,
+                     RUN_ON_CPU_HOST_INT(vector));
+ return 0;
+}
+
+static bool kvm_xen_hcall_hvm_op(struct kvm_xen_exit *exit, X86CPU *cpu,
int cmd, uint64_t arg)
{
+ int ret = -ENOSYS;
switch (cmd) {
+ case HVMOP_set_evtchn_upcall_vector:
+ ret = kvm_xen_hcall_evtchn_upcall_vector(exit, cpu,
+ exit->u.hcall.params[0]);
+ break;
case HVMOP_pagetable_dying:
- exit->u.hcall.result = -ENOSYS;
- return true;
+ ret = -ENOSYS;
+ break;
default:
return false;
}
+
+ exit->u.hcall.result = ret;
+ return true;
}
static int vcpuop_register_vcpu_info(CPUState *cs, CPUState *target,
@@ -377,7 +436,7 @@ static bool __kvm_xen_handle_exit(X86CPU *cpu, struct kvm_xen_exit *exit)
exit->u.hcall.params[1],
exit->u.hcall.params[2]);
case __HYPERVISOR_hvm_op:
- return kvm_xen_hcall_hvm_op(exit, exit->u.hcall.params[0],
+ return kvm_xen_hcall_hvm_op(exit, cpu, exit->u.hcall.params[0],
exit->u.hcall.params[1]);
case __HYPERVISOR_memory_op:
return kvm_xen_hcall_memory_op(exit, exit->u.hcall.params[0],
diff --git a/target/i386/xen.h b/target/i386/xen.h
index 53573e07f8..07b133ae29 100644
--- a/target/i386/xen.h
+++ b/target/i386/xen.h
@@ -25,5 +25,6 @@
int kvm_xen_init(KVMState *s, uint32_t xen_version);
int kvm_xen_handle_exit(X86CPU *cpu, struct kvm_xen_exit *exit);
int kvm_xen_set_vcpu_attr(CPUState *cs, uint16_t type, uint64_t gpa);
+int kvm_xen_set_vcpu_callback_vector(CPUState *cs);
#endif /* QEMU_I386_XEN_H */
--
2.35.3