[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[RFC PATCH v3 37/38] hw/xen: Support HVM_PARAM_CALLBACK_TYPE_GSI callback
From: |
David Woodhouse |
Subject: |
[RFC PATCH v3 37/38] hw/xen: Support HVM_PARAM_CALLBACK_TYPE_GSI callback |
Date: |
Fri, 16 Dec 2022 00:41:16 +0000 |
From: David Woodhouse <dwmw@amazon.co.uk>
The GSI callback (and later PCI_INTX) is a level triggered interrupt. It
is asserted when an event channel is delivered to vCPU0, and is supposed
to be cleared when the vcpu_info->evtchn_upcall_pending field for vCPU0
is cleared again.
Thankfully, Xen does *not* assert the GSI if the guest sets its own
evtchn_upcall_pending field; we only need to assert the GSI when we
have delivered an event for ourselves. So that's the easy part.
However, we *do* need to poll for the evtchn_upcall_pending flag being
cleared. In an ideal world we would poll that when the EOI happens on
the PIC/IOAPIC. That's how it works in the kernel with the VFIO eventfd
pairs — one is used to trigger the interrupt, and the other works in the
other direction to 'resample' on EOI, and trigger the first eventfd
again if the line is still active.
However, QEMU doesn't seem to do that. Even VFIO level interrupts seem
to be supported by temporarily unmapping the device's BARs from the
guest when an interrupt happens, then trapping *all* MMIO to the device
and sending the 'resample' event on *every* MMIO access until the IRQ
is cleared! Maybe in future we'll plumb the 'resample' concept through
QEMU's irq framework, but for now we'll do what Xen itself does: just
check the flag on every vmexit if the upcall GSI is known to be
asserted.
Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
---
hw/i386/kvm/xen_evtchn.c | 88 +++++++++++++++++++++++++++++++++++----
hw/i386/kvm/xen_evtchn.h | 3 ++
hw/i386/pc.c | 10 ++++-
include/sysemu/kvm_xen.h | 2 +-
target/i386/cpu.h | 1 +
target/i386/kvm/kvm.c | 13 ++++++
target/i386/kvm/xen-emu.c | 64 ++++++++++++++++++++--------
target/i386/kvm/xen-emu.h | 1 +
8 files changed, 154 insertions(+), 28 deletions(-)
diff --git a/hw/i386/kvm/xen_evtchn.c b/hw/i386/kvm/xen_evtchn.c
index 9292602c09..8ea8cf550e 100644
--- a/hw/i386/kvm/xen_evtchn.c
+++ b/hw/i386/kvm/xen_evtchn.c
@@ -24,6 +24,8 @@
#include "hw/sysbus.h"
#include "hw/xen/xen.h"
+#include "hw/i386/x86.h"
+#include "hw/irq.h"
#include "xen_evtchn.h"
#include "xen_overlay.h"
@@ -102,6 +104,7 @@ struct XenEvtchnState {
QemuMutex port_lock;
uint32_t nr_ports;
XenEvtchnPort port_table[EVTCHN_2L_NR_CHANNELS];
+ qemu_irq gsis[GSI_NUM_PINS];
};
struct XenEvtchnState *xen_evtchn_singleton;
@@ -166,9 +169,29 @@ static const TypeInfo xen_evtchn_info = {
void xen_evtchn_create(void)
{
XenEvtchnState *s = XEN_EVTCHN(sysbus_create_simple(TYPE_XEN_EVTCHN, -1,
NULL));
+ int i;
+
xen_evtchn_singleton = s;
qemu_mutex_init(&s->port_lock);
+
+ for (i = 0; i < GSI_NUM_PINS; i++) {
+ sysbus_init_irq(SYS_BUS_DEVICE(s), &s->gsis[i]);
+ }
+}
+
+void xen_evtchn_connect_gsis(qemu_irq *system_gsis)
+{
+ XenEvtchnState *s = xen_evtchn_singleton;
+ int i;
+
+ if (!s) {
+ return;
+ }
+
+ for (i = 0; i < GSI_NUM_PINS; i++) {
+ sysbus_connect_irq(SYS_BUS_DEVICE(s), i, system_gsis[i]);
+ }
}
static void xen_evtchn_register_types(void)
@@ -178,26 +201,75 @@ static void xen_evtchn_register_types(void)
type_init(xen_evtchn_register_types)
-
#define CALLBACK_VIA_TYPE_SHIFT 56
int xen_evtchn_set_callback_param(uint64_t param)
{
+ XenEvtchnState *s = xen_evtchn_singleton;
int ret = -ENOSYS;
- if (param >> CALLBACK_VIA_TYPE_SHIFT == HVM_PARAM_CALLBACK_TYPE_VECTOR) {
+ if (!s) {
+ return -ENOTSUP;
+ }
+
+ switch (param >> CALLBACK_VIA_TYPE_SHIFT) {
+ case HVM_PARAM_CALLBACK_TYPE_VECTOR: {
struct kvm_xen_hvm_attr xa = {
.type = KVM_XEN_ATTR_TYPE_UPCALL_VECTOR,
.u.vector = (uint8_t)param,
};
ret = kvm_vm_ioctl(kvm_state, KVM_XEN_HVM_SET_ATTR, &xa);
- if (!ret && xen_evtchn_singleton)
- xen_evtchn_singleton->callback_param = param;
+ break;
+ }
+ case HVM_PARAM_CALLBACK_TYPE_GSI:
+ ret = 0;
+ break;
}
+
+ if (!ret) {
+ s->callback_param = param;
+ }
+
return ret;
}
+static void xen_evtchn_set_callback_level(XenEvtchnState *s, int level)
+{
+ uint32_t param = (uint32_t)s->callback_param;
+
+ switch (s->callback_param >> CALLBACK_VIA_TYPE_SHIFT) {
+ case HVM_PARAM_CALLBACK_TYPE_GSI:
+ if (param < GSI_NUM_PINS) {
+ qemu_set_irq(s->gsis[param], level);
+ }
+ break;
+ }
+}
+
+static void inject_callback(XenEvtchnState *s, uint32_t vcpu)
+{
+ if (kvm_xen_inject_vcpu_callback_vector(vcpu, s->callback_param)) {
+ return;
+ }
+
+ /* GSI or PCI_INTX delivery is only for events on vCPU 0 */
+ if (vcpu) {
+ return;
+ }
+
+ xen_evtchn_set_callback_level(s, 1);
+}
+
+void xen_evtchn_deassert_callback(void)
+{
+ XenEvtchnState *s = xen_evtchn_singleton;
+
+ if (s) {
+ xen_evtchn_set_callback_level(s, 0);
+ }
+}
+
static void deassign_kernel_port(evtchn_port_t port)
{
struct kvm_xen_hvm_attr ha;
@@ -359,7 +431,7 @@ static int do_unmask_port_lm(XenEvtchnState *s,
evtchn_port_t port,
return 0;
}
- kvm_xen_inject_vcpu_callback_vector(s->port_table[port].vcpu);
+ inject_callback(s, s->port_table[port].vcpu);
return 0;
}
@@ -413,7 +485,7 @@ static int do_unmask_port_compat(XenEvtchnState *s,
evtchn_port_t port,
return 0;
}
- kvm_xen_inject_vcpu_callback_vector(s->port_table[port].vcpu);
+ inject_callback(s, s->port_table[port].vcpu);
return 0;
}
@@ -481,7 +553,7 @@ static int do_set_port_lm(XenEvtchnState *s, evtchn_port_t
port,
return 0;
}
- kvm_xen_inject_vcpu_callback_vector(s->port_table[port].vcpu);
+ inject_callback(s, s->port_table[port].vcpu);
return 0;
}
@@ -524,7 +596,7 @@ static int do_set_port_compat(XenEvtchnState *s,
evtchn_port_t port,
return 0;
}
- kvm_xen_inject_vcpu_callback_vector(s->port_table[port].vcpu);
+ inject_callback(s, s->port_table[port].vcpu);
return 0;
}
diff --git a/hw/i386/kvm/xen_evtchn.h b/hw/i386/kvm/xen_evtchn.h
index 2acbaeabaa..1176b67b91 100644
--- a/hw/i386/kvm/xen_evtchn.h
+++ b/hw/i386/kvm/xen_evtchn.h
@@ -9,9 +9,12 @@
* See the COPYING file in the top-level directory.
*/
+#include "hw/sysbus.h"
void xen_evtchn_create(void);
int xen_evtchn_set_callback_param(uint64_t param);
+void xen_evtchn_connect_gsis(qemu_irq *system_gsis);
+void xen_evtchn_deassert_callback(void);
void hmp_xen_event_list(Monitor *mon, const QDict *qdict);
void hmp_xen_event_inject(Monitor *mon, const QDict *qdict);
diff --git a/hw/i386/pc.c b/hw/i386/pc.c
index 427f79e6a8..1c4941de8f 100644
--- a/hw/i386/pc.c
+++ b/hw/i386/pc.c
@@ -1303,6 +1303,12 @@ void pc_basic_device_init(struct PCMachineState *pcms,
}
*rtc_state = mc146818_rtc_init(isa_bus, 2000, rtc_irq);
+#ifdef CONFIG_XEN_EMU
+ if (xen_mode == XEN_EMULATE) {
+ xen_evtchn_connect_gsis(gsi);
+ }
+#endif
+
qemu_register_boot_set(pc_boot_set, *rtc_state);
if (!xen_enabled() &&
@@ -1848,8 +1854,8 @@ int pc_machine_kvm_type(MachineState *machine, const char
*kvm_type)
{
#ifdef CONFIG_XEN_EMU
if (xen_mode == XEN_EMULATE) {
- xen_overlay_create();
- xen_evtchn_create();
+ xen_overlay_create();
+ xen_evtchn_create();
}
#endif
return 0;
diff --git a/include/sysemu/kvm_xen.h b/include/sysemu/kvm_xen.h
index e5b14ffe8d..73fe5969b8 100644
--- a/include/sysemu/kvm_xen.h
+++ b/include/sysemu/kvm_xen.h
@@ -13,7 +13,7 @@
#define QEMU_SYSEMU_KVM_XEN_H
void *kvm_xen_get_vcpu_info_hva(uint32_t vcpu_id);
-void kvm_xen_inject_vcpu_callback_vector(uint32_t vcpu_id);
+bool kvm_xen_inject_vcpu_callback_vector(uint32_t vcpu_id, uint64_t
callback_param);
int kvm_xen_set_vcpu_virq(uint32_t vcpu_id, uint16_t virq, uint16_t port);
#endif /* QEMU_SYSEMU_KVM_XEN_H */
diff --git a/target/i386/cpu.h b/target/i386/cpu.h
index 846c738fd7..9330eb83fd 100644
--- a/target/i386/cpu.h
+++ b/target/i386/cpu.h
@@ -1795,6 +1795,7 @@ typedef struct CPUArchState {
uint64_t xen_vcpu_time_info_gpa;
uint64_t xen_vcpu_runstate_gpa;
uint8_t xen_vcpu_callback_vector;
+ bool xen_callback_asserted;
uint16_t xen_virq[XEN_NR_VIRQS];
#endif
#if defined(CONFIG_HVF)
diff --git a/target/i386/kvm/kvm.c b/target/i386/kvm/kvm.c
index cbf41d6f81..32d808da37 100644
--- a/target/i386/kvm/kvm.c
+++ b/target/i386/kvm/kvm.c
@@ -5431,6 +5431,19 @@ int kvm_arch_handle_exit(CPUState *cs, struct kvm_run
*run)
char str[256];
KVMState *state;
+#ifdef CONFIG_XEN_EMU
+ /*
+ * If the callback is asserted as a GSI (or PCI INTx) then check if
+ * vcpu_info->evtchn_upcall_pending has been cleared, and deassert
+ * the callback IRQ if so. Ideally we could hook into the PIC/IOAPIC
+ * EOI and only resample then, exactly how the VFIO eventfd pairs
+ * are designed to work for level triggered interrupts.
+ */
+ if (cpu->env.xen_callback_asserted) {
+ kvm_xen_maybe_deassert_callback(cs);
+ }
+#endif
+
switch (run->exit_reason) {
case KVM_EXIT_HLT:
DPRINTF("handle_hlt\n");
diff --git a/target/i386/kvm/xen-emu.c b/target/i386/kvm/xen-emu.c
index a8c953e3ca..48ae47809a 100644
--- a/target/i386/kvm/xen-emu.c
+++ b/target/i386/kvm/xen-emu.c
@@ -240,18 +240,11 @@ static void *gpa_to_hva(uint64_t gpa)
mrs.offset_within_region);
}
-void *kvm_xen_get_vcpu_info_hva(uint32_t vcpu_id)
+static void *vcpu_info_hva_from_cs(CPUState *cs)
{
- CPUState *cs = qemu_get_cpu(vcpu_id);
- CPUX86State *env;
- uint64_t gpa;
-
- if (!cs) {
- return NULL;
- }
- env = &X86_CPU(cs)->env;
+ CPUX86State *env = &X86_CPU(cs)->env;
+ uint64_t gpa = env->xen_vcpu_info_gpa;
- gpa = env->xen_vcpu_info_gpa;
if (gpa == UINT64_MAX)
gpa = env->xen_vcpu_info_default_gpa;
if (gpa == UINT64_MAX)
@@ -260,13 +253,38 @@ void *kvm_xen_get_vcpu_info_hva(uint32_t vcpu_id)
return gpa_to_hva(gpa);
}
-void kvm_xen_inject_vcpu_callback_vector(uint32_t vcpu_id)
+void *kvm_xen_get_vcpu_info_hva(uint32_t vcpu_id)
+{
+ CPUState *cs = qemu_get_cpu(vcpu_id);
+
+ if (!cs) {
+ return NULL;
+ }
+
+ return vcpu_info_hva_from_cs(cs);
+}
+
+void kvm_xen_maybe_deassert_callback(CPUState *cs)
+{
+ struct vcpu_info *vi = vcpu_info_hva_from_cs(cs);
+ if (!vi) {
+ return;
+ }
+
+ /* If the evtchn_upcall_pending flag is cleared, turn the GSI off. */
+ if (!vi->evtchn_upcall_pending) {
+ X86_CPU(cs)->env.xen_callback_asserted = false;
+ xen_evtchn_deassert_callback();
+ }
+}
+
+bool kvm_xen_inject_vcpu_callback_vector(uint32_t vcpu_id, uint64_t
callback_param)
{
CPUState *cs = qemu_get_cpu(vcpu_id);
uint8_t vector;
if (!cs) {
- return;
+ return false;
}
vector = X86_CPU(cs)->env.xen_vcpu_callback_vector;
@@ -278,13 +296,25 @@ void kvm_xen_inject_vcpu_callback_vector(uint32_t vcpu_id)
.data = vector | (1UL << MSI_DATA_LEVEL_SHIFT),
};
kvm_irqchip_send_msi(kvm_state, msg);
- return;
+ return true;
}
- /* If the evtchn_upcall_pending field in the vcpu_info is set, then
- * KVM will automatically deliver the vector on entering the vCPU
- * so all we have to do is kick it out. */
- qemu_cpu_kick(cs);
+ switch(callback_param >> 56) {
+ case HVM_PARAM_CALLBACK_TYPE_VECTOR:
+ /* If the evtchn_upcall_pending field in the vcpu_info is set, then
+ * KVM will automatically deliver the vector on entering the vCPU
+ * so all we have to do is kick it out. */
+ qemu_cpu_kick(cs);
+ return true;
+
+ case HVM_PARAM_CALLBACK_TYPE_GSI:
+ case HVM_PARAM_CALLBACK_TYPE_PCI_INTX:
+ if (vcpu_id == 0) {
+ X86_CPU(cs)->env.xen_callback_asserted = true;
+ }
+ return false;
+ }
+ return false;
}
static int kvm_xen_set_vcpu_timer(CPUState *cs)
diff --git a/target/i386/kvm/xen-emu.h b/target/i386/kvm/xen-emu.h
index 58e4748d80..0ff8bed350 100644
--- a/target/i386/kvm/xen-emu.h
+++ b/target/i386/kvm/xen-emu.h
@@ -27,5 +27,6 @@ int kvm_xen_init(KVMState *s, uint32_t hypercall_msr);
int kvm_xen_handle_exit(X86CPU *cpu, struct kvm_xen_exit *exit);
int kvm_xen_set_vcpu_attr(CPUState *cs, uint16_t type, uint64_t gpa);
int kvm_xen_set_vcpu_callback_vector(CPUState *cs);
+void kvm_xen_maybe_deassert_callback(CPUState *cs);
#endif /* QEMU_I386_KVM_XEN_EMU_H */
--
2.35.3
- [RFC PATCH v3 14/38] i386/xen: implement HYPERVISOR_memory_op, (continued)
- [RFC PATCH v3 16/38] i386/xen: implement HYPERVISOR_hvm_op, David Woodhouse, 2022/12/15
- [RFC PATCH v3 24/38] i386/xen: implement HYPERVISOR_sched_op, David Woodhouse, 2022/12/15
- [RFC PATCH v3 27/38] hw/xen: Implement EVTCHNOP_unmask, David Woodhouse, 2022/12/15
- [RFC PATCH v3 37/38] hw/xen: Support HVM_PARAM_CALLBACK_TYPE_GSI callback,
David Woodhouse <=
- [RFC PATCH v3 20/38] i386/xen: handle VCPUOP_register_runstate_memory_area, David Woodhouse, 2022/12/15
- [RFC PATCH v3 13/38] i386/xen: manage and save/restore Xen guest long_mode setting, David Woodhouse, 2022/12/15
- [RFC PATCH v3 05/38] i386/kvm: handle Xen HVM cpuid leaves, David Woodhouse, 2022/12/15
- [RFC PATCH v3 01/38] include: import xen public headers, David Woodhouse, 2022/12/15
- [RFC PATCH v3 18/38] i386/xen: handle VCPUOP_register_vcpu_info, David Woodhouse, 2022/12/15
- [RFC PATCH v3 10/38] i386/xen: implement HYPERCALL_xen_version, David Woodhouse, 2022/12/15
- [RFC PATCH v3 07/38] xen-platform: allow its creation with XEN_EMULATE mode, David Woodhouse, 2022/12/15