[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[RFC PATCH v3 25/38] hw/xen: Implement EVTCHNOP_status
From: David Woodhouse
Subject: [RFC PATCH v3 25/38] hw/xen: Implement EVTCHNOP_status
Date: Fri, 16 Dec 2022 00:41:04 +0000
From: David Woodhouse <dwmw@amazon.co.uk>
This adds the basic structure for maintaining the port table and reporting
the status of ports therein.
Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
---
hw/i386/kvm/xen_evtchn.c | 108 +++++++++++++++++++++++++++++++++++++-
hw/i386/kvm/xen_evtchn.h | 4 ++
target/i386/kvm/xen-emu.c | 21 +++++++-
3 files changed, 130 insertions(+), 3 deletions(-)
diff --git a/hw/i386/kvm/xen_evtchn.c b/hw/i386/kvm/xen_evtchn.c
index 1ca0c034e7..77acf58540 100644
--- a/hw/i386/kvm/xen_evtchn.c
+++ b/hw/i386/kvm/xen_evtchn.c
@@ -22,6 +22,7 @@
#include "hw/sysbus.h"
#include "hw/xen/xen.h"
#include "xen_evtchn.h"
+#include "xen_overlay.h"
#include "sysemu/kvm.h"
#include <linux/kvm.h>
@@ -32,12 +33,34 @@
#define TYPE_XEN_EVTCHN "xenevtchn"
OBJECT_DECLARE_SIMPLE_TYPE(XenEvtchnState, XEN_EVTCHN)
+typedef struct XenEvtchnPort {
+ uint32_t vcpu; /* Xen/ACPI vcpu_id */
+ uint16_t type; /* EVTCHNSTAT_xxxx */
+ uint16_t type_val; /* pirq# / virq# / remote port according to type */
+} XenEvtchnPort;
+
+#define COMPAT_EVTCHN_2L_NR_CHANNELS 1024
+
+/*
+ * For unbound/interdomain ports there are only two possible remote
+ * domains; self and QEMU. Use a single high bit in type_val for that,
+ * and the low bits for the remote port number (or 0 for unbound).
+ */
+#define PORT_INFO_TYPEVAL_REMOTE_QEMU 0x8000
+#define PORT_INFO_TYPEVAL_REMOTE_PORT_MASK 0x7FFF
+
+#define DOMID_QEMU 0
+
struct XenEvtchnState {
/*< private >*/
SysBusDevice busdev;
/*< public >*/
uint64_t callback_param;
+
+ QemuMutex port_lock;
+ uint32_t nr_ports;
+ XenEvtchnPort port_table[EVTCHN_2L_NR_CHANNELS];
};
struct XenEvtchnState *xen_evtchn_singleton;
@@ -58,6 +81,18 @@ static bool xen_evtchn_is_needed(void *opaque)
return xen_mode == XEN_EMULATE;
}
+static const VMStateDescription xen_evtchn_port_vmstate = {
+ .name = "xen_evtchn_port",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(vcpu, XenEvtchnPort),
+ VMSTATE_UINT16(type, XenEvtchnPort),
+ VMSTATE_UINT16(type_val, XenEvtchnPort),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
static const VMStateDescription xen_evtchn_vmstate = {
.name = "xen_evtchn",
.version_id = 1,
@@ -66,6 +101,9 @@ static const VMStateDescription xen_evtchn_vmstate = {
.post_load = xen_evtchn_post_load,
.fields = (VMStateField[]) {
VMSTATE_UINT64(callback_param, XenEvtchnState),
+ VMSTATE_UINT32(nr_ports, XenEvtchnState),
+ VMSTATE_STRUCT_VARRAY_UINT32(port_table, XenEvtchnState, nr_ports, 1,
+ xen_evtchn_port_vmstate, XenEvtchnPort),
VMSTATE_END_OF_LIST()
}
};
@@ -86,7 +124,10 @@ static const TypeInfo xen_evtchn_info = {
void xen_evtchn_create(void)
{
- xen_evtchn_singleton = XEN_EVTCHN(sysbus_create_simple(TYPE_XEN_EVTCHN, -1, NULL));
+ XenEvtchnState *s = XEN_EVTCHN(sysbus_create_simple(TYPE_XEN_EVTCHN, -1, NULL));
+ xen_evtchn_singleton = s;
+
+ qemu_mutex_init(&s->port_lock);
}
static void xen_evtchn_register_types(void)
@@ -115,3 +156,68 @@ int xen_evtchn_set_callback_param(uint64_t param)
}
return ret;
}
+
+static bool valid_port(evtchn_port_t port)
+{
+ if (!port) {
+ return false;
+ }
+
+ if (xen_is_long_mode()) {
+ return port < EVTCHN_2L_NR_CHANNELS;
+ } else {
+ return port < COMPAT_EVTCHN_2L_NR_CHANNELS;
+ }
+}
+
+int xen_evtchn_status_op(struct evtchn_status *status)
+{
+ XenEvtchnState *s = xen_evtchn_singleton;
+ XenEvtchnPort *p;
+
+ if (!s) {
+ return -ENOTSUP;
+ }
+
+ if (status->dom != DOMID_SELF && status->dom != xen_domid)
+ return -EPERM;
+
+ if (!valid_port(status->port))
+ return -EINVAL;
+
+ qemu_mutex_lock(&s->port_lock);
+
+ p = &s->port_table[status->port];
+
+ status->status = p->type;
+ status->vcpu = p->vcpu;
+
+ switch (p->type) {
+ case EVTCHNSTAT_unbound:
+ if (p->type_val & PORT_INFO_TYPEVAL_REMOTE_QEMU)
+ status->u.unbound.dom = DOMID_QEMU;
+ else
+ status->u.unbound.dom = xen_domid;
+ break;
+
+ case EVTCHNSTAT_interdomain:
+ if (p->type_val & PORT_INFO_TYPEVAL_REMOTE_QEMU)
+ status->u.interdomain.dom = DOMID_QEMU;
+ else
+ status->u.interdomain.dom = xen_domid;
+
+ status->u.interdomain.port = p->type_val & PORT_INFO_TYPEVAL_REMOTE_PORT_MASK;
+ break;
+
+ case EVTCHNSTAT_pirq:
+ status->u.pirq = p->type_val;
+ break;
+
+ case EVTCHNSTAT_virq:
+ status->u.virq = p->type_val;
+ break;
+ }
+
+ qemu_mutex_unlock(&s->port_lock);
+ return 0;
+}
diff --git a/hw/i386/kvm/xen_evtchn.h b/hw/i386/kvm/xen_evtchn.h
index 11c6ed22a0..6f50e5c52d 100644
--- a/hw/i386/kvm/xen_evtchn.h
+++ b/hw/i386/kvm/xen_evtchn.h
@@ -9,5 +9,9 @@
* See the COPYING file in the top-level directory.
*/
+
void xen_evtchn_create(void);
int xen_evtchn_set_callback_param(uint64_t param);
+
+struct evtchn_status;
+int xen_evtchn_status_op(struct evtchn_status *status);
diff --git a/target/i386/kvm/xen-emu.c b/target/i386/kvm/xen-emu.c
index 1ff6d32edd..d4a35bef64 100644
--- a/target/i386/kvm/xen-emu.c
+++ b/target/i386/kvm/xen-emu.c
@@ -614,15 +614,32 @@ static bool kvm_xen_hcall_vcpu_op(struct kvm_xen_exit *exit, X86CPU *cpu,
return true;
}
-static bool kvm_xen_hcall_evtchn_op(struct kvm_xen_exit *exit,
+static bool kvm_xen_hcall_evtchn_op(struct kvm_xen_exit *exit, X86CPU *cpu,
int cmd, uint64_t arg)
{
+ CPUState *cs = CPU(cpu);
int err = -ENOSYS;
switch (cmd) {
case EVTCHNOP_init_control:
err = -ENOSYS;
break;
+
+ case EVTCHNOP_status: {
+ struct evtchn_status status;
+
+ qemu_build_assert(sizeof(status) == 24);
+ if (kvm_copy_from_gva(cs, arg, &status, sizeof(status))) {
+ err = -EFAULT;
+ break;
+ }
+
+ err = xen_evtchn_status_op(&status);
+ if (!err && kvm_copy_to_gva(cs, arg, &status, sizeof(status))) {
+ err = -EFAULT;
+ }
+ break;
+ }
default:
return false;
}
@@ -698,7 +715,7 @@ static bool do_kvm_xen_handle_exit(X86CPU *cpu, struct kvm_xen_exit *exit)
return kvm_xen_hcall_sched_op(exit, cpu, exit->u.hcall.params[0],
exit->u.hcall.params[1]);
case __HYPERVISOR_event_channel_op:
- return kvm_xen_hcall_evtchn_op(exit, exit->u.hcall.params[0],
+ return kvm_xen_hcall_evtchn_op(exit, cpu, exit->u.hcall.params[0],
exit->u.hcall.params[1]);
case __HYPERVISOR_vcpu_op:
return kvm_xen_hcall_vcpu_op(exit, cpu,
--
2.35.3
- [RFC PATCH v3 17/38] i386/xen: implement HYPERVISOR_vcpu_op, (continued)
- [RFC PATCH v3 17/38] i386/xen: implement HYPERVISOR_vcpu_op, David Woodhouse, 2022/12/15
- [RFC PATCH v3 12/38] i386/xen: add pc_machine_kvm_type to initialize XEN_EMULATE mode, David Woodhouse, 2022/12/15
- [RFC PATCH v3 38/38] hw/xen: Support HVM_PARAM_CALLBACK_TYPE_PCI_INTX callback, David Woodhouse, 2022/12/15
- [RFC PATCH v3 06/38] xen-platform: exclude vfio-pci from the PCI platform unplug, David Woodhouse, 2022/12/15
- [RFC PATCH v3 22/38] i386/xen: HVMOP_set_param / HVM_PARAM_CALLBACK_IRQ, David Woodhouse, 2022/12/15
- [RFC PATCH v3 29/38] hw/xen: Implement EVTCHNOP_bind_ipi, David Woodhouse, 2022/12/15
- [RFC PATCH v3 02/38] xen: add CONFIG_XENFV_MACHINE and CONFIG_XEN_EMU options for Xen emulation, David Woodhouse, 2022/12/15
- [RFC PATCH v3 32/38] hw/xen: Implement EVTCHNOP_bind_interdomain, David Woodhouse, 2022/12/15
- [RFC PATCH v3 25/38] hw/xen: Implement EVTCHNOP_status, David Woodhouse <=
- [RFC PATCH v3 08/38] hw/xen_backend: refactor xen_be_init(), David Woodhouse, 2022/12/15
- [RFC PATCH v3 15/38] i386/xen: implement XENMEM_add_to_physmap_batch, David Woodhouse, 2022/12/15
- [RFC PATCH v3 23/38] i386/xen: implement HYPERVISOR_event_channel_op, David Woodhouse, 2022/12/15
- [RFC PATCH v3 31/38] hw/xen: Implement EVTCHNOP_alloc_unbound, David Woodhouse, 2022/12/15
- [RFC PATCH v3 36/38] i386/xen: Implement SCHEDOP_poll, David Woodhouse, 2022/12/15
- [RFC PATCH v3 11/38] hw/xen: Add xen_overlay device for emulating shared xenheap pages, David Woodhouse, 2022/12/15
- [RFC PATCH v3 19/38] i386/xen: handle VCPUOP_register_vcpu_time_info, David Woodhouse, 2022/12/15
- [RFC PATCH v3 28/38] hw/xen: Implement EVTCHNOP_bind_virq, David Woodhouse, 2022/12/15
- [RFC PATCH v3 21/38] i386/xen: implement HVMOP_set_evtchn_upcall_vector, David Woodhouse, 2022/12/15