[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[Qemu-devel] [PATCH v2 3/3] x86: Hyper-V SynIC timers test
From: Andrey Smetanin
Subject: [Qemu-devel] [PATCH v2 3/3] x86: Hyper-V SynIC timers test
Date: Tue, 8 Dec 2015 19:36:50 +0300
The test checks Hyper-V SynIC timers functionality.
The test runs on every vCPU and performs start/stop
of periodic/one-shot timers (with period=1ms) and checks
validity of received expiration messages in appropriate
ISR's.
Changes v2:
* reorg code to use generic hyperv.h
* split timer test into test cases with separate callbacks
* removed unnecessary irq_enable() calls
* moved sint's create/destroy into test prepare/cleanup callbacks
* defined used sint's numbers and vectors
Signed-off-by: Andrey Smetanin <address@hidden>
Reviewed-by: Roman Kagan <address@hidden>
CC: Paolo Bonzini <address@hidden>
CC: Marcelo Tosatti <address@hidden>
CC: Roman Kagan <address@hidden>
CC: Denis V. Lunev <address@hidden>
CC: address@hidden
---
config/config-x86-common.mak | 5 +-
x86/hyperv.h | 125 ++++++++++++++
x86/hyperv_stimer.c | 376 +++++++++++++++++++++++++++++++++++++++++++
x86/unittests.cfg | 5 +
4 files changed, 510 insertions(+), 1 deletion(-)
create mode 100644 x86/hyperv_stimer.c
diff --git a/config/config-x86-common.mak b/config/config-x86-common.mak
index 156be1c..72b95e3 100644
--- a/config/config-x86-common.mak
+++ b/config/config-x86-common.mak
@@ -37,7 +37,7 @@ tests-common = $(TEST_DIR)/vmexit.flat $(TEST_DIR)/tsc.flat \
$(TEST_DIR)/s3.flat $(TEST_DIR)/pmu.flat \
$(TEST_DIR)/tsc_adjust.flat $(TEST_DIR)/asyncpf.flat \
$(TEST_DIR)/init.flat $(TEST_DIR)/smap.flat \
- $(TEST_DIR)/hyperv_synic.flat
+ $(TEST_DIR)/hyperv_synic.flat $(TEST_DIR)/hyperv_stimer.flat
ifdef API
tests-common += api/api-sample
@@ -116,6 +116,9 @@ $(TEST_DIR)/memory.elf: $(cstart.o) $(TEST_DIR)/memory.o
$(TEST_DIR)/hyperv_synic.elf: $(cstart.o) $(TEST_DIR)/hyperv.o \
$(TEST_DIR)/hyperv_synic.o
+$(TEST_DIR)/hyperv_stimer.elf: $(cstart.o) $(TEST_DIR)/hyperv.o \
+ $(TEST_DIR)/hyperv_stimer.o
+
arch_clean:
$(RM) $(TEST_DIR)/*.o $(TEST_DIR)/*.flat $(TEST_DIR)/*.elf \
$(TEST_DIR)/.*.d lib/x86/.*.d
diff --git a/x86/hyperv.h b/x86/hyperv.h
index 0dd1d0d..faf931b 100644
--- a/x86/hyperv.h
+++ b/x86/hyperv.h
@@ -7,7 +7,11 @@
#define HYPERV_CPUID_FEATURES 0x40000003
+#define HV_X64_MSR_TIME_REF_COUNT_AVAILABLE (1 << 1)
#define HV_X64_MSR_SYNIC_AVAILABLE (1 << 2)
+#define HV_X64_MSR_SYNTIMER_AVAILABLE (1 << 3)
+
+#define HV_X64_MSR_TIME_REF_COUNT 0x40000020
/* Define synthetic interrupt controller model specific registers. */
#define HV_X64_MSR_SCONTROL 0x40000080
@@ -32,6 +36,19 @@
#define HV_X64_MSR_SINT14 0x4000009E
#define HV_X64_MSR_SINT15 0x4000009F
+/*
+ * Synthetic Timer MSRs. Four timers per vcpu.
+ */
+
+#define HV_X64_MSR_STIMER0_CONFIG 0x400000B0
+#define HV_X64_MSR_STIMER0_COUNT 0x400000B1
+#define HV_X64_MSR_STIMER1_CONFIG 0x400000B2
+#define HV_X64_MSR_STIMER1_COUNT 0x400000B3
+#define HV_X64_MSR_STIMER2_CONFIG 0x400000B4
+#define HV_X64_MSR_STIMER2_COUNT 0x400000B5
+#define HV_X64_MSR_STIMER3_CONFIG 0x400000B6
+#define HV_X64_MSR_STIMER3_COUNT 0x400000B7
+
#define HV_SYNIC_CONTROL_ENABLE (1ULL << 0)
#define HV_SYNIC_SIMP_ENABLE (1ULL << 0)
#define HV_SYNIC_SIEFP_ENABLE (1ULL << 0)
@@ -40,6 +57,104 @@
#define HV_SYNIC_SINT_VECTOR_MASK (0xFF)
#define HV_SYNIC_SINT_COUNT 16
+#define HV_STIMER_ENABLE (1ULL << 0)
+#define HV_STIMER_PERIODIC (1ULL << 1)
+#define HV_STIMER_LAZY (1ULL << 2)
+#define HV_STIMER_AUTOENABLE (1ULL << 3)
+#define HV_STIMER_SINT(config) (u8)(((config) >> 16) & 0x0F)
+
+#define HV_SYNIC_STIMER_COUNT (4)
+
+/* Define synthetic interrupt controller message constants. */
+#define HV_MESSAGE_SIZE (256)
+#define HV_MESSAGE_PAYLOAD_BYTE_COUNT (240)
+#define HV_MESSAGE_PAYLOAD_QWORD_COUNT (30)
+
+/* Define hypervisor message types. */
+enum hv_message_type {
+ HVMSG_NONE = 0x00000000,
+
+ /* Memory access messages. */
+ HVMSG_UNMAPPED_GPA = 0x80000000,
+ HVMSG_GPA_INTERCEPT = 0x80000001,
+
+ /* Timer notification messages. */
+ HVMSG_TIMER_EXPIRED = 0x80000010,
+
+ /* Error messages. */
+ HVMSG_INVALID_VP_REGISTER_VALUE = 0x80000020,
+ HVMSG_UNRECOVERABLE_EXCEPTION = 0x80000021,
+ HVMSG_UNSUPPORTED_FEATURE = 0x80000022,
+
+ /* Trace buffer complete messages. */
+ HVMSG_EVENTLOG_BUFFERCOMPLETE = 0x80000040,
+
+ /* Platform-specific processor intercept messages. */
+ HVMSG_X64_IOPORT_INTERCEPT = 0x80010000,
+ HVMSG_X64_MSR_INTERCEPT = 0x80010001,
+ HVMSG_X64_CPUID_INTERCEPT = 0x80010002,
+ HVMSG_X64_EXCEPTION_INTERCEPT = 0x80010003,
+ HVMSG_X64_APIC_EOI = 0x80010004,
+ HVMSG_X64_LEGACY_FP_ERROR = 0x80010005
+};
+
+/* Define synthetic interrupt controller message flags. */
+union hv_message_flags {
+ uint8_t asu8;
+ struct {
+ uint8_t msg_pending:1;
+ uint8_t reserved:7;
+ };
+};
+
+union hv_port_id {
+ uint32_t asu32;
+ struct {
+ uint32_t id:24;
+ uint32_t reserved:8;
+ } u;
+};
+
+/* Define port type. */
+enum hv_port_type {
+ HVPORT_MSG = 1,
+ HVPORT_EVENT = 2,
+ HVPORT_MONITOR = 3
+};
+
+/* Define synthetic interrupt controller message header. */
+struct hv_message_header {
+ uint32_t message_type;
+ uint8_t payload_size;
+ union hv_message_flags message_flags;
+ uint8_t reserved[2];
+ union {
+ uint64_t sender;
+ union hv_port_id port;
+ };
+};
+
+/* Define timer message payload structure. */
+struct hv_timer_message_payload {
+ uint32_t timer_index;
+ uint32_t reserved;
+ uint64_t expiration_time; /* When the timer expired */
+ uint64_t delivery_time; /* When the message was delivered */
+};
+
+/* Define synthetic interrupt controller message format. */
+struct hv_message {
+ struct hv_message_header header;
+ union {
+ uint64_t payload[HV_MESSAGE_PAYLOAD_QWORD_COUNT];
+ } u;
+};
+
+/* Define the synthetic interrupt message page layout. */
+struct hv_message_page {
+ struct hv_message sint_message[HV_SYNIC_SINT_COUNT];
+};
+
enum {
HV_TEST_DEV_SINT_ROUTE_CREATE = 1,
HV_TEST_DEV_SINT_ROUTE_DESTROY,
@@ -51,6 +166,16 @@ static inline bool synic_supported(void)
return cpuid(HYPERV_CPUID_FEATURES).a & HV_X64_MSR_SYNIC_AVAILABLE;
}
+/* SynIC timer availability: CPUID 0x40000003 EAX bit 3 (not the SynIC bit). */
+static inline bool stimer_supported(void)
+{
+    return cpuid(HYPERV_CPUID_FEATURES).a & HV_X64_MSR_SYNTIMER_AVAILABLE;
+}
+
+static inline bool hv_time_ref_counter_supported(void)
+{
+    return cpuid(HYPERV_CPUID_FEATURES).a &
+           HV_X64_MSR_TIME_REF_COUNT_AVAILABLE;
+}
+
void synic_sint_create(int vcpu, int sint, int vec, bool auto_eoi);
void synic_sint_set(int vcpu, int sint);
void synic_sint_destroy(int vcpu, int sint);
diff --git a/x86/hyperv_stimer.c b/x86/hyperv_stimer.c
new file mode 100644
index 0000000..767459e
--- /dev/null
+++ b/x86/hyperv_stimer.c
@@ -0,0 +1,376 @@
+#include "libcflat.h"
+#include "processor.h"
+#include "msr.h"
+#include "isr.h"
+#include "vm.h"
+#include "apic.h"
+#include "desc.h"
+#include "io.h"
+#include "smp.h"
+#include "atomic.h"
+#include "hyperv.h"
+
+#define MAX_CPUS 4
+
+#define SINT1_VEC 0xF1
+#define SINT2_VEC 0xF2
+
+#define SINT1_NUM 2
+#define SINT2_NUM 3
+#define ONE_MS_IN_100NS 10000
+
+static atomic_t g_cpus_comp_count;
+static int g_cpus_count;
+static struct spinlock g_synic_alloc_lock;
+
+struct stimer {
+ int sint;
+ int index;
+ atomic_t fire_count;
+};
+
+struct svcpu {
+ int vcpu;
+ void *msg_page;
+ void *evt_page;
+ struct stimer timer[HV_SYNIC_STIMER_COUNT];
+};
+
+static struct svcpu g_synic_vcpu[MAX_CPUS];
+
+static void *synic_alloc_page(void)
+{
+ void *page;
+
+ spin_lock(&g_synic_alloc_lock);
+ page = alloc_page();
+ spin_unlock(&g_synic_alloc_lock);
+ return page;
+}
+
+static void synic_free_page(void *page)
+{
+ spin_lock(&g_synic_alloc_lock);
+ free_page(page);
+ spin_unlock(&g_synic_alloc_lock);
+}
+
+static void stimer_init(struct stimer *timer, int index)
+{
+ memset(timer, 0, sizeof(*timer));
+ timer->index = index;
+}
+
+static void synic_enable(void)
+{
+ int vcpu = smp_id(), i;
+ struct svcpu *svcpu = &g_synic_vcpu[vcpu];
+
+ memset(svcpu, 0, sizeof(*svcpu));
+ svcpu->vcpu = vcpu;
+ svcpu->msg_page = synic_alloc_page();
+ for (i = 0; i < ARRAY_SIZE(svcpu->timer); i++) {
+ stimer_init(&svcpu->timer[i], i);
+ }
+ wrmsr(HV_X64_MSR_SIMP, (u64)virt_to_phys(svcpu->msg_page) |
+ HV_SYNIC_SIMP_ENABLE);
+ wrmsr(HV_X64_MSR_SCONTROL, HV_SYNIC_CONTROL_ENABLE);
+}
+
+static void stimer_shutdown(struct stimer *timer)
+{
+ wrmsr(HV_X64_MSR_STIMER0_CONFIG + 2*timer->index, 0);
+}
+
+static void process_stimer_expired(struct svcpu *svcpu, struct stimer *timer,
+ u64 expiration_time, u64 delivery_time)
+{
+ atomic_inc(&timer->fire_count);
+}
+
+static void process_stimer_msg(struct svcpu *svcpu,
+ struct hv_message *msg, int sint)
+{
+ struct hv_timer_message_payload *payload =
+ (struct hv_timer_message_payload *)msg->u.payload;
+ struct stimer *timer;
+
+ if (msg->header.message_type != HVMSG_TIMER_EXPIRED &&
+ msg->header.message_type != HVMSG_NONE) {
+ report("invalid Hyper-V SynIC msg type", false);
+ report_summary();
+ exit(-1);
+ return;
+ }
+
+ if (msg->header.message_type == HVMSG_NONE) {
+ return;
+ }
+
+ if (msg->header.payload_size < sizeof(*payload)) {
+ report("invalid Hyper-V SynIC msg payload size", false);
+ report_summary();
+ exit(-1);
+ return;
+ }
+
+ /* Now process timer expiration message */
+
+ if (payload->timer_index >= ARRAY_SIZE(svcpu->timer)) {
+ report("invalid Hyper-V SynIC timer index", false);
+ report_summary();
+ exit(-1);
+ return;
+ }
+ timer = &svcpu->timer[payload->timer_index];
+ process_stimer_expired(svcpu, timer, payload->expiration_time,
+ payload->delivery_time);
+
+ msg->header.message_type = HVMSG_NONE;
+ mb();
+ if (msg->header.message_flags.msg_pending) {
+ wrmsr(HV_X64_MSR_EOM, 0);
+ }
+}
+
+static void __stimer_isr(int vcpu)
+{
+ struct svcpu *svcpu = &g_synic_vcpu[vcpu];
+ struct hv_message_page *msg_page;
+ struct hv_message *msg;
+ int i;
+
+
+ msg_page = (struct hv_message_page *)svcpu->msg_page;
+ for (i = 0; i < ARRAY_SIZE(msg_page->sint_message); i++) {
+ msg = &msg_page->sint_message[i];
+ process_stimer_msg(svcpu, msg, i);
+ }
+}
+
+static void stimer_isr(isr_regs_t *regs)
+{
+ int vcpu = smp_id();
+
+ __stimer_isr(vcpu);
+ eoi();
+}
+
+static void stimer_isr_auto_eoi(isr_regs_t *regs)
+{
+ int vcpu = smp_id();
+
+ __stimer_isr(vcpu);
+}
+
+static void stimer_start(struct stimer *timer,
+ bool auto_enable, bool periodic,
+ u64 tick_100ns, int sint)
+{
+ u64 config, count;
+
+ timer->sint = sint;
+ atomic_set(&timer->fire_count, 0);
+
+ config = 0;
+ if (periodic) {
+ config |= HV_STIMER_PERIODIC;
+ }
+
+ config |= ((u8)(sint & 0xFF)) << 16;
+ config |= HV_STIMER_ENABLE;
+ if (auto_enable) {
+ config |= HV_STIMER_AUTOENABLE;
+ }
+
+ if (periodic) {
+ count = tick_100ns;
+ } else {
+ count = rdmsr(HV_X64_MSR_TIME_REF_COUNT) + tick_100ns;
+ }
+
+ if (!auto_enable) {
+ wrmsr(HV_X64_MSR_STIMER0_COUNT + timer->index*2, count);
+ wrmsr(HV_X64_MSR_STIMER0_CONFIG + timer->index*2, config);
+ } else {
+ wrmsr(HV_X64_MSR_STIMER0_CONFIG + timer->index*2, config);
+ wrmsr(HV_X64_MSR_STIMER0_COUNT + timer->index*2, count);
+ }
+}
+
+static void stimers_shutdown(void)
+{
+ int vcpu = smp_id(), i;
+ struct svcpu *svcpu = &g_synic_vcpu[vcpu];
+
+ for (i = 0; i < ARRAY_SIZE(svcpu->timer); i++) {
+ stimer_shutdown(&svcpu->timer[i]);
+ }
+}
+
+static void synic_disable(void)
+{
+ int vcpu = smp_id();
+ struct svcpu *svcpu = &g_synic_vcpu[vcpu];
+
+ wrmsr(HV_X64_MSR_SCONTROL, 0);
+ wrmsr(HV_X64_MSR_SIMP, 0);
+ wrmsr(HV_X64_MSR_SIEFP, 0);
+ synic_free_page(svcpu->msg_page);
+}
+
+static void cpu_comp(void)
+{
+ atomic_inc(&g_cpus_comp_count);
+}
+
+static void stimer_test_prepare(void *ctx)
+{
+ int vcpu = smp_id();
+
+ write_cr3((ulong)ctx);
+ synic_enable();
+ synic_sint_create(vcpu, SINT1_NUM, SINT1_VEC, false);
+ synic_sint_create(vcpu, SINT2_NUM, SINT2_VEC, true);
+ cpu_comp();
+}
+
+static void stimer_test_periodic(int vcpu, struct stimer *timer1,
+ struct stimer *timer2)
+{
+ /* Check periodic timers */
+ stimer_start(timer1, false, true, ONE_MS_IN_100NS, SINT1_NUM);
+ stimer_start(timer2, false, true, ONE_MS_IN_100NS, SINT2_NUM);
+ while ((atomic_read(&timer1->fire_count) < 1000) ||
+ (atomic_read(&timer2->fire_count) < 1000)) {
+ pause();
+ }
+ report("Hyper-V SynIC periodic timers test vcpu %d", true, vcpu);
+ stimer_shutdown(timer1);
+ stimer_shutdown(timer2);
+}
+
+static void stimer_test_one_shot(int vcpu, struct stimer *timer)
+{
+ /* Check one-shot timer */
+ stimer_start(timer, false, false, ONE_MS_IN_100NS, SINT1_NUM);
+ while (atomic_read(&timer->fire_count) < 1) {
+ pause();
+ }
+ report("Hyper-V SynIC one-shot test vcpu %d", true, vcpu);
+ stimer_shutdown(timer);
+}
+
+static void stimer_test_auto_enable_one_shot(int vcpu, struct stimer *timer)
+{
+    /* Check auto-enable one-shot timer */
+    stimer_start(timer, true, false, ONE_MS_IN_100NS, SINT1_NUM);
+    while (atomic_read(&timer->fire_count) < 1) {
+        pause();
+    }
+    report("Hyper-V SynIC auto-enable one-shot timer test vcpu %d", true,
+           vcpu);
+    stimer_shutdown(timer);
+}
+
+static void stimer_test_auto_enable_periodic(int vcpu, struct stimer *timer)
+{
+    /* Check auto-enable periodic timer */
+    stimer_start(timer, true, true, ONE_MS_IN_100NS, SINT1_NUM);
+    while (atomic_read(&timer->fire_count) < 1000) {
+        pause();
+    }
+    report("Hyper-V SynIC auto-enable periodic timer test vcpu %d", true,
+           vcpu);
+    stimer_shutdown(timer);
+}
+
+static void stimer_test(void *ctx)
+{
+ int vcpu = smp_id();
+ struct svcpu *svcpu = &g_synic_vcpu[vcpu];
+ struct stimer *timer1, *timer2;
+
+ irq_enable();
+
+ timer1 = &svcpu->timer[0];
+ timer2 = &svcpu->timer[1];
+
+ stimer_test_periodic(vcpu, timer1, timer2);
+ stimer_test_one_shot(vcpu, timer1);
+ stimer_test_auto_enable_one_shot(vcpu, timer2);
+ stimer_test_auto_enable_periodic(vcpu, timer1);
+
+ irq_disable();
+ cpu_comp();
+}
+
+static void stimer_test_cleanup(void *ctx)
+{
+ int vcpu = smp_id();
+
+ stimers_shutdown();
+ synic_sint_destroy(vcpu, SINT1_NUM);
+ synic_sint_destroy(vcpu, SINT2_NUM);
+ synic_disable();
+ cpu_comp();
+}
+
+static void on_each_cpu_async_wait(void (*func)(void *ctx), void *ctx)
+{
+ int i;
+
+ atomic_set(&g_cpus_comp_count, 0);
+ for (i = 0; i < g_cpus_count; i++) {
+ on_cpu_async(i, func, ctx);
+ }
+ while (atomic_read(&g_cpus_comp_count) != g_cpus_count) {
+ pause();
+ }
+}
+
+static void stimer_test_all(void)
+{
+ int ncpus;
+
+ setup_vm();
+ smp_init();
+ setup_idt();
+ enable_apic();
+
+ handle_irq(SINT1_VEC, stimer_isr);
+ handle_irq(SINT2_VEC, stimer_isr_auto_eoi);
+
+ ncpus = cpu_count();
+ if (ncpus > MAX_CPUS) {
+ ncpus = MAX_CPUS;
+ }
+
+ printf("cpus = %d\n", ncpus);
+ g_cpus_count = ncpus;
+
+ on_each_cpu_async_wait(stimer_test_prepare, (void *)read_cr3());
+ on_each_cpu_async_wait(stimer_test, NULL);
+ on_each_cpu_async_wait(stimer_test_cleanup, NULL);
+}
+
+int main(int ac, char **av)
+{
+
+ if (!synic_supported()) {
+ report("Hyper-V SynIC is not supported", true);
+ goto done;
+ }
+
+ if (!stimer_supported()) {
+ report("Hyper-V SynIC timers are not supported", true);
+ goto done;
+ }
+
+ if (!hv_time_ref_counter_supported()) {
+ report("Hyper-V time reference counter is not supported", true);
+ goto done;
+ }
+
+ stimer_test_all();
+done:
+ return report_summary();
+}
diff --git a/x86/unittests.cfg b/x86/unittests.cfg
index ffffc15..99eff26 100644
--- a/x86/unittests.cfg
+++ b/x86/unittests.cfg
@@ -183,3 +183,8 @@ arch = x86_64
file = hyperv_synic.flat
smp = 2
extra_params = -cpu kvm64,hv_synic -device hyperv-testdev
+
+[hyperv_stimer]
+file = hyperv_stimer.flat
+smp = 2
+extra_params = -cpu kvm64,hv_time,hv_synic,hv_stimer -device hyperv-testdev
--
2.4.3