qemu-devel
[Top][All Lists]
Advanced

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Qemu-devel] [PATCH v2 4/5] kvm: x86: Add support for KVM_GET/PUT_VCPU_S


From: Jan Kiszka
Subject: [Qemu-devel] [PATCH v2 4/5] kvm: x86: Add support for KVM_GET/PUT_VCPU_STATE
Date: Mon, 09 Nov 2009 20:02:47 +0100
User-agent: Mozilla/5.0 (X11; U; Linux i686 (x86_64); de; rv:1.8.1.12) Gecko/20080226 SUSE/2.0.0.12-1.1 Thunderbird/2.0.0.12 Mnenhy/0.7.5.666

This patch adds support for the new KVM IOCTLs KVM_GET/SET_VCPU_STATE.
They are supposed to be introduced with 2.6.33 or 34 and allow extending
the VCPU state exchange between kernel and user space without adding new
IOCTLs, as well as acquiring/setting a complete state via a single system call.

In order to remain backward-compatible to existing kernels, the old
state synchronization interface is of course kept as a fall-back.

Signed-off-by: Jan Kiszka <address@hidden>
---

Changes in v2:
 - A classic: Let's make it cleaner, use ARRAY_SIZE, no need to test.
   IOW: fix request.header.nsubstates initialization
   (queue on kiszka.org updated)

 kvm-all.c         |   11 +
 kvm.h             |    1 
 target-i386/kvm.c |  451 +++++++++++++++++++++++++++++------------------------
 3 files changed, 260 insertions(+), 203 deletions(-)

diff --git a/kvm-all.c b/kvm-all.c
index 31bc2f8..0d22e42 100644
--- a/kvm-all.c
+++ b/kvm-all.c
@@ -61,6 +61,7 @@ struct KVMState
     int coalesced_mmio;
     int broken_set_mem_region;
     int migration_log;
+    int vcpu_state;
 #ifdef KVM_CAP_SET_GUEST_DEBUG
     struct kvm_sw_breakpoint_head kvm_sw_breakpoints;
 #endif
@@ -499,6 +500,11 @@ int kvm_init(int smp_cpus)
     }
 #endif
 
+    s->vcpu_state = 0;
+#ifdef KVM_CAP_VCPU_STATE
+    s->vcpu_state = kvm_check_extension(s, KVM_CAP_VCPU_STATE);
+#endif
+
     ret = kvm_arch_init(s, smp_cpus);
     if (ret < 0)
         goto err;
@@ -888,6 +894,11 @@ int kvm_has_sync_mmu(void)
 #endif
 }
 
+int kvm_has_vcpu_state(void)
+{
+    return kvm_state->vcpu_state;
+}
+
 void kvm_setup_guest_memory(void *start, size_t size)
 {
     if (!kvm_has_sync_mmu()) {
diff --git a/kvm.h b/kvm.h
index 6a82f6a..9e237d1 100644
--- a/kvm.h
+++ b/kvm.h
@@ -47,6 +47,7 @@ int kvm_log_stop(target_phys_addr_t phys_addr, ram_addr_t size);
 int kvm_set_migration_log(int enable);
 
 int kvm_has_sync_mmu(void);
+int kvm_has_vcpu_state(void);
 
 void kvm_setup_guest_memory(void *start, size_t size);
 
diff --git a/target-i386/kvm.c b/target-i386/kvm.c
index c769d70..101c313 100644
--- a/target-i386/kvm.c
+++ b/target-i386/kvm.c
@@ -352,115 +352,95 @@ static void kvm_getput_reg(__u64 *kvm_reg, target_ulong *qemu_reg, int set)
         *qemu_reg = *kvm_reg;
 }
 
-static int kvm_getput_regs(CPUState *env, int set)
-{
-    struct kvm_regs regs;
-    int ret = 0;
-
-    if (!set) {
-        ret = kvm_vcpu_ioctl(env, KVM_GET_REGS, &regs);
-        if (ret < 0)
-            return ret;
-    }
-
-    kvm_getput_reg(&regs.rax, &env->regs[R_EAX], set);
-    kvm_getput_reg(&regs.rbx, &env->regs[R_EBX], set);
-    kvm_getput_reg(&regs.rcx, &env->regs[R_ECX], set);
-    kvm_getput_reg(&regs.rdx, &env->regs[R_EDX], set);
-    kvm_getput_reg(&regs.rsi, &env->regs[R_ESI], set);
-    kvm_getput_reg(&regs.rdi, &env->regs[R_EDI], set);
-    kvm_getput_reg(&regs.rsp, &env->regs[R_ESP], set);
-    kvm_getput_reg(&regs.rbp, &env->regs[R_EBP], set);
+static void kvm_getput_regs(CPUState *env, struct kvm_regs *regs, int set)
+{
+    kvm_getput_reg(&regs->rax, &env->regs[R_EAX], set);
+    kvm_getput_reg(&regs->rbx, &env->regs[R_EBX], set);
+    kvm_getput_reg(&regs->rcx, &env->regs[R_ECX], set);
+    kvm_getput_reg(&regs->rdx, &env->regs[R_EDX], set);
+    kvm_getput_reg(&regs->rsi, &env->regs[R_ESI], set);
+    kvm_getput_reg(&regs->rdi, &env->regs[R_EDI], set);
+    kvm_getput_reg(&regs->rsp, &env->regs[R_ESP], set);
+    kvm_getput_reg(&regs->rbp, &env->regs[R_EBP], set);
 #ifdef TARGET_X86_64
-    kvm_getput_reg(&regs.r8, &env->regs[8], set);
-    kvm_getput_reg(&regs.r9, &env->regs[9], set);
-    kvm_getput_reg(&regs.r10, &env->regs[10], set);
-    kvm_getput_reg(&regs.r11, &env->regs[11], set);
-    kvm_getput_reg(&regs.r12, &env->regs[12], set);
-    kvm_getput_reg(&regs.r13, &env->regs[13], set);
-    kvm_getput_reg(&regs.r14, &env->regs[14], set);
-    kvm_getput_reg(&regs.r15, &env->regs[15], set);
+    kvm_getput_reg(&regs->r8, &env->regs[8], set);
+    kvm_getput_reg(&regs->r9, &env->regs[9], set);
+    kvm_getput_reg(&regs->r10, &env->regs[10], set);
+    kvm_getput_reg(&regs->r11, &env->regs[11], set);
+    kvm_getput_reg(&regs->r12, &env->regs[12], set);
+    kvm_getput_reg(&regs->r13, &env->regs[13], set);
+    kvm_getput_reg(&regs->r14, &env->regs[14], set);
+    kvm_getput_reg(&regs->r15, &env->regs[15], set);
 #endif
 
-    kvm_getput_reg(&regs.rflags, &env->eflags, set);
-    kvm_getput_reg(&regs.rip, &env->eip, set);
-
-    if (set)
-        ret = kvm_vcpu_ioctl(env, KVM_SET_REGS, &regs);
-
-    return ret;
+    kvm_getput_reg(&regs->rflags, &env->eflags, set);
+    kvm_getput_reg(&regs->rip, &env->eip, set);
 }
 
-static int kvm_put_fpu(CPUState *env)
+static void kvm_put_fpu(CPUState *env, struct kvm_fpu *fpu)
 {
-    struct kvm_fpu fpu;
     int i;
 
-    memset(&fpu, 0, sizeof fpu);
-    fpu.fsw = env->fpus & ~(7 << 11);
-    fpu.fsw |= (env->fpstt & 7) << 11;
-    fpu.fcw = env->fpuc;
-    for (i = 0; i < 8; ++i)
-       fpu.ftwx |= (!env->fptags[i]) << i;
-    memcpy(fpu.fpr, env->fpregs, sizeof env->fpregs);
-    memcpy(fpu.xmm, env->xmm_regs, sizeof env->xmm_regs);
-    fpu.mxcsr = env->mxcsr;
-
-    return kvm_vcpu_ioctl(env, KVM_SET_FPU, &fpu);
+    memset(fpu, 0, sizeof(*fpu));
+    fpu->fsw = env->fpus & ~(7 << 11);
+    fpu->fsw |= (env->fpstt & 7) << 11;
+    fpu->fcw = env->fpuc;
+    for (i = 0; i < 8; ++i) {
+        fpu->ftwx |= (!env->fptags[i]) << i;
+    }
+    memcpy(fpu->fpr, env->fpregs, sizeof env->fpregs);
+    memcpy(fpu->xmm, env->xmm_regs, sizeof env->xmm_regs);
+    fpu->mxcsr = env->mxcsr;
 }
 
-static int kvm_put_sregs(CPUState *env)
+static void kvm_put_sregs(CPUState *env, struct kvm_sregs *sregs)
 {
-    struct kvm_sregs sregs;
-
-    memset(sregs.interrupt_bitmap, 0, sizeof(sregs.interrupt_bitmap));
+    memset(sregs->interrupt_bitmap, 0, sizeof(sregs->interrupt_bitmap));
     if (env->interrupt_injected >= 0) {
-        sregs.interrupt_bitmap[env->interrupt_injected / 64] |=
+        sregs->interrupt_bitmap[env->interrupt_injected / 64] |=
                 (uint64_t)1 << (env->interrupt_injected % 64);
     }
 
     if ((env->eflags & VM_MASK)) {
-           set_v8086_seg(&sregs.cs, &env->segs[R_CS]);
-           set_v8086_seg(&sregs.ds, &env->segs[R_DS]);
-           set_v8086_seg(&sregs.es, &env->segs[R_ES]);
-           set_v8086_seg(&sregs.fs, &env->segs[R_FS]);
-           set_v8086_seg(&sregs.gs, &env->segs[R_GS]);
-           set_v8086_seg(&sregs.ss, &env->segs[R_SS]);
+        set_v8086_seg(&sregs->cs, &env->segs[R_CS]);
+        set_v8086_seg(&sregs->ds, &env->segs[R_DS]);
+        set_v8086_seg(&sregs->es, &env->segs[R_ES]);
+        set_v8086_seg(&sregs->fs, &env->segs[R_FS]);
+        set_v8086_seg(&sregs->gs, &env->segs[R_GS]);
+        set_v8086_seg(&sregs->ss, &env->segs[R_SS]);
     } else {
-           set_seg(&sregs.cs, &env->segs[R_CS]);
-           set_seg(&sregs.ds, &env->segs[R_DS]);
-           set_seg(&sregs.es, &env->segs[R_ES]);
-           set_seg(&sregs.fs, &env->segs[R_FS]);
-           set_seg(&sregs.gs, &env->segs[R_GS]);
-           set_seg(&sregs.ss, &env->segs[R_SS]);
-
-           if (env->cr[0] & CR0_PE_MASK) {
-               /* force ss cpl to cs cpl */
-               sregs.ss.selector = (sregs.ss.selector & ~3) |
-                       (sregs.cs.selector & 3);
-               sregs.ss.dpl = sregs.ss.selector & 3;
-           }
+        set_seg(&sregs->cs, &env->segs[R_CS]);
+        set_seg(&sregs->ds, &env->segs[R_DS]);
+        set_seg(&sregs->es, &env->segs[R_ES]);
+        set_seg(&sregs->fs, &env->segs[R_FS]);
+        set_seg(&sregs->gs, &env->segs[R_GS]);
+        set_seg(&sregs->ss, &env->segs[R_SS]);
+
+        if (env->cr[0] & CR0_PE_MASK) {
+            /* force ss cpl to cs cpl */
+            sregs->ss.selector = (sregs->ss.selector & ~3) |
+                (sregs->cs.selector & 3);
+            sregs->ss.dpl = sregs->ss.selector & 3;
+        }
     }
 
-    set_seg(&sregs.tr, &env->tr);
-    set_seg(&sregs.ldt, &env->ldt);
-
-    sregs.idt.limit = env->idt.limit;
-    sregs.idt.base = env->idt.base;
-    sregs.gdt.limit = env->gdt.limit;
-    sregs.gdt.base = env->gdt.base;
+    set_seg(&sregs->tr, &env->tr);
+    set_seg(&sregs->ldt, &env->ldt);
 
-    sregs.cr0 = env->cr[0];
-    sregs.cr2 = env->cr[2];
-    sregs.cr3 = env->cr[3];
-    sregs.cr4 = env->cr[4];
+    sregs->idt.limit = env->idt.limit;
+    sregs->idt.base = env->idt.base;
+    sregs->gdt.limit = env->gdt.limit;
+    sregs->gdt.base = env->gdt.base;
 
-    sregs.cr8 = cpu_get_apic_tpr(env);
-    sregs.apic_base = cpu_get_apic_base(env);
+    sregs->cr0 = env->cr[0];
+    sregs->cr2 = env->cr[2];
+    sregs->cr3 = env->cr[3];
+    sregs->cr4 = env->cr[4];
 
-    sregs.efer = env->efer;
+    sregs->cr8 = cpu_get_apic_tpr(env);
+    sregs->apic_base = cpu_get_apic_base(env);
 
-    return kvm_vcpu_ioctl(env, KVM_SET_SREGS, &sregs);
+    sregs->efer = env->efer;
 }
 
 static void kvm_msr_entry_set(struct kvm_msr_entry *entry,
@@ -470,20 +450,17 @@ static void kvm_msr_entry_set(struct kvm_msr_entry *entry,
     entry->data = value;
 }
 
-static int kvm_put_msrs(CPUState *env)
+static void kvm_put_msrs(CPUState *env, struct kvm_msrs *msr_list)
 {
-    struct {
-        struct kvm_msrs info;
-        struct kvm_msr_entry entries[100];
-    } msr_data;
-    struct kvm_msr_entry *msrs = msr_data.entries;
+    struct kvm_msr_entry *msrs = msr_list->entries;
     int n = 0;
 
     kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_CS, env->sysenter_cs);
     kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_ESP, env->sysenter_esp);
     kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_EIP, env->sysenter_eip);
-    if (kvm_has_msr_star(env))
-       kvm_msr_entry_set(&msrs[n++], MSR_STAR, env->star);
+    if (kvm_has_msr_star(env)) {
+        kvm_msr_entry_set(&msrs[n++], MSR_STAR, env->star);
+    }
     kvm_msr_entry_set(&msrs[n++], MSR_IA32_TSC, env->tsc);
 #ifdef TARGET_X86_64
     /* FIXME if lm capable */
@@ -492,78 +469,63 @@ static int kvm_put_msrs(CPUState *env)
     kvm_msr_entry_set(&msrs[n++], MSR_FMASK, env->fmask);
     kvm_msr_entry_set(&msrs[n++], MSR_LSTAR, env->lstar);
 #endif
-    msr_data.info.nmsrs = n;
-
-    return kvm_vcpu_ioctl(env, KVM_SET_MSRS, &msr_data);
-
+    msr_list->nmsrs = n;
 }
 
-
-static int kvm_get_fpu(CPUState *env)
+static void kvm_get_fpu(CPUState *env, struct kvm_fpu *fpu)
 {
-    struct kvm_fpu fpu;
-    int i, ret;
-
-    ret = kvm_vcpu_ioctl(env, KVM_GET_FPU, &fpu);
-    if (ret < 0)
-        return ret;
-
-    env->fpstt = (fpu.fsw >> 11) & 7;
-    env->fpus = fpu.fsw;
-    env->fpuc = fpu.fcw;
-    for (i = 0; i < 8; ++i)
-       env->fptags[i] = !((fpu.ftwx >> i) & 1);
-    memcpy(env->fpregs, fpu.fpr, sizeof env->fpregs);
-    memcpy(env->xmm_regs, fpu.xmm, sizeof env->xmm_regs);
-    env->mxcsr = fpu.mxcsr;
+    int i;
 
-    return 0;
+    env->fpstt = (fpu->fsw >> 11) & 7;
+    env->fpus = fpu->fsw;
+    env->fpuc = fpu->fcw;
+    for (i = 0; i < 8; ++i) {
+        env->fptags[i] = !((fpu->ftwx >> i) & 1);
+    }
+    memcpy(env->fpregs, fpu->fpr, sizeof env->fpregs);
+    memcpy(env->xmm_regs, fpu->xmm, sizeof env->xmm_regs);
+    env->mxcsr = fpu->mxcsr;
 }
 
-static int kvm_get_sregs(CPUState *env)
+static void kvm_get_sregs(CPUState *env, struct kvm_sregs *sregs)
 {
-    struct kvm_sregs sregs;
     uint32_t hflags;
-    int bit, i, ret;
-
-    ret = kvm_vcpu_ioctl(env, KVM_GET_SREGS, &sregs);
-    if (ret < 0)
-        return ret;
+    int bit, i;
 
     /* There can only be one pending IRQ set in the bitmap at a time, so try
        to find it and save its number instead (-1 for none). */
     env->interrupt_injected = -1;
-    for (i = 0; i < ARRAY_SIZE(sregs.interrupt_bitmap); i++) {
-        if (sregs.interrupt_bitmap[i]) {
-            bit = ctz64(sregs.interrupt_bitmap[i]);
+    for (i = 0; i < ARRAY_SIZE(sregs->interrupt_bitmap); i++) {
+        if (sregs->interrupt_bitmap[i]) {
+            bit = ctz64(sregs->interrupt_bitmap[i]);
             env->interrupt_injected = i * 64 + bit;
             break;
         }
     }
 
-    get_seg(&env->segs[R_CS], &sregs.cs);
-    get_seg(&env->segs[R_DS], &sregs.ds);
-    get_seg(&env->segs[R_ES], &sregs.es);
-    get_seg(&env->segs[R_FS], &sregs.fs);
-    get_seg(&env->segs[R_GS], &sregs.gs);
-    get_seg(&env->segs[R_SS], &sregs.ss);
+    get_seg(&env->segs[R_CS], &sregs->cs);
+    get_seg(&env->segs[R_DS], &sregs->ds);
+    get_seg(&env->segs[R_ES], &sregs->es);
+    get_seg(&env->segs[R_FS], &sregs->fs);
+    get_seg(&env->segs[R_GS], &sregs->gs);
+    get_seg(&env->segs[R_SS], &sregs->ss);
 
-    get_seg(&env->tr, &sregs.tr);
-    get_seg(&env->ldt, &sregs.ldt);
+    get_seg(&env->tr, &sregs->tr);
+    get_seg(&env->ldt, &sregs->ldt);
 
-    env->idt.limit = sregs.idt.limit;
-    env->idt.base = sregs.idt.base;
-    env->gdt.limit = sregs.gdt.limit;
-    env->gdt.base = sregs.gdt.base;
+    env->idt.limit = sregs->idt.limit;
+    env->idt.base = sregs->idt.base;
+    env->gdt.limit = sregs->gdt.limit;
+    env->gdt.base = sregs->gdt.base;
 
-    env->cr[0] = sregs.cr0;
-    env->cr[2] = sregs.cr2;
-    env->cr[3] = sregs.cr3;
-    env->cr[4] = sregs.cr4;
+    env->cr[0] = sregs->cr0;
+    env->cr[2] = sregs->cr2;
+    env->cr[3] = sregs->cr3;
+    env->cr[4] = sregs->cr4;
 
-    cpu_set_apic_base(env, sregs.apic_base);
+    cpu_set_apic_base(env, sregs->apic_base);
 
-    env->efer = sregs.efer;
+    env->efer = sregs->efer;
     //cpu_set_apic_tpr(env, sregs.cr8);
 
 #define HFLAG_COPY_MASK ~( \
@@ -572,8 +534,6 @@ static int kvm_get_sregs(CPUState *env)
                        HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
                        HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)
 
-
-
     hflags = (env->segs[R_CS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
     hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
     hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
@@ -605,25 +565,20 @@ static int kvm_get_sregs(CPUState *env)
             }
     }
     env->hflags = (env->hflags & HFLAG_COPY_MASK) | hflags;
-
-    return 0;
 }
 
-static int kvm_get_msrs(CPUState *env)
+static void kvm_prepare_get_msrs(CPUState *env, struct kvm_msrs *msr_list)
 {
-    struct {
-        struct kvm_msrs info;
-        struct kvm_msr_entry entries[100];
-    } msr_data;
-    struct kvm_msr_entry *msrs = msr_data.entries;
-    int ret, i, n;
+    struct kvm_msr_entry *msrs = msr_list->entries;
+    int n;
 
     n = 0;
     msrs[n++].index = MSR_IA32_SYSENTER_CS;
     msrs[n++].index = MSR_IA32_SYSENTER_ESP;
     msrs[n++].index = MSR_IA32_SYSENTER_EIP;
-    if (kvm_has_msr_star(env))
-       msrs[n++].index = MSR_STAR;
+    if (kvm_has_msr_star(env)) {
+        msrs[n++].index = MSR_STAR;
+    }
     msrs[n++].index = MSR_IA32_TSC;
 #ifdef TARGET_X86_64
     /* FIXME lm_capable_kernel */
@@ -632,12 +587,15 @@ static int kvm_get_msrs(CPUState *env)
     msrs[n++].index = MSR_FMASK;
     msrs[n++].index = MSR_LSTAR;
 #endif
-    msr_data.info.nmsrs = n;
-    ret = kvm_vcpu_ioctl(env, KVM_GET_MSRS, &msr_data);
-    if (ret < 0)
-        return ret;
+    msr_list->nmsrs = n;
+}
 
-    for (i = 0; i < ret; i++) {
+static void kvm_get_msrs(CPUState *env, struct kvm_msrs *msr_list)
+{
+    struct kvm_msr_entry *msrs = msr_list->entries;
+    int i;
+
+    for (i = 0; i < msr_list->nmsrs; i++) {
         switch (msrs[i].index) {
         case MSR_IA32_SYSENTER_CS:
             env->sysenter_cs = msrs[i].data;
@@ -670,60 +628,147 @@ static int kvm_get_msrs(CPUState *env)
             break;
         }
     }
-
-    return 0;
 }
 
 int kvm_arch_put_registers(CPUState *env)
 {
+    struct kvm_regs regs;
+    struct kvm_sregs sregs;
+    struct kvm_fpu fpu;
+    struct {
+        struct kvm_msrs info;
+        struct kvm_msr_entry entries[100];
+    } msrs;
     int ret;
+#ifdef KVM_CAP_VCPU_STATE
+    struct kvm_mp_state mp_state;
+    struct {
+        struct kvm_vcpu_state header;
+        struct kvm_vcpu_substate substates[5];
+    } request;
+#endif
 
-    ret = kvm_getput_regs(env, 1);
-    if (ret < 0)
-        return ret;
-
-    ret = kvm_put_fpu(env);
-    if (ret < 0)
-        return ret;
-
-    ret = kvm_put_sregs(env);
-    if (ret < 0)
-        return ret;
-
-    ret = kvm_put_msrs(env);
-    if (ret < 0)
-        return ret;
-
-    ret = kvm_put_mp_state(env);
-    if (ret < 0)
-        return ret;
+    kvm_getput_regs(env, &regs, 1);
+    kvm_put_fpu(env, &fpu);
+    kvm_put_sregs(env, &sregs);
+    kvm_put_msrs(env, &msrs.info);
+#ifdef KVM_CAP_VCPU_STATE
+    mp_state.mp_state = env->mp_state;
+
+    if (kvm_has_vcpu_state()) {
+        request.header.nsubstates = ARRAY_SIZE(request.substates);
+        request.header.substates[0].type = KVM_VCPU_STATE_REGS;
+        request.header.substates[0].offset = (size_t)&regs - (size_t)&request;
+        request.header.substates[1].type = KVM_VCPU_STATE_FPU;
+        request.header.substates[1].offset = (size_t)&fpu - (size_t)&request;
+        request.header.substates[2].type = KVM_VCPU_STATE_SREGS;
+        request.header.substates[2].offset = (size_t)&sregs - (size_t)&request;
+        request.header.substates[3].type = KVM_X86_VCPU_STATE_MSRS;
+        request.header.substates[3].offset = (size_t)&msrs - (size_t)&request;
+        request.header.substates[4].type = KVM_VCPU_STATE_MP;
+        request.header.substates[4].offset = (size_t)&mp_state - (size_t)&request;
+
+        ret = kvm_vcpu_ioctl(env, KVM_SET_VCPU_STATE, &request);
+        if (ret < 0) {
+            return ret;
+        }
+    } else
+#endif
+    {
+        ret = kvm_vcpu_ioctl(env, KVM_SET_REGS, &regs);
+        if (ret < 0) {
+            return ret;
+        }
+        ret = kvm_vcpu_ioctl(env, KVM_SET_FPU, &fpu);
+        if (ret < 0) {
+            return ret;
+        }
+        ret = kvm_vcpu_ioctl(env, KVM_SET_SREGS, &sregs);
+        if (ret < 0) {
+            return ret;
+        }
+        ret = kvm_vcpu_ioctl(env, KVM_SET_MSRS, &msrs);
+        if (ret < 0) {
+            return ret;
+        }
+        ret = kvm_put_mp_state(env);
+        if (ret < 0) {
+            return ret;
+        }
+    }
 
     return 0;
 }
 
 int kvm_arch_get_registers(CPUState *env)
 {
-    int ret;
-
-    ret = kvm_getput_regs(env, 0);
-    if (ret < 0)
-        return ret;
-
-    ret = kvm_get_fpu(env);
-    if (ret < 0)
-        return ret;
-
-    ret = kvm_get_sregs(env);
-    if (ret < 0)
-        return ret;
-
-    ret = kvm_get_msrs(env);
-    if (ret < 0)
-        return ret;
+    struct kvm_regs regs;
+    struct kvm_sregs sregs;
+    struct kvm_fpu fpu;
+    struct {
+        struct kvm_msrs info;
+        struct kvm_msr_entry entries[100];
+    } msrs;
+    int ret = -1;
+#ifdef KVM_CAP_VCPU_STATE
+    struct kvm_mp_state mp_state;
+    struct {
+        struct kvm_vcpu_state header;
+        struct kvm_vcpu_substate substates[5];
+    } request;
+#endif
 
-    ret = kvm_get_mp_state(env);
-    if (ret < 0)
-        return ret;
+    kvm_prepare_get_msrs(env, &msrs.info);
+
+#ifdef KVM_CAP_VCPU_STATE
+    if (kvm_has_vcpu_state()) {
+        request.header.nsubstates = ARRAY_SIZE(request.substates);
+        request.header.substates[0].type = KVM_VCPU_STATE_REGS;
+        request.header.substates[0].offset = (size_t)&regs - (size_t)&request;
+        request.header.substates[1].type = KVM_VCPU_STATE_FPU;
+        request.header.substates[1].offset = (size_t)&fpu - (size_t)&request;
+        request.header.substates[2].type = KVM_VCPU_STATE_SREGS;
+        request.header.substates[2].offset = (size_t)&sregs - (size_t)&request;
+        request.header.substates[3].type = KVM_X86_VCPU_STATE_MSRS;
+        request.header.substates[3].offset = (size_t)&msrs - (size_t)&request;
+        request.header.substates[4].type = KVM_VCPU_STATE_MP;
+        request.header.substates[4].offset = (size_t)&mp_state - (size_t)&request;
+
+        ret = kvm_vcpu_ioctl(env, KVM_GET_VCPU_STATE, &request);
+        if (ret < 0) {
+            return ret;
+        }
+        msrs.info.nmsrs = msrs.info.nprocessed;
+        env->mp_state = mp_state.mp_state;
+    } else
+#endif
+    {
+        ret = kvm_vcpu_ioctl(env, KVM_GET_REGS, &regs);
+        if (ret < 0) {
+            return ret;
+        }
+        ret = kvm_vcpu_ioctl(env, KVM_GET_FPU, &fpu);
+        if (ret < 0) {
+            return ret;
+        }
+        ret = kvm_vcpu_ioctl(env, KVM_GET_SREGS, &sregs);
+        if (ret < 0) {
+            return ret;
+        }
+        ret = kvm_vcpu_ioctl(env, KVM_GET_MSRS, &msrs);
+        if (ret < 0) {
+            return ret;
+        }
+        msrs.info.nmsrs = ret;
+        ret = kvm_get_mp_state(env);
+        if (ret < 0) {
+            return ret;
+        }
+    }
+    kvm_getput_regs(env, &regs, 0);
+    kvm_get_fpu(env, &fpu);
+    kvm_get_sregs(env, &sregs);
+    kvm_get_msrs(env, &msrs.info);
 
     return 0;
 }




reply via email to

[Prev in Thread] Current Thread [Next in Thread]