Re: [Qemu-devel] [PATCH 4/6] target-ppc: Synchronize FPU state with KVM


From: Alexander Graf
Subject: Re: [Qemu-devel] [PATCH 4/6] target-ppc: Synchronize FPU state with KVM
Date: Thu, 24 Jan 2013 17:39:09 +0100

On 24.01.2013, at 04:20, David Gibson wrote:

> Currently qemu does not get and put the state of the floating point and
> vector registers to KVM.  This is obviously a problem for savevm, as well
> as possibly being problematic for debugging of FP-using guests.
> 
> This patch fixes this by using new extensions to the ONE_REG interface to
> synchronize the qemu floating point state with KVM.
> 
> Signed-off-by: David Gibson <address@hidden>
> ---
> target-ppc/kvm.c |  123 ++++++++++++++++++++++++++++++++++++++++++++++++++++++
> 1 file changed, 123 insertions(+)
> 
> diff --git a/target-ppc/kvm.c b/target-ppc/kvm.c
> index 7604d0b..11e123f 100644
> --- a/target-ppc/kvm.c
> +++ b/target-ppc/kvm.c
> @@ -516,6 +516,125 @@ static void kvm_put_one_spr(CPUState *cs, uint64_t id, int spr)
>     }
> }
> 
> +static void kvm_put_fp(CPUState *cs)
> +{
> +    PowerPCCPU *cpu = POWERPC_CPU(cs);
> +    CPUPPCState *env = &cpu->env;
> +    struct kvm_one_reg reg;
> +    int i;
> +    int ret;
> +
> +    if (env->insns_flags & PPC_FLOAT) {
> +        uint64_t fpscr = env->fpscr;
> +        bool vsx = !!(env->insns_flags2 & PPC2_VSX);
> +
> +        reg.id = KVM_REG_PPC_FPSCR;
> +        reg.addr = (uintptr_t)&fpscr;
> +        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);

How about a helper kvm_set_one_reg_tl(cs, KVM_REG_PPC_FPSCR, env->fpscr)?
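
Something along these lines maybe (untested sketch -- the name
kvm_set_one_reg_tl and its exact shape are just a suggestion here, not an
existing helper):

static int kvm_set_one_reg_tl(CPUState *cs, uint64_t id, target_ulong value)
{
    /* widen to 64 bits; FPSCR and friends are 64-bit ONE_REG values */
    uint64_t val = value;
    struct kvm_one_reg reg = {
        .id = id,
        .addr = (uintptr_t)&val,
    };

    return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
}

Then the FPSCR hunk above shrinks to a single
kvm_set_one_reg_tl(cs, KVM_REG_PPC_FPSCR, env->fpscr) call.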

> +        if (ret < 0) {
> +            fprintf(stderr, "Warning: Unable to set FPSCR to KVM: %s\n",
> +                    strerror(errno));

Why would I care? It's not supported; just abort.
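
Roughly (sketch of the idea only, not literal code to copy):

        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret < 0) {
            /* if the kernel can't even set FPSCR via ONE_REG, there is
             * nothing sensible left to sync -- so just give up */
            abort();
        }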

> +        }
> +
> +        for (i = 0; i < 32; i++) {
> +            uint64_t vsr[2];
> +
> +            vsr[0] = float64_val(env->fpr[i]);
> +            vsr[1] = env->vsr[i];
> +            reg.addr = (uintptr_t) &vsr;
> +            reg.id = vsx ? KVM_REG_PPC_VSR(i) : KVM_REG_PPC_FPR(i);
> +
> +            ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
> +            if (ret < 0) {
> +                fprintf(stderr, "Warning Unable to set %s%d to KVM: %s\n",
> +                        vsx ? "VSR" : "FPR", i, strerror(errno));

Just remove the prints here too. And below. Nobody wants to know that syncing 
didn't work. But you definitely want to stop syncing when you hit the first 
error.
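
Roughly like this, say (sketch only -- it assumes kvm_put_fp() is changed
to return an error code, which the patch as posted doesn't do):

        for (i = 0; i < 32; i++) {
            uint64_t vsr[2];

            vsr[0] = float64_val(env->fpr[i]);
            vsr[1] = env->vsr[i];
            reg.addr = (uintptr_t)&vsr;
            reg.id = vsx ? KVM_REG_PPC_VSR(i) : KVM_REG_PPC_FPR(i);

            ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
            if (ret < 0) {
                /* stop on the first failure instead of warning and
                 * carrying on */
                return ret;
            }
        }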


Alex

> +            }
> +        }
> +    }
> +
> +    if (env->insns_flags & PPC_ALTIVEC) {
> +        reg.id = KVM_REG_PPC_VSCR;
> +        reg.addr = (uintptr_t)&env->vscr;
> +        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
> +        if (ret < 0) {
> +            fprintf(stderr, "Warning: Unable to set VSCR to KVM: %s\n",
> +                    strerror(errno));
> +        }
> +
> +        for (i = 0; i < 32; i++) {
> +            reg.id = KVM_REG_PPC_VR(i);
> +            reg.addr = (uintptr_t)&env->avr[i];
> +            ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
> +            if (ret < 0) {
> +                fprintf(stderr, "Warning Unable to set VR%d to KVM: %s\n",
> +                        i, strerror(errno));
> +            }
> +        }
> +    }
> +}
> +
> +static void kvm_get_fp(CPUState *cs)
> +{
> +    PowerPCCPU *cpu = POWERPC_CPU(cs);
> +    CPUPPCState *env = &cpu->env;
> +    struct kvm_one_reg reg;
> +    int i;
> +    int ret;
> +
> +    if (env->insns_flags & PPC_FLOAT) {
> +        uint64_t fpscr;
> +        bool vsx = !!(env->insns_flags2 & PPC2_VSX);
> +
> +        reg.id = KVM_REG_PPC_FPSCR;
> +        reg.addr = (uintptr_t)&fpscr;
> +        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
> +        if (ret < 0) {
> +            fprintf(stderr, "Warning: Unable to get FPSCR from KVM: %s\n",
> +                    strerror(errno));
> +        } else {
> +            env->fpscr = fpscr;
> +        }
> +
> +        for (i = 0; i < 32; i++) {
> +            uint64_t vsr[2];
> +
> +            reg.addr = (uintptr_t) &vsr;
> +            reg.id = vsx ? KVM_REG_PPC_VSR(i) : KVM_REG_PPC_FPR(i);
> +
> +            ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
> +            if (ret < 0) {
> +                fprintf(stderr, "Warning Unable to get %s%d from KVM: %s\n",
> +                        vsx ? "VSR" : "FPR", i, strerror(errno));
> +            } else {
> +                env->fpr[i] = vsr[0];
> +                if (vsx) {
> +                    env->vsr[i] = vsr[1];
> +                }
> +            }
> +        }
> +    }
> +
> +    if (env->insns_flags & PPC_ALTIVEC) {
> +        reg.id = KVM_REG_PPC_VSCR;
> +        reg.addr = (uintptr_t)&env->vscr;
> +        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
> +        if (ret < 0) {
> +            fprintf(stderr, "Warning: Unable to get VSCR from KVM: %s\n",
> +                    strerror(errno));
> +        }
> +
> +        for (i = 0; i < 32; i++) {
> +            reg.id = KVM_REG_PPC_VR(i);
> +            reg.addr = (uintptr_t)&env->avr[i];
> +            ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
> +            if (ret < 0) {
> +                fprintf(stderr, "Warning Unable to get VR%d from KVM: %s\n",
> +                        i, strerror(errno));
> +            }
> +        }
> +    }
> +}
> +
> int kvm_arch_put_registers(CPUState *cs, int level)
> {
>     PowerPCCPU *cpu = POWERPC_CPU(cs);
> @@ -556,6 +675,8 @@ int kvm_arch_put_registers(CPUState *cs, int level)
>     if (ret < 0)
>         return ret;
> 
> +    kvm_put_fp(cs);
> +
>     if (env->tlb_dirty) {
>         kvm_sw_tlb_put(cpu);
>         env->tlb_dirty = false;
> @@ -673,6 +794,8 @@ int kvm_arch_get_registers(CPUState *cs)
>     for (i = 0;i < 32; i++)
>         env->gpr[i] = regs.gpr[i];
> 
> +    kvm_get_fp(cs);
> +
>     if (cap_booke_sregs) {
>         ret = kvm_vcpu_ioctl(cs, KVM_GET_SREGS, &sregs);
>         if (ret < 0) {
> -- 
> 1.7.10.4
> 



