Re: [Qemu-devel] [PATCH 1/9] kvm/x86: move Hyper-V MSR's/hypercall code into hyperv.c file


From: Peter Hornyack
Subject: Re: [Qemu-devel] [PATCH 1/9] kvm/x86: move Hyper-V MSR's/hypercall code into hyperv.c file
Date: Tue, 30 Jun 2015 17:11:33 -0700

On Tue, Jun 30, 2015 at 4:33 AM, Denis V. Lunev <address@hidden> wrote:
> From: Andrey Smetanin <address@hidden>
>
> This patch introduces a Hyper-V related source file, hyperv.c, and
> per-VM and per-vCPU Hyper-V context structures.
> All Hyper-V MSR and hypercall code is moved into hyperv.c.
> All Hyper-V kvm/vcpu fields are moved into the appropriate Hyper-V context
> structures. Copyright and author information is copied from x86.c
> to hyperv.c.
>
> Signed-off-by: Andrey Smetanin <address@hidden>
> Signed-off-by: Denis V. Lunev <address@hidden>
> CC: Paolo Bonzini <address@hidden>
> CC: Gleb Natapov <address@hidden>
Reviewed-by: Peter Hornyack <address@hidden>

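For readers skimming the diff, the new module boundary reduces to the condensed sketch below; this is only a summary pieced together from the hunks that follow (declarations the patch itself adds), not extra code:

    /* per-VM Hyper-V emulation state, embedded in struct kvm_arch */
    struct kvm_hv {
            u64 hv_guest_os_id;
            u64 hv_hypercall;
            u64 hv_tsc_page;
    };

    /* per-vCPU Hyper-V emulation state, embedded in struct kvm_vcpu_arch */
    struct kvm_vcpu_hv {
            u64 hv_vapic;
    };

    /* interface hyperv.h exports back to x86.c */
    int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data);
    int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
    bool kvm_hv_hypercall_enabled(struct kvm *kvm);
    int kvm_hv_hypercall(struct kvm_vcpu *vcpu);
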
> ---
>  arch/x86/include/asm/kvm_host.h |  20 ++-
>  arch/x86/kvm/Makefile           |   4 +-
>  arch/x86/kvm/hyperv.c           | 307 ++++++++++++++++++++++++++++++++++++++++
>  arch/x86/kvm/hyperv.h           |  32 +++++
>  arch/x86/kvm/lapic.h            |   2 +-
>  arch/x86/kvm/x86.c              | 265 +---------------------------------
>  arch/x86/kvm/x86.h              |   5 +
>  7 files changed, 366 insertions(+), 269 deletions(-)
>  create mode 100644 arch/x86/kvm/hyperv.c
>  create mode 100644 arch/x86/kvm/hyperv.h
>
> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> index c7fa57b..78616aa 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -358,6 +358,11 @@ struct kvm_mtrr {
>         struct list_head head;
>  };
>
> +/* Hyper-V per vcpu emulation context */
> +struct kvm_vcpu_hv {
> +       u64 hv_vapic;
> +};
> +
>  struct kvm_vcpu_arch {
>         /*
>          * rip and regs accesses must go through
> @@ -514,8 +519,7 @@ struct kvm_vcpu_arch {
>         /* used for guest single stepping over the given code position */
>         unsigned long singlestep_rip;
>
> -       /* fields used by HYPER-V emulation */
> -       u64 hv_vapic;
> +       struct kvm_vcpu_hv hyperv;
>
>         cpumask_var_t wbinvd_dirty_mask;
>
> @@ -586,6 +590,13 @@ struct kvm_apic_map {
>         struct kvm_lapic *logical_map[16][16];
>  };
>
> +/* Hyper-V emulation context */
> +struct kvm_hv {
> +       u64 hv_guest_os_id;
> +       u64 hv_hypercall;
> +       u64 hv_tsc_page;
> +};
> +
>  struct kvm_arch {
>         unsigned int n_used_mmu_pages;
>         unsigned int n_requested_mmu_pages;
> @@ -643,10 +654,7 @@ struct kvm_arch {
>         /* reads protected by irq_srcu, writes by irq_lock */
>         struct hlist_head mask_notifier_list;
>
> -       /* fields used by HYPER-V emulation */
> -       u64 hv_guest_os_id;
> -       u64 hv_hypercall;
> -       u64 hv_tsc_page;
> +       struct kvm_hv hyperv;
>
>         #ifdef CONFIG_KVM_MMU_AUDIT
>         int audit_point;
> diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile
> index 67d215c..a1ff508 100644
> --- a/arch/x86/kvm/Makefile
> +++ b/arch/x86/kvm/Makefile
> @@ -12,7 +12,9 @@ kvm-y                 += $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o \
>  kvm-$(CONFIG_KVM_ASYNC_PF)     += $(KVM)/async_pf.o
>
>  kvm-y                  += x86.o mmu.o emulate.o i8259.o irq.o lapic.o \
> -                          i8254.o ioapic.o irq_comm.o cpuid.o pmu.o mtrr.o
> +                          i8254.o ioapic.o irq_comm.o cpuid.o pmu.o mtrr.o \
> +                          hyperv.o
> +
>  kvm-$(CONFIG_KVM_DEVICE_ASSIGNMENT)    += assigned-dev.o iommu.o
>  kvm-intel-y            += vmx.o pmu_intel.o
>  kvm-amd-y              += svm.o pmu_amd.o
> diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
> new file mode 100644
> index 0000000..2b49f10
> --- /dev/null
> +++ b/arch/x86/kvm/hyperv.c
> @@ -0,0 +1,307 @@
> +/*
> + * KVM Microsoft Hyper-V emulation
> + *
> + * derived from arch/x86/kvm/x86.c
> + *
> + * Copyright (C) 2006 Qumranet, Inc.
> + * Copyright (C) 2008 Qumranet, Inc.
> + * Copyright IBM Corporation, 2008
> + * Copyright 2010 Red Hat, Inc. and/or its affiliates.
> + * Copyright (C) 2015 Andrey Smetanin <address@hidden>
> + *
> + * Authors:
> + *   Avi Kivity   <address@hidden>
> + *   Yaniv Kamay  <address@hidden>
> + *   Amit Shah    <address@hidden>
> + *   Ben-Ami Yassour <address@hidden>
> + *   Andrey Smetanin <address@hidden>
> + *
> + * This work is licensed under the terms of the GNU GPL, version 2.  See
> + * the COPYING file in the top-level directory.
> + *
> + */
> +
> +#include "x86.h"
> +#include "lapic.h"
> +#include "hyperv.h"
> +
> +#include <linux/kvm_host.h>
> +#include <trace/events/kvm.h>
> +
> +#include "trace.h"
> +
> +static bool kvm_hv_msr_partition_wide(u32 msr)
> +{
> +       bool r = false;
> +
> +       switch (msr) {
> +       case HV_X64_MSR_GUEST_OS_ID:
> +       case HV_X64_MSR_HYPERCALL:
> +       case HV_X64_MSR_REFERENCE_TSC:
> +       case HV_X64_MSR_TIME_REF_COUNT:
> +               r = true;
> +               break;
> +       }
> +
> +       return r;
> +}
> +
> +static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data)
> +{
> +       struct kvm *kvm = vcpu->kvm;
> +       struct kvm_hv *hv = &kvm->arch.hyperv;
> +
> +       switch (msr) {
> +       case HV_X64_MSR_GUEST_OS_ID:
> +               hv->hv_guest_os_id = data;
> +               /* setting guest os id to zero disables hypercall page */
> +               if (!hv->hv_guest_os_id)
> +                       hv->hv_hypercall &= ~HV_X64_MSR_HYPERCALL_ENABLE;
> +               break;
> +       case HV_X64_MSR_HYPERCALL: {
> +               u64 gfn;
> +               unsigned long addr;
> +               u8 instructions[4];
> +
> +               /* if guest os id is not set hypercall should remain disabled */
> +               if (!hv->hv_guest_os_id)
> +                       break;
> +               if (!(data & HV_X64_MSR_HYPERCALL_ENABLE)) {
> +                       hv->hv_hypercall = data;
> +                       break;
> +               }
> +               gfn = data >> HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT;
> +               addr = gfn_to_hva(kvm, gfn);
> +               if (kvm_is_error_hva(addr))
> +                       return 1;
> +               kvm_x86_ops->patch_hypercall(vcpu, instructions);
> +               ((unsigned char *)instructions)[3] = 0xc3; /* ret */
> +               if (__copy_to_user((void __user *)addr, instructions, 4))
> +                       return 1;
> +               hv->hv_hypercall = data;
> +               mark_page_dirty(kvm, gfn);
> +               break;
> +       }
> +       case HV_X64_MSR_REFERENCE_TSC: {
> +               u64 gfn;
> +               HV_REFERENCE_TSC_PAGE tsc_ref;
> +
> +               memset(&tsc_ref, 0, sizeof(tsc_ref));
> +               hv->hv_tsc_page = data;
> +               if (!(data & HV_X64_MSR_TSC_REFERENCE_ENABLE))
> +                       break;
> +               gfn = data >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT;
> +               if (kvm_write_guest(
> +                               kvm,
> +                               gfn << HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT,
> +                               &tsc_ref, sizeof(tsc_ref)))
> +                       return 1;
> +               mark_page_dirty(kvm, gfn);
> +               break;
> +       }
> +       default:
> +               vcpu_unimpl(vcpu, "Hyper-V unhandled wrmsr: 0x%x data 0x%llx\n",
> +                           msr, data);
> +               return 1;
> +       }
> +       return 0;
> +}
> +
> +static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
> +{
> +       struct kvm_vcpu_hv *hv = &vcpu->arch.hyperv;
> +
> +       switch (msr) {
> +       case HV_X64_MSR_APIC_ASSIST_PAGE: {
> +               u64 gfn;
> +               unsigned long addr;
> +
> +               if (!(data & HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE)) {
> +                       hv->hv_vapic = data;
> +                       if (kvm_lapic_enable_pv_eoi(vcpu, 0))
> +                               return 1;
> +                       break;
> +               }
> +               gfn = data >> HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT;
> +               addr = kvm_vcpu_gfn_to_hva(vcpu, gfn);
> +               if (kvm_is_error_hva(addr))
> +                       return 1;
> +               if (__clear_user((void __user *)addr, PAGE_SIZE))
> +                       return 1;
> +               hv->hv_vapic = data;
> +               kvm_vcpu_mark_page_dirty(vcpu, gfn);
> +               if (kvm_lapic_enable_pv_eoi(vcpu,
> +                                           gfn_to_gpa(gfn) | KVM_MSR_ENABLED))
> +                       return 1;
> +               break;
> +       }
> +       case HV_X64_MSR_EOI:
> +               return kvm_hv_vapic_msr_write(vcpu, APIC_EOI, data);
> +       case HV_X64_MSR_ICR:
> +               return kvm_hv_vapic_msr_write(vcpu, APIC_ICR, data);
> +       case HV_X64_MSR_TPR:
> +               return kvm_hv_vapic_msr_write(vcpu, APIC_TASKPRI, data);
> +       default:
> +               vcpu_unimpl(vcpu, "Hyper-V unhandled wrmsr: 0x%x data 0x%llx\n",
> +                           msr, data);
> +               return 1;
> +       }
> +
> +       return 0;
> +}
> +
> +static int kvm_hv_get_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
> +{
> +       u64 data = 0;
> +       struct kvm *kvm = vcpu->kvm;
> +       struct kvm_hv *hv = &kvm->arch.hyperv;
> +
> +       switch (msr) {
> +       case HV_X64_MSR_GUEST_OS_ID:
> +               data = hv->hv_guest_os_id;
> +               break;
> +       case HV_X64_MSR_HYPERCALL:
> +               data = hv->hv_hypercall;
> +               break;
> +       case HV_X64_MSR_TIME_REF_COUNT: {
> +               data =
> +                    div_u64(get_kernel_ns() + kvm->arch.kvmclock_offset, 100);
> +               break;
> +       }
> +       case HV_X64_MSR_REFERENCE_TSC:
> +               data = hv->hv_tsc_page;
> +               break;
> +       default:
> +               vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
> +               return 1;
> +       }
> +
> +       *pdata = data;
> +       return 0;
> +}
> +
> +static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
> +{
> +       u64 data = 0;
> +       struct kvm_vcpu_hv *hv = &vcpu->arch.hyperv;
> +
> +       switch (msr) {
> +       case HV_X64_MSR_VP_INDEX: {
> +               int r;
> +               struct kvm_vcpu *v;
> +
> +               kvm_for_each_vcpu(r, v, vcpu->kvm) {
> +                       if (v == vcpu) {
> +                               data = r;
> +                               break;
> +                       }
> +               }
> +               break;
> +       }
> +       case HV_X64_MSR_EOI:
> +               return kvm_hv_vapic_msr_read(vcpu, APIC_EOI, pdata);
> +       case HV_X64_MSR_ICR:
> +               return kvm_hv_vapic_msr_read(vcpu, APIC_ICR, pdata);
> +       case HV_X64_MSR_TPR:
> +               return kvm_hv_vapic_msr_read(vcpu, APIC_TASKPRI, pdata);
> +       case HV_X64_MSR_APIC_ASSIST_PAGE:
> +               data = hv->hv_vapic;
> +               break;
> +       default:
> +               vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
> +               return 1;
> +       }
> +       *pdata = data;
> +       return 0;
> +}
> +
> +int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
> +{
> +       if (kvm_hv_msr_partition_wide(msr)) {
> +               int r;
> +
> +               mutex_lock(&vcpu->kvm->lock);
> +               r = kvm_hv_set_msr_pw(vcpu, msr, data);
> +               mutex_unlock(&vcpu->kvm->lock);
> +               return r;
> +       } else
> +               return kvm_hv_set_msr(vcpu, msr, data);
> +}
> +
> +int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
> +{
> +       if (kvm_hv_msr_partition_wide(msr)) {
> +               int r;
> +
> +               mutex_lock(&vcpu->kvm->lock);
> +               r = kvm_hv_get_msr_pw(vcpu, msr, pdata);
> +               mutex_unlock(&vcpu->kvm->lock);
> +               return r;
> +       } else
> +               return kvm_hv_get_msr(vcpu, msr, pdata);
> +}
> +
> +bool kvm_hv_hypercall_enabled(struct kvm *kvm)
> +{
> +       return kvm->arch.hyperv.hv_hypercall & HV_X64_MSR_HYPERCALL_ENABLE;
> +}
> +
> +int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
> +{
> +       u64 param, ingpa, outgpa, ret;
> +       uint16_t code, rep_idx, rep_cnt, res = HV_STATUS_SUCCESS, rep_done = 0;
> +       bool fast, longmode;
> +
> +       /*
> +        * hypercall generates UD from non zero cpl and real mode
> +        * per HYPER-V spec
> +        */
> +       if (kvm_x86_ops->get_cpl(vcpu) != 0 || !is_protmode(vcpu)) {
> +               kvm_queue_exception(vcpu, UD_VECTOR);
> +               return 0;
> +       }
> +
> +       longmode = is_64_bit_mode(vcpu);
> +
> +       if (!longmode) {
> +               param = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDX) << 32) |
> +                       (kvm_register_read(vcpu, VCPU_REGS_RAX) & 0xffffffff);
> +               ingpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RBX) << 32) |
> +                       (kvm_register_read(vcpu, VCPU_REGS_RCX) & 0xffffffff);
> +               outgpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDI) << 32) |
> +                       (kvm_register_read(vcpu, VCPU_REGS_RSI) & 0xffffffff);
> +       }
> +#ifdef CONFIG_X86_64
> +       else {
> +               param = kvm_register_read(vcpu, VCPU_REGS_RCX);
> +               ingpa = kvm_register_read(vcpu, VCPU_REGS_RDX);
> +               outgpa = kvm_register_read(vcpu, VCPU_REGS_R8);
> +       }
> +#endif
> +
> +       code = param & 0xffff;
> +       fast = (param >> 16) & 0x1;
> +       rep_cnt = (param >> 32) & 0xfff;
> +       rep_idx = (param >> 48) & 0xfff;
> +
> +       trace_kvm_hv_hypercall(code, fast, rep_cnt, rep_idx, ingpa, outgpa);
> +
> +       switch (code) {
> +       case HV_X64_HV_NOTIFY_LONG_SPIN_WAIT:
> +               kvm_vcpu_on_spin(vcpu);
> +               break;
> +       default:
> +               res = HV_STATUS_INVALID_HYPERCALL_CODE;
> +               break;
> +       }
> +
> +       ret = res | (((u64)rep_done & 0xfff) << 32);
> +       if (longmode) {
> +               kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
> +       } else {
> +               kvm_register_write(vcpu, VCPU_REGS_RDX, ret >> 32);
> +               kvm_register_write(vcpu, VCPU_REGS_RAX, ret & 0xffffffff);
> +       }
> +
> +       return 1;
> +}
> diff --git a/arch/x86/kvm/hyperv.h b/arch/x86/kvm/hyperv.h
> new file mode 100644
> index 0000000..115c738
> --- /dev/null
> +++ b/arch/x86/kvm/hyperv.h
> @@ -0,0 +1,32 @@
> +/*
> + * KVM Microsoft Hyper-V emulation
> + *
> + * derived from arch/x86/kvm/x86.c
> + *
> + * Copyright (C) 2006 Qumranet, Inc.
> + * Copyright (C) 2008 Qumranet, Inc.
> + * Copyright IBM Corporation, 2008
> + * Copyright 2010 Red Hat, Inc. and/or its affiliates.
> + * Copyright (C) 2015 Andrey Smetanin <address@hidden>
> + *
> + * Authors:
> + *   Avi Kivity   <address@hidden>
> + *   Yaniv Kamay  <address@hidden>
> + *   Amit Shah    <address@hidden>
> + *   Ben-Ami Yassour <address@hidden>
> + *   Andrey Smetanin <address@hidden>
> + *
> + * This work is licensed under the terms of the GNU GPL, version 2.  See
> + * the COPYING file in the top-level directory.
> + *
> + */
> +
> +#ifndef __ARCH_X86_KVM_HYPERV_H__
> +#define __ARCH_X86_KVM_HYPERV_H__
> +
> +int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data);
> +int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
> +bool kvm_hv_hypercall_enabled(struct kvm *kvm);
> +int kvm_hv_hypercall(struct kvm_vcpu *vcpu);
> +
> +#endif
> diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
> index f2f4e10..26f7817 100644
> --- a/arch/x86/kvm/lapic.h
> +++ b/arch/x86/kvm/lapic.h
> @@ -90,7 +90,7 @@ int kvm_hv_vapic_msr_read(struct kvm_vcpu *vcpu, u32 msr, u64 *data);
>
>  static inline bool kvm_hv_vapic_assist_page_enabled(struct kvm_vcpu *vcpu)
>  {
> -       return vcpu->arch.hv_vapic & HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE;
> +       return vcpu->arch.hyperv.hv_vapic & HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE;
>  }
>
>  int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data);
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index ac165c2..301ee01 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -29,6 +29,7 @@
>  #include "cpuid.h"
>  #include "assigned-dev.h"
>  #include "pmu.h"
> +#include "hyperv.h"
>
>  #include <linux/clocksource.h>
>  #include <linux/interrupt.h>
> @@ -1217,11 +1218,6 @@ static void kvm_get_time_scale(uint32_t scaled_khz, uint32_t base_khz,
>                  __func__, base_khz, scaled_khz, shift, *pmultiplier);
>  }
>
> -static inline u64 get_kernel_ns(void)
> -{
> -       return ktime_get_boot_ns();
> -}
> -
>  #ifdef CONFIG_X86_64
>  static atomic_t kvm_guest_has_master_clock = ATOMIC_INIT(0);
>  #endif
> @@ -1869,123 +1865,6 @@ out:
>         return r;
>  }
>
> -static bool kvm_hv_hypercall_enabled(struct kvm *kvm)
> -{
> -       return kvm->arch.hv_hypercall & HV_X64_MSR_HYPERCALL_ENABLE;
> -}
> -
> -static bool kvm_hv_msr_partition_wide(u32 msr)
> -{
> -       bool r = false;
> -       switch (msr) {
> -       case HV_X64_MSR_GUEST_OS_ID:
> -       case HV_X64_MSR_HYPERCALL:
> -       case HV_X64_MSR_REFERENCE_TSC:
> -       case HV_X64_MSR_TIME_REF_COUNT:
> -               r = true;
> -               break;
> -       }
> -
> -       return r;
> -}
> -
> -static int set_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data)
> -{
> -       struct kvm *kvm = vcpu->kvm;
> -
> -       switch (msr) {
> -       case HV_X64_MSR_GUEST_OS_ID:
> -               kvm->arch.hv_guest_os_id = data;
> -               /* setting guest os id to zero disables hypercall page */
> -               if (!kvm->arch.hv_guest_os_id)
> -                       kvm->arch.hv_hypercall &= ~HV_X64_MSR_HYPERCALL_ENABLE;
> -               break;
> -       case HV_X64_MSR_HYPERCALL: {
> -               u64 gfn;
> -               unsigned long addr;
> -               u8 instructions[4];
> -
> -               /* if guest os id is not set hypercall should remain disabled */
> -               if (!kvm->arch.hv_guest_os_id)
> -                       break;
> -               if (!(data & HV_X64_MSR_HYPERCALL_ENABLE)) {
> -                       kvm->arch.hv_hypercall = data;
> -                       break;
> -               }
> -               gfn = data >> HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT;
> -               addr = gfn_to_hva(kvm, gfn);
> -               if (kvm_is_error_hva(addr))
> -                       return 1;
> -               kvm_x86_ops->patch_hypercall(vcpu, instructions);
> -               ((unsigned char *)instructions)[3] = 0xc3; /* ret */
> -               if (__copy_to_user((void __user *)addr, instructions, 4))
> -                       return 1;
> -               kvm->arch.hv_hypercall = data;
> -               mark_page_dirty(kvm, gfn);
> -               break;
> -       }
> -       case HV_X64_MSR_REFERENCE_TSC: {
> -               u64 gfn;
> -               HV_REFERENCE_TSC_PAGE tsc_ref;
> -               memset(&tsc_ref, 0, sizeof(tsc_ref));
> -               kvm->arch.hv_tsc_page = data;
> -               if (!(data & HV_X64_MSR_TSC_REFERENCE_ENABLE))
> -                       break;
> -               gfn = data >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT;
> -               if (kvm_write_guest(kvm, gfn << HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT,
> -                       &tsc_ref, sizeof(tsc_ref)))
> -                       return 1;
> -               mark_page_dirty(kvm, gfn);
> -               break;
> -       }
> -       default:
> -               vcpu_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x "
> -                           "data 0x%llx\n", msr, data);
> -               return 1;
> -       }
> -       return 0;
> -}
> -
> -static int set_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 data)
> -{
> -       switch (msr) {
> -       case HV_X64_MSR_APIC_ASSIST_PAGE: {
> -               u64 gfn;
> -               unsigned long addr;
> -
> -               if (!(data & HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE)) {
> -                       vcpu->arch.hv_vapic = data;
> -                       if (kvm_lapic_enable_pv_eoi(vcpu, 0))
> -                               return 1;
> -                       break;
> -               }
> -               gfn = data >> HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT;
> -               addr = kvm_vcpu_gfn_to_hva(vcpu, gfn);
> -               if (kvm_is_error_hva(addr))
> -                       return 1;
> -               if (__clear_user((void __user *)addr, PAGE_SIZE))
> -                       return 1;
> -               vcpu->arch.hv_vapic = data;
> -               kvm_vcpu_mark_page_dirty(vcpu, gfn);
> -               if (kvm_lapic_enable_pv_eoi(vcpu, gfn_to_gpa(gfn) | KVM_MSR_ENABLED))
> -                       return 1;
> -               break;
> -       }
> -       case HV_X64_MSR_EOI:
> -               return kvm_hv_vapic_msr_write(vcpu, APIC_EOI, data);
> -       case HV_X64_MSR_ICR:
> -               return kvm_hv_vapic_msr_write(vcpu, APIC_ICR, data);
> -       case HV_X64_MSR_TPR:
> -               return kvm_hv_vapic_msr_write(vcpu, APIC_TASKPRI, data);
> -       default:
> -               vcpu_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x "
> -                           "data 0x%llx\n", msr, data);
> -               return 1;
> -       }
> -
> -       return 0;
> -}
> -
>  static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
>  {
>         gpa_t gpa = data & ~0x3f;
> @@ -2224,15 +2103,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
>                  */
>                 break;
>         case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
> -               if (kvm_hv_msr_partition_wide(msr)) {
> -                       int r;
> -                       mutex_lock(&vcpu->kvm->lock);
> -                       r = set_msr_hyperv_pw(vcpu, msr, data);
> -                       mutex_unlock(&vcpu->kvm->lock);
> -                       return r;
> -               } else
> -                       return set_msr_hyperv(vcpu, msr, data);
> -               break;
> +               return kvm_hv_set_msr_common(vcpu, msr, data);
>         case MSR_IA32_BBL_CR_CTL3:
>                 /* Drop writes to this legacy MSR -- see rdmsr
>                  * counterpart for further detail.
> @@ -2315,68 +2186,6 @@ static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
>         return 0;
>  }
>
> -static int get_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
> -{
> -       u64 data = 0;
> -       struct kvm *kvm = vcpu->kvm;
> -
> -       switch (msr) {
> -       case HV_X64_MSR_GUEST_OS_ID:
> -               data = kvm->arch.hv_guest_os_id;
> -               break;
> -       case HV_X64_MSR_HYPERCALL:
> -               data = kvm->arch.hv_hypercall;
> -               break;
> -       case HV_X64_MSR_TIME_REF_COUNT: {
> -               data =
> -                    div_u64(get_kernel_ns() + kvm->arch.kvmclock_offset, 100);
> -               break;
> -       }
> -       case HV_X64_MSR_REFERENCE_TSC:
> -               data = kvm->arch.hv_tsc_page;
> -               break;
> -       default:
> -               vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
> -               return 1;
> -       }
> -
> -       *pdata = data;
> -       return 0;
> -}
> -
> -static int get_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
> -{
> -       u64 data = 0;
> -
> -       switch (msr) {
> -       case HV_X64_MSR_VP_INDEX: {
> -               int r;
> -               struct kvm_vcpu *v;
> -               kvm_for_each_vcpu(r, v, vcpu->kvm) {
> -                       if (v == vcpu) {
> -                               data = r;
> -                               break;
> -                       }
> -               }
> -               break;
> -       }
> -       case HV_X64_MSR_EOI:
> -               return kvm_hv_vapic_msr_read(vcpu, APIC_EOI, pdata);
> -       case HV_X64_MSR_ICR:
> -               return kvm_hv_vapic_msr_read(vcpu, APIC_ICR, pdata);
> -       case HV_X64_MSR_TPR:
> -               return kvm_hv_vapic_msr_read(vcpu, APIC_TASKPRI, pdata);
> -       case HV_X64_MSR_APIC_ASSIST_PAGE:
> -               data = vcpu->arch.hv_vapic;
> -               break;
> -       default:
> -               vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
> -               return 1;
> -       }
> -       *pdata = data;
> -       return 0;
> -}
> -
>  int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
>  {
>         u64 data;
> @@ -2495,14 +2304,8 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
>                 msr_info->data = 0x20000000;
>                 break;
>         case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
> -               if (kvm_hv_msr_partition_wide(msr_info->index)) {
> -                       int r;
> -                       mutex_lock(&vcpu->kvm->lock);
> -                       r = get_msr_hyperv_pw(vcpu, msr_info->index, &msr_info->data);
> -                       mutex_unlock(&vcpu->kvm->lock);
> -                       return r;
> -               } else
> -                       return get_msr_hyperv(vcpu, msr_info->index, &msr_info->data);
> +               return kvm_hv_get_msr_common(vcpu,
> +                                            msr_info->index, &msr_info->data);
>                 break;
>         case MSR_IA32_BBL_CR_CTL3:
>                 /* This legacy MSR exists but isn't fully documented in current
> @@ -5885,66 +5688,6 @@ int kvm_emulate_halt(struct kvm_vcpu *vcpu)
>  }
>  EXPORT_SYMBOL_GPL(kvm_emulate_halt);
>
> -int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
> -{
> -       u64 param, ingpa, outgpa, ret;
> -       uint16_t code, rep_idx, rep_cnt, res = HV_STATUS_SUCCESS, rep_done = 0;
> -       bool fast, longmode;
> -
> -       /*
> -        * hypercall generates UD from non zero cpl and real mode
> -        * per HYPER-V spec
> -        */
> -       if (kvm_x86_ops->get_cpl(vcpu) != 0 || !is_protmode(vcpu)) {
> -               kvm_queue_exception(vcpu, UD_VECTOR);
> -               return 0;
> -       }
> -
> -       longmode = is_64_bit_mode(vcpu);
> -
> -       if (!longmode) {
> -               param = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDX) << 32) |
> -                       (kvm_register_read(vcpu, VCPU_REGS_RAX) & 0xffffffff);
> -               ingpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RBX) << 32) |
> -                       (kvm_register_read(vcpu, VCPU_REGS_RCX) & 0xffffffff);
> -               outgpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDI) << 32) |
> -                       (kvm_register_read(vcpu, VCPU_REGS_RSI) & 0xffffffff);
> -       }
> -#ifdef CONFIG_X86_64
> -       else {
> -               param = kvm_register_read(vcpu, VCPU_REGS_RCX);
> -               ingpa = kvm_register_read(vcpu, VCPU_REGS_RDX);
> -               outgpa = kvm_register_read(vcpu, VCPU_REGS_R8);
> -       }
> -#endif
> -
> -       code = param & 0xffff;
> -       fast = (param >> 16) & 0x1;
> -       rep_cnt = (param >> 32) & 0xfff;
> -       rep_idx = (param >> 48) & 0xfff;
> -
> -       trace_kvm_hv_hypercall(code, fast, rep_cnt, rep_idx, ingpa, outgpa);
> -
> -       switch (code) {
> -       case HV_X64_HV_NOTIFY_LONG_SPIN_WAIT:
> -               kvm_vcpu_on_spin(vcpu);
> -               break;
> -       default:
> -               res = HV_STATUS_INVALID_HYPERCALL_CODE;
> -               break;
> -       }
> -
> -       ret = res | (((u64)rep_done & 0xfff) << 32);
> -       if (longmode) {
> -               kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
> -       } else {
> -               kvm_register_write(vcpu, VCPU_REGS_RDX, ret >> 32);
> -               kvm_register_write(vcpu, VCPU_REGS_RAX, ret & 0xffffffff);
> -       }
> -
> -       return 1;
> -}
> -
>  /*
>   * kvm_pv_kick_cpu_op:  Kick a vcpu.
>   *
> diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
> index edc8cdc..c04b56b 100644
> --- a/arch/x86/kvm/x86.h
> +++ b/arch/x86/kvm/x86.h
> @@ -147,6 +147,11 @@ static inline void kvm_register_writel(struct kvm_vcpu *vcpu,
>         return kvm_register_write(vcpu, reg, val);
>  }
>
> +static inline u64 get_kernel_ns(void)
> +{
> +       return ktime_get_boot_ns();
> +}
> +
>  void kvm_before_handle_nmi(struct kvm_vcpu *vcpu);
>  void kvm_after_handle_nmi(struct kvm_vcpu *vcpu);
>  void kvm_set_pending_timer(struct kvm_vcpu *vcpu);
> --
> 2.1.4
>
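As a quick usage view of how x86.c consumes the split, the Hyper-V MSR handling there collapses to a pair of range cases that forward to the new module; the kvm->lock serialization for partition-wide MSRs now lives inside hyperv.c rather than at the call site. Condensed from the x86.c hunks above:

    /* in kvm_set_msr_common() */
    case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
            return kvm_hv_set_msr_common(vcpu, msr, data);

    /* in kvm_get_msr_common() */
    case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
            return kvm_hv_get_msr_common(vcpu, msr_info->index, &msr_info->data);
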


