From: David Gibson
Subject: Re: [Qemu-ppc] [PATCH v3 2/3] ppc/hash64: Add proper real mode translation support
Date: Tue, 5 Jul 2016 13:21:54 +1000
User-agent: Mutt/1.6.1 (2016-04-27)

On Tue, Jul 05, 2016 at 07:37:08AM +1000, Benjamin Herrenschmidt wrote:
> This adds proper support for translating real mode addresses based
> on the combination of HV and LPCR bits. This handles the HRMOR offset
> for hypervisor real mode, and both RMA and VRMA modes for guest
> real mode. PAPR mode adjusts the offsets appropriately to match the
> RMA used in TCG, but we need to limit it to the maximum supported by
> the implementation (16G).
> 
> This includes some fixes by Cédric Le Goater <address@hidden>
> 
> Signed-off-by: Benjamin Herrenschmidt <address@hidden>

I've adjusted this for my replacement for 1/3 and applied it to ppc-for-2.7.
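
Summarising the real-mode flow the patch implements: hypervisor real mode (or a
CPU without HV support) ORs in HRMOR when the top EA bit is clear; otherwise
LPCR[VPM0] selects VRMA translation through a synthesized SLB entry, and with
VPM0 clear the access goes through an RMOR-based RMA bounded by the size decoded
from LPCR[RMLS]. Below is a minimal, self-contained sketch of that decision flow;
the struct and field names are simplified stand-ins of my own, not QEMU's
CPUPPCState.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the relevant CPUPPCState fields; the names and
 * layout here are this sketch's own, not QEMU's. */
struct rm_env {
    bool hv;            /* effective HV real mode (MSR[HV] set, or no HV support) */
    bool vpm0;          /* LPCR[VPM0] */
    uint64_t hrmor;     /* hypervisor real mode offset */
    uint64_t rmor;      /* guest real mode offset */
    uint64_t rmls;      /* RMA limit decoded from LPCR[RMLS] */
};

/* Returns true and fills *raddr on success, false when the access would fault
 * (the real code raises an ISI/DSI or machine check on these paths). */
static bool real_mode_xlate(const struct rm_env *e, uint64_t eaddr, uint64_t *raddr)
{
    uint64_t ra = eaddr & 0x0FFFFFFFFFFFFFFFULL;   /* top 4 EA bits ignored */

    if (e->hv) {
        if (!(eaddr >> 63)) {
            ra |= e->hrmor;        /* HRMOR applies to the low half of the EA space */
        }
    } else if (e->vpm0) {
        return false;              /* VRMA: goes through the synthesized vrma_slb
                                    * and the hash table, not modelled here */
    } else if (ra < e->rmls) {
        ra |= e->rmor;             /* RMA, bounded by RMLS */
    } else {
        return false;              /* outside the RMA */
    }
    *raddr = ra;
    return true;
}

int main(void)
{
    /* 256MB RMA at RMOR 0, guest (non-HV) real mode, VPM0 clear */
    struct rm_env e = { .hv = false, .vpm0 = false,
                        .hrmor = 0, .rmor = 0, .rmls = 0x10000000ULL };
    uint64_t ra;

    printf("0x0ffffff0 -> %s\n", real_mode_xlate(&e, 0x0ffffff0ULL, &ra) ? "ok" : "fault");
    printf("0x20000000 -> %s\n", real_mode_xlate(&e, 0x20000000ULL, &ra) ? "ok" : "fault");
    return 0;
}

The actual patch additionally raises the appropriate ISI, DSI or machine check
on the failure paths, and falls through to the normal hash translation for the
VRMA case, as the mmu-hash64.c hunks below show.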

> ---
> 
> v2. Reworked to calculate rmls and VRMA slb when updating LPCR
> v3. Fix 970 "apple mode"
> 
>  hw/ppc/spapr.c              |   7 ++
>  target-ppc/cpu.h            |   2 +
>  target-ppc/mmu-hash64.c     | 165 +++++++++++++++++++++++++++++++++++++++++---
>  target-ppc/mmu-hash64.h     |   3 +
>  target-ppc/translate_init.c |  14 +++-
>  5 files changed, 181 insertions(+), 10 deletions(-)
> 
> diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c
> index 78ebd9e..0771c93 100644
> --- a/hw/ppc/spapr.c
> +++ b/hw/ppc/spapr.c
> @@ -1771,6 +1771,13 @@ static void ppc_spapr_init(MachineState *machine)
>              spapr->vrma_adjust = 1;
>              spapr->rma_size = MIN(spapr->rma_size, 0x10000000);
>          }
> +
> +        /* Actually we don't support unbounded RMA anymore since we
> +         * added proper emulation of HV mode. The max we can get is
> +         * 16G, which also happens to be what we configure for PAPR
> +         * mode, so make sure we don't do anything bigger than that.
> +         */
> +        spapr->rma_size = MIN(spapr->rma_size, 0x400000000ull);
>      }
>  
>      if (spapr->rma_size > node0_size) {
> diff --git a/target-ppc/cpu.h b/target-ppc/cpu.h
> index af73bce..2666a3f 100644
> --- a/target-ppc/cpu.h
> +++ b/target-ppc/cpu.h
> @@ -1047,6 +1047,8 @@ struct CPUPPCState {
>      uint64_t insns_flags2;
>  #if defined(TARGET_PPC64)
>      struct ppc_segment_page_sizes sps;
> +    ppc_slb_t vrma_slb;
> +    target_ulong rmls;
>      bool ci_large_pages;
>  #endif
>  
> diff --git a/target-ppc/mmu-hash64.c b/target-ppc/mmu-hash64.c
> index 8a39295..347f084 100644
> --- a/target-ppc/mmu-hash64.c
> +++ b/target-ppc/mmu-hash64.c
> @@ -684,11 +684,52 @@ int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr,
>  
>      assert((rwx == 0) || (rwx == 1) || (rwx == 2));
>  
> +    /* Note on LPCR usage: 970 uses HID4, but our special variant
> +     * of store_spr copies relevant fields into env->spr[SPR_LPCR].
> +     * Similarly we filter unimplemented bits when storing into
> +     * LPCR depending on the MMU version. This code can thus just
> +     * use the LPCR "as-is".
> +     */
> +
>      /* 1. Handle real mode accesses */
>      if (((rwx == 2) && (msr_ir == 0)) || ((rwx != 2) && (msr_dr == 0))) {
> -        /* Translation is off */
> -        /* In real mode the top 4 effective address bits are ignored */
> +        /* Translation is supposedly "off" */
> +        /* In real mode the top 4 effective address bits are (mostly) ignored */
>          raddr = eaddr & 0x0FFFFFFFFFFFFFFFULL;
> +
> +        /* In HV mode, add HRMOR if top EA bit is clear */
> +        if (msr_hv || !env->has_hv_mode) {
> +            if (!(eaddr >> 63)) {
> +                raddr |= env->spr[SPR_HRMOR];
> +            }
> +        } else {
> +            /* Otherwise, check VPM for RMA vs VRMA */
> +            if (env->spr[SPR_LPCR] & LPCR_VPM0) {
> +                slb = &env->vrma_slb;
> +                if (slb->sps) {
> +                    goto skip_slb_search;
> +                }
> +                /* Not much else to do here */
> +                cs->exception_index = POWERPC_EXCP_MCHECK;
> +                env->error_code = 0;
> +                return 1;
> +            } else if (raddr < env->rmls) {
> +                /* RMA. Check bounds in RMLS */
> +                raddr |= env->spr[SPR_RMOR];
> +            } else {
> +                /* The access failed, generate the appropriate interrupt */
> +                if (rwx == 2) {
> +                    ppc_hash64_set_isi(cs, env, 0x08000000);
> +                } else {
> +                    dsisr = 0x08000000;
> +                    if (rwx == 1) {
> +                        dsisr |= 0x02000000;
> +                    }
> +                    ppc_hash64_set_dsi(cs, env, eaddr, dsisr);
> +                }
> +                return 1;
> +            }
> +        }
>          tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
>                       PAGE_READ | PAGE_WRITE | PAGE_EXEC, mmu_idx,
>                       TARGET_PAGE_SIZE);
> @@ -697,7 +738,6 @@ int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr,
>  
>      /* 2. Translation is on, so look up the SLB */
>      slb = slb_lookup(cpu, eaddr);
> -
>      if (!slb) {
>          if (rwx == 2) {
>              cs->exception_index = POWERPC_EXCP_ISEG;
> @@ -710,6 +750,8 @@ int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr,
>          return 1;
>      }
>  
> +skip_slb_search:
> +
>      /* 3. Check for segment level no-execute violation */
>      if ((rwx == 2) && (slb->vsid & SLB_VSID_N)) {
>          ppc_hash64_set_isi(cs, env, 0x10000000);
> @@ -792,18 +834,37 @@ hwaddr ppc_hash64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong addr)
>  {
>      CPUPPCState *env = &cpu->env;
>      ppc_slb_t *slb;
> -    hwaddr pte_offset;
> +    hwaddr pte_offset, raddr;
>      ppc_hash_pte64_t pte;
>      unsigned pshift;
>  
> +    /* Handle real mode */
>      if (msr_dr == 0) {
>          /* In real mode the top 4 effective address bits are ignored */
> -        return addr & 0x0FFFFFFFFFFFFFFFULL;
> -    }
> +        raddr = addr & 0x0FFFFFFFFFFFFFFFULL;
>  
> -    slb = slb_lookup(cpu, addr);
> -    if (!slb) {
> -        return -1;
> +        /* In HV mode, add HRMOR if top EA bit is clear */
> +        if ((msr_hv || !env->has_hv_mode) && !(addr >> 63)) {
> +            return raddr | env->spr[SPR_HRMOR];
> +        }
> +
> +        /* Otherwise, check VPM for RMA vs VRMA */
> +        if (env->spr[SPR_LPCR] & LPCR_VPM0) {
> +             slb = &env->vrma_slb;
> +             if (!slb->sps) {
> +                 return -1;
> +             }
> +        } else if (raddr < env->rmls) {
> +            /* RMA. Check bounds in RMLS */
> +            return raddr | env->spr[SPR_RMOR];
> +        } else {
> +            return -1;
> +        }
> +    } else {
> +        slb = slb_lookup(cpu, addr);
> +        if (!slb) {
> +            return -1;
> +        }
>      }
>  
>      pte_offset = ppc_hash64_htab_lookup(cpu, slb, addr, &pte, &pshift);
> @@ -849,6 +910,90 @@ void ppc_hash64_tlb_flush_hpte(PowerPCCPU *cpu,
>      tlb_flush(CPU(cpu), 1);
>  }
>  
> +void ppc_hash64_update_rmls(CPUPPCState *env)
> +{
> +    uint64_t lpcr = env->spr[SPR_LPCR];
> +
> +    /*
> +     * This is the full 4-bit encoding of POWER8. Previous
> +     * CPUs only support a subset of these, but the filtering
> +     * is done when writing LPCR
> +     */
> +    switch ((lpcr & LPCR_RMLS) >> LPCR_RMLS_SHIFT) {
> +    case 0x8: /* 32MB */
> +        env->rmls = 0x2000000ull;
> +        break;
> +    case 0x3: /* 64MB */
> +        env->rmls = 0x4000000ull;
> +        break;
> +    case 0x7: /* 128MB */
> +        env->rmls = 0x8000000ull;
> +        break;
> +    case 0x4: /* 256MB */
> +        env->rmls = 0x10000000ull;
> +        break;
> +    case 0x2: /* 1GB */
> +        env->rmls = 0x40000000ull;
> +        break;
> +    case 0x1: /* 16GB */
> +        env->rmls = 0x400000000ull;
> +        break;
> +    default:
> +        /* What to do here ??? */
> +        env->rmls = 0;
> +    }
> +}
> +
> +void ppc_hash64_update_vrma(CPUPPCState *env)
> +{
> +    const struct ppc_one_seg_page_size *sps = NULL;
> +    target_ulong esid, vsid, lpcr;
> +    ppc_slb_t *slb = &env->vrma_slb;
> +    uint32_t vrmasd;
> +    int i;
> +
> +    /* First clear it */
> +    slb->esid = slb->vsid = 0;
> +    slb->sps = NULL;
> +
> +    /* Is VRMA enabled ? */
> +    lpcr = env->spr[SPR_LPCR];
> +    if (!(lpcr & LPCR_VPM0)) {
> +        return;
> +    }
> +
> +    /* Make one up. Mostly ignore the ESID which will not be
> +     * needed for translation
> +     */
> +    vsid = SLB_VSID_VRMA;
> +    vrmasd = (lpcr & LPCR_VRMASD) >> LPCR_VRMASD_SHIFT;
> +    vsid |= (vrmasd << 4) & (SLB_VSID_L | SLB_VSID_LP);
> +    esid = SLB_ESID_V;
> +
> +    for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
> +        const struct ppc_one_seg_page_size *sps1 = &env->sps.sps[i];
> +
> +        if (!sps1->page_shift) {
> +            break;
> +        }
> +
> +        if ((vsid & SLB_VSID_LLP_MASK) == sps1->slb_enc) {
> +            sps = sps1;
> +            break;
> +        }
> +    }
> +
> +    if (!sps) {
> +        error_report("Bad page size encoding esid 0x"TARGET_FMT_lx
> +                     " vsid 0x"TARGET_FMT_lx, esid, vsid);
> +        return;
> +    }
> +
> +    slb->vsid = vsid;
> +    slb->esid = esid;
> +    slb->sps = sps;
> +}
> +
>  void helper_store_lpcr(CPUPPCState *env, target_ulong val)
>  {
>      uint64_t lpcr = 0;
> @@ -904,4 +1049,6 @@ void helper_store_lpcr(CPUPPCState *env, target_ulong val)
>          ;
>      }
>      env->spr[SPR_LPCR] = lpcr;
> +    ppc_hash64_update_rmls(env);
> +    ppc_hash64_update_vrma(env);
>  }
> diff --git a/target-ppc/mmu-hash64.h b/target-ppc/mmu-hash64.h
> index 154a306..3a7476b 100644
> --- a/target-ppc/mmu-hash64.h
> +++ b/target-ppc/mmu-hash64.h
> @@ -18,6 +18,8 @@ void ppc_hash64_tlb_flush_hpte(PowerPCCPU *cpu,
>                                 target_ulong pte0, target_ulong pte1);
>  unsigned ppc_hash64_hpte_page_shift_noslb(PowerPCCPU *cpu,
>                                            uint64_t pte0, uint64_t pte1);
> +void ppc_hash64_update_vrma(CPUPPCState *env);
> +void ppc_hash64_update_rmls(CPUPPCState *env);
>  #endif
>  
>  /*
> @@ -36,6 +38,7 @@ unsigned ppc_hash64_hpte_page_shift_noslb(PowerPCCPU *cpu,
>  #define SLB_VSID_B_256M         0x0000000000000000ULL
>  #define SLB_VSID_B_1T           0x4000000000000000ULL
>  #define SLB_VSID_VSID           0x3FFFFFFFFFFFF000ULL
> +#define SLB_VSID_VRMA           (0x0001FFFFFF000000ULL | SLB_VSID_B_1T)
>  #define SLB_VSID_PTEM           (SLB_VSID_B | SLB_VSID_VSID)
>  #define SLB_VSID_KS             0x0000000000000800ULL
>  #define SLB_VSID_KP             0x0000000000000400ULL
> diff --git a/target-ppc/translate_init.c b/target-ppc/translate_init.c
> index 843f19b..7bc27d0 100644
> --- a/target-ppc/translate_init.c
> +++ b/target-ppc/translate_init.c
> @@ -8791,11 +8791,19 @@ void cpu_ppc_set_papr(PowerPCCPU *cpu)
>      /* Set emulated LPCR to not send interrupts to hypervisor. Note that
>       * under KVM, the actual HW LPCR will be set differently by KVM itself,
>       * the settings below ensure proper operations with TCG in absence of
> -     * a real hypervisor
> +     * a real hypervisor.
> +     *
> +     * Clearing VPM0 will also cause us to use RMOR in mmu-hash64.c for
> +     * real mode accesses, which thankfully defaults to 0 and isn't
> +     * accessible in guest mode.
>       */
>      lpcr->default_value &= ~(LPCR_VPM0 | LPCR_VPM1 | LPCR_ISL | LPCR_KBV);
>      lpcr->default_value |= LPCR_LPES0 | LPCR_LPES1;
>  
> +    /* Set RMLS to the max (ie, 16G) */
> +    lpcr->default_value &= ~LPCR_RMLS;
> +    lpcr->default_value |= 1ull << LPCR_RMLS_SHIFT;
> +
>      /* P7 and P8 have slightly different PECE bits, mostly because P8 adds
>       * bits 47 and 48, which are reserved on P7. Here we set them all, which
>       * will work as expected for both implementations
> @@ -8811,6 +8819,10 @@ void cpu_ppc_set_papr(PowerPCCPU *cpu)
>      /* Set a full AMOR so guest can use the AMR as it sees fit */
>      env->spr[SPR_AMOR] = amor->default_value = 0xffffffffffffffffull;
>  
> +    /* Update some env bits based on new LPCR value */
> +    ppc_hash64_update_rmls(env);
> +    ppc_hash64_update_vrma(env);
> +
>      /* Tell KVM that we're in PAPR mode */
>      if (kvm_enabled()) {
>          kvmppc_set_papr(cpu);
> 
> 

-- 
David Gibson                    | I'll have my music baroque, and my code
david AT gibson.dropbear.id.au  | minimalist, thank you.  NOT _the_ _other_
                                | _way_ _around_!
http://www.ozlabs.org/~dgibson
