[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
Re: [Qemu-ppc] [PATCH 3/3] ppc/hash64: Fix support for LPCR:ISL
From: David Gibson
Subject: Re: [Qemu-ppc] [PATCH 3/3] ppc/hash64: Fix support for LPCR:ISL
Date: Tue, 5 Jul 2016 13:22:12 +1000
User-agent: Mutt/1.6.1 (2016-04-27)
On Mon, Jul 04, 2016 at 05:44:11PM +1000, Benjamin Herrenschmidt wrote:
> We need to ignore the segment page size and essentially treat
> all pages as coming from a 4K segment.
>
> Signed-off-by: Benjamin Herrenschmidt <address@hidden>
I've adjusted this for my different version of 1/3 and applied to
ppc-for-2.7.
>
> # Conflicts:
> # target-ppc/mmu-hash64.c
> ---
> target-ppc/mmu-hash64.c | 29 +++++++++++++++++++----------
> 1 file changed, 19 insertions(+), 10 deletions(-)
>
> diff --git a/target-ppc/mmu-hash64.c b/target-ppc/mmu-hash64.c
> index 87a2c26..878a0a0 100644
> --- a/target-ppc/mmu-hash64.c
> +++ b/target-ppc/mmu-hash64.c
> @@ -487,9 +487,10 @@ static unsigned hpte_decode_psize(const struct ppc_one_seg_page_size *sps,
> }
>
> static target_long ppc_hash64_pteg_search(PowerPCCPU *cpu, hwaddr hash,
> - ppc_slb_t *slb, target_ulong ptem,
> - ppc_hash_pte64_t *pte,
> - unsigned *pshift)
> +                                          const struct ppc_one_seg_page_size *sps,
> + target_ulong ptem,
> + ppc_hash_pte64_t *pte,
> + unsigned *pshift)
> {
> CPUPPCState *env = &cpu->env;
> target_ulong pte_index, pte0, pte1;
> @@ -511,7 +512,7 @@ static target_long ppc_hash64_pteg_search(PowerPCCPU *cpu, hwaddr hash,
> pte1 = ppc_hash64_load_hpte1(cpu, token, i);
>
> /* Decode the actual page size */
> - *pshift = hpte_decode_psize(slb->sps, pte0, pte1);
> + *pshift = hpte_decode_psize(sps, pte0, pte1);
>
> /* If there is no match, ignore the PTE, it could simply be
> * for a different segment size encoding and the architecture
> @@ -544,23 +545,31 @@ static hwaddr ppc_hash64_htab_lookup(PowerPCCPU *cpu,
> hwaddr pte_offset;
> hwaddr hash;
> uint64_t vsid, epnmask, epn, ptem;
> + const struct ppc_one_seg_page_size *sps = slb->sps;
>
> /* The SLB store path should prevent any bad page size encodings
> * getting in there, so: */
> - assert(slb->sps);
> + assert(sps);
>
> - epnmask = ~((1ULL << slb->sps->page_shift) - 1);
> + /* If ISL is set in LPCR we need to clamp the page size to 4K */
> + if (env->spr[SPR_LPCR] & LPCR_ISL) {
> + /* We assume that when using TCG, 4k is first entry of SPS */
> + sps = &env->sps.sps[0];
> + assert(sps->page_shift == 12);
> + }
> +
> + epnmask = ~((1ULL << sps->page_shift) - 1);
>
> if (slb->vsid & SLB_VSID_B) {
> /* 1TB segment */
> vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT_1T;
> epn = (eaddr & ~SEGMENT_MASK_1T) & epnmask;
> - hash = vsid ^ (vsid << 25) ^ (epn >> slb->sps->page_shift);
> + hash = vsid ^ (vsid << 25) ^ (epn >> sps->page_shift);
> } else {
> /* 256M segment */
> vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT;
> epn = (eaddr & ~SEGMENT_MASK_256M) & epnmask;
> - hash = vsid ^ (epn >> slb->sps->page_shift);
> + hash = vsid ^ (epn >> sps->page_shift);
> }
> ptem = (slb->vsid & SLB_VSID_PTEM) | ((epn >> 16) & HPTE64_V_AVPN);
> ptem |= HPTE64_V_VALID;
> @@ -577,7 +586,7 @@ static hwaddr ppc_hash64_htab_lookup(PowerPCCPU *cpu,
> " vsid=" TARGET_FMT_lx " ptem=" TARGET_FMT_lx
> " hash=" TARGET_FMT_plx "\n",
> env->htab_base, env->htab_mask, vsid, ptem, hash);
> - pte_offset = ppc_hash64_pteg_search(cpu, hash, slb,
> + pte_offset = ppc_hash64_pteg_search(cpu, hash, sps,
> ptem, pte, pshift);
>
> if (pte_offset == -1) {
> @@ -589,7 +598,7 @@ static hwaddr ppc_hash64_htab_lookup(PowerPCCPU *cpu,
> " hash=" TARGET_FMT_plx "\n", env->htab_base,
> env->htab_mask, vsid, ptem, ~hash);
>
> - pte_offset = ppc_hash64_pteg_search(cpu, ~hash, slb,
> + pte_offset = ppc_hash64_pteg_search(cpu, ~hash, sps,
> ptem, pte, pshift);
> }
>
--
David Gibson | I'll have my music baroque, and my code
david AT gibson.dropbear.id.au | minimalist, thank you. NOT _the_ _other_
| _way_ _around_!
http://www.ozlabs.org/~dgibson
signature.asc
Description: PGP signature