From: Greg Kurz
Subject: Re: [PATCH v6 11/18] target/ppc: Only calculate RMLS derived RMA limit on demand
Date: Wed, 26 Feb 2020 14:24:53 +0100
On Tue, 25 Feb 2020 10:37:17 +1100
David Gibson <address@hidden> wrote:
> When the LPCR is written, we update the env->rmls field with the RMA limit
> it implies. Simplify things by just calculating the value directly from
> the LPCR value when we need it.
>
> It's possible this is a little slower, but it's unlikely to be significant,
> since this is only for real mode accesses in a translation configuration
> that's not used very often, and the whole thing is behind the qemu TLB
> anyway. Therefore, keeping the number of state variables down and not
> having to worry about making sure it's always in sync seems the better
> option.
>
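For context, rmls_limit() itself (reworked earlier in this series by patches 09 and 10/18, and only visible as a closing brace in the first hunk below) boils down to a table lookup keyed on the LPCR[RMLS] field, so recomputing it on demand is cheap. A rough sketch of that helper, assuming the LPCR_RMLS/LPCR_RMLS_SHIFT masks from target/ppc/cpu.h and the GiB/MiB constants from qemu/units.h:

    static target_ulong rmls_limit(PowerPCCPU *cpu)
    {
        CPUPPCState *env = &cpu->env;
        /*
         * RMLS encodings are implementation dependent in principle;
         * this set matches POWER4+..POWER8.  Unsupported encodings
         * yield a 0-sized RMA, which triggers an immediate DSI or
         * ISI on any real mode access.
         */
        static const target_ulong rma_sizes[16] = {
            [0] = 256 * GiB,
            [1] = 16 * GiB,
            [2] = 1 * GiB,
            [3] = 64 * MiB,
            [4] = 256 * MiB,
            [7] = 128 * MiB,
            [8] = 32 * MiB,
        };
        target_ulong rmls = (env->spr[SPR_LPCR] & LPCR_RMLS) >> LPCR_RMLS_SHIFT;

        return rma_sizes[rmls];
    }

Since the lookup reads only LPCR, there is no cached state to keep in sync when the register is written, which is the point of dropping env->rmls.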
This patch also refactors the code of ppc_hash64_update_vrma(), which
is definitely an improvement, but seems a bit unrelated to the patch
title... I'd personally make that a separate patch, but it's your call,
of course :)
Also, a cosmetic remark; see below.
> Signed-off-by: David Gibson <address@hidden>
> Reviewed-by: Cédric Le Goater <address@hidden>
> ---
> target/ppc/cpu.h | 1 -
> target/ppc/mmu-hash64.c | 84 ++++++++++++++++++++---------------------
> 2 files changed, 40 insertions(+), 45 deletions(-)
>
> diff --git a/target/ppc/cpu.h b/target/ppc/cpu.h
> index 8077fdb068..f9871b1233 100644
> --- a/target/ppc/cpu.h
> +++ b/target/ppc/cpu.h
> @@ -1046,7 +1046,6 @@ struct CPUPPCState {
> uint64_t insns_flags2;
> #if defined(TARGET_PPC64)
> ppc_slb_t vrma_slb;
> - target_ulong rmls;
> #endif
>
> int error_code;
> diff --git a/target/ppc/mmu-hash64.c b/target/ppc/mmu-hash64.c
> index dd0df6fd01..ac21c14f68 100644
> --- a/target/ppc/mmu-hash64.c
> +++ b/target/ppc/mmu-hash64.c
> @@ -791,6 +791,35 @@ static target_ulong rmls_limit(PowerPCCPU *cpu)
> }
> }
>
> +static int build_vrma_slbe(PowerPCCPU *cpu, ppc_slb_t *slb)
> +{
> + CPUPPCState *env = &cpu->env;
> + target_ulong lpcr = env->spr[SPR_LPCR];
> + uint32_t vrmasd = (lpcr & LPCR_VRMASD) >> LPCR_VRMASD_SHIFT;
> + target_ulong vsid = SLB_VSID_VRMA | ((vrmasd << 4) & SLB_VSID_LLP_MASK);
> + int i;
> +
> + for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
> + const PPCHash64SegmentPageSizes *sps = &cpu->hash64_opts->sps[i];
> +
> + if (!sps->page_shift) {
> + break;
> + }
> +
> + if ((vsid & SLB_VSID_LLP_MASK) == sps->slb_enc) {
> + slb->esid = SLB_ESID_V;
> + slb->vsid = vsid;
> + slb->sps = sps;
> + return 0;
> + }
> + }
> +
> + error_report("Bad page size encoding in LPCR[VRMASD]; LPCR=0x"
> + TARGET_FMT_lx"\n", lpcr);
> +
> + return -1;
> +}
> +
> int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr,
> int rwx, int mmu_idx)
> {
> @@ -844,8 +873,10 @@ int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr,
>
> goto skip_slb_search;
> } else {
> + target_ulong limit = rmls_limit(cpu);
> +
> /* Emulated old-style RMO mode, bounds check against RMLS */
> - if (raddr >= env->rmls) {
> + if (raddr >= limit) {
> if (rwx == 2) {
> ppc_hash64_set_isi(cs, SRR1_PROTFAULT);
> } else {
> @@ -1007,8 +1038,9 @@ hwaddr ppc_hash64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong addr)
> return -1;
> }
> } else {
> + target_ulong limit = rmls_limit(cpu);
Maybe add an empty line here, like you did above, for consistency and
better readability?
Anyway, feel free to add:
Reviewed-by: Greg Kurz <address@hidden>
> /* Emulated old-style RMO mode, bounds check against RMLS */
> - if (raddr >= env->rmls) {
> + if (raddr >= limit) {
> return -1;
> }
> return raddr | env->spr[SPR_RMOR];
> @@ -1043,53 +1075,18 @@ void ppc_hash64_tlb_flush_hpte(PowerPCCPU *cpu, target_ulong ptex,
> static void ppc_hash64_update_vrma(PowerPCCPU *cpu)
> {
> CPUPPCState *env = &cpu->env;
> - const PPCHash64SegmentPageSizes *sps = NULL;
> - target_ulong esid, vsid, lpcr;
> ppc_slb_t *slb = &env->vrma_slb;
> - uint32_t vrmasd;
> - int i;
> -
> - /* First clear it */
> - slb->esid = slb->vsid = 0;
> - slb->sps = NULL;
>
> /* Is VRMA enabled ? */
> - if (!ppc_hash64_use_vrma(env)) {
> - return;
> - }
> -
> - /*
> - * Make one up. Mostly ignore the ESID which will not be needed
> - * for translation
> - */
> - lpcr = env->spr[SPR_LPCR];
> - vsid = SLB_VSID_VRMA;
> - vrmasd = (lpcr & LPCR_VRMASD) >> LPCR_VRMASD_SHIFT;
> - vsid |= (vrmasd << 4) & (SLB_VSID_L | SLB_VSID_LP);
> - esid = SLB_ESID_V;
> -
> - for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
> - const PPCHash64SegmentPageSizes *sps1 = &cpu->hash64_opts->sps[i];
> -
> - if (!sps1->page_shift) {
> - break;
> + if (ppc_hash64_use_vrma(env)) {
> + if (build_vrma_slbe(cpu, slb) == 0) {
> + return;
> }
> -
> - if ((vsid & SLB_VSID_LLP_MASK) == sps1->slb_enc) {
> - sps = sps1;
> - break;
> - }
> - }
> -
> - if (!sps) {
> - error_report("Bad page size encoding esid 0x"TARGET_FMT_lx
> - " vsid 0x"TARGET_FMT_lx, esid, vsid);
> - return;
> }
>
> - slb->vsid = vsid;
> - slb->esid = esid;
> - slb->sps = sps;
> + /* Otherwise, clear it to indicate error */
> + slb->esid = slb->vsid = 0;
> + slb->sps = NULL;
> }
>
> void ppc_store_lpcr(PowerPCCPU *cpu, target_ulong val)
> @@ -1098,7 +1095,6 @@ void ppc_store_lpcr(PowerPCCPU *cpu, target_ulong val)
> CPUPPCState *env = &cpu->env;
>
> env->spr[SPR_LPCR] = val & pcc->lpcr_mask;
> - env->rmls = rmls_limit(cpu);
> ppc_hash64_update_vrma(cpu);
> }
>