
Re: [Qemu-devel] [PATCH 04/10] target-ppc: Rework SLB page size lookup


From: David Gibson
Subject: Re: [Qemu-devel] [PATCH 04/10] target-ppc: Rework SLB page size lookup
Date: Wed, 27 Jan 2016 11:59:18 +1100
User-agent: Mutt/1.5.24 (2015-08-30)

On Mon, Jan 25, 2016 at 08:38:49PM +0100, Alexander Graf wrote:
> 
> 
> On 01/25/2016 06:15 AM, David Gibson wrote:
> >Currently, the ppc_hash64_page_shift() function looks up a page size based
> >on information in an SLB entry.  It open codes the bit translation for
> >existing CPUs, however different CPU models can have different SLB
> >encodings.  We already store those in the 'sps' table in CPUPPCState, but
> >we don't currently enforce that that actually matches the logic in
> >ppc_hash64_page_shift.
> >
> >This patch reworks lookup of page size from SLB in several ways:
> >   * ppc_store_slb() will now fail (triggering an illegal instruction
> >     exception) if given a bad SLB page size encoding
> >   * On success ppc_store_slb() stores a pointer to the relevant entry in
> >     the page size table in the SLB entry.  This is looked up directly from
> >     the published table of page size encodings, so can't get out of sync.
> >   * ppc_hash64_htab_lookup() and others now use this precached page size
> >     information rather than decoding the SLB values
> >   * Adjust ppc_hash64_pte_raddr() to take a page shift directly
> >     instead of an SLB entry.  Both callers now have easy access to this,
> >     and we'll need the flexibility shortly.
> >
> >Signed-off-by: David Gibson <address@hidden>
> >---
> >  target-ppc/cpu.h        |  1 +
> >  target-ppc/machine.c    | 20 ++++++++++++++
> >  target-ppc/mmu-hash64.c | 71 ++++++++++++++++++++++++++-----------------------
> >  3 files changed, 59 insertions(+), 33 deletions(-)
> >
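For context, the illegal instruction exception mentioned in the first bullet
is raised by the slbmte helper, which calls ppc_store_slb() and is not touched
by this patch.  Roughly (a sketch; details may differ slightly in the tree
this applies to):

    void helper_store_slb(CPUPPCState *env, target_ulong rb, target_ulong rs)
    {
        PowerPCCPU *cpu = ppc_env_get_cpu(env);

        /* A negative return (bad slot, bad flags, or now a bad page size
         * encoding) is turned into a program check / illegal instruction. */
        if (ppc_store_slb(cpu, rb & 0xfff, rb & ~0xfffULL, rs) < 0) {
            helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
                                       POWERPC_EXCP_INVAL);
        }
    }
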
> >diff --git a/target-ppc/cpu.h b/target-ppc/cpu.h
> >index 2bc96b4..0820390 100644
> >--- a/target-ppc/cpu.h
> >+++ b/target-ppc/cpu.h
> >@@ -419,6 +419,7 @@ typedef struct ppc_slb_t ppc_slb_t;
> >  struct ppc_slb_t {
> >      uint64_t esid;
> >      uint64_t vsid;
> >+    const struct ppc_one_seg_page_size *sps;
> >  };
> >  #define MAX_SLB_ENTRIES         64
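For reference, the page size table the new 'sps' pointer refers to is already
declared elsewhere in cpu.h; its entries look roughly like this (quoted as a
sketch, not part of the diff):

    struct ppc_one_page_size {
        uint32_t page_shift;  /* Page shift (or 0) */
        uint32_t pte_enc;     /* Encoding in the HPTE (>>12) */
    };

    struct ppc_one_seg_page_size {
        uint32_t page_shift;  /* Base page shift of segment (or 0) */
        uint32_t slb_enc;     /* SLB encoding for BE */
        struct ppc_one_page_size enc[PPC_PAGE_SIZES_MAX_SZ];
    };
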
> >diff --git a/target-ppc/machine.c b/target-ppc/machine.c
> >index b61c060..ca62d3e 100644
> >--- a/target-ppc/machine.c
> >+++ b/target-ppc/machine.c
> >@@ -2,6 +2,7 @@
> >  #include "hw/boards.h"
> >  #include "sysemu/kvm.h"
> >  #include "helper_regs.h"
> >+#include "mmu-hash64.h"
> >  static int cpu_load_old(QEMUFile *f, void *opaque, int version_id)
> >  {
> >@@ -352,11 +353,30 @@ static bool slb_needed(void *opaque)
> >      return (cpu->env.mmu_model & POWERPC_MMU_64);
> >  }
> >+static int slb_post_load(void *opaque, int version_id)
> >+{
> >+    PowerPCCPU *cpu = opaque;
> >+    CPUPPCState *env = &cpu->env;
> >+    int i;
> >+
> >+    /* We've pulled in the raw esid and vsid values from the migration
> >+     * stream, but we need to recompute the page size pointers */
> >+    for (i = 0; i < env->slb_nr; i++) {
> >+        if (ppc_store_slb(cpu, i, env->slb[i].esid, env->slb[i].vsid) < 0) {
> >+            /* Migration source had bad values in its SLB */
> >+            return -1;
> >+        }
> >+    }
> >+
> >+    return 0;
> >+}
> >+
> >  static const VMStateDescription vmstate_slb = {
> >      .name = "cpu/slb",
> >      .version_id = 1,
> >      .minimum_version_id = 1,
> >      .needed = slb_needed,
> >+    .post_load = slb_post_load,
> >      .fields = (VMStateField[]) {
> >          VMSTATE_INT32_EQUAL(env.slb_nr, PowerPCCPU),
> >          VMSTATE_SLB_ARRAY(env.slb, PowerPCCPU, MAX_SLB_ENTRIES),
> >diff --git a/target-ppc/mmu-hash64.c b/target-ppc/mmu-hash64.c
> >index 5a6d33b..28ad361 100644
> >--- a/target-ppc/mmu-hash64.c
> >+++ b/target-ppc/mmu-hash64.c
> >@@ -19,6 +19,7 @@
> >   */
> >  #include "cpu.h"
> >  #include "exec/helper-proto.h"
> >+#include "qemu/error-report.h"
> >  #include "sysemu/kvm.h"
> >  #include "kvm_ppc.h"
> >  #include "mmu-hash64.h"
> >@@ -140,6 +141,8 @@ int ppc_store_slb(PowerPCCPU *cpu, target_ulong slot,
> >  {
> >      CPUPPCState *env = &cpu->env;
> >      ppc_slb_t *slb = &env->slb[slot];
> >+    const struct ppc_one_seg_page_size *sps = NULL;
> >+    int i;
> >      if (slot >= env->slb_nr) {
> >          return -1; /* Bad slot number */
> >@@ -154,8 +157,29 @@ int ppc_store_slb(PowerPCCPU *cpu, target_ulong slot,
> >          return -1; /* 1T segment on MMU that doesn't support it */
> >      }
> >+    for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
> >+        const struct ppc_one_seg_page_size *sps1 = &env->sps.sps[i];
> >+
> >+        if (!sps1->page_shift) {
> >+            break;
> >+        }
> >+
> >+        if ((vsid & SLB_VSID_LLP_MASK) == sps1->slb_enc) {
> >+            sps = sps1;
> >+            break;
> >+        }
> >+    }
> >+
> >+    if (!sps) {
> >+        error_report("Bad page size encoding in SLB store: slot 
> >"TARGET_FMT_lu
> >+                     " esid 0x"TARGET_FMT_lx" vsid 0x"TARGET_FMT_lx,
> >+                     slot, esid, vsid);
> >+        return -1;
> >+    }
> >+
> >      slb->esid = esid;
> >      slb->vsid = vsid;
> >+    slb->sps = sps;
> >      LOG_SLB("%s: %d " TARGET_FMT_lx " - " TARGET_FMT_lx " => %016" PRIx64
> >              " %016" PRIx64 "\n", __func__, slot, esid, vsid,
> >@@ -394,24 +418,6 @@ static hwaddr ppc_hash64_pteg_search(PowerPCCPU *cpu, hwaddr hash,
> >      return -1;
> >  }
> >-static uint64_t ppc_hash64_page_shift(ppc_slb_t *slb)
> >-{
> >-    uint64_t epnshift;
> >-
> >-    /* Page size according to the SLB, which we use to generate the
> >-     * EPN for hash table lookup..  When we implement more recent MMU
> >-     * extensions this might be different from the actual page size
> >-     * encoded in the PTE */
> >-    if ((slb->vsid & SLB_VSID_LLP_MASK) == SLB_VSID_4K) {
> >-        epnshift = TARGET_PAGE_BITS;
> >-    } else if ((slb->vsid & SLB_VSID_LLP_MASK) == SLB_VSID_64K) {
> >-        epnshift = TARGET_PAGE_BITS_64K;
> >-    } else {
> >-        epnshift = TARGET_PAGE_BITS_16M;
> >-    }
> >-    return epnshift;
> >-}
> >-
> >  static hwaddr ppc_hash64_htab_lookup(PowerPCCPU *cpu,
> >                                       ppc_slb_t *slb, target_ulong eaddr,
> >                                       ppc_hash_pte64_t *pte)
> >@@ -419,21 +425,24 @@ static hwaddr ppc_hash64_htab_lookup(PowerPCCPU *cpu,
> >      CPUPPCState *env = &cpu->env;
> >      hwaddr pte_offset;
> >      hwaddr hash;
> >-    uint64_t vsid, epnshift, epnmask, epn, ptem;
> >+    uint64_t vsid, epnmask, epn, ptem;
> >+
> >+    /* The SLB store path should prevent any bad page size encodings
> >+     * getting in there, so: */
> >+    assert(slb->sps);
> >-    epnshift = ppc_hash64_page_shift(slb);
> >-    epnmask = ~((1ULL << epnshift) - 1);
> >+    epnmask = ~((1ULL << slb->sps->page_shift) - 1);
> >      if (slb->vsid & SLB_VSID_B) {
> >          /* 1TB segment */
> >          vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT_1T;
> >          epn = (eaddr & ~SEGMENT_MASK_1T) & epnmask;
> >-        hash = vsid ^ (vsid << 25) ^ (epn >> epnshift);
> >+        hash = vsid ^ (vsid << 25) ^ (epn >> slb->sps->page_shift);
> >      } else {
> >          /* 256M segment */
> >          vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT;
> >          epn = (eaddr & ~SEGMENT_MASK_256M) & epnmask;
> >-        hash = vsid ^ (epn >> epnshift);
> >+        hash = vsid ^ (epn >> slb->sps->page_shift);
> >      }
> >      ptem = (slb->vsid & SLB_VSID_PTEM) | ((epn >> 16) & HPTE64_V_AVPN);
> >@@ -465,17 +474,12 @@ static hwaddr ppc_hash64_htab_lookup(PowerPCCPU *cpu,
> >      return pte_offset;
> >  }
> >-static hwaddr ppc_hash64_pte_raddr(ppc_slb_t *slb, ppc_hash_pte64_t pte,
> >+static hwaddr ppc_hash64_pte_raddr(unsigned page_shift, ppc_hash_pte64_t pte,
> >                                     target_ulong eaddr)
> >  {
> >-    hwaddr mask;
> >-    int target_page_bits;
> >+    hwaddr mask = (1ULL << page_shift) - 1;
> >      hwaddr rpn = pte.pte1 & HPTE64_R_RPN;
> >-    /*
> >-     * We support 4K, 64K and 16M now
> >-     */
> >-    target_page_bits = ppc_hash64_page_shift(slb);
> >-    mask = (1ULL << target_page_bits) - 1;
> >+
> >      return (rpn & ~mask) | (eaddr & mask);
> >  }
> 
> Isn't this basically deposit64 by now?

True.  I'll remove ppc_hash64_pte_raddr() entirely and use deposit64
instead.
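
Something like this at the call sites, assuming the page shift is to hand
there as 'apshift' (just a sketch):

    /* equivalent to the old (rpn & ~mask) | (eaddr & mask): take the RPN
     * from the PTE and deposit the in-page offset bits from eaddr */
    raddr = deposit64(pte.pte1 & HPTE64_R_RPN, 0, apshift, eaddr);

deposit64() comes from "qemu/bitops.h", so this also drops the hand-rolled
mask arithmetic.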

-- 
David Gibson                    | I'll have my music baroque, and my code
david AT gibson.dropbear.id.au  | minimalist, thank you.  NOT _the_ _other_
                                | _way_ _around_!
http://www.ozlabs.org/~dgibson

Attachment: signature.asc
Description: PGP signature

