From: Benjamin Herrenschmidt
Subject: [Qemu-ppc] [PATCH 1/3] ppc/hash64: Various fixes in PTE search in the hash table
Date: Mon, 4 Jul 2016 17:44:09 +1000

We need to properly ignore PTEs whose base page size encoding does not
match the segment, since Linux can leave such entries behind in the hash
table (for instance when demoting segments). This also has the advantage
of speeding things up: instead of searching the whole SPS array we only
search the encodings for the base page size selected by the SLB entry.
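
To illustrate the matching scheme, here is a minimal standalone sketch
(not part of the patch). The struct and macro names and the pte_enc
values are simplified stand-ins for QEMU's ppc_one_seg_page_size /
ppc_one_page_size and HPTE64_* definitions: a large PTE is accepted only
if its low RPN bits match one of the size encodings listed for the
segment's base page size, and is skipped otherwise.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for QEMU's ppc_one_seg_page_size /
 * ppc_one_page_size and the HPTE64_* constants (names and the
 * pte_enc values below are made up for this sketch). */
#define MAX_ENC          8
#define PTE_V_LARGE      0x0000000000000004ULL
#define PTE_R_RPN        0x0ffffffffffff000ULL
#define PTE_R_RPN_SHIFT  12

struct page_size_enc {
    unsigned page_shift;     /* actual page size for this encoding, 0 = unused */
    uint64_t pte_enc;        /* value encoded in the low RPN bits of pte1 */
};

struct seg_page_size {
    unsigned page_shift;     /* base page size of the segment */
    struct page_size_enc enc[MAX_ENC];
};

/* Return the actual page shift encoded in (pte0, pte1) for a segment
 * whose base page size is described by sps, or 0 when the PTE was
 * inserted with a different base page size and must be skipped. */
static unsigned decode_psize(const struct seg_page_size *sps,
                             uint64_t pte0, uint64_t pte1)
{
    if (!(pte0 & PTE_V_LARGE)) {
        /* A 4K PTE only matches a 4K base segment */
        return sps->page_shift == 12 ? 12 : 0;
    }
    for (int i = 0; i < MAX_ENC && sps->enc[i].page_shift; i++) {
        const struct page_size_enc *ps = &sps->enc[i];

        if (ps->page_shift == 12) {
            continue;            /* L bit is set, cannot be a 4K page */
        }
        /* The size encoding lives in the low bits of the real page number */
        uint64_t mask = ((1ULL << ps->page_shift) - 1) & PTE_R_RPN;

        if ((pte1 & mask) == (ps->pte_enc << PTE_R_RPN_SHIFT)) {
            return ps->page_shift;
        }
    }
    return 0;                    /* wrong base size: caller keeps searching */
}

int main(void)
{
    /* A 64K base segment that also allows 16M actual pages; the
     * pte_enc values are placeholders, not architectural ones. */
    struct seg_page_size seg64k = {
        .page_shift = 16,
        .enc = { { 16, 0x1 }, { 24, 0x8 } },
    };
    uint64_t pte0 = PTE_V_LARGE;
    uint64_t pte1 = 0x1ULL << PTE_R_RPN_SHIFT;   /* low RPN bits encode 64K */

    printf("actual page shift = %u\n", decode_psize(&seg64k, pte0, pte1));
    return 0;
}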

This includes some fixes by Cédric Le Goater <address@hidden>

Signed-off-by: Benjamin Herrenschmidt <address@hidden>
---
 hw/ppc/spapr_hcall.c    |   8 +--
 target-ppc/mmu-hash64.c | 172 +++++++++++++++++++-----------------------------
 target-ppc/mmu-hash64.h |   5 +-
 3 files changed, 75 insertions(+), 110 deletions(-)

diff --git a/hw/ppc/spapr_hcall.c b/hw/ppc/spapr_hcall.c
index e011ed4..fe05078 100644
--- a/hw/ppc/spapr_hcall.c
+++ b/hw/ppc/spapr_hcall.c
@@ -83,18 +83,18 @@ static target_ulong h_enter(PowerPCCPU *cpu, sPAPRMachineState *spapr,
     target_ulong pte_index = args[1];
     target_ulong pteh = args[2];
     target_ulong ptel = args[3];
-    unsigned apshift, spshift;
+    unsigned pshift;
     target_ulong raddr;
     target_ulong index;
     uint64_t token;
 
-    apshift = ppc_hash64_hpte_page_shift_noslb(cpu, pteh, ptel, &spshift);
-    if (!apshift) {
+    pshift = ppc_hash64_hpte_page_shift_noslb(cpu, pteh, ptel);
+    if (!pshift) {
         /* Bad page size encoding */
         return H_PARAMETER;
     }
 
-    raddr = (ptel & HPTE64_R_RPN) & ~((1ULL << apshift) - 1);
+    raddr = (ptel & HPTE64_R_RPN) & ~((1ULL << pshift) - 1);
 
     if (is_ram_address(spapr, raddr)) {
         /* Regular RAM - should have WIMG=0010 */
diff --git a/target-ppc/mmu-hash64.c b/target-ppc/mmu-hash64.c
index 3b1357a..8a39295 100644
--- a/target-ppc/mmu-hash64.c
+++ b/target-ppc/mmu-hash64.c
@@ -450,37 +450,51 @@ void ppc_hash64_stop_access(PowerPCCPU *cpu, uint64_t token)
     }
 }
 
-/* Returns the effective page shift or 0. MPSS isn't supported yet so
- * this will always be the slb_pshift or 0
- */
-static uint32_t ppc_hash64_pte_size_decode(uint64_t pte1, uint32_t slb_pshift)
+
+static unsigned hpte_decode_psize(const struct ppc_one_seg_page_size *sps,
+                                  uint64_t pte0, uint64_t pte1)
 {
-    switch (slb_pshift) {
-    case 12:
-        return 12;
-    case 16:
-        if ((pte1 & 0xf000) == 0x1000) {
-            return 16;
+    int i;
+
+    /* A 4K PTE is only valid on a 4K segment */
+    if (!(pte0 & HPTE64_V_LARGE)) {
+        return sps->page_shift == 12 ? 12 : 0;
+    }
+
+    /* Compare the PTE with the various encodings for the
+     * segment base page size.
+     */
+    for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
+        const struct ppc_one_page_size *ps = &sps->enc[i];
+        uint64_t mask;
+
+        if (!ps->page_shift) {
+            break;
         }
-        return 0;
-    case 24:
-        if ((pte1 & 0xff000) == 0) {
-            return 24;
+
+        if (ps->page_shift == 12) {
+            /* L bit is set so this can't be a 4kiB page */
+            continue;
+        }
+
+        mask = ((1ULL << ps->page_shift) - 1) & HPTE64_R_RPN;
+
+        if ((pte1 & mask) == (ps->pte_enc << HPTE64_R_RPN_SHIFT)) {
+            return ps->page_shift;
         }
-        return 0;
     }
     return 0;
 }
 
-static hwaddr ppc_hash64_pteg_search(PowerPCCPU *cpu, hwaddr hash,
-                                     uint32_t slb_pshift, bool secondary,
-                                     target_ulong ptem, ppc_hash_pte64_t *pte)
+static target_long ppc_hash64_pteg_search(PowerPCCPU *cpu, hwaddr hash,
+                                          ppc_slb_t *slb, target_ulong ptem,
+                                          ppc_hash_pte64_t *pte,
+                                          unsigned *pshift)
 {
     CPUPPCState *env = &cpu->env;
-    int i;
+    target_ulong pte_index, pte0, pte1;
     uint64_t token;
-    target_ulong pte0, pte1;
-    target_ulong pte_index;
+    int i;
 
     pte_index = (hash & env->htab_mask) * HPTES_PER_GROUP;
     token = ppc_hash64_start_access(cpu, pte_index);
@@ -489,18 +503,25 @@ static hwaddr ppc_hash64_pteg_search(PowerPCCPU *cpu, hwaddr hash,
     }
     for (i = 0; i < HPTES_PER_GROUP; i++) {
         pte0 = ppc_hash64_load_hpte0(cpu, token, i);
-        pte1 = ppc_hash64_load_hpte1(cpu, token, i);
 
-        if ((pte0 & HPTE64_V_VALID)
-            && (secondary == !!(pte0 & HPTE64_V_SECONDARY))
-            && HPTE64_V_COMPARE(pte0, ptem)) {
-            uint32_t pshift = ppc_hash64_pte_size_decode(pte1, slb_pshift);
-            if (pshift == 0) {
+        /* This compares V, B, H (secondary) and the AVPN */
+        if (HPTE64_V_COMPARE(pte0, ptem)) {
+
+            /* Load the rest of the PTE */
+            pte1 = ppc_hash64_load_hpte1(cpu, token, i);
+
+            /* Decode the actual page size */
+            *pshift = hpte_decode_psize(slb->sps, pte0, pte1);
+
+            /* If there is no match, ignore the PTE, it could simply be
+             * for a different segment size encoding and the architecture
+             * specifies we should not match. Linux will potentially leave
+             * behind PTEs for the wrong base page size when demoting
+             * segments.
+             */
+            if ((*pshift) == 0) {
                 continue;
             }
-            /* We don't do anything with pshift yet as qemu TLB only deals
-             * with 4K pages anyway
-             */
             pte->pte0 = pte0;
             pte->pte1 = pte1;
             ppc_hash64_stop_access(cpu, token);
@@ -508,6 +529,7 @@ static hwaddr ppc_hash64_pteg_search(PowerPCCPU *cpu, hwaddr hash,
         }
     }
     ppc_hash64_stop_access(cpu, token);
+
     /*
      * We didn't find a valid entry.
      */
@@ -516,7 +538,7 @@ static hwaddr ppc_hash64_pteg_search(PowerPCCPU *cpu, hwaddr hash,
 
 static hwaddr ppc_hash64_htab_lookup(PowerPCCPU *cpu,
                                      ppc_slb_t *slb, target_ulong eaddr,
-                                     ppc_hash_pte64_t *pte)
+                                     ppc_hash_pte64_t *pte, unsigned *pshift)
 {
     CPUPPCState *env = &cpu->env;
     hwaddr pte_offset;
@@ -541,6 +563,7 @@ static hwaddr ppc_hash64_htab_lookup(PowerPCCPU *cpu,
         hash = vsid ^ (epn >> slb->sps->page_shift);
     }
     ptem = (slb->vsid & SLB_VSID_PTEM) | ((epn >> 16) & HPTE64_V_AVPN);
+    ptem |= HPTE64_V_VALID;
 
     /* Page address translation */
     qemu_log_mask(CPU_LOG_MMU,
@@ -554,70 +577,33 @@ static hwaddr ppc_hash64_htab_lookup(PowerPCCPU *cpu,
             " vsid=" TARGET_FMT_lx " ptem=" TARGET_FMT_lx
             " hash=" TARGET_FMT_plx "\n",
             env->htab_base, env->htab_mask, vsid, ptem,  hash);
-    pte_offset = ppc_hash64_pteg_search(cpu, hash, slb->sps->page_shift,
-                                        0, ptem, pte);
+    pte_offset = ppc_hash64_pteg_search(cpu, hash, slb,
+                                        ptem, pte, pshift);
 
     if (pte_offset == -1) {
         /* Secondary PTEG lookup */
+        ptem |= HPTE64_V_SECONDARY;
         qemu_log_mask(CPU_LOG_MMU,
                 "1 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx
                 " vsid=" TARGET_FMT_lx " api=" TARGET_FMT_lx
                 " hash=" TARGET_FMT_plx "\n", env->htab_base,
                 env->htab_mask, vsid, ptem, ~hash);
 
-        pte_offset = ppc_hash64_pteg_search(cpu, ~hash, slb->sps->page_shift, 1,
-                                            ptem, pte);
+        pte_offset = ppc_hash64_pteg_search(cpu, ~hash, slb,
+                                            ptem, pte, pshift);
     }
 
     return pte_offset;
 }
 
-static unsigned hpte_page_shift(const struct ppc_one_seg_page_size *sps,
-    uint64_t pte0, uint64_t pte1)
-{
-    int i;
-
-    if (!(pte0 & HPTE64_V_LARGE)) {
-        if (sps->page_shift != 12) {
-            /* 4kiB page in a non 4kiB segment */
-            return 0;
-        }
-        /* Normal 4kiB page */
-        return 12;
-    }
-
-    for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
-        const struct ppc_one_page_size *ps = &sps->enc[i];
-        uint64_t mask;
-
-        if (!ps->page_shift) {
-            break;
-        }
-
-        if (ps->page_shift == 12) {
-            /* L bit is set so this can't be a 4kiB page */
-            continue;
-        }
-
-        mask = ((1ULL << ps->page_shift) - 1) & HPTE64_R_RPN;
-
-        if ((pte1 & mask) == (ps->pte_enc << HPTE64_R_RPN_SHIFT)) {
-            return ps->page_shift;
-        }
-    }
-
-    return 0; /* Bad page size encoding */
-}
-
+/* This is used by the H_ENTER implementation in hw/ppc/spapr_hcall.c */
 unsigned ppc_hash64_hpte_page_shift_noslb(PowerPCCPU *cpu,
-                                          uint64_t pte0, uint64_t pte1,
-                                          unsigned *seg_page_shift)
+                                          uint64_t pte0, uint64_t pte1)
 {
     CPUPPCState *env = &cpu->env;
     int i;
 
     if (!(pte0 & HPTE64_V_LARGE)) {
-        *seg_page_shift = 12;
         return 12;
     }
 
@@ -633,14 +619,11 @@ unsigned ppc_hash64_hpte_page_shift_noslb(PowerPCCPU *cpu,
             break;
         }
 
-        shift = hpte_page_shift(sps, pte0, pte1);
+        shift = hpte_decode_psize(sps, pte0, pte1);
         if (shift) {
-            *seg_page_shift = sps->page_shift;
             return shift;
         }
     }
-
-    *seg_page_shift = 0;
     return 0;
 }
 
@@ -691,7 +674,7 @@ int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr,
     CPUState *cs = CPU(cpu);
     CPUPPCState *env = &cpu->env;
     ppc_slb_t *slb;
-    unsigned apshift;
+    unsigned pshift;
     hwaddr pte_offset;
     ppc_hash_pte64_t pte;
     int pp_prot, amr_prot, prot;
@@ -734,7 +717,7 @@ int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr,
     }
 
     /* 4. Locate the PTE in the hash table */
-    pte_offset = ppc_hash64_htab_lookup(cpu, slb, eaddr, &pte);
+    pte_offset = ppc_hash64_htab_lookup(cpu, slb, eaddr, &pte, &pshift);
     if (pte_offset == -1) {
         dsisr = 0x40000000;
         if (rwx == 2) {
@@ -750,18 +733,6 @@ int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr,
     qemu_log_mask(CPU_LOG_MMU,
                 "found PTE at offset %08" HWADDR_PRIx "\n", pte_offset);
 
-    /* Validate page size encoding */
-    apshift = hpte_page_shift(slb->sps, pte.pte0, pte.pte1);
-    if (!apshift) {
-        error_report("Bad page size encoding in HPTE 0x%"PRIx64" - 0x%"PRIx64
-                     " @ 0x%"HWADDR_PRIx, pte.pte0, pte.pte1, pte_offset);
-        /* Not entirely sure what the right action here, but machine
-         * check seems reasonable */
-        cs->exception_index = POWERPC_EXCP_MCHECK;
-        env->error_code = 0;
-        return 1;
-    }
-
     /* 5. Check access permissions */
 
     pp_prot = ppc_hash64_pte_prot(cpu, slb, pte);
@@ -809,10 +780,10 @@ int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr,
 
     /* 7. Determine the real address from the PTE */
 
-    raddr = deposit64(pte.pte1 & HPTE64_R_RPN, 0, apshift, eaddr);
+    raddr = deposit64(pte.pte1 & HPTE64_R_RPN, 0, pshift, eaddr);
 
     tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
-                 prot, mmu_idx, 1ULL << apshift);
+                 prot, mmu_idx, 1ULL << pshift);
 
     return 0;
 }
@@ -823,7 +794,7 @@ hwaddr ppc_hash64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong addr)
     ppc_slb_t *slb;
     hwaddr pte_offset;
     ppc_hash_pte64_t pte;
-    unsigned apshift;
+    unsigned pshift;
 
     if (msr_dr == 0) {
         /* In real mode the top 4 effective address bits are ignored */
@@ -835,17 +806,12 @@ hwaddr ppc_hash64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong addr)
         return -1;
     }
 
-    pte_offset = ppc_hash64_htab_lookup(cpu, slb, addr, &pte);
+    pte_offset = ppc_hash64_htab_lookup(cpu, slb, addr, &pte, &pshift);
     if (pte_offset == -1) {
         return -1;
     }
 
-    apshift = hpte_page_shift(slb->sps, pte.pte0, pte.pte1);
-    if (!apshift) {
-        return -1;
-    }
-
-    return deposit64(pte.pte1 & HPTE64_R_RPN, 0, apshift, addr)
+    return deposit64(pte.pte1 & HPTE64_R_RPN, 0, pshift, addr)
         & TARGET_PAGE_MASK;
 }
 
diff --git a/target-ppc/mmu-hash64.h b/target-ppc/mmu-hash64.h
index 6423b9f..154a306 100644
--- a/target-ppc/mmu-hash64.h
+++ b/target-ppc/mmu-hash64.h
@@ -17,8 +17,7 @@ void ppc_hash64_tlb_flush_hpte(PowerPCCPU *cpu,
                                target_ulong pte_index,
                                target_ulong pte0, target_ulong pte1);
 unsigned ppc_hash64_hpte_page_shift_noslb(PowerPCCPU *cpu,
-                                          uint64_t pte0, uint64_t pte1,
-                                          unsigned *seg_page_shift);
+                                          uint64_t pte0, uint64_t pte1);
 #endif
 
 /*
@@ -63,7 +62,7 @@ unsigned ppc_hash64_hpte_page_shift_noslb(PowerPCCPU *cpu,
 #define HPTE64_V_AVPN_SHIFT     7
 #define HPTE64_V_AVPN           0x3fffffffffffff80ULL
 #define HPTE64_V_AVPN_VAL(x)    (((x) & HPTE64_V_AVPN) >> HPTE64_V_AVPN_SHIFT)
-#define HPTE64_V_COMPARE(x, y)  (!(((x) ^ (y)) & 0xffffffffffffff80ULL))
+#define HPTE64_V_COMPARE(x, y)  (!(((x) ^ (y)) & 0xffffffffffffff83ULL))
 #define HPTE64_V_LARGE          0x0000000000000004ULL
 #define HPTE64_V_SECONDARY      0x0000000000000002ULL
 #define HPTE64_V_VALID          0x0000000000000001ULL
-- 
2.7.4