Check if each page dir/table base address is properly aligned and
log a guest error if not, as real hardware behaves incorrectly in
this case.
These checks are only performed when DEBUG_MMU is defined, to avoid
hurting the performance.
Signed-off-by: Leandro Lupori <leandro.lupori@eldorado.org.br>
---
target/ppc/mmu-radix64.c | 21 +++++++++++++++++++++
1 file changed, 21 insertions(+)
diff --git a/target/ppc/mmu-radix64.c b/target/ppc/mmu-radix64.c
index 2f0bcbfe2e..80d945a7c3 100644
--- a/target/ppc/mmu-radix64.c
+++ b/target/ppc/mmu-radix64.c
@@ -28,6 +28,8 @@
#include "mmu-radix64.h"
#include "mmu-book3s-v3.h"
+/* #define DEBUG_MMU */
+
static bool ppc_radix64_get_fully_qualified_addr(const CPUPPCState *env,
vaddr eaddr,
                                                 uint64_t *lpid, uint64_t *pid)
@@ -277,6 +279,16 @@ static int ppc_radix64_next_level(AddressSpace *as, vaddr eaddr,
if (!(pde & R_PTE_LEAF)) { /* Prepare for next iteration */
++*level;
*nls = pde & R_PDE_NLS;
+
+#ifdef DEBUG_MMU
+        if ((pde & R_PDE_NLB) & MAKE_64BIT_MASK(0, *nls + 3)) {
+            qemu_log_mask(LOG_GUEST_ERROR,
+                "%s: misaligned page dir/table base: 0x%"PRIx64
+                " page dir size: 0x%"PRIx64" level: %d\n",
+                __func__, (pde & R_PDE_NLB), BIT_ULL(*nls + 3), *level);
+        }
+#endif