[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[PATCH v4 5/7] Convert 'info mem' to use generic iterator
From: Don Porter
Subject: [PATCH v4 5/7] Convert 'info mem' to use generic iterator
Date: Mon, 22 Jul 2024 21:05:43 -0400
In the case of nested paging, change the output slightly
to show both the guest's and host's view.
For example:
(qemu) info mem
Info guest mem (guest virtual to guest physical mappings):
0000008000800000-000000800085c000 000000000005c000 ur-
0000008000a00000-0000008000a10000 0000000000010000 ur-
0000008003fa8000-0000008003fb8000 0000000000010000 -rw
0000008003fc0000-0000008003fd0000 0000000000010000 -rw
0000008003fd8000-0000008003fe8000 0000000000010000 -rw
0000008003ff0000-0000008005000000 0000000001010000 -rw
Info host mem (guest physical to host physical mappings):
0000000000001000-000000000000f000 000000000000e000 -xwr
00000000000b8000-00000000000b9000 0000000000001000 -xwr
0000000000100000-0000000000108000 0000000000008000 -xwr
0000000000200000-00000000007c6000 00000000005c6000 -xwr
Signed-off-by: Don Porter <porter@cs.unc.edu>
---
include/hw/core/sysemu-cpu-ops.h | 6 +
target/i386/cpu.c | 1 +
target/i386/cpu.h | 1 +
target/i386/monitor.c | 387 +++++++------------------------
4 files changed, 95 insertions(+), 300 deletions(-)
diff --git a/include/hw/core/sysemu-cpu-ops.h b/include/hw/core/sysemu-cpu-ops.h
index 083df4717c..f8b71fb60d 100644
--- a/include/hw/core/sysemu-cpu-ops.h
+++ b/include/hw/core/sysemu-cpu-ops.h
@@ -232,6 +232,12 @@ typedef struct SysemuCPUOps {
void (*mon_print_pte) (CPUState *cs, GString *buf, hwaddr addr,
hwaddr pte, uint64_t prot, int mmu_idx);
+ /**
+ * @mon_print_mem: Hook called by the monitor to print a range
+ * of memory mappings in 'info mem'
+ */
+ bool (*mon_print_mem)(CPUState *cs, struct mem_print_state *state);
+
} SysemuCPUOps;
int compressing_iterator(CPUState *cs, void *data, DecodedPTE *pte,
diff --git a/target/i386/cpu.c b/target/i386/cpu.c
index 030198497a..f9ca2cddd3 100644
--- a/target/i386/cpu.c
+++ b/target/i386/cpu.c
@@ -8389,6 +8389,7 @@ static const struct SysemuCPUOps i386_sysemu_ops = {
.mon_info_pg_print_header = &x86_mon_info_pg_print_header,
.mon_flush_page_print_state = &x86_mon_flush_print_pg_state,
.mon_print_pte = &x86_mon_print_pte,
+ .mon_print_mem = &x86_mon_print_mem,
};
#endif
diff --git a/target/i386/cpu.h b/target/i386/cpu.h
index 413c743c1a..da565bb7da 100644
--- a/target/i386/cpu.h
+++ b/target/i386/cpu.h
@@ -2266,6 +2266,7 @@ void x86_mon_info_pg_print_header(struct mem_print_state *state);
bool x86_mon_flush_print_pg_state(CPUState *cs, struct mem_print_state *state);
void x86_mon_print_pte(CPUState *cs, GString *out_buf, hwaddr addr,
hwaddr child, uint64_t prot, int mmu_idx);
+bool x86_mon_print_mem(CPUState *cs, struct mem_print_state *state);
bool x86_ptw_translate(CPUState *cs, vaddr vaddress, hwaddr *hpa,
bool debug, int mmu_idx, bool user_access,
const MMUAccessType access_type, uint64_t *page_size,
diff --git a/target/i386/monitor.c b/target/i386/monitor.c
index d88347684b..318f9b7ca2 100644
--- a/target/i386/monitor.c
+++ b/target/i386/monitor.c
@@ -319,331 +319,118 @@ void hmp_info_tlb(Monitor *mon, const QDict *qdict)
}
}
-static void mem_print(Monitor *mon, CPUArchState *env,
- hwaddr *pstart, int *plast_prot,
- hwaddr end, int prot)
+bool x86_mon_print_mem(CPUState *cs, struct mem_print_state *state)
{
- int prot1;
- prot1 = *plast_prot;
- if (prot != prot1) {
- if (*pstart != -1) {
- monitor_printf(mon, HWADDR_FMT_plx "-" HWADDR_FMT_plx " "
- HWADDR_FMT_plx " %c%c%c\n",
- addr_canonical(env, *pstart),
- addr_canonical(env, end),
- addr_canonical(env, end - *pstart),
- prot1 & PG_USER_MASK ? 'u' : '-',
- 'r',
- prot1 & PG_RW_MASK ? 'w' : '-');
- }
- if (prot != 0)
- *pstart = end;
- else
- *pstart = -1;
- *plast_prot = prot;
- }
-}
+ CPUArchState *env = state->env;
+ int i = 0;
-static void mem_info_32(Monitor *mon, CPUArchState *env)
-{
- unsigned int l1, l2;
- int prot, last_prot;
- uint32_t pgd, pde, pte;
- hwaddr start, end;
-
- pgd = env->cr[3] & ~0xfff;
- last_prot = 0;
- start = -1;
- for(l1 = 0; l1 < 1024; l1++) {
- cpu_physical_memory_read(pgd + l1 * 4, &pde, 4);
- pde = le32_to_cpu(pde);
- end = l1 << 22;
- if (pde & PG_PRESENT_MASK) {
- if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
- prot = pde & (PG_USER_MASK | PG_RW_MASK | PG_PRESENT_MASK);
- mem_print(mon, env, &start, &last_prot, end, prot);
- } else {
- for(l2 = 0; l2 < 1024; l2++) {
- cpu_physical_memory_read((pde & ~0xfff) + l2 * 4, &pte, 4);
- pte = le32_to_cpu(pte);
- end = (l1 << 22) + (l2 << 12);
- if (pte & PG_PRESENT_MASK) {
- prot = pte & pde &
- (PG_USER_MASK | PG_RW_MASK | PG_PRESENT_MASK);
- } else {
- prot = 0;
- }
- mem_print(mon, env, &start, &last_prot, end, prot);
- }
- }
- } else {
- prot = 0;
- mem_print(mon, env, &start, &last_prot, end, prot);
+ /* We need to figure out the lowest populated level */
+ for ( ; i < state->max_height; i++) {
+ if (state->vstart[i] != -1) {
+ break;
}
}
- /* Flush last range */
- mem_print(mon, env, &start, &last_prot, (hwaddr)1 << 32, 0);
-}
-static void mem_info_pae32(Monitor *mon, CPUArchState *env)
-{
- unsigned int l1, l2, l3;
- int prot, last_prot;
- uint64_t pdpe, pde, pte;
- uint64_t pdp_addr, pd_addr, pt_addr;
- hwaddr start, end;
-
- pdp_addr = env->cr[3] & ~0x1f;
- last_prot = 0;
- start = -1;
- for (l1 = 0; l1 < 4; l1++) {
- cpu_physical_memory_read(pdp_addr + l1 * 8, &pdpe, 8);
- pdpe = le64_to_cpu(pdpe);
- end = l1 << 30;
- if (pdpe & PG_PRESENT_MASK) {
- pd_addr = pdpe & 0x3fffffffff000ULL;
- for (l2 = 0; l2 < 512; l2++) {
- cpu_physical_memory_read(pd_addr + l2 * 8, &pde, 8);
- pde = le64_to_cpu(pde);
- end = (l1 << 30) + (l2 << 21);
- if (pde & PG_PRESENT_MASK) {
- if (pde & PG_PSE_MASK) {
- prot = pde & (PG_USER_MASK | PG_RW_MASK |
- PG_PRESENT_MASK);
- mem_print(mon, env, &start, &last_prot, end, prot);
- } else {
- pt_addr = pde & 0x3fffffffff000ULL;
- for (l3 = 0; l3 < 512; l3++) {
- cpu_physical_memory_read(pt_addr + l3 * 8, &pte, 8);
- pte = le64_to_cpu(pte);
- end = (l1 << 30) + (l2 << 21) + (l3 << 12);
- if (pte & PG_PRESENT_MASK) {
- prot = pte & pde & (PG_USER_MASK | PG_RW_MASK |
- PG_PRESENT_MASK);
- } else {
- prot = 0;
- }
- mem_print(mon, env, &start, &last_prot, end, prot);
- }
- }
- } else {
- prot = 0;
- mem_print(mon, env, &start, &last_prot, end, prot);
- }
- }
- } else {
- prot = 0;
- mem_print(mon, env, &start, &last_prot, end, prot);
- }
+ hwaddr vstart = state->vstart[i];
+ hwaddr end = state->vend[i] + state->pg_size[i];
+ int prot = state->prot[i];
+
+ if (state->mmu_idx == 0
+ || (state->mmu_idx == 1 && env->vm_state_valid
+ && env->nested_pg_format == 1)) {
+
+ g_string_append_printf(state->buf, HWADDR_FMT_plx "-" HWADDR_FMT_plx " "
+ HWADDR_FMT_plx " %c%c%c\n",
+ addr_canonical(env, vstart),
+ addr_canonical(env, end),
+ addr_canonical(env, end - vstart),
+ prot & PG_USER_MASK ? 'u' : '-',
+ 'r',
+ prot & PG_RW_MASK ? 'w' : '-');
+ return true;
+ } else if (state->mmu_idx == 1) {
+ g_string_append_printf(state->buf, HWADDR_FMT_plx "-" HWADDR_FMT_plx " "
+ HWADDR_FMT_plx " %c%c%c%c\n",
+ addr_canonical(env, vstart),
+ addr_canonical(env, end),
+ addr_canonical(env, end - vstart),
+ prot & PG_EPT_X_USER_MASK ? 'u' : '-',
+ prot & PG_EPT_X_SUPER_MASK ? 'x' : '-',
+ prot & PG_EPT_W_MASK ? 'w' : '-',
+ prot & PG_EPT_R_MASK ? 'r' : '-');
+
+ return true;
+ } else {
+ return false;
}
- /* Flush last range */
- mem_print(mon, env, &start, &last_prot, (hwaddr)1 << 32, 0);
-}
-#ifdef TARGET_X86_64
-static void mem_info_la48(Monitor *mon, CPUArchState *env)
-{
- int prot, last_prot;
- uint64_t l1, l2, l3, l4;
- uint64_t pml4e, pdpe, pde, pte;
- uint64_t pml4_addr, pdp_addr, pd_addr, pt_addr, start, end;
-
- pml4_addr = env->cr[3] & 0x3fffffffff000ULL;
- last_prot = 0;
- start = -1;
- for (l1 = 0; l1 < 512; l1++) {
- cpu_physical_memory_read(pml4_addr + l1 * 8, &pml4e, 8);
- pml4e = le64_to_cpu(pml4e);
- end = l1 << 39;
- if (pml4e & PG_PRESENT_MASK) {
- pdp_addr = pml4e & 0x3fffffffff000ULL;
- for (l2 = 0; l2 < 512; l2++) {
- cpu_physical_memory_read(pdp_addr + l2 * 8, &pdpe, 8);
- pdpe = le64_to_cpu(pdpe);
- end = (l1 << 39) + (l2 << 30);
- if (pdpe & PG_PRESENT_MASK) {
- if (pdpe & PG_PSE_MASK) {
- prot = pdpe & (PG_USER_MASK | PG_RW_MASK |
- PG_PRESENT_MASK);
- prot &= pml4e;
- mem_print(mon, env, &start, &last_prot, end, prot);
- } else {
- pd_addr = pdpe & 0x3fffffffff000ULL;
- for (l3 = 0; l3 < 512; l3++) {
- cpu_physical_memory_read(pd_addr + l3 * 8, &pde, 8);
- pde = le64_to_cpu(pde);
- end = (l1 << 39) + (l2 << 30) + (l3 << 21);
- if (pde & PG_PRESENT_MASK) {
- if (pde & PG_PSE_MASK) {
- prot = pde & (PG_USER_MASK | PG_RW_MASK |
- PG_PRESENT_MASK);
- prot &= pml4e & pdpe;
- mem_print(mon, env, &start,
- &last_prot, end, prot);
- } else {
- pt_addr = pde & 0x3fffffffff000ULL;
- for (l4 = 0; l4 < 512; l4++) {
- cpu_physical_memory_read(pt_addr
- + l4 * 8,
- &pte, 8);
- pte = le64_to_cpu(pte);
- end = (l1 << 39) + (l2 << 30) +
- (l3 << 21) + (l4 << 12);
- if (pte & PG_PRESENT_MASK) {
- prot = pte & (PG_USER_MASK | PG_RW_MASK |
- PG_PRESENT_MASK);
- prot &= pml4e & pdpe & pde;
- } else {
- prot = 0;
- }
- mem_print(mon, env, &start,
- &last_prot, end, prot);
- }
- }
- } else {
- prot = 0;
- mem_print(mon, env, &start,
- &last_prot, end, prot);
- }
- }
- }
- } else {
- prot = 0;
- mem_print(mon, env, &start, &last_prot, end, prot);
- }
- }
- } else {
- prot = 0;
- mem_print(mon, env, &start, &last_prot, end, prot);
- }
- }
- /* Flush last range */
- mem_print(mon, env, &start, &last_prot, (hwaddr)1 << 48, 0);
}
-static void mem_info_la57(Monitor *mon, CPUArchState *env)
+static
+void helper_hmp_info_mem(CPUState *cs, Monitor *mon, int mmu_idx)
{
- int prot, last_prot;
- uint64_t l0, l1, l2, l3, l4;
- uint64_t pml5e, pml4e, pdpe, pde, pte;
- uint64_t pml5_addr, pml4_addr, pdp_addr, pd_addr, pt_addr, start, end;
-
- pml5_addr = env->cr[3] & 0x3fffffffff000ULL;
- last_prot = 0;
- start = -1;
- for (l0 = 0; l0 < 512; l0++) {
- cpu_physical_memory_read(pml5_addr + l0 * 8, &pml5e, 8);
- pml5e = le64_to_cpu(pml5e);
- end = l0 << 48;
- if (!(pml5e & PG_PRESENT_MASK)) {
- prot = 0;
- mem_print(mon, env, &start, &last_prot, end, prot);
- continue;
- }
+ struct mem_print_state state;
+ g_autoptr(GString) buf = g_string_new("");
- pml4_addr = pml5e & 0x3fffffffff000ULL;
- for (l1 = 0; l1 < 512; l1++) {
- cpu_physical_memory_read(pml4_addr + l1 * 8, &pml4e, 8);
- pml4e = le64_to_cpu(pml4e);
- end = (l0 << 48) + (l1 << 39);
- if (!(pml4e & PG_PRESENT_MASK)) {
- prot = 0;
- mem_print(mon, env, &start, &last_prot, end, prot);
- continue;
- }
+ CPUClass *cc = CPU_GET_CLASS(cs);
- pdp_addr = pml4e & 0x3fffffffff000ULL;
- for (l2 = 0; l2 < 512; l2++) {
- cpu_physical_memory_read(pdp_addr + l2 * 8, &pdpe, 8);
- pdpe = le64_to_cpu(pdpe);
- end = (l0 << 48) + (l1 << 39) + (l2 << 30);
- if (pdpe & PG_PRESENT_MASK) {
- prot = 0;
- mem_print(mon, env, &start, &last_prot, end, prot);
- continue;
- }
-
- if (pdpe & PG_PSE_MASK) {
- prot = pdpe & (PG_USER_MASK | PG_RW_MASK |
- PG_PRESENT_MASK);
- prot &= pml5e & pml4e;
- mem_print(mon, env, &start, &last_prot, end, prot);
- continue;
- }
-
- pd_addr = pdpe & 0x3fffffffff000ULL;
- for (l3 = 0; l3 < 512; l3++) {
- cpu_physical_memory_read(pd_addr + l3 * 8, &pde, 8);
- pde = le64_to_cpu(pde);
- end = (l0 << 48) + (l1 << 39) + (l2 << 30) + (l3 << 21);
- if (pde & PG_PRESENT_MASK) {
- prot = 0;
- mem_print(mon, env, &start, &last_prot, end, prot);
- continue;
- }
-
- if (pde & PG_PSE_MASK) {
- prot = pde & (PG_USER_MASK | PG_RW_MASK |
- PG_PRESENT_MASK);
- prot &= pml5e & pml4e & pdpe;
- mem_print(mon, env, &start, &last_prot, end, prot);
- continue;
- }
-
- pt_addr = pde & 0x3fffffffff000ULL;
- for (l4 = 0; l4 < 512; l4++) {
- cpu_physical_memory_read(pt_addr + l4 * 8, &pte, 8);
- pte = le64_to_cpu(pte);
- end = (l0 << 48) + (l1 << 39) + (l2 << 30) +
- (l3 << 21) + (l4 << 12);
- if (pte & PG_PRESENT_MASK) {
- prot = pte & (PG_USER_MASK | PG_RW_MASK |
- PG_PRESENT_MASK);
- prot &= pml5e & pml4e & pdpe & pde;
- } else {
- prot = 0;
- }
- mem_print(mon, env, &start, &last_prot, end, prot);
- }
- }
- }
- }
+ if (!cc->sysemu_ops->mon_init_page_table_iterator(cs, buf, mmu_idx,
+ &state)) {
+ monitor_printf(mon, "Unable to initialize page table iterator\n");
+ return;
}
- /* Flush last range */
- mem_print(mon, env, &start, &last_prot, (hwaddr)1 << 57, 0);
+
+ state.flusher = cc->sysemu_ops->mon_print_mem;
+
+ /**
+ * We must visit interior entries to update prot
+ */
+ for_each_pte(cs, &compressing_iterator, &state, true, false, false,
+ mmu_idx);
+
+ /* Flush the last entry, if needed */
+ cc->sysemu_ops->mon_print_mem(cs, &state);
+
+ monitor_printf(mon, "%s", buf->str);
}
-#endif /* TARGET_X86_64 */
void hmp_info_mem(Monitor *mon, const QDict *qdict)
{
- CPUArchState *env;
+ CPUState *cs = mon_get_cpu(mon);
+ bool nested;
- env = mon_get_cpu_env(mon);
- if (!env) {
- monitor_printf(mon, "No CPU available\n");
+ if (!cs) {
+ monitor_printf(mon, "Unable to get CPUState. Internal error\n");
return;
}
- if (!(env->cr[0] & CR0_PG_MASK)) {
+ if (!cpu_paging_enabled(cs, 0)) {
monitor_printf(mon, "PG disabled\n");
return;
}
- if (env->cr[4] & CR4_PAE_MASK) {
-#ifdef TARGET_X86_64
- if (env->hflags & HF_LMA_MASK) {
- if (env->cr[4] & CR4_LA57_MASK) {
- mem_info_la57(mon, env);
- } else {
- mem_info_la48(mon, env);
- }
- } else
-#endif
- {
- mem_info_pae32(mon, env);
- }
- } else {
- mem_info_32(mon, env);
+
+ CPUClass *cc = CPU_GET_CLASS(cs);
+
+ if (!cc->sysemu_ops->mon_print_mem
+ || !cc->sysemu_ops->mon_init_page_table_iterator) {
+ monitor_printf(mon, "Info mem unsupported on this ISA\n");
+ }
+
+ nested = cpu_paging_enabled(cs, 1);
+
+ if (nested) {
+ monitor_printf(mon,
+ "Info guest mem (guest virtual to guest physical mappings):\n");
+ }
+
+ helper_hmp_info_mem(cs, mon, 0);
+
+ if (nested) {
+ monitor_printf(mon,
+ "Info host mem (guest physical to host physical mappings):\n");
+
+ helper_hmp_info_mem(cs, mon, 1);
}
}
--
2.34.1
- [PATCH v4 0/7] Rework x86 page table walks, Don Porter, 2024/07/22
- [PATCH v4 1/7] Code motion: expose some TCG definitions for page table walk consolidation., Don Porter, 2024/07/22
- [PATCH v4 2/7] Import vmcs12 definition from Linux/KVM, Don Porter, 2024/07/22
- [PATCH v4 4/7] Convert 'info tlb' to use generic iterator., Don Porter, 2024/07/22
- [PATCH v4 5/7] Convert 'info mem' to use generic iterator, Don Porter <=
- [PATCH v4 3/7] Add an "info pg" command that prints the current page tables, Don Porter, 2024/07/22
- [PATCH v4 6/7] Convert x86_cpu_get_memory_mapping() to use generic iterators, Don Porter, 2024/07/22
- [PATCH v4 7/7] Convert x86_mmu_translate() to use common code., Don Porter, 2024/07/22
- Re: [PATCH v4 0/7] Rework x86 page table walks, Richard Henderson, 2024/07/23