[Top][All Lists]
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[Qemu-devel] [PATCH 22/40] xenner: kernel: mmu support for 64-bit
From: Alexander Graf
Subject: [Qemu-devel] [PATCH 22/40] xenner: kernel: mmu support for 64-bit
Date: Mon, 1 Nov 2010 16:01:35 +0100
This patch adds support for memory management on 64 bit systems.
Signed-off-by: Alexander Graf <address@hidden>
---
pc-bios/xenner/xenner-mm64.c | 369 ++++++++++++++++++++++++++++++++++++++++++
1 files changed, 369 insertions(+), 0 deletions(-)
create mode 100644 pc-bios/xenner/xenner-mm64.c
diff --git a/pc-bios/xenner/xenner-mm64.c b/pc-bios/xenner/xenner-mm64.c
new file mode 100644
index 0000000..89cb076
--- /dev/null
+++ b/pc-bios/xenner/xenner-mm64.c
@@ -0,0 +1,369 @@
+/*
+ * Copyright (C) Red Hat 2007
+ * Copyright (C) Novell Inc. 2010
+ *
+ * Author(s): Gerd Hoffmann <address@hidden>
+ * Alexander Graf <address@hidden>
+ *
+ * Xenner memory management for 64 bit mode
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; under version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <inttypes.h>
+#include <xen/xen.h>
+
+#include "xenner.h"
+#include "xenner-mm.c"
+
+/* --------------------------------------------------------------------- */
+
+/*
+ * Translate an emulator virtual address into a physical address.
+ * Only the xenner RAM window and the M2P window are translatable;
+ * any other address is fatal.
+ * NOTE(review): the region mask here (0xfffffff000000000) differs
+ * from the one used by is_pse() (0xffffff8000000000) — confirm both
+ * are wide enough for the XEN_*_64 window layout.
+ */
+uintptr_t emu_pa(uintptr_t va)
+{
+    uintptr_t region = va & 0xfffffff000000000;
+
+    if (region == XEN_RAM_64) {
+        return va - (uintptr_t)_vstart;
+    }
+    if (region == XEN_M2P_64) {
+        return va - XEN_M2P_64 + frame_to_addr(vmconf.mfn_m2p);
+    }
+
+    panic("unknown address", NULL);
+    return 0;
+}
+
+/* --------------------------------------------------------------------- */
+
+/*
+ * Render x86 page table entry flags as a human-readable string.
+ * Returns a pointer to a static buffer (not reentrant); the string
+ * always ends with a newline.
+ */
+static char *print_pgflags(uint32_t flags)
+{
+    static const struct {
+        uint32_t bit;
+        const char *name;
+    } names[] = {
+        { _PAGE_GLOBAL,   " global"   },
+        { _PAGE_PSE,      " pse"      },
+        { _PAGE_DIRTY,    " dirty"    },
+        { _PAGE_ACCESSED, " accessed" },
+        { _PAGE_PCD,      " pcd"      },
+        { _PAGE_PWT,      " pwt"      },
+        { _PAGE_USER,     " user"     },
+        { _PAGE_RW,       " write"    },
+        { _PAGE_PRESENT,  " present"  },
+    };
+    static char buf[80];
+    size_t pos = 0;
+    unsigned int i;
+
+    for (i = 0; i < sizeof(names)/sizeof(names[0]); i++) {
+        if (flags & names[i].bit) {
+            pos += snprintf(buf + pos, sizeof(buf) - pos, "%s",
+                            names[i].name);
+        }
+    }
+    snprintf(buf + pos, sizeof(buf) - pos, "\n");
+    return buf;
+}
+
+/*
+ * Debug helper: dump the translation of va through the page table
+ * rooted at root_mfn, one printk line per level (pgd/pud/pmd/pte).
+ * Stops at the first non-present entry, and at the pmd for 2MB (PSE)
+ * mappings, which have no pte level.  Output is suppressed unless the
+ * configured debug_level is at least 'level'.
+ */
+void pgtable_walk(int level, uint64_t va, uint64_t root_mfn)
+{
+    /* all machine memory is mapped 1:1 at XEN_RAM_64 */
+    void *physmem = (void*)XEN_RAM_64;
+    uint64_t *pgd, *pud, *pmd, *pte;
+    uint64_t mfn;
+    uint32_t slot, flags;
+
+    if (vmconf.debug_level < level)
+        return;
+
+    printk(level, "page table walk for va %" PRIx64 ", root_mfn %" PRIx64 "\n",
+           va, root_mfn);
+
+    /* level 4: pgd */
+    pgd = physmem + frame_to_addr(root_mfn);
+    slot = PGD_INDEX_64(va);
+    mfn = get_pgframe_64(pgd[slot]);
+    flags = get_pgflags_64(pgd[slot]);
+    printk(level, "pgd   : %p +%3d | mfn %4" PRIx64 " | %s",
+           pgd, slot, mfn, print_pgflags(flags));
+    if (!(flags & _PAGE_PRESENT))
+        return;
+
+    /* level 3: pud */
+    pud = physmem + frame_to_addr(mfn);
+    slot = PUD_INDEX_64(va);
+    mfn = get_pgframe_64(pud[slot]);
+    flags = get_pgflags_64(pud[slot]);
+    printk(level, " pud  : %p +%3d | mfn %4" PRIx64 " | %s",
+           pud, slot, mfn, print_pgflags(flags));
+    if (!(flags & _PAGE_PRESENT))
+        return;
+
+    /* level 2: pmd (may be a 2MB leaf) */
+    pmd = physmem + frame_to_addr(mfn);
+    slot = PMD_INDEX_64(va);
+    mfn = get_pgframe_64(pmd[slot]);
+    flags = get_pgflags_64(pmd[slot]);
+    printk(level, "  pmd : %p +%3d | mfn %4" PRIx64 " | %s",
+           pmd, slot, mfn, print_pgflags(flags));
+    if (!(flags & _PAGE_PRESENT))
+        return;
+    if (flags & _PAGE_PSE)
+        return;    /* 2MB page: no pte level to walk */
+
+    /* level 1: pte */
+    pte = physmem + frame_to_addr(mfn);
+    slot = PTE_INDEX_64(va);
+    mfn = get_pgframe_64(pte[slot]);
+    flags = get_pgflags_64(pte[slot]);
+    printk(level, "   pte: %p +%3d | mfn %4" PRIx64 " | %s",
+           pte, slot, mfn, print_pgflags(flags));
+}
+
+/*
+ * True when va lies in a region that xenner maps with 2MB (PSE)
+ * pages — the RAM and M2P windows — and therefore has no pte level.
+ */
+static int is_pse(uint64_t va)
+{
+    uint64_t region = va & 0xffffff8000000000ULL;
+
+    return (region == XEN_RAM_64) || (region == XEN_M2P_64);
+}
+
+/*
+ * Ensure 'flag' (e.g. _PAGE_ACCESSED or _PAGE_DIRTY) is set on the
+ * page table entries mapping va, in the address space currently
+ * loaded on 'cpu'.  Returns the number of entries that were actually
+ * modified; the TLB entry for va is flushed when anything changed.
+ */
+int pgtable_fixup_flag(struct xen_cpu *cpu, uint64_t va, uint32_t flag)
+{
+    void *physmem = (void*)XEN_RAM_64;
+    uint64_t *pgd, *pud, *pmd, *pte;
+    uint32_t slot;
+    int fixes = 0;
+
+    /* quick test on the leaf page via linear page table when we're sure
+       we're not touching a 2mb page which doesn't have a pte */
+    pte = find_pte_64(va);
+    if (!is_pse(va) && !test_pgflag_64(*pte, flag)) {
+        *pte |= flag;
+        fixes++;
+        goto done;
+    }
+
+    /* do full page table walk */
+    pgd = physmem + frame_to_addr(read_cr3_mfn(cpu));
+    slot = PGD_INDEX_64(va);
+    if (!test_pgflag_64(pgd[slot], flag)) {
+        pgd[slot] |= flag;
+        fixes++;
+    }
+
+    pud = physmem + frame_to_addr(get_pgframe_64(pgd[slot]));
+    slot = PUD_INDEX_64(va);
+    if (!test_pgflag_64(pud[slot], flag)) {
+        pud[slot] |= flag;
+        fixes++;
+    }
+
+    /* the walk stops at the pmd: for PSE regions the pmd is the leaf,
+       for non-PSE addresses the pte was handled by the fast path above */
+    pmd = physmem + frame_to_addr(get_pgframe_64(pud[slot]));
+    slot = PMD_INDEX_64(va);
+    if (!test_pgflag_64(pmd[slot], flag)) {
+        pmd[slot] |= flag;
+        fixes++;
+    }
+
+done:
+    if (fixes)
+        flush_tlb_addr(va);
+    return fixes;
+}
+
+/*
+ * Walk the page table rooted at root_mfn and report whether va is
+ * mapped: returns 1 if present, 0 otherwise.  A present 2MB (PSE)
+ * pmd entry is a leaf and counts as mapped; otherwise the pte level
+ * decides.
+ *
+ * Fixes vs. the previous version: the PSE test was inverted (a
+ * non-PSE pmd returned 1 without ever consulting the pte, while a
+ * PSE leaf frame was dereferenced as if it were a pte table), and
+ * the final check re-tested pmd[slot] instead of pte[slot].
+ */
+int pgtable_is_present(uint64_t va, uint64_t root_mfn)
+{
+    void *physmem = (void*)XEN_RAM_64;
+    uint64_t *pgd, *pud, *pmd, *pte;
+    uint32_t slot;
+
+    pgd = physmem + frame_to_addr(root_mfn);
+    slot = PGD_INDEX_64(va);
+    if (!test_pgflag_64(pgd[slot], _PAGE_PRESENT)) {
+        return 0;
+    }
+
+    pud = physmem + frame_to_addr(get_pgframe_64(pgd[slot]));
+    slot = PUD_INDEX_64(va);
+    if (!test_pgflag_64(pud[slot], _PAGE_PRESENT)) {
+        return 0;
+    }
+
+    pmd = physmem + frame_to_addr(get_pgframe_64(pud[slot]));
+    slot = PMD_INDEX_64(va);
+    if (!test_pgflag_64(pmd[slot], _PAGE_PRESENT)) {
+        return 0;
+    }
+    if (test_pgflag_64(pmd[slot], _PAGE_PSE)) {
+        /* 2MB page: the pmd entry is the leaf, no pte level exists */
+        return 1;
+    }
+
+    pte = physmem + frame_to_addr(get_pgframe_64(pmd[slot]));
+    slot = PTE_INDEX_64(va);
+    if (!test_pgflag_64(pte[slot], _PAGE_PRESENT)) {
+        return 0;
+    }
+
+    return 1;
+}
+
+/* --------------------------------------------------------------------- */
+
+/*
+ * Return a virtual pointer for machine address maddr.  All machine
+ * memory is mapped 1:1 at XEN_RAM_64, so this is a simple offset.
+ */
+void *map_page(uint64_t maddr)
+{
+    return (void*)XEN_RAM_64 + maddr;
+}
+
+/*
+ * Look up the pte for va through the linear (recursive) page table
+ * mapping installed at XEN_LPT_64.  Only the low 48 (canonical)
+ * address bits select the slot.
+ */
+uint64_t *find_pte_64(uint64_t va)
+{
+    uint64_t slot = (va & 0xffffffffffff) >> PAGE_SHIFT;
+
+    return (uint64_t*)XEN_LPT_64 + slot;
+}
+
+/*
+ * After the guest loads a new cr3, propagate the emulator's own pgd
+ * entries (the slots between XEN_M2P_64 and XEN_DOM_64) into the new
+ * top-level page, then (re)install the linear page table slot so it
+ * points back at the new pgd itself.
+ */
+void update_emu_mappings(uint64_t cr3_mfn)
+{
+    uint64_t *new_pgd;
+    int idx;
+
+    new_pgd = map_page(frame_to_addr(cr3_mfn));
+
+    idx = PGD_INDEX_64(XEN_M2P_64);
+    for (; idx < PGD_INDEX_64(XEN_DOM_64); idx++) {
+        /* skip slots the guest already populated, slots we have no
+           entry for, and the LPT slot (rewritten below anyway) */
+        if ((test_pgflag_64(new_pgd[idx], _PAGE_PRESENT)) ||
+            (!test_pgflag_64(emu_pgd[idx], _PAGE_PRESENT)) ||
+            (idx == PGD_INDEX_64(XEN_LPT_64))) {
+            continue;
+        }
+
+        new_pgd[idx] = emu_pgd[idx];
+    }
+
+    /* linear pgtable mapping */
+    idx = PGD_INDEX_64(XEN_LPT_64);
+    new_pgd[idx] = get_pgentry_64(cr3_mfn, LPT_PGFLAGS);
+}
+
+/*
+ * Return a pointer to the pgd entry for va within the pgd page at
+ * mfn.  When 'alloc' is set and the entry is not present, a fresh
+ * pud page is allocated and installed (non-global).  When 'sync' is
+ * also set, a newly created entry is mirrored into the emulator boot
+ * pgd so both trees stay consistent.
+ */
+static inline uint64_t *find_pgd(uint64_t va, uint64_t mfn, int alloc, int sync)
+{
+    void *physmem = (void*)XEN_RAM_64;
+    uint64_t *pgd, *pud, idx;
+
+    pgd = physmem + frame_to_addr(mfn);
+    idx = PGD_INDEX_64(va);
+    pgd += idx;
+    if (!test_pgflag_64(*pgd, _PAGE_PRESENT) && alloc) {
+        pud = get_pages(1, "pud");
+        *pgd = get_pgentry_64(EMU_MFN(pud), PGT_PGFLAGS_64) & ~_PAGE_GLOBAL;
+        if (sync && !test_pgflag_64(emu_pgd[idx], _PAGE_PRESENT)) {
+            /* sync emu boot pgd */
+            emu_pgd[idx] = *pgd;
+        }
+    }
+    return pgd;
+}
+
+/*
+ * Return a pointer to the pud entry for va within the pud page at
+ * mfn.  When 'alloc' is set and the entry is not present, a fresh
+ * pmd page is allocated and installed (non-global).
+ */
+static inline uint64_t *find_pud(uint64_t va, uint64_t mfn, int alloc)
+{
+    uint64_t *table = (void*)XEN_RAM_64 + frame_to_addr(mfn);
+    uint64_t *entry = table + PUD_INDEX_64(va);
+
+    if (alloc && !test_pgflag_64(*entry, _PAGE_PRESENT)) {
+        uint64_t *pmd = get_pages(1, "pmd");
+        *entry = get_pgentry_64(EMU_MFN(pmd), PGT_PGFLAGS_64) & ~_PAGE_GLOBAL;
+    }
+    return entry;
+}
+
+/*
+ * Return a pointer to the pmd entry for va within the pmd page at
+ * mfn.  When 'alloc' is set and the entry is not present, a fresh
+ * pte page is allocated and installed.
+ */
+static inline uint64_t *find_pmd(uint64_t va, uint64_t mfn, int alloc)
+{
+    uint64_t *table = (void*)XEN_RAM_64 + frame_to_addr(mfn);
+    uint64_t *entry = table + PMD_INDEX_64(va);
+
+    if (alloc && !test_pgflag_64(*entry, _PAGE_PRESENT)) {
+        uint64_t *pte = get_pages(1, "pte");
+        *entry = get_pgentry_64(EMU_MFN(pte), PGT_PGFLAGS_64);
+    }
+    return entry;
+}
+
+/*
+ * Return a pointer to the pte entry for va within the pte page at
+ * mfn.  Never allocates; the pmd level must already be populated.
+ */
+static inline uint64_t *find_pte(uint64_t va, uint64_t mfn)
+{
+    uint64_t *table = (void*)XEN_RAM_64 + frame_to_addr(mfn);
+
+    return table + PTE_INDEX_64(va);
+}
+
+/*
+ * Map 'count' machine frames starting at frame 'start' to va_start
+ * using 2MB (PSE) pmd entries.  Missing pgd/pud levels are allocated
+ * on the fly; the pmd lookup uses alloc=0 because the entry itself is
+ * overwritten as a leaf right after.  Always returns 0.
+ */
+static int map_region_pse(struct xen_cpu *cpu, uint64_t va_start,
+                          uint32_t flags, uint64_t start, uint64_t count)
+{
+    uint64_t *pgd;
+    uint64_t *pud;
+    uint64_t *pmd;
+    uint64_t va;
+    uint64_t mfn;
+
+    flags |= _PAGE_PSE;
+    /* one pmd entry covers PMD_COUNT_64 frames (2MB) */
+    for (mfn = start; mfn < (start + count); mfn += PMD_COUNT_64) {
+        va = va_start + frame_to_addr(mfn-start);
+
+        pgd = find_pgd(va, read_cr3_mfn(cpu), 1, 1);
+        pud = find_pud(va, get_pgframe_64(*pgd), 1);
+        pmd = find_pmd(va, get_pgframe_64(*pud), 0);
+        *pmd = get_pgentry_64(mfn, flags);
+    }
+    return 0;
+}
+
+/*
+ * Enter a single 4k mapping va -> maddr into the current address
+ * space of 'cpu', allocating missing intermediate levels.  If va was
+ * covered by a 2MB (PSE) mapping, that mapping is dropped and
+ * replaced by a freshly allocated pte page.  With 'sync' set, a
+ * newly created pgd entry is mirrored into the emulator boot pgd.
+ */
+static void map_one_page(struct xen_cpu *cpu, uint64_t va, uint64_t maddr,
+                         int flags, int sync)
+{
+    uint64_t mfn = addr_to_frame(maddr);
+    uint64_t *pgd;
+    uint64_t *pud;
+    uint64_t *pmd;
+    uint64_t *pte;
+
+    pgd = find_pgd(va, read_cr3_mfn(cpu), 1, sync);
+    pud = find_pud(va, get_pgframe_64(*pgd), 1);
+    pmd = find_pmd(va, get_pgframe_64(*pud), 1);
+    if (*pmd & _PAGE_PSE) {
+        /* split: clear the 2MB leaf so find_pmd allocates a pte page */
+        *pmd = 0;
+        pmd = find_pmd(va, get_pgframe_64(*pud), 1);
+    }
+    pte = find_pte(va, get_pgframe_64(*pmd));
+    *pte = get_pgentry_64(mfn, flags);
+}
+
+/*
+ * Map 'count' machine frames starting at frame 'start' to va with
+ * 4k pages, one pte per page.
+ */
+void map_region(struct xen_cpu *cpu, uint64_t va, uint32_t flags,
+                uint64_t start, uint64_t count)
+{
+    uint64_t base = frame_to_addr(start);
+    uint64_t len = frame_to_addr(count);
+    uint64_t offset;
+
+    for (offset = 0; offset < len; offset += PAGE_SIZE) {
+        map_one_page(cpu, va + offset, base + offset, flags, 0);
+    }
+}
+
+/*
+ * Map the page containing maddr at the next free fixmap slot and
+ * return a pointer to the byte corresponding to maddr.  Slots are
+ * handed out sequentially and never reused, so mappings are permanent.
+ * NOTE(review): fixmap_slot is not bounds-checked against the size of
+ * the XEN_MAP_64 window and the counter is not SMP-safe — confirm
+ * this is only called during single-threaded setup.
+ */
+void *fixmap_page(struct xen_cpu *cpu, uint64_t maddr)
+{
+    static int fixmap_slot = 0;
+    uint32_t off = addr_offset(maddr);
+    uint64_t va;
+
+    va = XEN_MAP_64 + PAGE_SIZE * fixmap_slot++;
+    map_one_page(cpu, va, maddr, EMU_PGFLAGS, 1);
+
+    return (void*)va + off;
+}
+
+/*
+ * Build the initial 64-bit address space: map all guest RAM with 2MB
+ * pages at XEN_RAM_64 and the machine-to-physical table at
+ * XEN_M2P_64, then point the global m2p pointer at the latter.
+ */
+void paging_init(struct xen_cpu *cpu)
+{
+    map_region_pse(cpu, XEN_RAM_64, EMU_PGFLAGS,     0,              vmconf.pg_total);
+    map_region_pse(cpu, XEN_M2P_64, M2P_PGFLAGS_64,  vmconf.mfn_m2p, vmconf.pg_m2p);
+    m2p = (void*)XEN_M2P_64;
+}
--
1.6.0.2
- Re: [Qemu-devel] Re: [PATCH 28/40] xenner: libxc emu: evtchn, (continued)
[Qemu-devel] [PATCH 29/40] xenner: libxc emu: grant tables, Alexander Graf, 2010/11/01
[Qemu-devel] [PATCH 17/40] xenner: kernel: Main (x86_64), Alexander Graf, 2010/11/01
[Qemu-devel] [PATCH 01/40] elf: Move translate_fn to helper struct, Alexander Graf, 2010/11/01
[Qemu-devel] [PATCH 13/40] xenner: kernel: Headers, Alexander Graf, 2010/11/01
[Qemu-devel] [PATCH 12/40] xenner: kernel: Hypercall handler (generic), Alexander Graf, 2010/11/01
[Qemu-devel] [PATCH 20/40] xenner: kernel: mmu support for 32-bit PAE, Alexander Graf, 2010/11/01
[Qemu-devel] [PATCH 22/40] xenner: kernel: mmu support for 64-bit,
Alexander Graf <=
[Qemu-devel] [PATCH 15/40] xenner: kernel: lapic code, Alexander Graf, 2010/11/01
[Qemu-devel] [PATCH 27/40] xenner: add xc_dom.h, Alexander Graf, 2010/11/01
[Qemu-devel] [PATCH 34/40] xenner: PV machine, Alexander Graf, 2010/11/01
[Qemu-devel] [PATCH 32/40] xenner: emudev, Alexander Graf, 2010/11/01
[Qemu-devel] [PATCH 18/40] xenner: kernel: Main, Alexander Graf, 2010/11/01
[Qemu-devel] [PATCH 21/40] xenner: kernel: mmu support for 32-bit normal, Alexander Graf, 2010/11/01
[Qemu-devel] [PATCH 23/40] xenner: kernel: generic MM functionality, Alexander Graf, 2010/11/01
Re: [Qemu-devel] [PATCH 00/40] RFC: Xenner, Alexander Graf, 2010/11/01
[Qemu-devel] [PATCH 36/40] xen: only create dummy env when necessary, Alexander Graf, 2010/11/02
[Qemu-devel] [PATCH 38/40] xenner: integrate into build system, Alexander Graf, 2010/11/02