From: David Gibson
Subject: [Qemu-devel] RFC: target-ppc savevm fixup, cleanup and conversion to VMState
Date: Fri, 21 Sep 2012 14:03:03 +1000

Hi Alex, Juan,

I know Juan has an outstanding patch that converts target-ppc to use
the new VMStateDescription approach for savevm at

http://lists.gnu.org/archive/html/qemu-devel/2012-05/msg00532.html

Before I noticed that patch was out there, I had done my own
conversion (patch below) using a different approach.  Juan's patch
preserves the existing save format, simply converting it to the new
description format.  However, the existing save format is rather
broken: it fails to save a bunch of newer state that is needed on some
CPUs, and it redundantly saves some information that can be more
elegantly reconstructed from other saved data.

My patch, instead, leaves the old loadvm function intact for the old
format and introduces a new save format (version 5), after reexamining
exactly what needs saving and when.  Note that I don't consider this v5
format finalized yet; I'm hoping Alex can audit the patch below and
spot anything I've forgotten.  I think I've covered most of the state,
although I know I still need to handle the BATs (I'm not sure how to
properly describe a multidimensional array in the VMStateDescription,
preferably with one dimension variable).
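
For the BATs specifically, one possible stopgap (a sketch only, not part
of the patch below) would be to save each 8-entry bank row in full and
merely sanity-check nb_BATs, reusing the same subsection pattern the
patch already uses for the FPU, AltiVec and SLB state.  A genuinely
variable inner dimension would need a new varray-style macro in the
savevm infrastructure.  The names below are mine, and this assumes
nb_BATs is narrowed to int32_t the same way the patch narrows slb_nr
and nb_tlb:

    static bool bats_needed(void *opaque)
    {
        CPUPPCState *env = opaque;

        return env->nb_BATs != 0;
    }

    static const VMStateDescription vmstate_bats = {
        .name = "cpu/bats",
        .version_id = 1,
        .minimum_version_id = 1,
        .minimum_version_id_old = 1,
        .fields      = (VMStateField []) {
            VMSTATE_INT32_EQUAL(nb_BATs, CPUPPCState),
            /* Always save the full 8-entry banks; entries beyond
             * nb_BATs are simply unused on the destination. */
            VMSTATE_UINTTL_ARRAY(DBAT[0], CPUPPCState, 8),
            VMSTATE_UINTTL_ARRAY(DBAT[1], CPUPPCState, 8),
            VMSTATE_UINTTL_ARRAY(IBAT[0], CPUPPCState, 8),
            VMSTATE_UINTTL_ARRAY(IBAT[1], CPUPPCState, 8),
            VMSTATE_END_OF_LIST()
        },
    };

wired into the vmstate_cpu subsection list alongside the others, with
bats_needed as its .needed hook.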

It's possible, of course, to arrange for both the old and new formats
to be described by the one VMStateDescription.  Because the new format
is reworked quite a bit, though, that would lead to a *lot* of ugly
version conditionals in the description.
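
For illustration only (these field lines are mine, not from the patch),
keeping both layouts in a single description would mean version-tagging
more or less every field and adding field_exists() callbacks for
anything the v5 layout drops or reshapes, for example:

    VMSTATE_UINT32_V(cr, CPUPPCState, 5),       /* single CR image, v5 on */
    VMSTATE_UINT32_ARRAY(crf, CPUPPCState, 8),  /* v4 layout; would need a
                                                 * field_exists() check so it
                                                 * is skipped for v5 images */

and so on for most of the ~40 values the old cpu_save() emitted.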

So, comments please, on where to go next with this.

Note that the patch below does depend on some (straightforward)
extensions to the savevm infrastructure, so it may not compile as is.

From 43b4c47a6f04a24f2938120cdbee19c142711628 Mon Sep 17 00:00:00 2001
From: David Gibson <address@hidden>
Date: Mon, 10 Sep 2012 14:43:51 +1000
Subject: [PATCH] target-ppc: Convert ppc cpu savevm to VMStateDescription

The savevm code for the powerpc cpu emulation is currently based around
the old register_savevm() interface rather than vmstate_register().  It's also
rather broken, missing some important state on some CPU models.

This patch completely rewrites the savevm for target-ppc, using the new
VMStateDescription approach.  Exactly what needs to be saved in what
configurations has been more carefully examined, too.  This introduces a
new version (5) of the cpu save format.  The old load function is retained
to support version 4 images.

Signed-off-by: David Gibson <address@hidden>

Conflicts:
        target-ppc/machine.c
---
 target-ppc/cpu.h     |    7 +-
 target-ppc/machine.c |  517 +++++++++++++++++++++++++++++++++++++++++---------
 2 files changed, 435 insertions(+), 89 deletions(-)

diff --git a/target-ppc/cpu.h b/target-ppc/cpu.h
index b4c6c71..8f8ef98 100644
--- a/target-ppc/cpu.h
+++ b/target-ppc/cpu.h
@@ -980,7 +980,7 @@ struct CPUPPCState {
     target_ulong asr;
     /* PowerPC 64 SLB area */
     ppc_slb_t slb[64];
-    int slb_nr;
+    int32_t slb_nr;
 #endif
     /* segment registers */
     target_phys_addr_t htab_base;
@@ -993,7 +993,7 @@ struct CPUPPCState {
     target_ulong DBAT[2][8];
     target_ulong IBAT[2][8];
     /* PowerPC TLB registers (for 4xx, e500 and 60x software driven TLBs) */
-    int nb_tlb;      /* Total number of TLB                                  */
+    int32_t nb_tlb;      /* Total number of TLB                              */
     int tlb_per_way; /* Speed-up helper: used to avoid divisions at run time */
     int nb_ways;     /* Number of ways in the TLB set                        */
     int last_way;    /* Last used way used to allocate TLB in a LRU way      */
@@ -1010,6 +1010,7 @@ struct CPUPPCState {
     /* Other registers */
     /* Special purpose registers */
     target_ulong spr[1024];
+    uint32_t cr; /* Full CR value used during vmsave/load */
     ppc_spr_t spr_cb[1024];
     /* Altivec registers */
     ppc_avr_t avr[32];
@@ -1238,7 +1239,7 @@ static inline CPUPPCState *cpu_init(const char *cpu_model)
 #define cpu_signal_handler cpu_ppc_signal_handler
 #define cpu_list ppc_cpu_list
 
-#define CPU_SAVE_VERSION 4
+#define CPU_SAVE_VERSION 5
 
 /* MMU modes definitions */
 #define MMU_MODE0_SUFFIX _user
diff --git a/target-ppc/machine.c b/target-ppc/machine.c
index 5e7bc00..c379f56 100644
--- a/target-ppc/machine.c
+++ b/target-ppc/machine.c
@@ -1,93 +1,9 @@
 #include "hw/hw.h"
 #include "hw/boards.h"
 #include "kvm.h"
+#include "helper_regs.h"
 
-void cpu_save(QEMUFile *f, void *opaque)
-{
-    CPUPPCState *env = (CPUPPCState *)opaque;
-    unsigned int i, j;
-    uint32_t fpscr;
-
-    for (i = 0; i < 32; i++)
-        qemu_put_betls(f, &env->gpr[i]);
-#if !defined(TARGET_PPC64)
-    for (i = 0; i < 32; i++)
-        qemu_put_betls(f, &env->gprh[i]);
-#endif
-    qemu_put_betls(f, &env->lr);
-    qemu_put_betls(f, &env->ctr);
-    for (i = 0; i < 8; i++)
-        qemu_put_be32s(f, &env->crf[i]);
-    qemu_put_betls(f, &env->xer);
-    qemu_put_betls(f, &env->reserve_addr);
-    qemu_put_betls(f, &env->msr);
-    for (i = 0; i < 4; i++)
-        qemu_put_betls(f, &env->tgpr[i]);
-    for (i = 0; i < 32; i++) {
-        union {
-            float64 d;
-            uint64_t l;
-        } u;
-        u.d = env->fpr[i];
-        qemu_put_be64(f, u.l);
-    }
-    fpscr = env->fpscr;
-    qemu_put_be32s(f, &fpscr);
-    qemu_put_sbe32s(f, &env->access_type);
-#if defined(TARGET_PPC64)
-    qemu_put_betls(f, &env->asr);
-    qemu_put_sbe32s(f, &env->slb_nr);
-#endif
-    qemu_put_betls(f, &env->spr[SPR_SDR1]);
-    for (i = 0; i < 32; i++)
-        qemu_put_betls(f, &env->sr[i]);
-    for (i = 0; i < 2; i++)
-        for (j = 0; j < 8; j++)
-            qemu_put_betls(f, &env->DBAT[i][j]);
-    for (i = 0; i < 2; i++)
-        for (j = 0; j < 8; j++)
-            qemu_put_betls(f, &env->IBAT[i][j]);
-    qemu_put_sbe32s(f, &env->nb_tlb);
-    qemu_put_sbe32s(f, &env->tlb_per_way);
-    qemu_put_sbe32s(f, &env->nb_ways);
-    qemu_put_sbe32s(f, &env->last_way);
-    qemu_put_sbe32s(f, &env->id_tlbs);
-    qemu_put_sbe32s(f, &env->nb_pids);
-    if (env->tlb.tlb6) {
-        // XXX assumes 6xx
-        for (i = 0; i < env->nb_tlb; i++) {
-            qemu_put_betls(f, &env->tlb.tlb6[i].pte0);
-            qemu_put_betls(f, &env->tlb.tlb6[i].pte1);
-            qemu_put_betls(f, &env->tlb.tlb6[i].EPN);
-        }
-    }
-    for (i = 0; i < 4; i++)
-        qemu_put_betls(f, &env->pb[i]);
-    for (i = 0; i < 1024; i++)
-        qemu_put_betls(f, &env->spr[i]);
-    qemu_put_be32s(f, &env->vscr);
-    qemu_put_be64s(f, &env->spe_acc);
-    qemu_put_be32s(f, &env->spe_fscr);
-    qemu_put_betls(f, &env->msr_mask);
-    qemu_put_be32s(f, &env->flags);
-    qemu_put_sbe32s(f, &env->error_code);
-    qemu_put_be32s(f, &env->pending_interrupts);
-    qemu_put_be32s(f, &env->irq_input_state);
-    for (i = 0; i < POWERPC_EXCP_NB; i++)
-        qemu_put_betls(f, &env->excp_vectors[i]);
-    qemu_put_betls(f, &env->excp_prefix);
-    qemu_put_betls(f, &env->hreset_excp_prefix);
-    qemu_put_betls(f, &env->ivor_mask);
-    qemu_put_betls(f, &env->ivpr_mask);
-    qemu_put_betls(f, &env->hreset_vector);
-    qemu_put_betls(f, &env->nip);
-    qemu_put_betls(f, &env->hflags);
-    qemu_put_betls(f, &env->hflags_nmsr);
-    qemu_put_sbe32s(f, &env->mmu_idx);
-    qemu_put_sbe32(f, 0);
-}
-
-int cpu_load(QEMUFile *f, void *opaque, int version_id)
+static int cpu_load_old(QEMUFile *f, void *opaque, int version_id)
 {
     CPUPPCState *env = (CPUPPCState *)opaque;
     unsigned int i, j;
@@ -175,3 +91,432 @@ int cpu_load(QEMUFile *f, void *opaque, int version_id)
 
     return 0;
 }
+
+static int get_avr(QEMUFile *f, void *pv, size_t size)
+{
+    ppc_avr_t *v = pv;
+
+    v->u64[0] = qemu_get_be64(f);
+    v->u64[1] = qemu_get_be64(f);
+
+    return 0;
+}
+
+static void put_avr(QEMUFile *f, void *pv, size_t size)
+{
+    ppc_avr_t *v = pv;
+
+    qemu_put_be64(f, v->u64[0]);
+    qemu_put_be64(f, v->u64[1]);
+}
+
+const VMStateInfo vmstate_info_avr = {
+    .name = "avr",
+    .get  = get_avr,
+    .put  = put_avr,
+};
+
+#define VMSTATE_AVR_ARRAY_V(_f, _s, _n, _v)                       \
+    VMSTATE_ARRAY(_f, _s, _n, _v, vmstate_info_avr, ppc_avr_t)
+
+#define VMSTATE_AVR_ARRAY(_f, _s, _n)                             \
+    VMSTATE_AVR_ARRAY_V(_f, _s, _n, 0)
+
+static void cpu_pre_save(void *opaque)
+{
+    CPUPPCState *env = opaque;
+    int i;
+
+    env->spr[SPR_LR] = env->lr;
+    env->spr[SPR_CTR] = env->ctr;
+    env->spr[SPR_XER] = env->xer;
+#if defined(TARGET_PPC64)
+    env->spr[SPR_CFAR] = env->cfar;
+    env->spr[SPR_ASR] = env->asr;
+#endif
+    env->spr[SPR_BOOKE_SPEFSCR] = env->spe_fscr;
+
+    env->cr = 0;
+    for (i = 0; i < 8; i++) {
+        env->cr = (env->cr << 4) | (env->crf[i] & 0xf);
+    }
+}
+
+static int cpu_post_load(void *opaque, int version_id)
+{
+    CPUPPCState *env = opaque;
+    int i;
+
+    env->lr = env->spr[SPR_LR];
+    env->ctr = env->spr[SPR_CTR];
+    env->xer = env->spr[SPR_XER];
+#if defined(TARGET_PPC64)
+    env->cfar = env->spr[SPR_CFAR];
+    env->asr = env->spr[SPR_ASR];
+#endif
+    env->spe_fscr = env->spr[SPR_BOOKE_SPEFSCR];
+
+    for (i = 0; i < 8; i++) {
+        env->crf[i] = (env->cr >> (4 * (7 - i))) & 0xf;
+    }
+
+    /* Restore htab_base and htab_mask variables */
+    ppc_store_sdr1(env, env->spr[SPR_SDR1]);
+
+    hreg_compute_hflags(env);
+    hreg_compute_mem_idx(env);
+
+    return 0;
+}
+
+static bool fpu_needed(void *opaque)
+{
+    CPUPPCState *env = opaque;
+
+    return (env->insns_flags & PPC_FLOAT);
+}
+
+static const VMStateDescription vmstate_fpu = {
+    .name = "cpu/fpu",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .minimum_version_id_old = 1,
+    .fields      = (VMStateField []) {
+        VMSTATE_FLOAT64_ARRAY(fpr, CPUPPCState, 32),
+        VMSTATE_UINT64(fpscr, CPUPPCState),
+        VMSTATE_END_OF_LIST()
+    },
+};
+
+static bool altivec_needed(void *opaque)
+{
+    CPUPPCState *env = opaque;
+
+    return (env->insns_flags & PPC_ALTIVEC);
+}
+
+static const VMStateDescription vmstate_altivec = {
+    .name = "cpu/altivec",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .minimum_version_id_old = 1,
+    .fields      = (VMStateField []) {
+        VMSTATE_AVR_ARRAY(avr, CPUPPCState, 32),
+        VMSTATE_UINT32(vscr, CPUPPCState),
+        VMSTATE_END_OF_LIST()
+    },
+};
+
+static bool vsx_needed(void *opaque)
+{
+    CPUPPCState *env = opaque;
+
+    return (env->insns_flags2 & PPC2_VSX);
+}
+
+static const VMStateDescription vmstate_vsx = {
+    .name = "cpu/vsx",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .minimum_version_id_old = 1,
+    .fields      = (VMStateField []) {
+        VMSTATE_UINT64_ARRAY(vsr, CPUPPCState, 32),
+        VMSTATE_END_OF_LIST()
+    },
+};
+
+static bool sr_needed(void *opaque)
+{
+#ifdef TARGET_PPC64
+    CPUPPCState *env = opaque;
+
+    return !(env->mmu_model & POWERPC_MMU_64);
+#else
+    return true;
+#endif
+}
+
+static const VMStateDescription vmstate_sr = {
+    .name = "cpu/sr",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .minimum_version_id_old = 1,
+    .fields      = (VMStateField []) {
+        VMSTATE_UINTTL_ARRAY(sr, CPUPPCState, 32),
+        VMSTATE_END_OF_LIST()
+    },
+};
+
+#ifdef TARGET_PPC64
+static int get_slbe(QEMUFile *f, void *pv, size_t size)
+{
+    ppc_slb_t *v = pv;
+
+    v->esid = qemu_get_be64(f);
+    v->vsid = qemu_get_be64(f);
+
+    return 0;
+}
+
+static void put_slbe(QEMUFile *f, void *pv, size_t size)
+{
+    ppc_slb_t *v = pv;
+
+    qemu_put_be64(f, v->esid);
+    qemu_put_be64(f, v->vsid);
+}
+
+const VMStateInfo vmstate_info_slbe = {
+    .name = "slbe",
+    .get  = get_slbe,
+    .put  = put_slbe,
+};
+
+#define VMSTATE_SLB_ARRAY_V(_f, _s, _n, _v)                       \
+    VMSTATE_ARRAY(_f, _s, _n, _v, vmstate_info_slbe, ppc_slb_t)
+
+#define VMSTATE_SLB_ARRAY(_f, _s, _n)                             \
+    VMSTATE_SLB_ARRAY_V(_f, _s, _n, 0)
+
+static bool slb_needed(void *opaque)
+{
+    CPUPPCState *env = opaque;
+
+    /* We don't support any of the old segment table based 64-bit CPUs */
+    return (env->mmu_model & POWERPC_MMU_64);
+}
+
+static const VMStateDescription vmstate_slb = {
+    .name = "cpu/slb",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .minimum_version_id_old = 1,
+    .fields      = (VMStateField []) {
+        VMSTATE_INT32_EQUAL(slb_nr, CPUPPCState),
+        VMSTATE_SLB_ARRAY(slb, CPUPPCState, 64),
+        VMSTATE_END_OF_LIST()
+    }
+};
+#endif /* TARGET_PPC64 */
+
+static const VMStateDescription vmstate_tlb6xx_entry = {
+    .name = "cpu/tlb6xx_entry",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .minimum_version_id_old = 1,
+    .fields      = (VMStateField []) {
+        VMSTATE_UINTTL(pte0, ppc6xx_tlb_t),
+        VMSTATE_UINTTL(pte1, ppc6xx_tlb_t),
+        VMSTATE_UINTTL(EPN, ppc6xx_tlb_t),
+        VMSTATE_END_OF_LIST()
+    },
+};
+
+static bool tlb6xx_needed(void *opaque)
+{
+    CPUPPCState *env = opaque;
+
+    return env->nb_tlb && (env->tlb_type == TLB_6XX);
+}
+
+static const VMStateDescription vmstate_tlb6xx = {
+    .name = "cpu/tlb6xx",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .minimum_version_id_old = 1,
+    .fields      = (VMStateField []) {
+        VMSTATE_INT32_EQUAL(nb_tlb, CPUPPCState),
+        VMSTATE_STRUCT_VARRAY_POINTER_INT32(tlb.tlb6, CPUPPCState, nb_tlb,
+                                            vmstate_tlb6xx_entry,
+                                            ppc6xx_tlb_t),
+        VMSTATE_UINTTL_ARRAY(tgpr, CPUPPCState, 4),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+static const VMStateDescription vmstate_tlbemb_entry = {
+    .name = "cpu/tlbemb_entry",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .minimum_version_id_old = 1,
+    .fields      = (VMStateField []) {
+        VMSTATE_TPA(RPN, ppcemb_tlb_t),
+        VMSTATE_UINTTL(EPN, ppcemb_tlb_t),
+        VMSTATE_UINTTL(PID, ppcemb_tlb_t),
+        VMSTATE_UINTTL(size, ppcemb_tlb_t),
+        VMSTATE_UINT32(prot, ppcemb_tlb_t),
+        VMSTATE_UINT32(attr, ppcemb_tlb_t),
+        VMSTATE_END_OF_LIST()
+    },
+};
+
+static bool tlbemb_needed(void *opaque)
+{
+    CPUPPCState *env = opaque;
+
+    return env->nb_tlb && (env->tlb_type == TLB_EMB);
+}
+
+static bool pbr403_needed(void *opaque)
+{
+    CPUPPCState *env = opaque;
+    uint32_t pvr = env->spr[SPR_PVR];
+
+    return (pvr & 0xffff0000) == 0x00200000;
+}
+
+static const VMStateDescription vmstate_pbr403 = {
+    .name = "cpu/pbr403",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .minimum_version_id_old = 1,
+    .fields      = (VMStateField []) {
+        VMSTATE_UINTTL_ARRAY(pb, CPUPPCState, 4),
+        VMSTATE_END_OF_LIST()
+    },
+};
+
+static const VMStateDescription vmstate_tlbemb = {
+    .name = "cpu/tlb6xx",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .minimum_version_id_old = 1,
+    .fields      = (VMStateField []) {
+        VMSTATE_INT32_EQUAL(nb_tlb, CPUPPCState),
+        VMSTATE_STRUCT_VARRAY_POINTER_INT32(tlb.tlbe, CPUPPCState, nb_tlb,
+                                            vmstate_tlbemb_entry,
+                                            ppcemb_tlb_t),
+        /* 403 protection registers */
+        VMSTATE_END_OF_LIST()
+    },
+    .subsections = (VMStateSubsection []) {
+        {
+            .vmsd = &vmstate_pbr403,
+            .needed = pbr403_needed,
+        } , {
+            /* empty */
+        }
+    }
+};
+
+static const VMStateDescription vmstate_tlbmas_entry = {
+    .name = "cpu/tlbmas_entry",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .minimum_version_id_old = 1,
+    .fields      = (VMStateField []) {
+        VMSTATE_UINT32(mas8, ppcmas_tlb_t),
+        VMSTATE_UINT32(mas1, ppcmas_tlb_t),
+        VMSTATE_UINT64(mas2, ppcmas_tlb_t),
+        VMSTATE_UINT64(mas7_3, ppcmas_tlb_t),
+        VMSTATE_END_OF_LIST()
+    },
+};
+
+static bool tlbmas_needed(void *opaque)
+{
+    CPUPPCState *env = opaque;
+
+    return env->nb_tlb && (env->tlb_type == TLB_MAS);
+}
+
+static const VMStateDescription vmstate_tlbmas = {
+    .name = "cpu/tlbmas",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .minimum_version_id_old = 1,
+    .fields      = (VMStateField []) {
+        VMSTATE_INT32_EQUAL(nb_tlb, CPUPPCState),
+        VMSTATE_STRUCT_VARRAY_POINTER_INT32(tlb.tlbm, CPUPPCState, nb_tlb,
+                                            vmstate_tlbmas_entry,
+                                            ppcmas_tlb_t),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+static const VMStateDescription vmstate_cpu = {
+    .name = "cpu",
+    .version_id = CPU_SAVE_VERSION,
+    .minimum_version_id = 5,
+    .minimum_version_id_old = 4,
+    .load_state_old = cpu_load_old,
+    .pre_save = cpu_pre_save,
+    .post_load = cpu_post_load,
+    .fields      = (VMStateField []) {
+        /* Verify we haven't changed the pvr */
+        VMSTATE_UINTTL_EQUAL(spr[SPR_PVR], CPUPPCState),
+
+        /* User mode architected state */
+        VMSTATE_UINTTL_ARRAY(gpr, CPUPPCState, 32),
+#if !defined(TARGET_PPC64)
+        VMSTATE_UINTTL_ARRAY(gprh, CPUPPCState, 32),
+#endif
+        VMSTATE_UINT32(cr, CPUPPCState),
+        VMSTATE_UINTTL(nip, CPUPPCState),
+
+        /* SPRs */
+        VMSTATE_UINTTL_ARRAY(spr, CPUPPCState, 1024),
+        VMSTATE_UINT64(spe_acc, CPUPPCState),
+
+        /* Reservation */
+        VMSTATE_UINTTL(reserve_addr, CPUPPCState),
+
+        /* Supervisor mode architected state */
+        VMSTATE_UINTTL(msr, CPUPPCState),
+
+        /* Internal state */
+        VMSTATE_UINTTL(hflags_nmsr, CPUPPCState),
+        /* FIXME: access_type? */
+
+        /* Sanity checking */
+        VMSTATE_UINTTL_EQUAL(msr_mask, CPUPPCState),
+        VMSTATE_UINT64_EQUAL(insns_flags, CPUPPCState),
+        VMSTATE_UINT64_EQUAL(insns_flags2, CPUPPCState),
+        VMSTATE_END_OF_LIST()
+    },
+    .subsections = (VMStateSubsection []) {
+        {
+            .vmsd = &vmstate_fpu,
+            .needed = fpu_needed,
+        } , {
+            .vmsd = &vmstate_altivec,
+            .needed = altivec_needed,
+        } , {
+            .vmsd = &vmstate_vsx,
+            .needed = vsx_needed,
+        } , {
+            .vmsd = &vmstate_sr,
+            .needed = sr_needed,
+        } , {
+#ifdef TARGET_PPC64
+            .vmsd = &vmstate_slb,
+            .needed = slb_needed,
+        } , {
+#endif /* TARGET_PPC64 */
+            .vmsd = &vmstate_tlb6xx,
+            .needed = tlb6xx_needed,
+        } , {
+            .vmsd = &vmstate_tlbemb,
+            .needed = tlbemb_needed,
+        } , {
+            .vmsd = &vmstate_tlbmas,
+            .needed = tlbmas_needed,
+        } , {
+            /* FIXME: BATS */
+            /* FIXME: DCRs? */
+            /* FIXME: timebase? */
+            /* empty */
+        }
+    }
+};
+
+void cpu_save(QEMUFile *f, void *opaque)
+{
+    vmstate_save_state(f, &vmstate_cpu, opaque);
+
+}
+
+int cpu_load(QEMUFile *f, void *opaque, int version_id)
+{
+    return vmstate_load_state(f, &vmstate_cpu, opaque, version_id);
+}
-- 
1.7.10.4



