From: Bastian Koppelmann
Subject: [Qemu-devel] [PATCH v7 04/15] target-tricore: Add initialization for translation and activate target
Date: Mon, 1 Sep 2014 12:59:49 +0100

Add tcg and cpu model initialization.
Add gen_intermediate_code function.
Activate target in configure and add softmmu config.
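
For context when reading the diff: decode_opc() below selects between the
16-bit and 32-bit decoders using the lowest bit of the opcode word (bit 0
clear means a 16-bit instruction, bit 0 set a 32-bit one). A minimal
standalone sketch of that dispatch, using an illustrative helper name that
is not part of this patch, would be:

    /* Sketch only: TriCore encodes the instruction length in opcode bit 0. */
    static int tricore_insn_length(uint32_t opcode)
    {
        return (opcode & 0x1) == 0 ? 2 : 4;  /* 2 bytes if bit 0 is clear */
    }

With the configure hunk and the (empty) tricore-softmmu.mak in place, the new
target should be buildable with something like
"./configure --target-list=tricore-softmmu && make".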

Signed-off-by: Bastian Koppelmann <address@hidden>
---
v6 -> v7:
    - configure: Remove empty disas case. Remove target_phys_bits=32.
    - tricore-softmmu.mak: Remove pci, SMC91C111 and PFLASH_CFI01
    - CPUTRICOREState -> CPUTriCoreState.

 configure                           |   2 +
 default-configs/tricore-softmmu.mak |   0
 target-tricore/translate.c          | 165 ++++++++++++++++++++++++++++++++++++
 3 files changed, 167 insertions(+)
 create mode 100644 default-configs/tricore-softmmu.mak

diff --git a/configure b/configure
index 2063cf6..021b3cf 100755
--- a/configure
+++ b/configure
@@ -5045,6 +5045,8 @@ case "$target_name" in
     TARGET_BASE_ARCH=mips
     echo "TARGET_ABI_MIPSN64=y" >> $config_target_mak
   ;;
+  tricore)
+  ;;
   moxie)
   ;;
   or32)
diff --git a/default-configs/tricore-softmmu.mak b/default-configs/tricore-softmmu.mak
new file mode 100644
index 0000000..e69de29
diff --git a/target-tricore/translate.c b/target-tricore/translate.c
index fae1b1a..7691b11 100644
--- a/target-tricore/translate.c
+++ b/target-tricore/translate.c
@@ -26,6 +26,26 @@
 #include "exec/helper-proto.h"
 #include "exec/helper-gen.h"
 
+/*
+ * TCG registers
+ */
+static TCGv cpu_PC;
+static TCGv cpu_PCXI;
+static TCGv cpu_PSW;
+static TCGv cpu_ICR;
+/* GPR registers */
+static TCGv cpu_gpr_a[16];
+static TCGv cpu_gpr_d[16];
+/* PSW Flag cache */
+static TCGv cpu_PSW_C;
+static TCGv cpu_PSW_V;
+static TCGv cpu_PSW_SV;
+static TCGv cpu_PSW_AV;
+static TCGv cpu_PSW_SAV;
+/* CPU env */
+static TCGv_ptr cpu_env;
+
+#include "exec/gen-icount.h"
 
 static const char *regnames_a[] = {
       "a0"  , "a1"  , "a2"  , "a3" , "a4"  , "a5" ,
@@ -39,6 +59,25 @@ static const char *regnames_d[] = {
       "d12" , "d13" , "d14" , "d15",
     };
 
+typedef struct DisasContext {
+    struct TranslationBlock *tb;
+    target_ulong pc, saved_pc, next_pc;
+    uint32_t opcode;
+    int singlestep_enabled;
+    /* Routine used to access memory */
+    int mem_idx;
+    uint32_t hflags, saved_hflags;
+    int bstate;
+} DisasContext;
+
+enum {
+
+    BS_NONE   = 0,
+    BS_STOP   = 1,
+    BS_BRANCH = 2,
+    BS_EXCP   = 3,
+};
+
 void tricore_cpu_dump_state(CPUState *cs, FILE *f,
                             fprintf_function cpu_fprintf, int flags)
 {
@@ -62,10 +101,88 @@ void tricore_cpu_dump_state(CPUState *cs, FILE *f,
 
 }
 
+static void decode_16Bit_opc(CPUTriCoreState *env, DisasContext *ctx)
+{
+}
+
+static void decode_32Bit_opc(CPUTriCoreState *env, DisasContext *ctx)
+{
+}
+
+static void decode_opc(CPUTriCoreState *env, DisasContext *ctx, int *is_branch)
+{
+    /* 16-Bit Instruction */
+    if ((ctx->opcode & 0x1) == 0) {
+        ctx->next_pc = ctx->pc + 2;
+        decode_16Bit_opc(env, ctx);
+    /* 32-Bit Instruction */
+    } else {
+        ctx->next_pc = ctx->pc + 4;
+        decode_32Bit_opc(env, ctx);
+    }
+}
+
 static inline void
 gen_intermediate_code_internal(TriCoreCPU *cpu, struct TranslationBlock *tb,
                               int search_pc)
 {
+    CPUState *cs = CPU(cpu);
+    CPUTriCoreState *env = &cpu->env;
+    DisasContext ctx;
+    target_ulong pc_start;
+    int num_insns;
+    uint16_t *gen_opc_end;
+
+    if (search_pc) {
+        qemu_log("search pc %d\n", search_pc);
+    }
+
+    num_insns = 0;
+    pc_start = tb->pc;
+    gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
+    ctx.pc = pc_start;
+    ctx.saved_pc = -1;
+    ctx.tb = tb;
+    ctx.singlestep_enabled = cs->singlestep_enabled;
+    ctx.bstate = BS_NONE;
+    ctx.mem_idx = cpu_mmu_index(env);
+
+    tcg_clear_temp_count();
+    gen_tb_start();
+    while (ctx.bstate == BS_NONE) {
+        ctx.opcode = cpu_ldl_code(env, ctx.pc);
+        decode_opc(env, &ctx, 0);
+
+        num_insns++;
+
+        if (tcg_ctx.gen_opc_ptr >= gen_opc_end) {
+            break;
+        }
+        if (singlestep) {
+            break;
+        }
+        ctx.pc = ctx.next_pc;
+    }
+
+    gen_tb_end(tb, num_insns);
+    *tcg_ctx.gen_opc_ptr = INDEX_op_end;
+    if (search_pc) {
+        printf("done_generating search pc\n");
+    } else {
+        tb->size = ctx.pc - pc_start;
+        tb->icount = num_insns;
+    }
+    if (tcg_check_temp_count()) {
+        printf("LEAK at %08x\n", env->PC);
+    }
+
+#ifdef DEBUG_DISAS
+    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
+        qemu_log("IN: %s\n", lookup_symbol(pc_start));
+        log_target_disas(env, pc_start, ctx.pc - pc_start, 0);
+        qemu_log("\n");
+    }
+#endif
 }
 
 void
@@ -93,8 +210,56 @@ restore_state_to_opc(CPUTriCoreState *env, TranslationBlock *tb, int pc_pos)
 
 void cpu_state_reset(CPUTriCoreState *env)
 {
+    /* Reset Regs to Default Value */
+    env->PSW = 0xb80;
+}
+
+static void tricore_tcg_init_csfr(void)
+{
+    cpu_PCXI = tcg_global_mem_new(TCG_AREG0,
+                          offsetof(CPUTriCoreState, PCXI), "PCXI");
+    cpu_PSW = tcg_global_mem_new(TCG_AREG0,
+                          offsetof(CPUTriCoreState, PSW), "PSW");
+    cpu_PC = tcg_global_mem_new(TCG_AREG0,
+                          offsetof(CPUTriCoreState, PC), "PC");
+    cpu_ICR = tcg_global_mem_new(TCG_AREG0,
+                          offsetof(CPUTriCoreState, ICR), "ICR");
 }
 
 void tricore_tcg_init(void)
 {
+    int i;
+    static int inited;
+    if (inited) {
+        return;
+    }
+    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
+    /* reg init */
+    for (i = 0 ; i < 16 ; i++) {
+        cpu_gpr_a[i] = tcg_global_mem_new(TCG_AREG0,
+                                          offsetof(CPUTriCoreState, gpr_a[i]),
+                                          regnames_a[i]);
+    }
+    for (i = 0 ; i < 16 ; i++) {
+        cpu_gpr_d[i] = tcg_global_mem_new(TCG_AREG0,
+                                  offsetof(CPUTriCoreState, gpr_d[i]),
+                                           regnames_d[i]);
+    }
+    tricore_tcg_init_csfr();
+    /* init PSW flag cache */
+    cpu_PSW_C = tcg_global_mem_new(TCG_AREG0,
+                                   offsetof(CPUTriCoreState, PSW_USB_C),
+                                   "PSW_C");
+    cpu_PSW_V = tcg_global_mem_new(TCG_AREG0,
+                                   offsetof(CPUTriCoreState, PSW_USB_V),
+                                   "PSW_V");
+    cpu_PSW_SV = tcg_global_mem_new(TCG_AREG0,
+                                    offsetof(CPUTriCoreState, PSW_USB_SV),
+                                    "PSW_SV");
+    cpu_PSW_AV = tcg_global_mem_new(TCG_AREG0,
+                                    offsetof(CPUTriCoreState, PSW_USB_AV),
+                                    "PSW_AV");
+    cpu_PSW_SAV = tcg_global_mem_new(TCG_AREG0,
+                                     offsetof(CPUTriCoreState, PSW_USB_SAV),
+                                     "PSW_SAV");
 }
-- 
2.1.0



