[Qemu-devel] [PATCH 10/14] target-alpha: Convert conditional compilation of debug printfs to regular ifs


From: Marc Marí
Subject: [Qemu-devel] [PATCH 10/14] target-alpha: Convert conditional compilation of debug printfs to regular ifs
Date: Mon, 28 Apr 2014 10:26:11 +0200

From: Marc Marí <address@hidden>

Modify debug macros as explained in 
https://lists.gnu.org/archive/html/qemu-devel/2014-04/msg03642.html
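
For reference, the conversion that thread discusses keeps the debug printf compiled in at all times and guards it with a plain if on a constant, so the format string and arguments are always type-checked and the dead branch is optimized away. A rough sketch of the pattern follows; the DEBUG_ALPHA switch and the exact macro shape here are illustrative, not taken verbatim from this patch:

    #include <stdio.h>

    /* Old style: the printf only exists when DEBUG_ALPHA is defined, so
       broken format strings go unnoticed in normal builds.  */
    #ifdef DEBUG_ALPHA
    #define LOG_DISAS(...) fprintf(stderr, __VA_ARGS__)
    #else
    #define LOG_DISAS(...) do { } while (0)
    #endif

    #undef LOG_DISAS

    /* New style: always compile the call; the compiler eliminates the
       branch when DEBUG_ALPHA_ENABLED is 0 but still checks the arguments.  */
    #ifdef DEBUG_ALPHA
    #define DEBUG_ALPHA_ENABLED 1
    #else
    #define DEBUG_ALPHA_ENABLED 0
    #endif
    #define LOG_DISAS(...) do {                        \
            if (DEBUG_ALPHA_ENABLED) {                 \
                fprintf(stderr, __VA_ARGS__);          \
            }                                          \
        } while (0)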

Signed-off-by: Marc Marí <address@hidden>
---
 target-alpha/fpu_helper.c |    7 +
 target-alpha/translate.c  | 2363 +++++++++++++++++----------------------------
 2 files changed, 878 insertions(+), 1492 deletions(-)

diff --git a/target-alpha/fpu_helper.c b/target-alpha/fpu_helper.c
index fad3575..ee73155 100644
--- a/target-alpha/fpu_helper.c
+++ b/target-alpha/fpu_helper.c
@@ -820,3 +820,10 @@ uint64_t helper_cvtqg(CPUAlphaState *env, uint64_t a)
     fr = int64_to_float64(a, &FP_STATUS);
     return float64_to_g(fr);
 }
+
+void helper_fcvtql_v_input(CPUAlphaState *env, uint64_t val)
+{
+    if (val != (int32_t)val) {
+        arith_excp(env, GETPC(), EXC_M_IOV, 0);
+    }
+}
diff --git a/target-alpha/translate.c b/target-alpha/translate.c
index e7e319b..d0357ff 100644
--- a/target-alpha/translate.c
+++ b/target-alpha/translate.c
@@ -49,6 +49,12 @@ struct DisasContext {
     /* implver value for this CPU.  */
     int implver;
 
+    /* Temporaries for $31 and $f31 as source and destination.  */
+    TCGv zero;
+    TCGv sink;
+    /* Temporary for immediate constants.  */
+    TCGv lit;
+
     bool singlestep_enabled;
 };
 
@@ -83,64 +89,128 @@ static TCGv cpu_pc;
 static TCGv cpu_lock_addr;
 static TCGv cpu_lock_st_addr;
 static TCGv cpu_lock_value;
-static TCGv cpu_unique;
-#ifndef CONFIG_USER_ONLY
-static TCGv cpu_sysval;
-static TCGv cpu_usp;
-#endif
-
-/* register names */
-static char cpu_reg_names[10*4+21*5 + 10*5+21*6];
 
 #include "exec/gen-icount.h"
 
 void alpha_translate_init(void)
 {
+#define DEF_VAR(V)  { &cpu_##V, #V, offsetof(CPUAlphaState, V) }
+
+    typedef struct { TCGv *var; const char *name; int ofs; } GlobalVar;
+    static const GlobalVar vars[] = {
+        DEF_VAR(pc),
+        DEF_VAR(lock_addr),
+        DEF_VAR(lock_st_addr),
+        DEF_VAR(lock_value),
+    };
+
+#undef DEF_VAR
+
+    /* Use the symbolic register names that match the disassembler.  */
+    static const char greg_names[31][4] = {
+        "v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6",
+        "t7", "s0", "s1", "s2", "s3", "s4", "s5", "fp",
+        "a0", "a1", "a2", "a3", "a4", "a5", "t8", "t9",
+        "t10", "t11", "ra", "t12", "at", "gp", "sp"
+    };
+    static const char freg_names[31][4] = {
+        "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
+        "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
+        "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
+        "f24", "f25", "f26", "f27", "f28", "f29", "f30"
+    };
+
+    static bool done_init = 0;
     int i;
-    char *p;
-    static int done_init = 0;
 
-    if (done_init)
+    if (done_init) {
         return;
+    }
+    done_init = 1;
 
     cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
 
-    p = cpu_reg_names;
     for (i = 0; i < 31; i++) {
-        sprintf(p, "ir%d", i);
         cpu_ir[i] = tcg_global_mem_new_i64(TCG_AREG0,
-                                           offsetof(CPUAlphaState, ir[i]), p);
-        p += (i < 10) ? 4 : 5;
+                                           offsetof(CPUAlphaState, ir[i]),
+                                           greg_names[i]);
+    }
 
-        sprintf(p, "fir%d", i);
+    for (i = 0; i < 31; i++) {
         cpu_fir[i] = tcg_global_mem_new_i64(TCG_AREG0,
-                                            offsetof(CPUAlphaState, fir[i]), p);
-        p += (i < 10) ? 5 : 6;
+                                            offsetof(CPUAlphaState, fir[i]),
+                                            freg_names[i]);
     }
 
-    cpu_pc = tcg_global_mem_new_i64(TCG_AREG0,
-                                    offsetof(CPUAlphaState, pc), "pc");
-
-    cpu_lock_addr = tcg_global_mem_new_i64(TCG_AREG0,
-                                          offsetof(CPUAlphaState, lock_addr),
-                                          "lock_addr");
-    cpu_lock_st_addr = tcg_global_mem_new_i64(TCG_AREG0,
-                                             offsetof(CPUAlphaState, lock_st_addr),
-                                             "lock_st_addr");
-    cpu_lock_value = tcg_global_mem_new_i64(TCG_AREG0,
-                                           offsetof(CPUAlphaState, lock_value),
-                                           "lock_value");
-
-    cpu_unique = tcg_global_mem_new_i64(TCG_AREG0,
-                                        offsetof(CPUAlphaState, unique), "unique");
-#ifndef CONFIG_USER_ONLY
-    cpu_sysval = tcg_global_mem_new_i64(TCG_AREG0,
-                                        offsetof(CPUAlphaState, sysval), "sysval");
-    cpu_usp = tcg_global_mem_new_i64(TCG_AREG0,
-                                     offsetof(CPUAlphaState, usp), "usp");
-#endif
+    for (i = 0; i < ARRAY_SIZE(vars); ++i) {
+        const GlobalVar *v = &vars[i];
+        *v->var = tcg_global_mem_new_i64(TCG_AREG0, v->ofs, v->name);
+    }
+}
 
-    done_init = 1;
+static TCGv load_zero(DisasContext *ctx)
+{
+    if (TCGV_IS_UNUSED_I64(ctx->zero)) {
+        ctx->zero = tcg_const_i64(0);
+    }
+    return ctx->zero;
+}
+
+static TCGv dest_sink(DisasContext *ctx)
+{
+    if (TCGV_IS_UNUSED_I64(ctx->sink)) {
+        ctx->sink = tcg_temp_new();
+    }
+    return ctx->sink;
+}
+
+static TCGv load_gpr(DisasContext *ctx, unsigned reg)
+{
+    if (likely(reg < 31)) {
+        return cpu_ir[reg];
+    } else {
+        return load_zero(ctx);
+    }
+}
+
+static TCGv load_gpr_lit(DisasContext *ctx, unsigned reg,
+                         uint8_t lit, bool islit)
+{
+    if (islit) {
+        ctx->lit = tcg_const_i64(lit);
+        return ctx->lit;
+    } else if (likely(reg < 31)) {
+        return cpu_ir[reg];
+    } else {
+        return load_zero(ctx);
+    }
+}
+
+static TCGv dest_gpr(DisasContext *ctx, unsigned reg)
+{
+    if (likely(reg < 31)) {
+        return cpu_ir[reg];
+    } else {
+        return dest_sink(ctx);
+    }
+}
+
+static TCGv load_fpr(DisasContext *ctx, unsigned reg)
+{
+    if (likely(reg < 31)) {
+        return cpu_fir[reg];
+    } else {
+        return load_zero(ctx);
+    }
+}
+
+static TCGv dest_fpr(DisasContext *ctx, unsigned reg)
+{
+    if (likely(reg < 31)) {
+        return cpu_fir[reg];
+    } else {
+        return dest_sink(ctx);
+    }
 }
 
 static void gen_excp_1(int exception, int error_code)
@@ -207,10 +277,10 @@ static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
 static inline void gen_load_mem(DisasContext *ctx,
                                 void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
                                                           int flags),
-                                int ra, int rb, int32_t disp16, int fp,
-                                int clear)
+                                int ra, int rb, int32_t disp16, bool fp,
+                                bool clear)
 {
-    TCGv addr, va;
+    TCGv tmp, addr, va;
 
     /* LDQ_U with ra $31 is UNOP.  Other various loads are forms of
        prefetches, which we can treat as nops.  No worries about
@@ -219,23 +289,22 @@ static inline void gen_load_mem(DisasContext *ctx,
         return;
     }
 
-    addr = tcg_temp_new();
-    if (rb != 31) {
-        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
-        if (clear) {
-            tcg_gen_andi_i64(addr, addr, ~0x7);
-        }
-    } else {
-        if (clear) {
-            disp16 &= ~0x7;
-        }
-        tcg_gen_movi_i64(addr, disp16);
+    tmp = tcg_temp_new();
+    addr = load_gpr(ctx, rb);
+
+    if (disp16) {
+        tcg_gen_addi_i64(tmp, addr, disp16);
+        addr = tmp;
+    }
+    if (clear) {
+        tcg_gen_andi_i64(tmp, addr, ~0x7);
+        addr = tmp;
     }
 
     va = (fp ? cpu_fir[ra] : cpu_ir[ra]);
     tcg_gen_qemu_load(va, addr, ctx->mem_idx);
 
-    tcg_temp_free(addr);
+    tcg_temp_free(tmp);
 }
 
 static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
@@ -265,35 +334,27 @@ static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
 static inline void gen_store_mem(DisasContext *ctx,
                                  void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
                                                             int flags),
-                                 int ra, int rb, int32_t disp16, int fp,
-                                 int clear)
+                                 int ra, int rb, int32_t disp16, bool fp,
+                                 bool clear)
 {
-    TCGv addr, va;
+    TCGv tmp, addr, va;
 
-    addr = tcg_temp_new();
-    if (rb != 31) {
-        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
-        if (clear) {
-            tcg_gen_andi_i64(addr, addr, ~0x7);
-        }
-    } else {
-        if (clear) {
-            disp16 &= ~0x7;
-        }
-        tcg_gen_movi_i64(addr, disp16);
-    }
+    tmp = tcg_temp_new();
+    addr = load_gpr(ctx, rb);
 
-    if (ra == 31) {
-        va = tcg_const_i64(0);
-    } else {
-        va = (fp ? cpu_fir[ra] : cpu_ir[ra]);
+    if (disp16) {
+        tcg_gen_addi_i64(tmp, addr, disp16);
+        addr = tmp;
+    }
+    if (clear) {
+        tcg_gen_andi_i64(tmp, addr, ~0x7);
+        addr = tmp;
     }
+
+    va = (fp ? load_fpr(ctx, ra) : load_gpr(ctx, ra));
     tcg_gen_qemu_store(va, addr, ctx->mem_idx);
 
-    tcg_temp_free(addr);
-    if (ra == 31) {
-        tcg_temp_free(va);
-    }
+    tcg_temp_free(tmp);
 }
 
 static ExitStatus gen_store_conditional(DisasContext *ctx, int ra, int rb,
@@ -313,11 +374,7 @@ static ExitStatus gen_store_conditional(DisasContext *ctx, int ra, int rb,
     addr = tcg_temp_local_new();
 #endif
 
-    if (rb != 31) {
-        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
-    } else {
-        tcg_gen_movi_i64(addr, disp16);
-    }
+    tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);
 
 #if defined(CONFIG_USER_ONLY)
     /* ??? This is handled via a complicated version of compare-and-swap
@@ -367,7 +424,8 @@ static bool in_superpage(DisasContext *ctx, int64_t addr)
 static bool use_goto_tb(DisasContext *ctx, uint64_t dest)
 {
     /* Suppress goto_tb in the case of single-steping and IO.  */
-    if (ctx->singlestep_enabled || (ctx->tb->cflags & CF_LAST_IO)) {
+    if ((ctx->tb->cflags & CF_LAST_IO)
+        || ctx->singlestep_enabled || singlestep) {
         return false;
     }
     /* If the destination is in the superpage, the page perms can't change.  */
@@ -438,15 +496,11 @@ static ExitStatus gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
 {
     TCGv cmp_tmp;
 
-    if (unlikely(ra == 31)) {
-        cmp_tmp = tcg_const_i64(0);
-    } else {
+    if (mask) {
         cmp_tmp = tcg_temp_new();
-        if (mask) {
-            tcg_gen_andi_i64(cmp_tmp, cpu_ir[ra], 1);
-        } else {
-            tcg_gen_mov_i64(cmp_tmp, cpu_ir[ra]);
-        }
+        tcg_gen_andi_i64(cmp_tmp, load_gpr(ctx, ra), 1);
+    } else {
+        cmp_tmp = load_gpr(ctx, ra);
     }
 
     return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
@@ -487,83 +541,23 @@ static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src)
 static ExitStatus gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
                              int32_t disp)
 {
-    TCGv cmp_tmp;
-
-    if (unlikely(ra == 31)) {
-        /* Very uncommon case, but easier to optimize it to an integer
-           comparison than continuing with the floating point comparison.  */
-        return gen_bcond(ctx, cond, ra, disp, 0);
-    }
-
-    cmp_tmp = tcg_temp_new();
-    gen_fold_mzero(cond, cmp_tmp, cpu_fir[ra]);
+    TCGv cmp_tmp = tcg_temp_new();
+    gen_fold_mzero(cond, cmp_tmp, load_fpr(ctx, ra));
     return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
 }
 
-static void gen_cmov(TCGCond cond, int ra, int rb, int rc,
-                     int islit, uint8_t lit, int mask)
-{
-    TCGv_i64 c1, z, v1;
-
-    if (unlikely(rc == 31)) {
-        return;
-    }
-
-    if (ra == 31) {
-        /* Very uncommon case - Do not bother to optimize.  */
-        c1 = tcg_const_i64(0);
-    } else if (mask) {
-        c1 = tcg_const_i64(1);
-        tcg_gen_and_i64(c1, c1, cpu_ir[ra]);
-    } else {
-        c1 = cpu_ir[ra];
-    }
-    if (islit) {
-        v1 = tcg_const_i64(lit);
-    } else {
-        v1 = cpu_ir[rb];
-    }
-    z = tcg_const_i64(0);
-
-    tcg_gen_movcond_i64(cond, cpu_ir[rc], c1, z, v1, cpu_ir[rc]);
-
-    tcg_temp_free_i64(z);
-    if (ra == 31 || mask) {
-        tcg_temp_free_i64(c1);
-    }
-    if (islit) {
-        tcg_temp_free_i64(v1);
-    }
-}
-
-static void gen_fcmov(TCGCond cond, int ra, int rb, int rc)
+static void gen_fcmov(DisasContext *ctx, TCGCond cond, int ra, int rb, int rc)
 {
-    TCGv_i64 c1, z, v1;
+    TCGv_i64 va, vb, z;
 
-    if (unlikely(rc == 31)) {
-        return;
-    }
-
-    c1 = tcg_temp_new_i64();
-    if (unlikely(ra == 31)) {
-        tcg_gen_movi_i64(c1, 0);
-    } else {
-        gen_fold_mzero(cond, c1, cpu_fir[ra]);
-    }
-    if (rb == 31) {
-        v1 = tcg_const_i64(0);
-    } else {
-        v1 = cpu_fir[rb];
-    }
-    z = tcg_const_i64(0);
+    z = load_zero(ctx);
+    vb = load_fpr(ctx, rb);
+    va = tcg_temp_new();
+    gen_fold_mzero(cond, va, load_fpr(ctx, ra));
 
-    tcg_gen_movcond_i64(cond, cpu_fir[rc], c1, z, v1, cpu_fir[rc]);
+    tcg_gen_movcond_i64(cond, dest_fpr(ctx, rc), va, z, vb, load_fpr(ctx, rc));
 
-    tcg_temp_free_i64(z);
-    tcg_temp_free_i64(c1);
-    if (rb == 31) {
-        tcg_temp_free_i64(v1);
-    }
+    tcg_temp_free(va);
 }
 
 #define QUAL_RM_N       0x080   /* Round mode nearest even */
@@ -647,21 +641,21 @@ static void gen_qual_flushzero(DisasContext *ctx, int fn11)
     tcg_temp_free_i32(tmp);
 }
 
-static TCGv gen_ieee_input(int reg, int fn11, int is_cmp)
+static TCGv gen_ieee_input(DisasContext *ctx, int reg, int fn11, int is_cmp)
 {
     TCGv val;
-    if (reg == 31) {
-        val = tcg_const_i64(0);
+
+    if (unlikely(reg == 31)) {
+        val = load_zero(ctx);
     } else {
+        val = cpu_fir[reg];
         if ((fn11 & QUAL_S) == 0) {
             if (is_cmp) {
-                gen_helper_ieee_input_cmp(cpu_env, cpu_fir[reg]);
+                gen_helper_ieee_input_cmp(cpu_env, val);
             } else {
-                gen_helper_ieee_input(cpu_env, cpu_fir[reg]);
+                gen_helper_ieee_input(cpu_env, val);
             }
         }
-        val = tcg_temp_new();
-        tcg_gen_mov_i64(val, cpu_fir[reg]);
     }
     return val;
 }
@@ -721,105 +715,46 @@ static inline void gen_fp_exc_raise(int rc, int fn11)
     gen_fp_exc_raise_ignore(rc, fn11, fn11 & QUAL_I ? 0 : float_flag_inexact);
 }
 
-static void gen_fcvtlq(int rb, int rc)
+static void gen_fcvtlq(TCGv vc, TCGv vb)
 {
-    if (unlikely(rc == 31)) {
-        return;
-    }
-    if (unlikely(rb == 31)) {
-        tcg_gen_movi_i64(cpu_fir[rc], 0);
-    } else {
-        TCGv tmp = tcg_temp_new();
+    TCGv tmp = tcg_temp_new();
 
-        /* The arithmetic right shift here, plus the sign-extended mask below
-           yields a sign-extended result without an explicit ext32s_i64.  */
-        tcg_gen_sari_i64(tmp, cpu_fir[rb], 32);
-        tcg_gen_shri_i64(cpu_fir[rc], cpu_fir[rb], 29);
-        tcg_gen_andi_i64(tmp, tmp, (int32_t)0xc0000000);
-        tcg_gen_andi_i64(cpu_fir[rc], cpu_fir[rc], 0x3fffffff);
-        tcg_gen_or_i64(cpu_fir[rc], cpu_fir[rc], tmp);
+    /* The arithmetic right shift here, plus the sign-extended mask below
+       yields a sign-extended result without an explicit ext32s_i64.  */
+    tcg_gen_sari_i64(tmp, vb, 32);
+    tcg_gen_shri_i64(vc, vb, 29);
+    tcg_gen_andi_i64(tmp, tmp, (int32_t)0xc0000000);
+    tcg_gen_andi_i64(vc, vc, 0x3fffffff);
+    tcg_gen_or_i64(vc, vc, tmp);
 
-        tcg_temp_free(tmp);
-    }
+    tcg_temp_free(tmp);
 }
 
-static void gen_fcvtql(int rb, int rc)
+static void gen_fcvtql(TCGv vc, TCGv vb)
 {
-    if (unlikely(rc == 31)) {
-        return;
-    }
-    if (unlikely(rb == 31)) {
-        tcg_gen_movi_i64(cpu_fir[rc], 0);
-    } else {
-        TCGv tmp = tcg_temp_new();
+    TCGv tmp = tcg_temp_new();
 
-        tcg_gen_andi_i64(tmp, cpu_fir[rb], 0xC0000000);
-        tcg_gen_andi_i64(cpu_fir[rc], cpu_fir[rb], 0x3FFFFFFF);
-        tcg_gen_shli_i64(tmp, tmp, 32);
-        tcg_gen_shli_i64(cpu_fir[rc], cpu_fir[rc], 29);
-        tcg_gen_or_i64(cpu_fir[rc], cpu_fir[rc], tmp);
+    tcg_gen_andi_i64(tmp, vb, (int32_t)0xc0000000);
+    tcg_gen_andi_i64(vc, vb, 0x3FFFFFFF);
+    tcg_gen_shli_i64(tmp, tmp, 32);
+    tcg_gen_shli_i64(vc, vc, 29);
+    tcg_gen_or_i64(vc, vc, tmp);
 
-        tcg_temp_free(tmp);
-    }
+    tcg_temp_free(tmp);
 }
 
-static void gen_fcvtql_v(DisasContext *ctx, int rb, int rc)
-{
-    if (rb != 31) {
-        int lab = gen_new_label();
-        TCGv tmp = tcg_temp_new();
-
-        tcg_gen_ext32s_i64(tmp, cpu_fir[rb]);
-        tcg_gen_brcond_i64(TCG_COND_EQ, tmp, cpu_fir[rb], lab);
-        gen_excp(ctx, EXCP_ARITH, EXC_M_IOV);
-
-        gen_set_label(lab);
-    }
-    gen_fcvtql(rb, rc);
-}
-
-#define FARITH2(name)                                                   \
-    static inline void glue(gen_f, name)(int rb, int rc)                \
-    {                                                                   \
-        if (unlikely(rc == 31)) {                                       \
-            return;                                                     \
-        }                                                               \
-        if (rb != 31) {                                                 \
-            gen_helper_ ## name(cpu_fir[rc], cpu_env, cpu_fir[rb]);     \
-        } else {                                                        \
-            TCGv tmp = tcg_const_i64(0);                                \
-            gen_helper_ ## name(cpu_fir[rc], cpu_env, tmp);             \
-            tcg_temp_free(tmp);                                         \
-        }                                                               \
-    }
-
-/* ??? VAX instruction qualifiers ignored.  */
-FARITH2(sqrtf)
-FARITH2(sqrtg)
-FARITH2(cvtgf)
-FARITH2(cvtgq)
-FARITH2(cvtqf)
-FARITH2(cvtqg)
-
 static void gen_ieee_arith2(DisasContext *ctx,
                             void (*helper)(TCGv, TCGv_ptr, TCGv),
                             int rb, int rc, int fn11)
 {
     TCGv vb;
 
-    /* ??? This is wrong: the instruction is not a nop, it still may
-       raise exceptions.  */
-    if (unlikely(rc == 31)) {
-        return;
-    }
-
     gen_qual_roundmode(ctx, fn11);
     gen_qual_flushzero(ctx, fn11);
     gen_fp_exc_clear();
 
-    vb = gen_ieee_input(rb, fn11, 0);
-    helper(cpu_fir[rc], cpu_env, vb);
-    tcg_temp_free(vb);
+    vb = gen_ieee_input(ctx, rb, fn11, 0);
+    helper(dest_fpr(ctx, rc), cpu_env, vb);
 
     gen_fp_exc_raise(rc, fn11);
 }
@@ -837,40 +772,34 @@ IEEE_ARITH2(cvtts)
 
 static void gen_fcvttq(DisasContext *ctx, int rb, int rc, int fn11)
 {
-    TCGv vb;
+    TCGv vb, vc;
     int ignore = 0;
 
-    /* ??? This is wrong: the instruction is not a nop, it still may
-       raise exceptions.  */
-    if (unlikely(rc == 31)) {
-        return;
-    }
-
     /* No need to set flushzero, since we have an integer output.  */
     gen_fp_exc_clear();
-    vb = gen_ieee_input(rb, fn11, 0);
+    vb = gen_ieee_input(ctx, rb, fn11, 0);
+    vc = dest_fpr(ctx, rc);
 
     /* Almost all integer conversions use cropped rounding, and most
        also do not have integer overflow enabled.  Special case that.  */
     switch (fn11) {
     case QUAL_RM_C:
-        gen_helper_cvttq_c(cpu_fir[rc], cpu_env, vb);
+        gen_helper_cvttq_c(vc, cpu_env, vb);
         break;
     case QUAL_V | QUAL_RM_C:
     case QUAL_S | QUAL_V | QUAL_RM_C:
         ignore = float_flag_inexact;
         /* FALLTHRU */
     case QUAL_S | QUAL_V | QUAL_I | QUAL_RM_C:
-        gen_helper_cvttq_svic(cpu_fir[rc], cpu_env, vb);
+        gen_helper_cvttq_svic(vc, cpu_env, vb);
         break;
     default:
         gen_qual_roundmode(ctx, fn11);
-        gen_helper_cvttq(cpu_fir[rc], cpu_env, vb);
+        gen_helper_cvttq(vc, cpu_env, vb);
         ignore |= (fn11 & QUAL_V ? 0 : float_flag_overflow);
         ignore |= (fn11 & QUAL_I ? 0 : float_flag_inexact);
         break;
     }
-    tcg_temp_free(vb);
 
     gen_fp_exc_raise_ignore(rc, fn11, ignore);
 }
@@ -879,35 +808,21 @@ static void gen_ieee_intcvt(DisasContext *ctx,
                             void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
 {
-    TCGv vb;
-
-    /* ??? This is wrong: the instruction is not a nop, it still may
-       raise exceptions.  */
-    if (unlikely(rc == 31)) {
-        return;
-    }
+    TCGv vb, vc;
 
     gen_qual_roundmode(ctx, fn11);
-
-    if (rb == 31) {
-        vb = tcg_const_i64(0);
-    } else {
-        vb = cpu_fir[rb];
-    }
+    vb = load_fpr(ctx, rb);
+    vc = dest_fpr(ctx, rc);
 
     /* The only exception that can be raised by integer conversion
        is inexact.  Thus we only need to worry about exceptions when
        inexact handling is requested.  */
     if (fn11 & QUAL_I) {
         gen_fp_exc_clear();
-        helper(cpu_fir[rc], cpu_env, vb);
+        helper(vc, cpu_env, vb);
         gen_fp_exc_raise(rc, fn11);
     } else {
-        helper(cpu_fir[rc], cpu_env, vb);
-    }
-
-    if (rb == 31) {
-        tcg_temp_free(vb);
+        helper(vc, cpu_env, vb);
     }
 }
 
@@ -920,144 +835,38 @@ static inline void glue(gen_f, name)(DisasContext *ctx,        \
 IEEE_INTCVT(cvtqs)
 IEEE_INTCVT(cvtqt)
 
-static void gen_cpys_internal(int ra, int rb, int rc, int inv_a, uint64_t mask)
+static void gen_cpy_mask(TCGv vc, TCGv va, TCGv vb, bool inv_a, uint64_t mask)
 {
-    TCGv va, vb, vmask;
-    int za = 0, zb = 0;
-
-    if (unlikely(rc == 31)) {
-        return;
-    }
-
-    vmask = tcg_const_i64(mask);
-
-    TCGV_UNUSED_I64(va);
-    if (ra == 31) {
-        if (inv_a) {
-            va = vmask;
-        } else {
-            za = 1;
-        }
-    } else {
-        va = tcg_temp_new_i64();
-        tcg_gen_mov_i64(va, cpu_fir[ra]);
-        if (inv_a) {
-            tcg_gen_andc_i64(va, vmask, va);
-        } else {
-            tcg_gen_and_i64(va, va, vmask);
-        }
-    }
+    TCGv vmask = tcg_const_i64(mask);
+    TCGv tmp = tcg_temp_new_i64();
 
-    TCGV_UNUSED_I64(vb);
-    if (rb == 31) {
-        zb = 1;
+    if (inv_a) {
+        tcg_gen_andc_i64(tmp, vmask, va);
     } else {
-        vb = tcg_temp_new_i64();
-        tcg_gen_andc_i64(vb, cpu_fir[rb], vmask);
+        tcg_gen_and_i64(tmp, va, vmask);
     }
 
-    switch (za << 1 | zb) {
-    case 0 | 0:
-        tcg_gen_or_i64(cpu_fir[rc], va, vb);
-        break;
-    case 0 | 1:
-        tcg_gen_mov_i64(cpu_fir[rc], va);
-        break;
-    case 2 | 0:
-        tcg_gen_mov_i64(cpu_fir[rc], vb);
-        break;
-    case 2 | 1:
-        tcg_gen_movi_i64(cpu_fir[rc], 0);
-        break;
-    }
+    tcg_gen_andc_i64(vc, vb, vmask);
+    tcg_gen_or_i64(vc, vc, tmp);
 
     tcg_temp_free(vmask);
-    if (ra != 31) {
-        tcg_temp_free(va);
-    }
-    if (rb != 31) {
-        tcg_temp_free(vb);
-    }
-}
-
-static inline void gen_fcpys(int ra, int rb, int rc)
-{
-    gen_cpys_internal(ra, rb, rc, 0, 0x8000000000000000ULL);
-}
-
-static inline void gen_fcpysn(int ra, int rb, int rc)
-{
-    gen_cpys_internal(ra, rb, rc, 1, 0x8000000000000000ULL);
+    tcg_temp_free(tmp);
 }
 
-static inline void gen_fcpyse(int ra, int rb, int rc)
-{
-    gen_cpys_internal(ra, rb, rc, 0, 0xFFF0000000000000ULL);
-}
-
-#define FARITH3(name)                                                   \
-    static inline void glue(gen_f, name)(int ra, int rb, int rc)        \
-    {                                                                   \
-        TCGv va, vb;                                                    \
-                                                                        \
-        if (unlikely(rc == 31)) {                                       \
-            return;                                                     \
-        }                                                               \
-        if (ra == 31) {                                                 \
-            va = tcg_const_i64(0);                                      \
-        } else {                                                        \
-            va = cpu_fir[ra];                                           \
-        }                                                               \
-        if (rb == 31) {                                                 \
-            vb = tcg_const_i64(0);                                      \
-        } else {                                                        \
-            vb = cpu_fir[rb];                                           \
-        }                                                               \
-                                                                        \
-        gen_helper_ ## name(cpu_fir[rc], cpu_env, va, vb);              \
-                                                                        \
-        if (ra == 31) {                                                 \
-            tcg_temp_free(va);                                          \
-        }                                                               \
-        if (rb == 31) {                                                 \
-            tcg_temp_free(vb);                                          \
-        }                                                               \
-    }
-
-/* ??? VAX instruction qualifiers ignored.  */
-FARITH3(addf)
-FARITH3(subf)
-FARITH3(mulf)
-FARITH3(divf)
-FARITH3(addg)
-FARITH3(subg)
-FARITH3(mulg)
-FARITH3(divg)
-FARITH3(cmpgeq)
-FARITH3(cmpglt)
-FARITH3(cmpgle)
-
 static void gen_ieee_arith3(DisasContext *ctx,
                             void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                             int ra, int rb, int rc, int fn11)
 {
-    TCGv va, vb;
-
-    /* ??? This is wrong: the instruction is not a nop, it still may
-       raise exceptions.  */
-    if (unlikely(rc == 31)) {
-        return;
-    }
+    TCGv va, vb, vc;
 
     gen_qual_roundmode(ctx, fn11);
     gen_qual_flushzero(ctx, fn11);
     gen_fp_exc_clear();
 
-    va = gen_ieee_input(ra, fn11, 0);
-    vb = gen_ieee_input(rb, fn11, 0);
-    helper(cpu_fir[rc], cpu_env, va, vb);
-    tcg_temp_free(va);
-    tcg_temp_free(vb);
+    va = gen_ieee_input(ctx, ra, fn11, 0);
+    vb = gen_ieee_input(ctx, rb, fn11, 0);
+    vc = dest_fpr(ctx, rc);
+    helper(vc, cpu_env, va, vb);
 
     gen_fp_exc_raise(rc, fn11);
 }
@@ -1081,21 +890,14 @@ static void gen_ieee_compare(DisasContext *ctx,
                              void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                              int ra, int rb, int rc, int fn11)
 {
-    TCGv va, vb;
-
-    /* ??? This is wrong: the instruction is not a nop, it still may
-       raise exceptions.  */
-    if (unlikely(rc == 31)) {
-        return;
-    }
+    TCGv va, vb, vc;
 
     gen_fp_exc_clear();
 
-    va = gen_ieee_input(ra, fn11, 1);
-    vb = gen_ieee_input(rb, fn11, 1);
-    helper(cpu_fir[rc], cpu_env, va, vb);
-    tcg_temp_free(va);
-    tcg_temp_free(vb);
+    va = gen_ieee_input(ctx, ra, fn11, 1);
+    vb = gen_ieee_input(ctx, rb, fn11, 1);
+    vc = dest_fpr(ctx, rc);
+    helper(vc, cpu_env, va, vb);
 
     gen_fp_exc_raise(rc, fn11);
 }
@@ -1117,8 +919,9 @@ static inline uint64_t zapnot_mask(uint8_t lit)
     int i;
 
     for (i = 0; i < 8; ++i) {
-        if ((lit >> i) & 1)
+        if ((lit >> i) & 1) {
             mask |= 0xffull << (i * 8);
+        }
     }
     return mask;
 }
@@ -1145,165 +948,111 @@ static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
         tcg_gen_mov_i64(dest, src);
         break;
     default:
-        tcg_gen_andi_i64 (dest, src, zapnot_mask (lit));
+        tcg_gen_andi_i64(dest, src, zapnot_mask(lit));
         break;
     }
 }
 
-static inline void gen_zapnot(int ra, int rb, int rc, int islit, uint8_t lit)
-{
-    if (unlikely(rc == 31))
-        return;
-    else if (unlikely(ra == 31))
-        tcg_gen_movi_i64(cpu_ir[rc], 0);
-    else if (islit)
-        gen_zapnoti(cpu_ir[rc], cpu_ir[ra], lit);
-    else
-        gen_helper_zapnot (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
-}
-
-static inline void gen_zap(int ra, int rb, int rc, int islit, uint8_t lit)
-{
-    if (unlikely(rc == 31))
-        return;
-    else if (unlikely(ra == 31))
-        tcg_gen_movi_i64(cpu_ir[rc], 0);
-    else if (islit)
-        gen_zapnoti(cpu_ir[rc], cpu_ir[ra], ~lit);
-    else
-        gen_helper_zap (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
-}
-
-
 /* EXTWH, EXTLH, EXTQH */
-static void gen_ext_h(int ra, int rb, int rc, int islit,
+static void gen_ext_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                       uint8_t lit, uint8_t byte_mask)
 {
-    if (unlikely(rc == 31))
-        return;
-    else if (unlikely(ra == 31))
-        tcg_gen_movi_i64(cpu_ir[rc], 0);
-    else {
-        if (islit) {
-            lit = (64 - (lit & 7) * 8) & 0x3f;
-            tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit);
-        } else {
-            TCGv tmp1 = tcg_temp_new();
-            tcg_gen_andi_i64(tmp1, cpu_ir[rb], 7);
-            tcg_gen_shli_i64(tmp1, tmp1, 3);
-            tcg_gen_neg_i64(tmp1, tmp1);
-            tcg_gen_andi_i64(tmp1, tmp1, 0x3f);
-            tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], tmp1);
-            tcg_temp_free(tmp1);
-        }
-        gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
+    if (islit) {
+        tcg_gen_shli_i64(vc, va, (64 - lit * 8) & 0x3f);
+    } else {
+        TCGv tmp = tcg_temp_new();
+        tcg_gen_shli_i64(tmp, load_gpr(ctx, rb), 3);
+        tcg_gen_neg_i64(tmp, tmp);
+        tcg_gen_andi_i64(tmp, tmp, 0x3f);
+        tcg_gen_shl_i64(vc, va, tmp);
+        tcg_temp_free(tmp);
     }
+    gen_zapnoti(vc, vc, byte_mask);
 }
 
 /* EXTBL, EXTWL, EXTLL, EXTQL */
-static void gen_ext_l(int ra, int rb, int rc, int islit,
+static void gen_ext_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                       uint8_t lit, uint8_t byte_mask)
 {
-    if (unlikely(rc == 31))
-        return;
-    else if (unlikely(ra == 31))
-        tcg_gen_movi_i64(cpu_ir[rc], 0);
-    else {
-        if (islit) {
-            tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], (lit & 7) * 8);
-        } else {
-            TCGv tmp = tcg_temp_new();
-            tcg_gen_andi_i64(tmp, cpu_ir[rb], 7);
-            tcg_gen_shli_i64(tmp, tmp, 3);
-            tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], tmp);
-            tcg_temp_free(tmp);
-        }
-        gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
+    if (islit) {
+        tcg_gen_shri_i64(vc, va, (lit & 7) * 8);
+    } else {
+        TCGv tmp = tcg_temp_new();
+        tcg_gen_andi_i64(tmp, load_gpr(ctx, rb), 7);
+        tcg_gen_shli_i64(tmp, tmp, 3);
+        tcg_gen_shr_i64(vc, va, tmp);
+        tcg_temp_free(tmp);
     }
+    gen_zapnoti(vc, vc, byte_mask);
 }
 
 /* INSWH, INSLH, INSQH */
-static void gen_ins_h(int ra, int rb, int rc, int islit,
+static void gen_ins_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                       uint8_t lit, uint8_t byte_mask)
 {
-    if (unlikely(rc == 31))
-        return;
-    else if (unlikely(ra == 31) || (islit && (lit & 7) == 0))
-        tcg_gen_movi_i64(cpu_ir[rc], 0);
-    else {
-        TCGv tmp = tcg_temp_new();
+    TCGv tmp = tcg_temp_new();
 
-        /* The instruction description has us left-shift the byte mask
-           and extract bits <15:8> and apply that zap at the end.  This
-           is equivalent to simply performing the zap first and shifting
-           afterward.  */
-        gen_zapnoti (tmp, cpu_ir[ra], byte_mask);
+    /* The instruction description has us left-shift the byte mask and extract
+       bits <15:8> and apply that zap at the end.  This is equivalent to simply
+       performing the zap first and shifting afterward.  */
+    gen_zapnoti(tmp, va, byte_mask);
 
-        if (islit) {
-            /* Note that we have handled the lit==0 case above.  */
-            tcg_gen_shri_i64 (cpu_ir[rc], tmp, 64 - (lit & 7) * 8);
+    if (islit) {
+        lit &= 7;
+        if (unlikely(lit == 0)) {
+            tcg_gen_movi_i64(vc, 0);
         } else {
-            TCGv shift = tcg_temp_new();
-
-            /* If (B & 7) == 0, we need to shift by 64 and leave a zero.
-               Do this portably by splitting the shift into two parts:
-               shift_count-1 and 1.  Arrange for the -1 by using
-               ones-complement instead of twos-complement in the negation:
-               ~((B & 7) * 8) & 63.  */
-
-            tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
-            tcg_gen_shli_i64(shift, shift, 3);
-            tcg_gen_not_i64(shift, shift);
-            tcg_gen_andi_i64(shift, shift, 0x3f);
-
-            tcg_gen_shr_i64(cpu_ir[rc], tmp, shift);
-            tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[rc], 1);
-            tcg_temp_free(shift);
+            tcg_gen_shri_i64(vc, tmp, 64 - lit * 8);
         }
-        tcg_temp_free(tmp);
+    } else {
+        TCGv shift = tcg_temp_new();
+
+        /* If (B & 7) == 0, we need to shift by 64 and leave a zero.  Do this
+           portably by splitting the shift into two parts: shift_count-1 and 1.
+           Arrange for the -1 by using ones-complement instead of
+           twos-complement in the negation: ~(B * 8) & 63.  */
+
+        tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
+        tcg_gen_not_i64(shift, shift);
+        tcg_gen_andi_i64(shift, shift, 0x3f);
+
+        tcg_gen_shr_i64(vc, tmp, shift);
+        tcg_gen_shri_i64(vc, vc, 1);
+        tcg_temp_free(shift);
     }
+    tcg_temp_free(tmp);
 }
 
 /* INSBL, INSWL, INSLL, INSQL */
-static void gen_ins_l(int ra, int rb, int rc, int islit,
+static void gen_ins_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                       uint8_t lit, uint8_t byte_mask)
 {
-    if (unlikely(rc == 31))
-        return;
-    else if (unlikely(ra == 31))
-        tcg_gen_movi_i64(cpu_ir[rc], 0);
-    else {
-        TCGv tmp = tcg_temp_new();
+    TCGv tmp = tcg_temp_new();
 
-        /* The instruction description has us left-shift the byte mask
-           the same number of byte slots as the data and apply the zap
-           at the end.  This is equivalent to simply performing the zap
-           first and shifting afterward.  */
-        gen_zapnoti (tmp, cpu_ir[ra], byte_mask);
+    /* The instruction description has us left-shift the byte mask
+       the same number of byte slots as the data and apply the zap
+       at the end.  This is equivalent to simply performing the zap
+       first and shifting afterward.  */
+    gen_zapnoti(tmp, va, byte_mask);
 
-        if (islit) {
-            tcg_gen_shli_i64(cpu_ir[rc], tmp, (lit & 7) * 8);
-        } else {
-            TCGv shift = tcg_temp_new();
-            tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
-            tcg_gen_shli_i64(shift, shift, 3);
-            tcg_gen_shl_i64(cpu_ir[rc], tmp, shift);
-            tcg_temp_free(shift);
-        }
-        tcg_temp_free(tmp);
+    if (islit) {
+        tcg_gen_shli_i64(vc, tmp, (lit & 7) * 8);
+    } else {
+        TCGv shift = tcg_temp_new();
+        tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
+        tcg_gen_shli_i64(shift, shift, 3);
+        tcg_gen_shl_i64(vc, tmp, shift);
+        tcg_temp_free(shift);
     }
+    tcg_temp_free(tmp);
 }
 
 /* MSKWH, MSKLH, MSKQH */
-static void gen_msk_h(int ra, int rb, int rc, int islit,
+static void gen_msk_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                       uint8_t lit, uint8_t byte_mask)
 {
-    if (unlikely(rc == 31))
-        return;
-    else if (unlikely(ra == 31))
-        tcg_gen_movi_i64(cpu_ir[rc], 0);
-    else if (islit) {
-        gen_zapnoti (cpu_ir[rc], cpu_ir[ra], ~((byte_mask << (lit & 7)) >> 8));
+    if (islit) {
+        gen_zapnoti(vc, va, ~((byte_mask << (lit & 7)) >> 8));
     } else {
         TCGv shift = tcg_temp_new();
         TCGv mask = tcg_temp_new();
@@ -1315,17 +1064,16 @@ static void gen_msk_h(int ra, int rb, int rc, int islit,
            shift of 64 bits in order to generate a zero.  This is done by
            splitting the shift into two parts, the variable shift - 1
            followed by a constant 1 shift.  The code we expand below is
-           equivalent to ~((B & 7) * 8) & 63.  */
+           equivalent to ~(B * 8) & 63.  */
 
-        tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
-        tcg_gen_shli_i64(shift, shift, 3);
+        tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
         tcg_gen_not_i64(shift, shift);
         tcg_gen_andi_i64(shift, shift, 0x3f);
         tcg_gen_movi_i64(mask, zapnot_mask (byte_mask));
         tcg_gen_shr_i64(mask, mask, shift);
         tcg_gen_shri_i64(mask, mask, 1);
 
-        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);
+        tcg_gen_andc_i64(vc, va, mask);
 
         tcg_temp_free(mask);
         tcg_temp_free(shift);
@@ -1333,150 +1081,27 @@ static void gen_msk_h(int ra, int rb, int rc, int islit,
 }
 
 /* MSKBL, MSKWL, MSKLL, MSKQL */
-static void gen_msk_l(int ra, int rb, int rc, int islit,
+static void gen_msk_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                       uint8_t lit, uint8_t byte_mask)
 {
-    if (unlikely(rc == 31))
-        return;
-    else if (unlikely(ra == 31))
-        tcg_gen_movi_i64(cpu_ir[rc], 0);
-    else if (islit) {
-        gen_zapnoti (cpu_ir[rc], cpu_ir[ra], ~(byte_mask << (lit & 7)));
+    if (islit) {
+        gen_zapnoti(vc, va, ~(byte_mask << (lit & 7)));
     } else {
         TCGv shift = tcg_temp_new();
         TCGv mask = tcg_temp_new();
 
-        tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
+        tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
         tcg_gen_shli_i64(shift, shift, 3);
-        tcg_gen_movi_i64(mask, zapnot_mask (byte_mask));
+        tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
         tcg_gen_shl_i64(mask, mask, shift);
 
-        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);
+        tcg_gen_andc_i64(vc, va, mask);
 
         tcg_temp_free(mask);
         tcg_temp_free(shift);
     }
 }
 
-/* Code to call arith3 helpers */
-#define ARITH3(name)                                                  \
-static inline void glue(gen_, name)(int ra, int rb, int rc, int islit,\
-                                    uint8_t lit)                      \
-{                                                                     \
-    if (unlikely(rc == 31))                                           \
-        return;                                                       \
-                                                                      \
-    if (ra != 31) {                                                   \
-        if (islit) {                                                  \
-            TCGv tmp = tcg_const_i64(lit);                            \
-            gen_helper_ ## name(cpu_ir[rc], cpu_ir[ra], tmp);         \
-            tcg_temp_free(tmp);                                       \
-        } else                                                        \
-            gen_helper_ ## name (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]); \
-    } else {                                                          \
-        TCGv tmp1 = tcg_const_i64(0);                                 \
-        if (islit) {                                                  \
-            TCGv tmp2 = tcg_const_i64(lit);                           \
-            gen_helper_ ## name (cpu_ir[rc], tmp1, tmp2);             \
-            tcg_temp_free(tmp2);                                      \
-        } else                                                        \
-            gen_helper_ ## name (cpu_ir[rc], tmp1, cpu_ir[rb]);       \
-        tcg_temp_free(tmp1);                                          \
-    }                                                                 \
-}
-ARITH3(cmpbge)
-ARITH3(minub8)
-ARITH3(minsb8)
-ARITH3(minuw4)
-ARITH3(minsw4)
-ARITH3(maxub8)
-ARITH3(maxsb8)
-ARITH3(maxuw4)
-ARITH3(maxsw4)
-ARITH3(perr)
-
-/* Code to call arith3 helpers */
-#define ARITH3_EX(name)                                                 \
-    static inline void glue(gen_, name)(int ra, int rb, int rc,         \
-                                        int islit, uint8_t lit)         \
-    {                                                                   \
-        if (unlikely(rc == 31)) {                                       \
-            return;                                                     \
-        }                                                               \
-        if (ra != 31) {                                                 \
-            if (islit) {                                                \
-                TCGv tmp = tcg_const_i64(lit);                          \
-                gen_helper_ ## name(cpu_ir[rc], cpu_env,                \
-                                    cpu_ir[ra], tmp);                   \
-                tcg_temp_free(tmp);                                     \
-            } else {                                                    \
-                gen_helper_ ## name(cpu_ir[rc], cpu_env,                \
-                                    cpu_ir[ra], cpu_ir[rb]);            \
-            }                                                           \
-        } else {                                                        \
-            TCGv tmp1 = tcg_const_i64(0);                               \
-            if (islit) {                                                \
-                TCGv tmp2 = tcg_const_i64(lit);                         \
-                gen_helper_ ## name(cpu_ir[rc], cpu_env, tmp1, tmp2);   \
-                tcg_temp_free(tmp2);                                    \
-            } else {                                                    \
-                gen_helper_ ## name(cpu_ir[rc], cpu_env, tmp1, cpu_ir[rb]); \
-            }                                                           \
-            tcg_temp_free(tmp1);                                        \
-        }                                                               \
-    }
-ARITH3_EX(addlv)
-ARITH3_EX(sublv)
-ARITH3_EX(addqv)
-ARITH3_EX(subqv)
-ARITH3_EX(mullv)
-ARITH3_EX(mulqv)
-
-#define MVIOP2(name)                                    \
-static inline void glue(gen_, name)(int rb, int rc)     \
-{                                                       \
-    if (unlikely(rc == 31))                             \
-        return;                                         \
-    if (unlikely(rb == 31))                             \
-        tcg_gen_movi_i64(cpu_ir[rc], 0);                \
-    else                                                \
-        gen_helper_ ## name (cpu_ir[rc], cpu_ir[rb]);   \
-}
-MVIOP2(pklb)
-MVIOP2(pkwb)
-MVIOP2(unpkbl)
-MVIOP2(unpkbw)
-
-static void gen_cmp(TCGCond cond, int ra, int rb, int rc,
-                    int islit, uint8_t lit)
-{
-    TCGv va, vb;
-
-    if (unlikely(rc == 31)) {
-        return;
-    }
-
-    if (ra == 31) {
-        va = tcg_const_i64(0);
-    } else {
-        va = cpu_ir[ra];
-    }
-    if (islit) {
-        vb = tcg_const_i64(lit);
-    } else {
-        vb = cpu_ir[rb];
-    }
-
-    tcg_gen_setcond_i64(cond, cpu_ir[rc], va, vb);
-
-    if (ra == 31) {
-        tcg_temp_free(va);
-    }
-    if (islit) {
-        tcg_temp_free(vb);
-    }
-}
-
 static void gen_rx(int ra, int set)
 {
     TCGv_i32 tmp;
@@ -1504,11 +1129,13 @@ static ExitStatus gen_call_pal(DisasContext *ctx, int palcode)
             break;
         case 0x9E:
             /* RDUNIQUE */
-            tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_unique);
+            tcg_gen_ld_i64(cpu_ir[IR_V0], cpu_env,
+                           offsetof(CPUAlphaState, unique));
             break;
         case 0x9F:
             /* WRUNIQUE */
-            tcg_gen_mov_i64(cpu_unique, cpu_ir[IR_A0]);
+            tcg_gen_st_i64(cpu_ir[IR_A0], cpu_env,
+                           offsetof(CPUAlphaState, unique));
             break;
         default:
             palcode &= 0xbf;
@@ -1531,15 +1158,18 @@ static ExitStatus gen_call_pal(DisasContext *ctx, int palcode)
             break;
         case 0x2D:
             /* WRVPTPTR */
-            tcg_gen_st_i64(cpu_ir[IR_A0], cpu_env, offsetof(CPUAlphaState, vptptr));
+            tcg_gen_st_i64(cpu_ir[IR_A0], cpu_env,
+                           offsetof(CPUAlphaState, vptptr));
             break;
         case 0x31:
             /* WRVAL */
-            tcg_gen_mov_i64(cpu_sysval, cpu_ir[IR_A0]);
+            tcg_gen_st_i64(cpu_ir[IR_A0], cpu_env,
+                           offsetof(CPUAlphaState, sysval));
             break;
         case 0x32:
             /* RDVAL */
-            tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_sysval);
+            tcg_gen_ld_i64(cpu_ir[IR_V0], cpu_env,
+                           offsetof(CPUAlphaState, sysval));
             break;
 
         case 0x35: {
@@ -1548,7 +1178,8 @@ static ExitStatus gen_call_pal(DisasContext *ctx, int palcode)
 
             /* Note that we already know we're in kernel mode, so we know
                that PS only contains the 3 IPL bits.  */
-            tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env, offsetof(CPUAlphaState, ps));
+            tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env,
+                             offsetof(CPUAlphaState, ps));
 
             /* But make sure and store only the 3 IPL bits from the user.  */
             tmp = tcg_temp_new();
@@ -1560,15 +1191,18 @@ static ExitStatus gen_call_pal(DisasContext *ctx, int palcode)
 
         case 0x36:
             /* RDPS */
-            tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env, offsetof(CPUAlphaState, ps));
+            tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env,
+                             offsetof(CPUAlphaState, ps));
             break;
         case 0x38:
             /* WRUSP */
-            tcg_gen_mov_i64(cpu_usp, cpu_ir[IR_A0]);
+            tcg_gen_st_i64(cpu_ir[IR_A0], cpu_env,
+                           offsetof(CPUAlphaState, usp));
             break;
         case 0x3A:
             /* RDUSP */
-            tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_usp);
+            tcg_gen_st_i64(cpu_ir[IR_V0], cpu_env,
+            tcg_gen_ld_i64(cpu_ir[IR_V0], cpu_env,
             break;
         case 0x3C:
             /* WHAMI */
@@ -1648,16 +1282,10 @@ static int cpu_pr_data(int pr)
     return 0;
 }
 
-static ExitStatus gen_mfpr(int ra, int regno)
+static ExitStatus gen_mfpr(TCGv va, int regno)
 {
     int data = cpu_pr_data(regno);
 
-    /* In our emulated PALcode, these processor registers have no
-       side effects from reading.  */
-    if (ra == 31) {
-        return NO_EXIT;
-    }
-
     /* Special help for VMTIME and WALLTIME.  */
     if (regno == 250 || regno == 249) {
        void (*helper)(TCGv) = gen_helper_get_walltime;
@@ -1666,11 +1294,11 @@ static ExitStatus gen_mfpr(int ra, int regno)
        }
         if (use_icount) {
             gen_io_start();
-            helper(cpu_ir[ra]);
+            helper(va);
             gen_io_end();
             return EXIT_PC_STALE;
         } else {
-            helper(cpu_ir[ra]);
+            helper(va);
             return NO_EXIT;
         }
     }
@@ -1678,28 +1306,22 @@ static ExitStatus gen_mfpr(int ra, int regno)
     /* The basic registers are data only, and unknown registers
        are read-zero, write-ignore.  */
     if (data == 0) {
-        tcg_gen_movi_i64(cpu_ir[ra], 0);
+        tcg_gen_movi_i64(va, 0);
     } else if (data & PR_BYTE) {
-        tcg_gen_ld8u_i64(cpu_ir[ra], cpu_env, data & ~PR_BYTE);
+        tcg_gen_ld8u_i64(va, cpu_env, data & ~PR_BYTE);
     } else if (data & PR_LONG) {
-        tcg_gen_ld32s_i64(cpu_ir[ra], cpu_env, data & ~PR_LONG);
+        tcg_gen_ld32s_i64(va, cpu_env, data & ~PR_LONG);
     } else {
-        tcg_gen_ld_i64(cpu_ir[ra], cpu_env, data);
+        tcg_gen_ld_i64(va, cpu_env, data);
     }
     return NO_EXIT;
 }
 
-static ExitStatus gen_mtpr(DisasContext *ctx, int rb, int regno)
+static ExitStatus gen_mtpr(DisasContext *ctx, TCGv vb, int regno)
 {
     TCGv tmp;
     int data;
 
-    if (rb == 31) {
-        tmp = tcg_const_i64(0);
-    } else {
-        tmp = cpu_ir[rb];
-    }
-
     switch (regno) {
     case 255:
         /* TBIA */
@@ -1708,7 +1330,7 @@ static ExitStatus gen_mtpr(DisasContext *ctx, int rb, int regno)
 
     case 254:
         /* TBIS */
-        gen_helper_tbis(cpu_env, tmp);
+        gen_helper_tbis(cpu_env, vb);
         break;
 
     case 253:
@@ -1720,17 +1342,17 @@ static ExitStatus gen_mtpr(DisasContext *ctx, int rb, int regno)
 
     case 252:
         /* HALT */
-        gen_helper_halt(tmp);
+        gen_helper_halt(vb);
         return EXIT_PC_STALE;
 
     case 251:
         /* ALARM */
-        gen_helper_set_alarm(cpu_env, tmp);
+        gen_helper_set_alarm(cpu_env, vb);
         break;
 
     case 7:
         /* PALBR */
-        tcg_gen_st_i64(tmp, cpu_env, offsetof(CPUAlphaState, palbr));
+        tcg_gen_st_i64(vb, cpu_env, offsetof(CPUAlphaState, palbr));
         /* Changing the PAL base register implies un-chaining all of the TBs
            that ended with a CALL_PAL.  Since the base register usually only
            changes during boot, flushing everything works well.  */
@@ -1743,64 +1365,70 @@ static ExitStatus gen_mtpr(DisasContext *ctx, int rb, int regno)
         data = cpu_pr_data(regno);
         if (data != 0) {
             if (data & PR_BYTE) {
-                tcg_gen_st8_i64(tmp, cpu_env, data & ~PR_BYTE);
+                tcg_gen_st8_i64(vb, cpu_env, data & ~PR_BYTE);
             } else if (data & PR_LONG) {
-                tcg_gen_st32_i64(tmp, cpu_env, data & ~PR_LONG);
+                tcg_gen_st32_i64(vb, cpu_env, data & ~PR_LONG);
             } else {
-                tcg_gen_st_i64(tmp, cpu_env, data);
+                tcg_gen_st_i64(vb, cpu_env, data);
             }
         }
         break;
     }
 
-    if (rb == 31) {
-        tcg_temp_free(tmp);
-    }
-
     return NO_EXIT;
 }
 #endif /* !USER_ONLY*/
 
+#define REQUIRE_TB_FLAG(FLAG)                   \
+    do {                                        \
+        if ((ctx->tb->flags & (FLAG)) == 0) {   \
+            goto invalid_opc;                   \
+        }                                       \
+    } while (0)
+
+#define REQUIRE_REG_31(WHICH)                   \
+    do {                                        \
+        if (WHICH != 31) {                      \
+            goto invalid_opc;                   \
+        }                                       \
+    } while (0)
+
 static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
 {
-    uint32_t palcode;
-    int32_t disp21, disp16;
-#ifndef CONFIG_USER_ONLY
-    int32_t disp12;
-#endif
+    int32_t disp21, disp16, disp12 __attribute__((unused));
     uint16_t fn11;
-    uint8_t opc, ra, rb, rc, fpfn, fn7, islit, real_islit;
-    uint8_t lit;
+    uint8_t opc, ra, rb, rc, fpfn, fn7, lit;
+    bool islit;
+    TCGv va, vb, vc, tmp;
+    TCGv_i32 t32;
     ExitStatus ret;
 
     /* Decode all instruction fields */
-    opc = insn >> 26;
-    ra = (insn >> 21) & 0x1F;
-    rb = (insn >> 16) & 0x1F;
-    rc = insn & 0x1F;
-    real_islit = islit = (insn >> 12) & 1;
+    opc = extract32(insn, 26, 6);
+    ra = extract32(insn, 21, 5);
+    rb = extract32(insn, 16, 5);
+    rc = extract32(insn, 0, 5);
+    islit = extract32(insn, 12, 1);
+    lit = extract32(insn, 13, 8);
+
+    disp21 = sextract32(insn, 0, 21);
+    disp16 = sextract32(insn, 0, 16);
+    disp12 = sextract32(insn, 0, 12);
+
+    fn11 = extract32(insn, 5, 11);
+    fpfn = extract32(insn, 5, 6);
+    fn7 = extract32(insn, 5, 7);
+
     if (rb == 31 && !islit) {
-        islit = 1;
+        islit = true;
         lit = 0;
-    } else
-        lit = (insn >> 13) & 0xFF;
-    palcode = insn & 0x03FFFFFF;
-    disp21 = ((int32_t)((insn & 0x001FFFFF) << 11)) >> 11;
-    disp16 = (int16_t)(insn & 0x0000FFFF);
-#ifndef CONFIG_USER_ONLY
-    disp12 = (int32_t)((insn & 0x00000FFF) << 20) >> 20;
-#endif
-    fn11 = (insn >> 5) & 0x000007FF;
-    fpfn = fn11 & 0x3F;
-    fn7 = (insn >> 5) & 0x0000007F;
-    LOG_DISAS("opc %02x ra %2d rb %2d rc %2d disp16 %6d\n",
-              opc, ra, rb, rc, disp16);
+    }
 
     ret = NO_EXIT;
     switch (opc) {
     case 0x00:
         /* CALL_PAL */
-        ret = gen_call_pal(ctx, palcode);
+        ret = gen_call_pal(ctx, insn & 0x03ffffff);
         break;
     case 0x01:
         /* OPC01 */
@@ -1823,839 +1451,627 @@ static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
     case 0x07:
         /* OPC07 */
         goto invalid_opc;
-    case 0x08:
-        /* LDA */
-        if (likely(ra != 31)) {
-            if (rb != 31)
-                tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16);
-            else
-                tcg_gen_movi_i64(cpu_ir[ra], disp16);
-        }
-        break;
+
     case 0x09:
         /* LDAH */
-        if (likely(ra != 31)) {
-            if (rb != 31)
-                tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16 << 16);
-            else
-                tcg_gen_movi_i64(cpu_ir[ra], disp16 << 16);
+        disp16 = (uint32_t)disp16 << 16;
+        /* fall through */
+    case 0x08:
+        /* LDA */
+        va = dest_gpr(ctx, ra);
+        /* It's worth special-casing immediate loads.  */
+        if (rb == 31) {
+            tcg_gen_movi_i64(va, disp16);
+        } else {
+            tcg_gen_addi_i64(va, load_gpr(ctx, rb), disp16);
         }
         break;
+
     case 0x0A:
         /* LDBU */
-        if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) {
-            gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);
-            break;
-        }
-        goto invalid_opc;
+        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
+        gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);
+        break;
     case 0x0B:
         /* LDQ_U */
         gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1);
         break;
     case 0x0C:
         /* LDWU */
-        if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) {
-            gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);
-            break;
-        }
-        goto invalid_opc;
+        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
+        gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);
+        break;
     case 0x0D:
         /* STW */
+        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
         gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0);
         break;
     case 0x0E:
         /* STB */
+        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
         gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0);
         break;
     case 0x0F:
         /* STQ_U */
         gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1);
         break;
+
     case 0x10:
+        vc = dest_gpr(ctx, rc);
+        vb = load_gpr_lit(ctx, rb, lit, islit);
+
+        if (ra == 31) {
+            if (fn7 == 0x00) {
+                /* Special case ADDL as SEXTL.  */
+                tcg_gen_ext32s_i64(vc, vb);
+                break;
+            }
+            if (fn7 == 0x29) {
+                /* Special case SUBQ as NEGQ.  */
+                tcg_gen_neg_i64(vc, vb);
+                break;
+            }
+        }
+
+        va = load_gpr(ctx, ra);
         switch (fn7) {
         case 0x00:
             /* ADDL */
-            if (likely(rc != 31)) {
-                if (ra != 31) {
-                    if (islit) {
-                        tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
-                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
-                    } else {
-                        tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
-                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
-                    }
-                } else {
-                    if (islit)
-                        tcg_gen_movi_i64(cpu_ir[rc], lit);
-                    else
-                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
-                }
-            }
+            tcg_gen_add_i64(vc, va, vb);
+            tcg_gen_ext32s_i64(vc, vc);
             break;
         case 0x02:
             /* S4ADDL */
-            if (likely(rc != 31)) {
-                if (ra != 31) {
-                    TCGv tmp = tcg_temp_new();
-                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
-                    if (islit)
-                        tcg_gen_addi_i64(tmp, tmp, lit);
-                    else
-                        tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
-                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
-                    tcg_temp_free(tmp);
-                } else {
-                    if (islit)
-                        tcg_gen_movi_i64(cpu_ir[rc], lit);
-                    else
-                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
-                }
-            }
+            tmp = tcg_temp_new();
+            tcg_gen_shli_i64(tmp, va, 2);
+            tcg_gen_add_i64(tmp, tmp, vb);
+            tcg_gen_ext32s_i64(vc, tmp);
+            tcg_temp_free(tmp);
             break;
         case 0x09:
             /* SUBL */
-            if (likely(rc != 31)) {
-                if (ra != 31) {
-                    if (islit)
-                        tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
-                    else
-                        tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
-                    tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
-                } else {
-                    if (islit)
-                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
-                    else {
-                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
-                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
-                    }
-                }
-            }
+            tcg_gen_sub_i64(vc, va, vb);
+            tcg_gen_ext32s_i64(vc, vc);
             break;
         case 0x0B:
             /* S4SUBL */
-            if (likely(rc != 31)) {
-                if (ra != 31) {
-                    TCGv tmp = tcg_temp_new();
-                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
-                    if (islit)
-                        tcg_gen_subi_i64(tmp, tmp, lit);
-                    else
-                        tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
-                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
-                    tcg_temp_free(tmp);
-                } else {
-                    if (islit)
-                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
-                    else {
-                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
-                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
-                    }
-                }
-            }
+            tmp = tcg_temp_new();
+            tcg_gen_shli_i64(tmp, va, 2);
+            tcg_gen_sub_i64(tmp, tmp, vb);
+            tcg_gen_ext32s_i64(vc, tmp);
+            tcg_temp_free(tmp);
             break;
         case 0x0F:
             /* CMPBGE */
-            gen_cmpbge(ra, rb, rc, islit, lit);
+            gen_helper_cmpbge(vc, va, vb);
             break;
         case 0x12:
             /* S8ADDL */
-            if (likely(rc != 31)) {
-                if (ra != 31) {
-                    TCGv tmp = tcg_temp_new();
-                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
-                    if (islit)
-                        tcg_gen_addi_i64(tmp, tmp, lit);
-                    else
-                        tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
-                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
-                    tcg_temp_free(tmp);
-                } else {
-                    if (islit)
-                        tcg_gen_movi_i64(cpu_ir[rc], lit);
-                    else
-                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
-                }
-            }
+            tmp = tcg_temp_new();
+            tcg_gen_shli_i64(tmp, va, 3);
+            tcg_gen_add_i64(tmp, tmp, vb);
+            tcg_gen_ext32s_i64(vc, tmp);
+            tcg_temp_free(tmp);
             break;
         case 0x1B:
             /* S8SUBL */
-            if (likely(rc != 31)) {
-                if (ra != 31) {
-                    TCGv tmp = tcg_temp_new();
-                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
-                    if (islit)
-                        tcg_gen_subi_i64(tmp, tmp, lit);
-                    else
-                       tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
-                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
-                    tcg_temp_free(tmp);
-                } else {
-                    if (islit)
-                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
-                    else {
-                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
-                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
-                    }
-                }
-            }
+            tmp = tcg_temp_new();
+            tcg_gen_shli_i64(tmp, va, 3);
+            tcg_gen_sub_i64(tmp, tmp, vb);
+            tcg_gen_ext32s_i64(vc, tmp);
+            tcg_temp_free(tmp);
             break;
         case 0x1D:
             /* CMPULT */
-            gen_cmp(TCG_COND_LTU, ra, rb, rc, islit, lit);
+            tcg_gen_setcond_i64(TCG_COND_LTU, vc, va, vb);
             break;
         case 0x20:
             /* ADDQ */
-            if (likely(rc != 31)) {
-                if (ra != 31) {
-                    if (islit)
-                        tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
-                    else
-                        tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
-                } else {
-                    if (islit)
-                        tcg_gen_movi_i64(cpu_ir[rc], lit);
-                    else
-                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
-                }
-            }
+            tcg_gen_add_i64(vc, va, vb);
             break;
         case 0x22:
             /* S4ADDQ */
-            if (likely(rc != 31)) {
-                if (ra != 31) {
-                    TCGv tmp = tcg_temp_new();
-                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
-                    if (islit)
-                        tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
-                    else
-                        tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
-                    tcg_temp_free(tmp);
-                } else {
-                    if (islit)
-                        tcg_gen_movi_i64(cpu_ir[rc], lit);
-                    else
-                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
-                }
-            }
+            tmp = tcg_temp_new();
+            tcg_gen_shli_i64(tmp, va, 2);
+            tcg_gen_add_i64(vc, tmp, vb);
+            tcg_temp_free(tmp);
             break;
         case 0x29:
             /* SUBQ */
-            if (likely(rc != 31)) {
-                if (ra != 31) {
-                    if (islit)
-                        tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
-                    else
-                        tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
-                } else {
-                    if (islit)
-                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
-                    else
-                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
-                }
-            }
+            tcg_gen_sub_i64(vc, va, vb);
             break;
         case 0x2B:
             /* S4SUBQ */
-            if (likely(rc != 31)) {
-                if (ra != 31) {
-                    TCGv tmp = tcg_temp_new();
-                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
-                    if (islit)
-                        tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
-                    else
-                        tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
-                    tcg_temp_free(tmp);
-                } else {
-                    if (islit)
-                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
-                    else
-                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
-                }
-            }
+            tmp = tcg_temp_new();
+            tcg_gen_shli_i64(tmp, va, 2);
+            tcg_gen_sub_i64(vc, tmp, vb);
+            tcg_temp_free(tmp);
             break;
         case 0x2D:
             /* CMPEQ */
-            gen_cmp(TCG_COND_EQ, ra, rb, rc, islit, lit);
+            tcg_gen_setcond_i64(TCG_COND_EQ, vc, va, vb);
             break;
         case 0x32:
             /* S8ADDQ */
-            if (likely(rc != 31)) {
-                if (ra != 31) {
-                    TCGv tmp = tcg_temp_new();
-                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
-                    if (islit)
-                        tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
-                    else
-                        tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
-                    tcg_temp_free(tmp);
-                } else {
-                    if (islit)
-                        tcg_gen_movi_i64(cpu_ir[rc], lit);
-                    else
-                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
-                }
-            }
+            tmp = tcg_temp_new();
+            tcg_gen_shli_i64(tmp, va, 3);
+            tcg_gen_add_i64(vc, tmp, vb);
+            tcg_temp_free(tmp);
             break;
         case 0x3B:
             /* S8SUBQ */
-            if (likely(rc != 31)) {
-                if (ra != 31) {
-                    TCGv tmp = tcg_temp_new();
-                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
-                    if (islit)
-                        tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
-                    else
-                        tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
-                    tcg_temp_free(tmp);
-                } else {
-                    if (islit)
-                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
-                    else
-                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
-                }
-            }
+            tmp = tcg_temp_new();
+            tcg_gen_shli_i64(tmp, va, 3);
+            tcg_gen_sub_i64(vc, tmp, vb);
+            tcg_temp_free(tmp);
             break;
         case 0x3D:
             /* CMPULE */
-            gen_cmp(TCG_COND_LEU, ra, rb, rc, islit, lit);
+            tcg_gen_setcond_i64(TCG_COND_LEU, vc, va, vb);
             break;
         case 0x40:
             /* ADDL/V */
-            gen_addlv(ra, rb, rc, islit, lit);
+            gen_helper_addlv(vc, cpu_env, va, vb);
             break;
         case 0x49:
             /* SUBL/V */
-            gen_sublv(ra, rb, rc, islit, lit);
+            gen_helper_sublv(vc, cpu_env, va, vb);
             break;
         case 0x4D:
             /* CMPLT */
-            gen_cmp(TCG_COND_LT, ra, rb, rc, islit, lit);
+            tcg_gen_setcond_i64(TCG_COND_LT, vc, va, vb);
             break;
         case 0x60:
             /* ADDQ/V */
-            gen_addqv(ra, rb, rc, islit, lit);
+            gen_helper_addqv(vc, cpu_env, va, vb);
             break;
         case 0x69:
             /* SUBQ/V */
-            gen_subqv(ra, rb, rc, islit, lit);
+            gen_helper_subqv(vc, cpu_env, va, vb);
             break;
         case 0x6D:
             /* CMPLE */
-            gen_cmp(TCG_COND_LE, ra, rb, rc, islit, lit);
+            tcg_gen_setcond_i64(TCG_COND_LE, vc, va, vb);
             break;
         default:
             goto invalid_opc;
         }
         break;
+
     case 0x11:
+        if (fn7 == 0x20) {
+            if (rc == 31) {
+                /* Special case BIS as NOP.  */
+                break;
+            }
+            if (ra == 31) {
+                /* Special case BIS as MOV.  */
+                vc = dest_gpr(ctx, rc);
+                if (islit) {
+                    tcg_gen_movi_i64(vc, lit);
+                } else {
+                    tcg_gen_mov_i64(vc, load_gpr(ctx, rb));
+                }
+                break;
+            }
+        }
+
+        vc = dest_gpr(ctx, rc);
+        vb = load_gpr_lit(ctx, rb, lit, islit);
+
+        if (fn7 == 0x28 && ra == 31) {
+            /* Special case ORNOT as NOT.  */
+            tcg_gen_not_i64(vc, vb);
+            break;
+        }
+
+        va = load_gpr(ctx, ra);
         switch (fn7) {
         case 0x00:
             /* AND */
-            if (likely(rc != 31)) {
-                if (ra == 31)
-                    tcg_gen_movi_i64(cpu_ir[rc], 0);
-                else if (islit)
-                    tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], lit);
-                else
-                    tcg_gen_and_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
-            }
+            tcg_gen_and_i64(vc, va, vb);
             break;
         case 0x08:
             /* BIC */
-            if (likely(rc != 31)) {
-                if (ra != 31) {
-                    if (islit)
-                        tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
-                    else
-                        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
-                } else
-                    tcg_gen_movi_i64(cpu_ir[rc], 0);
-            }
+            tcg_gen_andc_i64(vc, va, vb);
             break;
         case 0x14:
             /* CMOVLBS */
-            gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 1);
+            tmp = tcg_temp_new();
+            tcg_gen_andi_i64(tmp, va, 1);
+            tcg_gen_movcond_i64(TCG_COND_NE, vc, tmp, load_zero(ctx),
+                                vb, load_gpr(ctx, rc));
+            tcg_temp_free(tmp);
             break;
         case 0x16:
             /* CMOVLBC */
-            gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 1);
+            tmp = tcg_temp_new();
+            tcg_gen_andi_i64(tmp, va, 1);
+            tcg_gen_movcond_i64(TCG_COND_EQ, vc, tmp, load_zero(ctx),
+                                vb, load_gpr(ctx, rc));
+            tcg_temp_free(tmp);
             break;
         case 0x20:
             /* BIS */
-            if (likely(rc != 31)) {
-                if (ra != 31) {
-                    if (islit)
-                        tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], lit);
-                    else
-                        tcg_gen_or_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
-                } else {
-                    if (islit)
-                        tcg_gen_movi_i64(cpu_ir[rc], lit);
-                    else
-                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
-                }
-            }
+            tcg_gen_or_i64(vc, va, vb);
             break;
         case 0x24:
             /* CMOVEQ */
-            gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 0);
+            tcg_gen_movcond_i64(TCG_COND_EQ, vc, va, load_zero(ctx),
+                                vb, load_gpr(ctx, rc));
             break;
         case 0x26:
             /* CMOVNE */
-            gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 0);
+            tcg_gen_movcond_i64(TCG_COND_NE, vc, va, load_zero(ctx),
+                                vb, load_gpr(ctx, rc));
             break;
         case 0x28:
             /* ORNOT */
-            if (likely(rc != 31)) {
-                if (ra != 31) {
-                    if (islit)
-                        tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
-                    else
-                        tcg_gen_orc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
-                } else {
-                    if (islit)
-                        tcg_gen_movi_i64(cpu_ir[rc], ~lit);
-                    else
-                        tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
-                }
-            }
+            tcg_gen_orc_i64(vc, va, vb);
             break;
         case 0x40:
             /* XOR */
-            if (likely(rc != 31)) {
-                if (ra != 31) {
-                    if (islit)
-                        tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], lit);
-                    else
-                        tcg_gen_xor_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
-                } else {
-                    if (islit)
-                        tcg_gen_movi_i64(cpu_ir[rc], lit);
-                    else
-                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
-                }
-            }
+            tcg_gen_xor_i64(vc, va, vb);
             break;
         case 0x44:
             /* CMOVLT */
-            gen_cmov(TCG_COND_LT, ra, rb, rc, islit, lit, 0);
+            tcg_gen_movcond_i64(TCG_COND_LT, vc, va, load_zero(ctx),
+                                vb, load_gpr(ctx, rc));
             break;
         case 0x46:
             /* CMOVGE */
-            gen_cmov(TCG_COND_GE, ra, rb, rc, islit, lit, 0);
+            tcg_gen_movcond_i64(TCG_COND_GE, vc, va, load_zero(ctx),
+                                vb, load_gpr(ctx, rc));
             break;
         case 0x48:
             /* EQV */
-            if (likely(rc != 31)) {
-                if (ra != 31) {
-                    if (islit)
-                        tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
-                    else
-                        tcg_gen_eqv_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
-                } else {
-                    if (islit)
-                        tcg_gen_movi_i64(cpu_ir[rc], ~lit);
-                    else
-                        tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
-                }
-            }
+            tcg_gen_eqv_i64(vc, va, vb);
             break;
         case 0x61:
             /* AMASK */
-            if (likely(rc != 31)) {
+            REQUIRE_REG_31(ra);
+            {
                 uint64_t amask = ctx->tb->flags >> TB_FLAGS_AMASK_SHIFT;
-
-                if (islit) {
-                    tcg_gen_movi_i64(cpu_ir[rc], lit & ~amask);
-                } else {
-                    tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[rb], ~amask);
-                }
+                tcg_gen_andi_i64(vc, vb, ~amask);
             }
             break;
         case 0x64:
             /* CMOVLE */
-            gen_cmov(TCG_COND_LE, ra, rb, rc, islit, lit, 0);
+            tcg_gen_movcond_i64(TCG_COND_LE, vc, va, load_zero(ctx),
+                                vb, load_gpr(ctx, rc));
             break;
         case 0x66:
             /* CMOVGT */
-            gen_cmov(TCG_COND_GT, ra, rb, rc, islit, lit, 0);
+            tcg_gen_movcond_i64(TCG_COND_GT, vc, va, load_zero(ctx),
+                                vb, load_gpr(ctx, rc));
             break;
         case 0x6C:
             /* IMPLVER */
-            if (rc != 31) {
-                tcg_gen_movi_i64(cpu_ir[rc], ctx->implver);
-            }
+            REQUIRE_REG_31(ra);
+            tcg_gen_movi_i64(vc, ctx->implver);
             break;
         default:
             goto invalid_opc;
         }
         break;
+
     case 0x12:
+        vc = dest_gpr(ctx, rc);
+        va = load_gpr(ctx, ra);
         switch (fn7) {
         case 0x02:
             /* MSKBL */
-            gen_msk_l(ra, rb, rc, islit, lit, 0x01);
+            gen_msk_l(ctx, vc, va, rb, islit, lit, 0x01);
             break;
         case 0x06:
             /* EXTBL */
-            gen_ext_l(ra, rb, rc, islit, lit, 0x01);
+            gen_ext_l(ctx, vc, va, rb, islit, lit, 0x01);
             break;
         case 0x0B:
             /* INSBL */
-            gen_ins_l(ra, rb, rc, islit, lit, 0x01);
+            gen_ins_l(ctx, vc, va, rb, islit, lit, 0x01);
             break;
         case 0x12:
             /* MSKWL */
-            gen_msk_l(ra, rb, rc, islit, lit, 0x03);
+            gen_msk_l(ctx, vc, va, rb, islit, lit, 0x03);
             break;
         case 0x16:
             /* EXTWL */
-            gen_ext_l(ra, rb, rc, islit, lit, 0x03);
+            gen_ext_l(ctx, vc, va, rb, islit, lit, 0x03);
             break;
         case 0x1B:
             /* INSWL */
-            gen_ins_l(ra, rb, rc, islit, lit, 0x03);
+            gen_ins_l(ctx, vc, va, rb, islit, lit, 0x03);
             break;
         case 0x22:
             /* MSKLL */
-            gen_msk_l(ra, rb, rc, islit, lit, 0x0f);
+            gen_msk_l(ctx, vc, va, rb, islit, lit, 0x0f);
             break;
         case 0x26:
             /* EXTLL */
-            gen_ext_l(ra, rb, rc, islit, lit, 0x0f);
+            gen_ext_l(ctx, vc, va, rb, islit, lit, 0x0f);
             break;
         case 0x2B:
             /* INSLL */
-            gen_ins_l(ra, rb, rc, islit, lit, 0x0f);
+            gen_ins_l(ctx, vc, va, rb, islit, lit, 0x0f);
             break;
         case 0x30:
             /* ZAP */
-            gen_zap(ra, rb, rc, islit, lit);
+            if (islit) {
+                gen_zapnoti(vc, va, ~lit);
+            } else {
+                gen_helper_zap(vc, va, load_gpr(ctx, rb));
+            }
             break;
         case 0x31:
             /* ZAPNOT */
-            gen_zapnot(ra, rb, rc, islit, lit);
+            if (islit) {
+                gen_zapnoti(vc, va, lit);
+            } else {
+                gen_helper_zapnot(vc, va, load_gpr(ctx, rb));
+            }
             break;
         case 0x32:
             /* MSKQL */
-            gen_msk_l(ra, rb, rc, islit, lit, 0xff);
+            gen_msk_l(ctx, vc, va, rb, islit, lit, 0xff);
             break;
         case 0x34:
             /* SRL */
-            if (likely(rc != 31)) {
-                if (ra != 31) {
-                    if (islit)
-                        tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
-                    else {
-                        TCGv shift = tcg_temp_new();
-                        tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
-                        tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], shift);
-                        tcg_temp_free(shift);
-                    }
-                } else
-                    tcg_gen_movi_i64(cpu_ir[rc], 0);
+            if (islit) {
+                tcg_gen_shri_i64(vc, va, lit & 0x3f);
+            } else {
+                tmp = tcg_temp_new();
+                vb = load_gpr(ctx, rb);
+                tcg_gen_andi_i64(tmp, vb, 0x3f);
+                tcg_gen_shr_i64(vc, va, tmp);
+                tcg_temp_free(tmp);
             }
             break;
         case 0x36:
             /* EXTQL */
-            gen_ext_l(ra, rb, rc, islit, lit, 0xff);
+            gen_ext_l(ctx, vc, va, rb, islit, lit, 0xff);
             break;
         case 0x39:
             /* SLL */
-            if (likely(rc != 31)) {
-                if (ra != 31) {
-                    if (islit)
-                        tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
-                    else {
-                        TCGv shift = tcg_temp_new();
-                        tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
-                        tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], shift);
-                        tcg_temp_free(shift);
-                    }
-                } else
-                    tcg_gen_movi_i64(cpu_ir[rc], 0);
+            if (islit) {
+                tcg_gen_shli_i64(vc, va, lit & 0x3f);
+            } else {
+                tmp = tcg_temp_new();
+                vb = load_gpr(ctx, rb);
+                tcg_gen_andi_i64(tmp, vb, 0x3f);
+                tcg_gen_shl_i64(vc, va, tmp);
+                tcg_temp_free(tmp);
             }
             break;
         case 0x3B:
             /* INSQL */
-            gen_ins_l(ra, rb, rc, islit, lit, 0xff);
+            gen_ins_l(ctx, vc, va, rb, islit, lit, 0xff);
             break;
         case 0x3C:
             /* SRA */
-            if (likely(rc != 31)) {
-                if (ra != 31) {
-                    if (islit)
-                        tcg_gen_sari_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
-                    else {
-                        TCGv shift = tcg_temp_new();
-                        tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
-                        tcg_gen_sar_i64(cpu_ir[rc], cpu_ir[ra], shift);
-                        tcg_temp_free(shift);
-                    }
-                } else
-                    tcg_gen_movi_i64(cpu_ir[rc], 0);
+            if (islit) {
+                tcg_gen_sari_i64(vc, va, lit & 0x3f);
+            } else {
+                tmp = tcg_temp_new();
+                vb = load_gpr(ctx, rb);
+                tcg_gen_andi_i64(tmp, vb, 0x3f);
+                tcg_gen_sar_i64(vc, va, tmp);
+                tcg_temp_free(tmp);
             }
             break;
         case 0x52:
             /* MSKWH */
-            gen_msk_h(ra, rb, rc, islit, lit, 0x03);
+            gen_msk_h(ctx, vc, va, rb, islit, lit, 0x03);
             break;
         case 0x57:
             /* INSWH */
-            gen_ins_h(ra, rb, rc, islit, lit, 0x03);
+            gen_ins_h(ctx, vc, va, rb, islit, lit, 0x03);
             break;
         case 0x5A:
             /* EXTWH */
-            gen_ext_h(ra, rb, rc, islit, lit, 0x03);
+            gen_ext_h(ctx, vc, va, rb, islit, lit, 0x03);
             break;
         case 0x62:
             /* MSKLH */
-            gen_msk_h(ra, rb, rc, islit, lit, 0x0f);
+            gen_msk_h(ctx, vc, va, rb, islit, lit, 0x0f);
             break;
         case 0x67:
             /* INSLH */
-            gen_ins_h(ra, rb, rc, islit, lit, 0x0f);
+            gen_ins_h(ctx, vc, va, rb, islit, lit, 0x0f);
             break;
         case 0x6A:
             /* EXTLH */
-            gen_ext_h(ra, rb, rc, islit, lit, 0x0f);
+            gen_ext_h(ctx, vc, va, rb, islit, lit, 0x0f);
             break;
         case 0x72:
             /* MSKQH */
-            gen_msk_h(ra, rb, rc, islit, lit, 0xff);
+            gen_msk_h(ctx, vc, va, rb, islit, lit, 0xff);
             break;
         case 0x77:
             /* INSQH */
-            gen_ins_h(ra, rb, rc, islit, lit, 0xff);
+            gen_ins_h(ctx, vc, va, rb, islit, lit, 0xff);
             break;
         case 0x7A:
             /* EXTQH */
-            gen_ext_h(ra, rb, rc, islit, lit, 0xff);
+            gen_ext_h(ctx, vc, va, rb, islit, lit, 0xff);
             break;
         default:
             goto invalid_opc;
         }
         break;
+
     case 0x13:
+        vc = dest_gpr(ctx, rc);
+        vb = load_gpr_lit(ctx, rb, lit, islit);
+        va = load_gpr(ctx, ra);
         switch (fn7) {
         case 0x00:
             /* MULL */
-            if (likely(rc != 31)) {
-                if (ra == 31)
-                    tcg_gen_movi_i64(cpu_ir[rc], 0);
-                else {
-                    if (islit)
-                        tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
-                    else
-                        tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
-                    tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
-                }
-            }
+            tcg_gen_mul_i64(vc, va, vb);
+            tcg_gen_ext32s_i64(vc, vc);
             break;
         case 0x20:
             /* MULQ */
-            if (likely(rc != 31)) {
-                if (ra == 31)
-                    tcg_gen_movi_i64(cpu_ir[rc], 0);
-                else if (islit)
-                    tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
-                else
-                    tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
-            }
+            tcg_gen_mul_i64(vc, va, vb);
             break;
         case 0x30:
             /* UMULH */
-            {
-                TCGv low;
-                if (unlikely(rc == 31)){
-                    break;
-                }
-                if (ra == 31) {
-                    tcg_gen_movi_i64(cpu_ir[rc], 0);
-                    break;
-                }
-                low = tcg_temp_new();
-                if (islit) {
-                    tcg_gen_movi_tl(low, lit);
-                    tcg_gen_mulu2_i64(low, cpu_ir[rc], cpu_ir[ra], low);
-                } else {
-                    tcg_gen_mulu2_i64(low, cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
-                }
-                tcg_temp_free(low);
-            }
+            tmp = tcg_temp_new();
+            tcg_gen_mulu2_i64(tmp, vc, va, vb);
+            tcg_temp_free(tmp);
             break;
         case 0x40:
             /* MULL/V */
-            gen_mullv(ra, rb, rc, islit, lit);
+            gen_helper_mullv(vc, cpu_env, va, vb);
             break;
         case 0x60:
             /* MULQ/V */
-            gen_mulqv(ra, rb, rc, islit, lit);
+            gen_helper_mulqv(vc, cpu_env, va, vb);
             break;
         default:
             goto invalid_opc;
         }
         break;
+
     case 0x14:
+        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX);
+        vc = dest_fpr(ctx, rc);
         switch (fpfn) { /* fn11 & 0x3F */
         case 0x04:
             /* ITOFS */
-            if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
-                goto invalid_opc;
-            }
-            if (likely(rc != 31)) {
-                if (ra != 31) {
-                    TCGv_i32 tmp = tcg_temp_new_i32();
-                    tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
-                    gen_helper_memory_to_s(cpu_fir[rc], tmp);
-                    tcg_temp_free_i32(tmp);
-                } else
-                    tcg_gen_movi_i64(cpu_fir[rc], 0);
-            }
+            REQUIRE_REG_31(rb);
+            t32 = tcg_temp_new_i32();
+            va = load_gpr(ctx, ra);
+            tcg_gen_trunc_i64_i32(t32, va);
+            gen_helper_memory_to_s(vc, t32);
+            tcg_temp_free_i32(t32);
             break;
         case 0x0A:
             /* SQRTF */
-            if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
-                gen_fsqrtf(rb, rc);
-                break;
-            }
-            goto invalid_opc;
+            REQUIRE_REG_31(ra);
+            vb = load_fpr(ctx, rb);
+            gen_helper_sqrtf(vc, cpu_env, vb);
+            break;
         case 0x0B:
             /* SQRTS */
-            if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
-                gen_fsqrts(ctx, rb, rc, fn11);
-                break;
-            }
-            goto invalid_opc;
+            REQUIRE_REG_31(ra);
+            gen_fsqrts(ctx, rb, rc, fn11);
+            break;
         case 0x14:
             /* ITOFF */
-            if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
-                goto invalid_opc;
-            }
-            if (likely(rc != 31)) {
-                if (ra != 31) {
-                    TCGv_i32 tmp = tcg_temp_new_i32();
-                    tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
-                    gen_helper_memory_to_f(cpu_fir[rc], tmp);
-                    tcg_temp_free_i32(tmp);
-                } else
-                    tcg_gen_movi_i64(cpu_fir[rc], 0);
-            }
+            REQUIRE_REG_31(rb);
+            t32 = tcg_temp_new_i32();
+            va = load_gpr(ctx, ra);
+            tcg_gen_trunc_i64_i32(t32, va);
+            gen_helper_memory_to_f(vc, t32);
+            tcg_temp_free_i32(t32);
             break;
         case 0x24:
             /* ITOFT */
-            if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
-                goto invalid_opc;
-            }
-            if (likely(rc != 31)) {
-                if (ra != 31)
-                    tcg_gen_mov_i64(cpu_fir[rc], cpu_ir[ra]);
-                else
-                    tcg_gen_movi_i64(cpu_fir[rc], 0);
-            }
+            REQUIRE_REG_31(rb);
+            va = load_gpr(ctx, ra);
+            tcg_gen_mov_i64(vc, va);
             break;
         case 0x2A:
             /* SQRTG */
-            if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
-                gen_fsqrtg(rb, rc);
-                break;
-            }
-            goto invalid_opc;
+            REQUIRE_REG_31(ra);
+            vb = load_fpr(ctx, rb);
+            gen_helper_sqrtg(vc, cpu_env, vb);
+            break;
         case 0x02B:
             /* SQRTT */
-            if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
-                gen_fsqrtt(ctx, rb, rc, fn11);
-                break;
-            }
-            goto invalid_opc;
+            REQUIRE_REG_31(ra);
+            gen_fsqrtt(ctx, rb, rc, fn11);
+            break;
         default:
             goto invalid_opc;
         }
         break;
+
     case 0x15:
         /* VAX floating point */
         /* XXX: rounding mode and trap are ignored (!) */
+        vc = dest_fpr(ctx, rc);
+        vb = load_fpr(ctx, rb);
+        va = load_fpr(ctx, ra);
         switch (fpfn) { /* fn11 & 0x3F */
         case 0x00:
             /* ADDF */
-            gen_faddf(ra, rb, rc);
+            gen_helper_addf(vc, cpu_env, va, vb);
             break;
         case 0x01:
             /* SUBF */
-            gen_fsubf(ra, rb, rc);
+            gen_helper_subf(vc, cpu_env, va, vb);
             break;
         case 0x02:
             /* MULF */
-            gen_fmulf(ra, rb, rc);
+            gen_helper_mulf(vc, cpu_env, va, vb);
             break;
         case 0x03:
             /* DIVF */
-            gen_fdivf(ra, rb, rc);
+            gen_helper_divf(vc, cpu_env, va, vb);
             break;
         case 0x1E:
-            /* CVTDG */
-#if 0 // TODO
-            gen_fcvtdg(rb, rc);
-#else
+            /* CVTDG -- TODO */
+            REQUIRE_REG_31(ra);
             goto invalid_opc;
-#endif
-            break;
         case 0x20:
             /* ADDG */
-            gen_faddg(ra, rb, rc);
+            gen_helper_addg(vc, cpu_env, va, vb);
             break;
         case 0x21:
             /* SUBG */
-            gen_fsubg(ra, rb, rc);
+            gen_helper_subg(vc, cpu_env, va, vb);
             break;
         case 0x22:
             /* MULG */
-            gen_fmulg(ra, rb, rc);
+            gen_helper_mulg(vc, cpu_env, va, vb);
             break;
         case 0x23:
             /* DIVG */
-            gen_fdivg(ra, rb, rc);
+            gen_helper_divg(vc, cpu_env, va, vb);
             break;
         case 0x25:
             /* CMPGEQ */
-            gen_fcmpgeq(ra, rb, rc);
+            gen_helper_cmpgeq(vc, cpu_env, va, vb);
             break;
         case 0x26:
             /* CMPGLT */
-            gen_fcmpglt(ra, rb, rc);
+            gen_helper_cmpglt(vc, cpu_env, va, vb);
             break;
         case 0x27:
             /* CMPGLE */
-            gen_fcmpgle(ra, rb, rc);
+            gen_helper_cmpgle(vc, cpu_env, va, vb);
             break;
         case 0x2C:
             /* CVTGF */
-            gen_fcvtgf(rb, rc);
+            REQUIRE_REG_31(ra);
+            gen_helper_cvtgf(vc, cpu_env, vb);
             break;
         case 0x2D:
-            /* CVTGD */
-#if 0 // TODO
-            gen_fcvtgd(rb, rc);
-#else
+            /* CVTGD -- TODO */
+            REQUIRE_REG_31(ra);
             goto invalid_opc;
-#endif
-            break;
         case 0x2F:
             /* CVTGQ */
-            gen_fcvtgq(rb, rc);
+            REQUIRE_REG_31(ra);
+            gen_helper_cvtgq(vc, cpu_env, vb);
             break;
         case 0x3C:
             /* CVTQF */
-            gen_fcvtqf(rb, rc);
+            REQUIRE_REG_31(ra);
+            gen_helper_cvtqf(vc, cpu_env, vb);
             break;
         case 0x3E:
             /* CVTQG */
-            gen_fcvtqg(rb, rc);
+            REQUIRE_REG_31(ra);
+            gen_helper_cvtqg(vc, cpu_env, vb);
             break;
         default:
             goto invalid_opc;
         }
         break;
+
     case 0x16:
         /* IEEE floating-point */
         switch (fpfn) { /* fn11 & 0x3F */
@@ -2708,6 +2124,7 @@ static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
             gen_fcmptle(ctx, ra, rb, rc, fn11);
             break;
         case 0x2C:
+            REQUIRE_REG_31(ra);
             if (fn11 == 0x2AC || fn11 == 0x6AC) {
                 /* CVTST */
                 gen_fcvtst(ctx, rb, rc, fn11);
@@ -2718,104 +2135,122 @@ static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
             break;
         case 0x2F:
             /* CVTTQ */
+            REQUIRE_REG_31(ra);
             gen_fcvttq(ctx, rb, rc, fn11);
             break;
         case 0x3C:
             /* CVTQS */
+            REQUIRE_REG_31(ra);
             gen_fcvtqs(ctx, rb, rc, fn11);
             break;
         case 0x3E:
             /* CVTQT */
+            REQUIRE_REG_31(ra);
             gen_fcvtqt(ctx, rb, rc, fn11);
             break;
         default:
             goto invalid_opc;
         }
         break;
+
     case 0x17:
         switch (fn11) {
         case 0x010:
             /* CVTLQ */
-            gen_fcvtlq(rb, rc);
+            REQUIRE_REG_31(ra);
+            vc = dest_fpr(ctx, rc);
+            vb = load_fpr(ctx, rb);
+            gen_fcvtlq(vc, vb);
             break;
         case 0x020:
-            if (likely(rc != 31)) {
+            /* CPYS */
+            if (rc == 31) {
+                /* Special case CPYS as FNOP.  */
+            } else {
+                vc = dest_fpr(ctx, rc);
+                va = load_fpr(ctx, ra);
                 if (ra == rb) {
-                    /* FMOV */
-                    if (ra == 31)
-                        tcg_gen_movi_i64(cpu_fir[rc], 0);
-                    else
-                        tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[ra]);
+                    /* Special case CPYS as FMOV.  */
+                    tcg_gen_mov_i64(vc, va);
                 } else {
-                    /* CPYS */
-                    gen_fcpys(ra, rb, rc);
+                    vb = load_fpr(ctx, rb);
+                    gen_cpy_mask(vc, va, vb, 0, 0x8000000000000000ULL);
                 }
             }
             break;
         case 0x021:
             /* CPYSN */
-            gen_fcpysn(ra, rb, rc);
+            vc = dest_fpr(ctx, rc);
+            vb = load_fpr(ctx, rb);
+            va = load_fpr(ctx, ra);
+            gen_cpy_mask(vc, va, vb, 1, 0x8000000000000000ULL);
             break;
         case 0x022:
             /* CPYSE */
-            gen_fcpyse(ra, rb, rc);
+            vc = dest_fpr(ctx, rc);
+            vb = load_fpr(ctx, rb);
+            va = load_fpr(ctx, ra);
+            gen_cpy_mask(vc, va, vb, 0, 0xFFF0000000000000ULL);
             break;
         case 0x024:
             /* MT_FPCR */
-            if (likely(ra != 31))
-                gen_helper_store_fpcr(cpu_env, cpu_fir[ra]);
-            else {
-                TCGv tmp = tcg_const_i64(0);
-                gen_helper_store_fpcr(cpu_env, tmp);
-                tcg_temp_free(tmp);
-            }
+            va = load_fpr(ctx, ra);
+            gen_helper_store_fpcr(cpu_env, va);
             break;
         case 0x025:
             /* MF_FPCR */
-            if (likely(ra != 31))
-                gen_helper_load_fpcr(cpu_fir[ra], cpu_env);
+            va = dest_fpr(ctx, ra);
+            gen_helper_load_fpcr(va, cpu_env);
             break;
         case 0x02A:
             /* FCMOVEQ */
-            gen_fcmov(TCG_COND_EQ, ra, rb, rc);
+            gen_fcmov(ctx, TCG_COND_EQ, ra, rb, rc);
             break;
         case 0x02B:
             /* FCMOVNE */
-            gen_fcmov(TCG_COND_NE, ra, rb, rc);
+            gen_fcmov(ctx, TCG_COND_NE, ra, rb, rc);
             break;
         case 0x02C:
             /* FCMOVLT */
-            gen_fcmov(TCG_COND_LT, ra, rb, rc);
+            gen_fcmov(ctx, TCG_COND_LT, ra, rb, rc);
             break;
         case 0x02D:
             /* FCMOVGE */
-            gen_fcmov(TCG_COND_GE, ra, rb, rc);
+            gen_fcmov(ctx, TCG_COND_GE, ra, rb, rc);
             break;
         case 0x02E:
             /* FCMOVLE */
-            gen_fcmov(TCG_COND_LE, ra, rb, rc);
+            gen_fcmov(ctx, TCG_COND_LE, ra, rb, rc);
             break;
         case 0x02F:
             /* FCMOVGT */
-            gen_fcmov(TCG_COND_GT, ra, rb, rc);
+            gen_fcmov(ctx, TCG_COND_GT, ra, rb, rc);
             break;
         case 0x030:
             /* CVTQL */
-            gen_fcvtql(rb, rc);
+            REQUIRE_REG_31(ra);
+            vc = dest_fpr(ctx, rc);
+            vb = load_fpr(ctx, rb);
+            gen_fcvtql(vc, vb);
             break;
         case 0x130:
             /* CVTQL/V */
         case 0x530:
             /* CVTQL/SV */
+            REQUIRE_REG_31(ra);
             /* ??? I'm pretty sure there's nothing that /sv needs to do that
                /v doesn't do.  The only thing I can think is that /sv is a
                valid instruction merely for completeness in the ISA.  */
-            gen_fcvtql_v(ctx, rb, rc);
+            vc = dest_fpr(ctx, rc);
+            vb = load_fpr(ctx, rb);
+            gen_helper_fcvtql_v_input(cpu_env, vb);
+            gen_fcvtql(vc, vb);
             break;
         default:
             goto invalid_opc;
         }
         break;
+
     case 0x18:
         switch ((uint16_t)disp16) {
         case 0x0000:
@@ -2844,15 +2279,14 @@ static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
             break;
         case 0xC000:
             /* RPCC */
-            if (ra != 31) {
-                if (use_icount) {
-                    gen_io_start();
-                    gen_helper_load_pcc(cpu_ir[ra], cpu_env);
-                    gen_io_end();
-                    ret = EXIT_PC_STALE;
-                } else {
-                    gen_helper_load_pcc(cpu_ir[ra], cpu_env);
-                }
+            va = dest_gpr(ctx, ra);
+            if (use_icount) {
+                gen_io_start();
+                gen_helper_load_pcc(va, cpu_env);
+                gen_io_end();
+                ret = EXIT_PC_STALE;
+            } else {
+                gen_helper_load_pcc(va, cpu_env);
             }
             break;
         case 0xE000:
@@ -2874,58 +2308,55 @@ static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
             goto invalid_opc;
         }
         break;
+
     case 0x19:
         /* HW_MFPR (PALcode) */
 #ifndef CONFIG_USER_ONLY
-        if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
-            return gen_mfpr(ra, insn & 0xffff);
-        }
-#endif
+        REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
+        va = dest_gpr(ctx, ra);
+        ret = gen_mfpr(va, insn & 0xffff);
+        break;
+#else
         goto invalid_opc;
+#endif
+
     case 0x1A:
         /* JMP, JSR, RET, JSR_COROUTINE.  These only differ by the branch
            prediction stack action, which of course we don't implement.  */
-        if (rb != 31) {
-            tcg_gen_andi_i64(cpu_pc, cpu_ir[rb], ~3);
-        } else {
-            tcg_gen_movi_i64(cpu_pc, 0);
-        }
+        vb = load_gpr(ctx, rb);
+        tcg_gen_andi_i64(cpu_pc, vb, ~3);
         if (ra != 31) {
             tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
         }
         ret = EXIT_PC_UPDATED;
         break;
+
     case 0x1B:
         /* HW_LD (PALcode) */
 #ifndef CONFIG_USER_ONLY
-        if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
-            TCGv addr;
-
-            if (ra == 31) {
-                break;
-            }
+        REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
+        {
+            TCGv addr = tcg_temp_new();
+            vb = load_gpr(ctx, rb);
+            va = dest_gpr(ctx, ra);
 
-            addr = tcg_temp_new();
-            if (rb != 31)
-                tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
-            else
-                tcg_gen_movi_i64(addr, disp12);
+            tcg_gen_addi_i64(addr, vb, disp12);
             switch ((insn >> 12) & 0xF) {
             case 0x0:
                 /* Longword physical access (hw_ldl/p) */
-                gen_helper_ldl_phys(cpu_ir[ra], cpu_env, addr);
+                gen_helper_ldl_phys(va, cpu_env, addr);
                 break;
             case 0x1:
                 /* Quadword physical access (hw_ldq/p) */
-                gen_helper_ldq_phys(cpu_ir[ra], cpu_env, addr);
+                gen_helper_ldq_phys(va, cpu_env, addr);
                 break;
             case 0x2:
                 /* Longword physical access with lock (hw_ldl_l/p) */
-                gen_helper_ldl_l_phys(cpu_ir[ra], cpu_env, addr);
+                gen_helper_ldl_l_phys(va, cpu_env, addr);
                 break;
             case 0x3:
                 /* Quadword physical access with lock (hw_ldq_l/p) */
-                gen_helper_ldq_l_phys(cpu_ir[ra], cpu_env, addr);
+                gen_helper_ldq_l_phys(va, cpu_env, addr);
                 break;
             case 0x4:
                 /* Longword virtual PTE fetch (hw_ldl/v) */
@@ -2948,11 +2379,11 @@ static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
                 goto invalid_opc;
             case 0xA:
                 /* Longword virtual access with protection check (hw_ldl/w) */
-                tcg_gen_qemu_ld_i64(cpu_ir[ra], addr, MMU_KERNEL_IDX, MO_LESL);
+                tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LESL);
                 break;
             case 0xB:
                 /* Quadword virtual access with protection check (hw_ldq/w) */
-                tcg_gen_qemu_ld_i64(cpu_ir[ra], addr, MMU_KERNEL_IDX, MO_LEQ);
+                tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LEQ);
                 break;
             case 0xC:
                 /* Longword virtual access with alt access mode (hw_ldl/a)*/
@@ -2963,282 +2394,215 @@ static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
             case 0xE:
                 /* Longword virtual access with alternate access mode and
                    protection checks (hw_ldl/wa) */
-                tcg_gen_qemu_ld_i64(cpu_ir[ra], addr, MMU_USER_IDX, MO_LESL);
+                tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LESL);
                 break;
             case 0xF:
                 /* Quadword virtual access with alternate access mode and
                    protection checks (hw_ldq/wa) */
-                tcg_gen_qemu_ld_i64(cpu_ir[ra], addr, MMU_USER_IDX, MO_LEQ);
+                tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LEQ);
                 break;
             }
             tcg_temp_free(addr);
             break;
         }
-#endif
+#else
         goto invalid_opc;
+#endif
+
     case 0x1C:
+        vc = dest_gpr(ctx, rc);
+        if (fn7 == 0x70) {
+            /* FTOIT */
+            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX);
+            REQUIRE_REG_31(rb);
+            va = load_fpr(ctx, ra);
+            tcg_gen_mov_i64(vc, va);
+            break;
+        } else if (fn7 == 0x78) {
+            /* FTOIS */
+            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX);
+            REQUIRE_REG_31(rb);
+            t32 = tcg_temp_new_i32();
+            va = load_fpr(ctx, ra);
+            gen_helper_s_to_memory(t32, va);
+            tcg_gen_ext_i32_i64(vc, t32);
+            tcg_temp_free_i32(t32);
+            break;
+        }
+
+        vb = load_gpr_lit(ctx, rb, lit, islit);
         switch (fn7) {
         case 0x00:
             /* SEXTB */
-            if ((ctx->tb->flags & TB_FLAGS_AMASK_BWX) == 0) {
-                goto invalid_opc;
-            }
-            if (likely(rc != 31)) {
-                if (islit)
-                    tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int8_t)lit));
-                else
-                    tcg_gen_ext8s_i64(cpu_ir[rc], cpu_ir[rb]);
-            }
+            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
+            REQUIRE_REG_31(ra);
+            tcg_gen_ext8s_i64(vc, vb);
             break;
         case 0x01:
             /* SEXTW */
-            if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) {
-                if (likely(rc != 31)) {
-                    if (islit) {
-                        tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int16_t)lit));
-                    } else {
-                        tcg_gen_ext16s_i64(cpu_ir[rc], cpu_ir[rb]);
-                    }
-                }
-                break;
-            }
-            goto invalid_opc;
+            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
+            REQUIRE_REG_31(ra);
+            tcg_gen_ext16s_i64(vc, vb);
+            break;
         case 0x30:
             /* CTPOP */
-            if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) {
-                if (likely(rc != 31)) {
-                    if (islit) {
-                        tcg_gen_movi_i64(cpu_ir[rc], ctpop64(lit));
-                    } else {
-                        gen_helper_ctpop(cpu_ir[rc], cpu_ir[rb]);
-                    }
-                }
-                break;
-            }
-            goto invalid_opc;
+            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX);
+            REQUIRE_REG_31(ra);
+            gen_helper_ctpop(vc, vb);
+            break;
         case 0x31:
             /* PERR */
-            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
-                gen_perr(ra, rb, rc, islit, lit);
-                break;
-            }
-            goto invalid_opc;
+            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
+            va = load_gpr(ctx, ra);
+            gen_helper_perr(vc, va, vb);
+            break;
         case 0x32:
             /* CTLZ */
-            if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) {
-                if (likely(rc != 31)) {
-                    if (islit) {
-                        tcg_gen_movi_i64(cpu_ir[rc], clz64(lit));
-                    } else {
-                        gen_helper_ctlz(cpu_ir[rc], cpu_ir[rb]);
-                    }
-                }
-                break;
-            }
-            goto invalid_opc;
+            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX);
+            REQUIRE_REG_31(ra);
+            gen_helper_ctlz(vc, vb);
+            break;
         case 0x33:
             /* CTTZ */
-            if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) {
-                if (likely(rc != 31)) {
-                    if (islit) {
-                        tcg_gen_movi_i64(cpu_ir[rc], ctz64(lit));
-                    } else {
-                        gen_helper_cttz(cpu_ir[rc], cpu_ir[rb]);
-                    }
-                }
-                break;
-            }
-            goto invalid_opc;
+            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX);
+            REQUIRE_REG_31(ra);
+            gen_helper_cttz(vc, vb);
+            break;
         case 0x34:
             /* UNPKBW */
-            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
-                if (real_islit || ra != 31) {
-                    goto invalid_opc;
-                }
-                gen_unpkbw(rb, rc);
-                break;
-            }
-            goto invalid_opc;
+            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
+            REQUIRE_REG_31(ra);
+            gen_helper_unpkbw(vc, vb);
+            break;
         case 0x35:
             /* UNPKBL */
-            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
-                if (real_islit || ra != 31) {
-                    goto invalid_opc;
-                }
-                gen_unpkbl(rb, rc);
-                break;
-            }
-            goto invalid_opc;
+            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
+            REQUIRE_REG_31(ra);
+            gen_helper_unpkbl(vc, vb);
+            break;
         case 0x36:
             /* PKWB */
-            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
-                if (real_islit || ra != 31) {
-                    goto invalid_opc;
-                }
-                gen_pkwb(rb, rc);
-                break;
-            }
-            goto invalid_opc;
+            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
+            REQUIRE_REG_31(ra);
+            gen_helper_pkwb(vc, vb);
+            break;
         case 0x37:
             /* PKLB */
-            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
-                if (real_islit || ra != 31) {
-                    goto invalid_opc;
-                }
-                gen_pklb(rb, rc);
-                break;
-            }
-            goto invalid_opc;
+            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
+            REQUIRE_REG_31(ra);
+            gen_helper_pklb(vc, vb);
+            break;
         case 0x38:
             /* MINSB8 */
-            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
-                gen_minsb8(ra, rb, rc, islit, lit);
-                break;
-            }
-            goto invalid_opc;
+            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
+            va = load_gpr(ctx, ra);
+            gen_helper_minsb8(vc, va, vb);
+            break;
         case 0x39:
             /* MINSW4 */
-            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
-                gen_minsw4(ra, rb, rc, islit, lit);
-                break;
-            }
-            goto invalid_opc;
+            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
+            va = load_gpr(ctx, ra);
+            gen_helper_minsw4(vc, va, vb);
+            break;
         case 0x3A:
             /* MINUB8 */
-            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
-                gen_minub8(ra, rb, rc, islit, lit);
-                break;
-            }
-            goto invalid_opc;
+            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
+            va = load_gpr(ctx, ra);
+            gen_helper_minub8(vc, va, vb);
+            break;
         case 0x3B:
             /* MINUW4 */
-            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
-                gen_minuw4(ra, rb, rc, islit, lit);
-                break;
-            }
-            goto invalid_opc;
+            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
+            va = load_gpr(ctx, ra);
+            gen_helper_minuw4(vc, va, vb);
+            break;
         case 0x3C:
             /* MAXUB8 */
-            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
-                gen_maxub8(ra, rb, rc, islit, lit);
-                break;
-            }
-            goto invalid_opc;
+            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
+            va = load_gpr(ctx, ra);
+            gen_helper_maxub8(vc, va, vb);
+            break;
         case 0x3D:
             /* MAXUW4 */
-            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
-                gen_maxuw4(ra, rb, rc, islit, lit);
-                break;
-            }
-            goto invalid_opc;
+            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
+            va = load_gpr(ctx, ra);
+            gen_helper_maxuw4(vc, va, vb);
+            break;
         case 0x3E:
             /* MAXSB8 */
-            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
-                gen_maxsb8(ra, rb, rc, islit, lit);
-                break;
-            }
-            goto invalid_opc;
+            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
+            va = load_gpr(ctx, ra);
+            gen_helper_maxsb8(vc, va, vb);
+            break;
         case 0x3F:
             /* MAXSW4 */
-            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
-                gen_maxsw4(ra, rb, rc, islit, lit);
-                break;
-            }
-            goto invalid_opc;
-        case 0x70:
-            /* FTOIT */
-            if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
-                goto invalid_opc;
-            }
-            if (likely(rc != 31)) {
-                if (ra != 31)
-                    tcg_gen_mov_i64(cpu_ir[rc], cpu_fir[ra]);
-                else
-                    tcg_gen_movi_i64(cpu_ir[rc], 0);
-            }
-            break;
-        case 0x78:
-            /* FTOIS */
-            if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
-                goto invalid_opc;
-            }
-            if (rc != 31) {
-                TCGv_i32 tmp1 = tcg_temp_new_i32();
-                if (ra != 31)
-                    gen_helper_s_to_memory(tmp1, cpu_fir[ra]);
-                else {
-                    TCGv tmp2 = tcg_const_i64(0);
-                    gen_helper_s_to_memory(tmp1, tmp2);
-                    tcg_temp_free(tmp2);
-                }
-                tcg_gen_ext_i32_i64(cpu_ir[rc], tmp1);
-                tcg_temp_free_i32(tmp1);
-            }
+            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
+            va = load_gpr(ctx, ra);
+            gen_helper_maxsw4(vc, va, vb);
             break;
         default:
             goto invalid_opc;
         }
         break;
+
     case 0x1D:
         /* HW_MTPR (PALcode) */
 #ifndef CONFIG_USER_ONLY
-        if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
-            return gen_mtpr(ctx, rb, insn & 0xffff);
-        }
-#endif
+        REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
+        vb = load_gpr(ctx, rb);
+        ret = gen_mtpr(ctx, vb, insn & 0xffff);
+        break;
+#else
         goto invalid_opc;
+#endif
+
     case 0x1E:
         /* HW_RET (PALcode) */
 #ifndef CONFIG_USER_ONLY
-        if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
-            if (rb == 31) {
-                /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
-                   address from EXC_ADDR.  This turns out to be useful for our
-                   emulation PALcode, so continue to accept it.  */
-                TCGv tmp = tcg_temp_new();
-                tcg_gen_ld_i64(tmp, cpu_env, offsetof(CPUAlphaState, exc_addr));
-                gen_helper_hw_ret(cpu_env, tmp);
-                tcg_temp_free(tmp);
-            } else {
-                gen_helper_hw_ret(cpu_env, cpu_ir[rb]);
-            }
-            ret = EXIT_PC_UPDATED;
-            break;
+        REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
+        if (rb == 31) {
+            /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
+               address from EXC_ADDR.  This turns out to be useful for our
+               emulation PALcode, so continue to accept it.  */
+            tmp = tcg_temp_new();
+            tcg_gen_ld_i64(tmp, cpu_env, offsetof(CPUAlphaState, exc_addr));
+            gen_helper_hw_ret(cpu_env, tmp);
+            tcg_temp_free(tmp);
+        } else {
+            gen_helper_hw_ret(cpu_env, load_gpr(ctx, rb));
         }
-#endif
+        ret = EXIT_PC_UPDATED;
+        break;
+#else
         goto invalid_opc;
+#endif
+
     case 0x1F:
         /* HW_ST (PALcode) */
 #ifndef CONFIG_USER_ONLY
-        if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
-            TCGv addr, val;
-            addr = tcg_temp_new();
-            if (rb != 31)
-                tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
-            else
-                tcg_gen_movi_i64(addr, disp12);
-            if (ra != 31)
-                val = cpu_ir[ra];
-            else {
-                val = tcg_temp_new();
-                tcg_gen_movi_i64(val, 0);
-            }
+        REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
+        {
+            TCGv addr = tcg_temp_new();
+            va = load_gpr(ctx, ra);
+            vb = load_gpr(ctx, rb);
+
+            tcg_gen_addi_i64(addr, vb, disp12);
             switch ((insn >> 12) & 0xF) {
             case 0x0:
                 /* Longword physical access */
-                gen_helper_stl_phys(cpu_env, addr, val);
+                gen_helper_stl_phys(cpu_env, addr, va);
                 break;
             case 0x1:
                 /* Quadword physical access */
-                gen_helper_stq_phys(cpu_env, addr, val);
+                gen_helper_stq_phys(cpu_env, addr, va);
                 break;
             case 0x2:
                 /* Longword physical access with lock */
-                gen_helper_stl_c_phys(val, cpu_env, addr, val);
+                gen_helper_stl_c_phys(dest_gpr(ctx, ra), cpu_env, addr, va);
                 break;
             case 0x3:
                 /* Quadword physical access with lock */
-                gen_helper_stq_c_phys(val, cpu_env, addr, val);
+                gen_helper_stq_c_phys(dest_gpr(ctx, ra), cpu_env, addr, va);
                 break;
             case 0x4:
                 /* Longword virtual access */
@@ -3277,13 +2641,12 @@ static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
                 /* Invalid */
                 goto invalid_opc;
             }
-            if (ra == 31)
-                tcg_temp_free(val);
             tcg_temp_free(addr);
             break;
         }
-#endif
+#else
         goto invalid_opc;
+#endif
     case 0x20:
         /* LDF */
         gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);
@@ -3483,8 +2846,9 @@ static inline void gen_intermediate_code_internal(AlphaCPU *cpu,
             tcg_ctx.gen_opc_instr_start[lj] = 1;
             tcg_ctx.gen_opc_icount[lj] = num_insns;
         }
-        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
+        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO)) {
             gen_io_start();
+        }
         insn = cpu_ldl_code(env, ctx.pc);
         num_insns++;
 
@@ -3492,9 +2856,24 @@ static inline void gen_intermediate_code_internal(AlphaCPU *cpu,
             tcg_gen_debug_insn_start(ctx.pc);
         }
 
+        TCGV_UNUSED_I64(ctx.zero);
+        TCGV_UNUSED_I64(ctx.sink);
+        TCGV_UNUSED_I64(ctx.lit);
+
         ctx.pc += 4;
         ret = translate_one(ctxp, insn);
 
+        if (!TCGV_IS_UNUSED_I64(ctx.sink)) {
+            tcg_gen_discard_i64(ctx.sink);
+            tcg_temp_free(ctx.sink);
+        }
+        if (!TCGV_IS_UNUSED_I64(ctx.zero)) {
+            tcg_temp_free(ctx.zero);
+        }
+        if (!TCGV_IS_UNUSED_I64(ctx.lit)) {
+            tcg_temp_free(ctx.lit);
+        }
+
         /* If we reach a page boundary, are single stepping,
            or exhaust instruction count, stop generation.  */
         if (ret == NO_EXIT
-- 
1.7.10.4
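
The new opcode bodies above rely on REQUIRE_TB_FLAG in place of the open-coded
feature checks they replace. The macro itself is introduced in an earlier hunk
of this patch that is not shown in this excerpt; a minimal sketch of what it is
expected to expand to, assuming it simply bails out to the existing invalid_opc
label in translate_one(), looks like this:

    /* Sketch only: assumes a DisasContext *ctx in scope and the invalid_opc
       label used by translate_one().  Abort translation of this insn if the
       TB was not built with the required feature flag set, e.g.
       TB_FLAGS_AMASK_MVI or TB_FLAGS_PAL_MODE.  */
    #define REQUIRE_TB_FLAG(FLAG)                   \
        do {                                        \
            if ((ctx->tb->flags & (FLAG)) == 0) {   \
                goto invalid_opc;                   \
            }                                       \
        } while (0)

With that expansion, "REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);" behaves the same as
the removed "if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) { ...; break; } goto
invalid_opc;" pattern, only with the success path written straight through.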



