[Qemu-devel] [5804] target-ppc: convert SPE load/store to TCG


From: Aurelien Jarno
Subject: [Qemu-devel] [5804] target-ppc: convert SPE load/store to TCG
Date: Thu, 27 Nov 2008 19:30:48 +0000

Revision: 5804
          http://svn.sv.gnu.org/viewvc/?view=rev&root=qemu&revision=5804
Author:   aurel32
Date:     2008-11-27 19:30:47 +0000 (Thu, 27 Nov 2008)

Log Message:
-----------
target-ppc: convert SPE load/store to TCG

Signed-off-by: Aurelien Jarno <address@hidden>
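
For illustration only (this sketch is not part of the committed patch): after this conversion, a 64-bit SPE register value is held directly in cpu_gpr[n] on 64-bit targets, and is split across cpu_gprh[n] (upper 32 bits) and cpu_gpr[n] (lower 32 bits) on 32-bit targets, replacing the old T0_64/T1_64 dyngen temporaries removed below. A minimal standalone C sketch of that layout, with illustrative names and values:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative only: the register layout the converted code assumes.
 * On a 64-bit target the whole SPE value lives in one register;
 * on a 32-bit target it is split into an upper and a lower word. */
struct spe_reg32 {
    uint32_t gprh;  /* upper 32 bits (cpu_gprh[n]) */
    uint32_t gpr;   /* lower 32 bits (cpu_gpr[n])  */
};

static struct spe_reg32 spe_split(uint64_t v)
{
    struct spe_reg32 r = { (uint32_t)(v >> 32), (uint32_t)v };
    return r;
}

static uint64_t spe_join(struct spe_reg32 r)
{
    return ((uint64_t)r.gprh << 32) | r.gpr;
}

int main(void)
{
    struct spe_reg32 r = spe_split(0x1122334455667788ULL);
    printf("gprh=%08" PRIx32 " gpr=%08" PRIx32 " rejoined=%016" PRIx64 "\n",
           r.gprh, r.gpr, spe_join(r));
    /* prints: gprh=11223344 gpr=55667788 rejoined=1122334455667788 */
    return 0;
}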

Modified Paths:
--------------
    trunk/target-ppc/cpu.h
    trunk/target-ppc/exec.h
    trunk/target-ppc/op.c
    trunk/target-ppc/op_mem.h
    trunk/target-ppc/translate.c

Modified: trunk/target-ppc/cpu.h
===================================================================
--- trunk/target-ppc/cpu.h      2008-11-27 19:30:38 UTC (rev 5803)
+++ trunk/target-ppc/cpu.h      2008-11-27 19:30:47 UTC (rev 5804)
@@ -536,12 +536,6 @@
 #if (TARGET_LONG_BITS > HOST_LONG_BITS) || defined(HOST_I386)
     target_ulong t2;
 #endif
-#if !defined(TARGET_PPC64)
-    /* temporary fixed-point registers
-     * used to emulate 64 bits registers on 32 bits targets
-     */
-    uint64_t t0_64, t1_64, t2_64;
-#endif
 
     /* general purpose registers */
     target_ulong gpr[32];

Modified: trunk/target-ppc/exec.h
===================================================================
--- trunk/target-ppc/exec.h     2008-11-27 19:30:38 UTC (rev 5803)
+++ trunk/target-ppc/exec.h     2008-11-27 19:30:47 UTC (rev 5804)
@@ -44,16 +44,6 @@
 register target_ulong T2 asm(AREG3);
 #define TDX "%016lx"
 #endif
-/* We may, sometime, need 64 bits registers on 32 bits targets */
-#if !defined(TARGET_PPC64)
-#define T0_64 (env->t0_64)
-#define T1_64 (env->t1_64)
-#define T2_64 (env->t2_64)
-#else
-#define T0_64 T0
-#define T1_64 T1
-#define T2_64 T2
-#endif
 
 #define FT0 (env->ft0)
 #define FT1 (env->ft1)

Modified: trunk/target-ppc/op.c
===================================================================
--- trunk/target-ppc/op.c       2008-11-27 19:30:38 UTC (rev 5803)
+++ trunk/target-ppc/op.c       2008-11-27 19:30:47 UTC (rev 5804)
@@ -914,35 +914,3 @@
 }
 #endif /* !defined(CONFIG_USER_ONLY) */
 
-/* SPE extension */
-void OPPROTO op_splatw_T1_64 (void)
-{
-    T1_64 = (T1_64 << 32) | (T1_64 & 0x00000000FFFFFFFFULL);
-    RETURN();
-}
-
-void OPPROTO op_extsh_T1_64 (void)
-{
-    T1_64 = (int32_t)((int16_t)T1_64);
-    RETURN();
-}
-
-void OPPROTO op_sli16_T1_64 (void)
-{
-    T1_64 = T1_64 << 16;
-    RETURN();
-}
-
-void OPPROTO op_sli32_T1_64 (void)
-{
-    T1_64 = T1_64 << 32;
-    RETURN();
-}
-
-void OPPROTO op_srli32_T1_64 (void)
-{
-    T1_64 = T1_64 >> 32;
-    RETURN();
-}
-
-

Modified: trunk/target-ppc/op_mem.h
===================================================================
--- trunk/target-ppc/op_mem.h   2008-11-27 19:30:38 UTC (rev 5803)
+++ trunk/target-ppc/op_mem.h   2008-11-27 19:30:47 UTC (rev 5804)
@@ -642,262 +642,4 @@
     RETURN();
 }
 
-/* SPE extension */
-#define _PPC_SPE_LD_OP(name, op)                                              \
-void OPPROTO glue(glue(op_spe_l, name), MEMSUFFIX) (void)                     \
-{                                                                             \
-    T1_64 = glue(op, MEMSUFFIX)((uint32_t)T0);                                \
-    RETURN();                                                                 \
-}
-
-#if defined(TARGET_PPC64)
-#define _PPC_SPE_LD_OP_64(name, op)                                           \
-void OPPROTO glue(glue(glue(op_spe_l, name), _64), MEMSUFFIX) (void)          \
-{                                                                             \
-    T1_64 = glue(op, MEMSUFFIX)((uint64_t)T0);                                \
-    RETURN();                                                                 \
-}
-#define PPC_SPE_LD_OP(name, op)                                               \
-_PPC_SPE_LD_OP(name, op);                                                     \
-_PPC_SPE_LD_OP_64(name, op)
-#else
-#define PPC_SPE_LD_OP(name, op)                                               \
-_PPC_SPE_LD_OP(name, op)
-#endif
-
-#define _PPC_SPE_ST_OP(name, op)                                              \
-void OPPROTO glue(glue(op_spe_st, name), MEMSUFFIX) (void)                    \
-{                                                                             \
-    glue(op, MEMSUFFIX)((uint32_t)T0, T1_64);                                 \
-    RETURN();                                                                 \
-}
-
-#if defined(TARGET_PPC64)
-#define _PPC_SPE_ST_OP_64(name, op)                                           \
-void OPPROTO glue(glue(glue(op_spe_st, name), _64), MEMSUFFIX) (void)         \
-{                                                                             \
-    glue(op, MEMSUFFIX)((uint64_t)T0, T1_64);                                 \
-    RETURN();                                                                 \
-}
-#define PPC_SPE_ST_OP(name, op)                                               \
-_PPC_SPE_ST_OP(name, op);                                                     \
-_PPC_SPE_ST_OP_64(name, op)
-#else
-#define PPC_SPE_ST_OP(name, op)                                               \
-_PPC_SPE_ST_OP(name, op)
-#endif
-
-PPC_SPE_LD_OP(dd, ldu64);
-PPC_SPE_ST_OP(dd, st64);
-PPC_SPE_LD_OP(dd_le, ldu64r);
-PPC_SPE_ST_OP(dd_le, st64r);
-static always_inline uint64_t glue(spe_ldw, MEMSUFFIX) (target_ulong EA)
-{
-    uint64_t ret;
-    ret = (uint64_t)glue(ldu32, MEMSUFFIX)(EA) << 32;
-    ret |= (uint64_t)glue(ldu32, MEMSUFFIX)(EA + 4);
-    return ret;
-}
-PPC_SPE_LD_OP(dw, spe_ldw);
-static always_inline void glue(spe_stdw, MEMSUFFIX) (target_ulong EA,
-                                                     uint64_t data)
-{
-    glue(st32, MEMSUFFIX)(EA, data >> 32);
-    glue(st32, MEMSUFFIX)(EA + 4, data);
-}
-PPC_SPE_ST_OP(dw, spe_stdw);
-static always_inline uint64_t glue(spe_ldw_le, MEMSUFFIX) (target_ulong EA)
-{
-    uint64_t ret;
-    ret = (uint64_t)glue(ldu32r, MEMSUFFIX)(EA) << 32;
-    ret |= (uint64_t)glue(ldu32r, MEMSUFFIX)(EA + 4);
-    return ret;
-}
-PPC_SPE_LD_OP(dw_le, spe_ldw_le);
-static always_inline void glue(spe_stdw_le, MEMSUFFIX) (target_ulong EA,
-                                                        uint64_t data)
-{
-    glue(st32r, MEMSUFFIX)(EA, data >> 32);
-    glue(st32r, MEMSUFFIX)(EA + 4, data);
-}
-PPC_SPE_ST_OP(dw_le, spe_stdw_le);
-static always_inline uint64_t glue(spe_ldh, MEMSUFFIX) (target_ulong EA)
-{
-    uint64_t ret;
-    ret = (uint64_t)glue(ldu16, MEMSUFFIX)(EA) << 48;
-    ret |= (uint64_t)glue(ldu16, MEMSUFFIX)(EA + 2) << 32;
-    ret |= (uint64_t)glue(ldu16, MEMSUFFIX)(EA + 4) << 16;
-    ret |= (uint64_t)glue(ldu16, MEMSUFFIX)(EA + 6);
-    return ret;
-}
-PPC_SPE_LD_OP(dh, spe_ldh);
-static always_inline void glue(spe_stdh, MEMSUFFIX) (target_ulong EA,
-                                                     uint64_t data)
-{
-    glue(st16, MEMSUFFIX)(EA, data >> 48);
-    glue(st16, MEMSUFFIX)(EA + 2, data >> 32);
-    glue(st16, MEMSUFFIX)(EA + 4, data >> 16);
-    glue(st16, MEMSUFFIX)(EA + 6, data);
-}
-PPC_SPE_ST_OP(dh, spe_stdh);
-static always_inline uint64_t glue(spe_ldh_le, MEMSUFFIX) (target_ulong EA)
-{
-    uint64_t ret;
-    ret = (uint64_t)glue(ldu16r, MEMSUFFIX)(EA) << 48;
-    ret |= (uint64_t)glue(ldu16r, MEMSUFFIX)(EA + 2) << 32;
-    ret |= (uint64_t)glue(ldu16r, MEMSUFFIX)(EA + 4) << 16;
-    ret |= (uint64_t)glue(ldu16r, MEMSUFFIX)(EA + 6);
-    return ret;
-}
-PPC_SPE_LD_OP(dh_le, spe_ldh_le);
-static always_inline void glue(spe_stdh_le, MEMSUFFIX) (target_ulong EA,
-                                                        uint64_t data)
-{
-    glue(st16r, MEMSUFFIX)(EA, data >> 48);
-    glue(st16r, MEMSUFFIX)(EA + 2, data >> 32);
-    glue(st16r, MEMSUFFIX)(EA + 4, data >> 16);
-    glue(st16r, MEMSUFFIX)(EA + 6, data);
-}
-PPC_SPE_ST_OP(dh_le, spe_stdh_le);
-static always_inline uint64_t glue(spe_lwhe, MEMSUFFIX) (target_ulong EA)
-{
-    uint64_t ret;
-    ret = (uint64_t)glue(ldu16, MEMSUFFIX)(EA) << 48;
-    ret |= (uint64_t)glue(ldu16, MEMSUFFIX)(EA + 2) << 16;
-    return ret;
-}
-PPC_SPE_LD_OP(whe, spe_lwhe);
-static always_inline void glue(spe_stwhe, MEMSUFFIX) (target_ulong EA,
-                                                      uint64_t data)
-{
-    glue(st16, MEMSUFFIX)(EA, data >> 48);
-    glue(st16, MEMSUFFIX)(EA + 2, data >> 16);
-}
-PPC_SPE_ST_OP(whe, spe_stwhe);
-static always_inline uint64_t glue(spe_lwhe_le, MEMSUFFIX) (target_ulong EA)
-{
-    uint64_t ret;
-    ret = (uint64_t)glue(ldu16r, MEMSUFFIX)(EA) << 48;
-    ret |= (uint64_t)glue(ldu16r, MEMSUFFIX)(EA + 2) << 16;
-    return ret;
-}
-PPC_SPE_LD_OP(whe_le, spe_lwhe_le);
-static always_inline void glue(spe_stwhe_le, MEMSUFFIX) (target_ulong EA,
-                                                         uint64_t data)
-{
-    glue(st16r, MEMSUFFIX)(EA, data >> 48);
-    glue(st16r, MEMSUFFIX)(EA + 2, data >> 16);
-}
-PPC_SPE_ST_OP(whe_le, spe_stwhe_le);
-static always_inline uint64_t glue(spe_lwhou, MEMSUFFIX) (target_ulong EA)
-{
-    uint64_t ret;
-    ret = (uint64_t)glue(ldu16, MEMSUFFIX)(EA) << 32;
-    ret |= (uint64_t)glue(ldu16, MEMSUFFIX)(EA + 2);
-    return ret;
-}
-PPC_SPE_LD_OP(whou, spe_lwhou);
-static always_inline uint64_t glue(spe_lwhos, MEMSUFFIX) (target_ulong EA)
-{
-    uint64_t ret;
-    ret = ((uint64_t)((int32_t)glue(lds16, MEMSUFFIX)(EA))) << 32;
-    ret |= (uint64_t)((int32_t)glue(lds16, MEMSUFFIX)(EA + 2));
-    return ret;
-}
-PPC_SPE_LD_OP(whos, spe_lwhos);
-static always_inline void glue(spe_stwho, MEMSUFFIX) (target_ulong EA,
-                                                      uint64_t data)
-{
-    glue(st16, MEMSUFFIX)(EA, data >> 32);
-    glue(st16, MEMSUFFIX)(EA + 2, data);
-}
-PPC_SPE_ST_OP(who, spe_stwho);
-static always_inline uint64_t glue(spe_lwhou_le, MEMSUFFIX) (target_ulong EA)
-{
-    uint64_t ret;
-    ret = (uint64_t)glue(ldu16r, MEMSUFFIX)(EA) << 32;
-    ret |= (uint64_t)glue(ldu16r, MEMSUFFIX)(EA + 2);
-    return ret;
-}
-PPC_SPE_LD_OP(whou_le, spe_lwhou_le);
-static always_inline uint64_t glue(spe_lwhos_le, MEMSUFFIX) (target_ulong EA)
-{
-    uint64_t ret;
-    ret = ((uint64_t)((int32_t)glue(lds16r, MEMSUFFIX)(EA))) << 32;
-    ret |= (uint64_t)((int32_t)glue(lds16r, MEMSUFFIX)(EA + 2));
-    return ret;
-}
-PPC_SPE_LD_OP(whos_le, spe_lwhos_le);
-static always_inline void glue(spe_stwho_le, MEMSUFFIX) (target_ulong EA,
-                                                         uint64_t data)
-{
-    glue(st16r, MEMSUFFIX)(EA, data >> 32);
-    glue(st16r, MEMSUFFIX)(EA + 2, data);
-}
-PPC_SPE_ST_OP(who_le, spe_stwho_le);
-static always_inline void glue(spe_stwwo, MEMSUFFIX) (target_ulong EA,
-                                                      uint64_t data)
-{
-    glue(st32, MEMSUFFIX)(EA, data);
-}
-PPC_SPE_ST_OP(wwo, spe_stwwo);
-static always_inline void glue(spe_stwwo_le, MEMSUFFIX) (target_ulong EA,
-                                                         uint64_t data)
-{
-    glue(st32r, MEMSUFFIX)(EA, data);
-}
-PPC_SPE_ST_OP(wwo_le, spe_stwwo_le);
-static always_inline uint64_t glue(spe_lh, MEMSUFFIX) (target_ulong EA)
-{
-    uint16_t tmp;
-    tmp = glue(ldu16, MEMSUFFIX)(EA);
-    return ((uint64_t)tmp << 48) | ((uint64_t)tmp << 16);
-}
-PPC_SPE_LD_OP(h, spe_lh);
-static always_inline uint64_t glue(spe_lh_le, MEMSUFFIX) (target_ulong EA)
-{
-    uint16_t tmp;
-    tmp = glue(ldu16r, MEMSUFFIX)(EA);
-    return ((uint64_t)tmp << 48) | ((uint64_t)tmp << 16);
-}
-PPC_SPE_LD_OP(h_le, spe_lh_le);
-static always_inline uint64_t glue(spe_lwwsplat, MEMSUFFIX) (target_ulong EA)
-{
-    uint32_t tmp;
-    tmp = glue(ldu32, MEMSUFFIX)(EA);
-    return ((uint64_t)tmp << 32) | (uint64_t)tmp;
-}
-PPC_SPE_LD_OP(wwsplat, spe_lwwsplat);
-static always_inline
-uint64_t glue(spe_lwwsplat_le, MEMSUFFIX) (target_ulong EA)
-{
-    uint32_t tmp;
-    tmp = glue(ldu32r, MEMSUFFIX)(EA);
-    return ((uint64_t)tmp << 32) | (uint64_t)tmp;
-}
-PPC_SPE_LD_OP(wwsplat_le, spe_lwwsplat_le);
-static always_inline uint64_t glue(spe_lwhsplat, MEMSUFFIX) (target_ulong EA)
-{
-    uint64_t ret;
-    uint16_t tmp;
-    tmp = glue(ldu16, MEMSUFFIX)(EA);
-    ret = ((uint64_t)tmp << 48) | ((uint64_t)tmp << 32);
-    tmp = glue(ldu16, MEMSUFFIX)(EA + 2);
-    ret |= ((uint64_t)tmp << 16) | (uint64_t)tmp;
-    return ret;
-}
-PPC_SPE_LD_OP(whsplat, spe_lwhsplat);
-static always_inline
-uint64_t glue(spe_lwhsplat_le, MEMSUFFIX) (target_ulong EA)
-{
-    uint64_t ret;
-    uint16_t tmp;
-    tmp = glue(ldu16r, MEMSUFFIX)(EA);
-    ret = ((uint64_t)tmp << 48) | ((uint64_t)tmp << 32);
-    tmp = glue(ldu16r, MEMSUFFIX)(EA + 2);
-    ret |= ((uint64_t)tmp << 16) | (uint64_t)tmp;
-    return ret;
-}
-PPC_SPE_LD_OP(whsplat_le, spe_lwhsplat_le);
-
 #undef MEMSUFFIX

Modified: trunk/target-ppc/translate.c
===================================================================
--- trunk/target-ppc/translate.c        2008-11-27 19:30:38 UTC (rev 5803)
+++ trunk/target-ppc/translate.c        2008-11-27 19:30:47 UTC (rev 5804)
@@ -71,11 +71,6 @@
 
 /* dyngen register indexes */
 static TCGv cpu_T[3];
-#if defined(TARGET_PPC64)
-#define cpu_T64 cpu_T
-#else
-static TCGv_i64 cpu_T64[3];
-#endif
 static TCGv_i64 cpu_FT[2];
 
 #include "gen-icount.h"
@@ -107,14 +102,6 @@
     cpu_T[2] = tcg_global_reg_new(TCG_AREG3, "T2");
 #endif
 #endif
-#if !defined(TARGET_PPC64)
-    cpu_T64[0] = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUState, t0_64),
-                                        "T0_64");
-    cpu_T64[1] = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUState, t1_64),
-                                        "T1_64");
-    cpu_T64[2] = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUState, t2_64),
-                                        "T2_64");
-#endif
 
     cpu_FT[0] = tcg_global_mem_new_i64(TCG_AREG0,
                                        offsetof(CPUState, ft0), "FT0");
@@ -6031,91 +6018,6 @@
     GEN_EXCP_INVAL(ctx);
 }
 
-/* SPE load and stores */
-static always_inline void gen_addr_spe_imm_index (TCGv EA, DisasContext *ctx, int sh)
-{
-    target_long simm = rB(ctx->opcode);
-
-    if (rA(ctx->opcode) == 0)
-        tcg_gen_movi_tl(EA, simm << sh);
-    else if (likely(simm != 0))
-        tcg_gen_addi_tl(EA, cpu_gpr[rA(ctx->opcode)], simm << sh);
-    else
-        tcg_gen_mov_tl(EA, cpu_gpr[rA(ctx->opcode)]);
-}
-
-#define op_spe_ldst(name)        (*gen_op_##name[ctx->mem_idx])()
-#define OP_SPE_LD_TABLE(name)                                                 \
-static GenOpFunc *gen_op_spe_l##name[NB_MEM_FUNCS] = {                        \
-    GEN_MEM_FUNCS(spe_l##name),                                               \
-};
-#define OP_SPE_ST_TABLE(name)                                                 \
-static GenOpFunc *gen_op_spe_st##name[NB_MEM_FUNCS] = {                       \
-    GEN_MEM_FUNCS(spe_st##name),                                              \
-};
-
-#define GEN_SPE_LD(name, sh)                                                  \
-static always_inline void gen_evl##name (DisasContext *ctx)                   \
-{                                                                             \
-    if (unlikely(!ctx->spe_enabled)) {                                        \
-        GEN_EXCP_NO_AP(ctx);                                                  \
-        return;                                                               \
-    }                                                                         \
-    gen_addr_spe_imm_index(cpu_T[0], ctx, sh);                                \
-    op_spe_ldst(spe_l##name);                                                 \
-    gen_store_gpr64(rD(ctx->opcode), cpu_T64[1]);                             \
-}
-
-#define GEN_SPE_LDX(name)                                                     \
-static always_inline void gen_evl##name##x (DisasContext *ctx)                \
-{                                                                             \
-    if (unlikely(!ctx->spe_enabled)) {                                        \
-        GEN_EXCP_NO_AP(ctx);                                                  \
-        return;                                                               \
-    }                                                                         \
-    gen_addr_reg_index(cpu_T[0], ctx);                                        \
-    op_spe_ldst(spe_l##name);                                                 \
-    gen_store_gpr64(rD(ctx->opcode), cpu_T64[1]);                             \
-}
-
-#define GEN_SPEOP_LD(name, sh)                                                \
-OP_SPE_LD_TABLE(name);                                                        \
-GEN_SPE_LD(name, sh);                                                         \
-GEN_SPE_LDX(name)
-
-#define GEN_SPE_ST(name, sh)                                                  \
-static always_inline void gen_evst##name (DisasContext *ctx)                  \
-{                                                                             \
-    if (unlikely(!ctx->spe_enabled)) {                                        \
-        GEN_EXCP_NO_AP(ctx);                                                  \
-        return;                                                               \
-    }                                                                         \
-    gen_addr_spe_imm_index(cpu_T[0], ctx, sh);                                \
-    gen_load_gpr64(cpu_T64[1], rS(ctx->opcode));                              \
-    op_spe_ldst(spe_st##name);                                                \
-}
-
-#define GEN_SPE_STX(name)                                                     \
-static always_inline void gen_evst##name##x (DisasContext *ctx)               \
-{                                                                             \
-    if (unlikely(!ctx->spe_enabled)) {                                        \
-        GEN_EXCP_NO_AP(ctx);                                                  \
-        return;                                                               \
-    }                                                                         \
-    gen_addr_reg_index(cpu_T[0], ctx);                                        \
-    gen_load_gpr64(cpu_T64[1], rS(ctx->opcode));                              \
-    op_spe_ldst(spe_st##name);                                                \
-}
-
-#define GEN_SPEOP_ST(name, sh)                                                \
-OP_SPE_ST_TABLE(name);                                                        \
-GEN_SPE_ST(name, sh);                                                         \
-GEN_SPE_STX(name)
-
-#define GEN_SPEOP_LDST(name, sh)                                              \
-GEN_SPEOP_LD(name, sh);                                                       \
-GEN_SPEOP_ST(name, sh)
-
 /* SPE logic */
 #if defined(TARGET_PPC64)
 #define GEN_SPEOP_LOGIC2(name, tcg_op)                                        \
@@ -6681,189 +6583,353 @@
 GEN_SPE(evcmpltu,       evcmplts,      0x19, 0x08, 0x00600000, PPC_SPE); ////
 GEN_SPE(evcmpeq,        speundef,      0x1A, 0x08, 0x00600000, PPC_SPE); ////
 
-/* Load and stores */
-GEN_SPEOP_LDST(dd, 3);
-GEN_SPEOP_LDST(dw, 3);
-GEN_SPEOP_LDST(dh, 3);
-GEN_SPEOP_LDST(whe, 2);
-GEN_SPEOP_LD(whou, 2);
-GEN_SPEOP_LD(whos, 2);
-GEN_SPEOP_ST(who, 2);
+/* SPE load and stores */
+static always_inline void gen_addr_spe_imm_index (TCGv EA, DisasContext *ctx, int sh)
+{
+    target_ulong uimm = rB(ctx->opcode);
 
-#define _GEN_OP_SPE_STWWE(suffix)                                             \
-static always_inline void gen_op_spe_stwwe_##suffix (void)                    \
-{                                                                             \
-    gen_op_srli32_T1_64();                                                    \
-    gen_op_spe_stwwo_##suffix();                                              \
+    if (rA(ctx->opcode) == 0)
+        tcg_gen_movi_tl(EA, uimm << sh);
+    else
+        tcg_gen_addi_tl(EA, cpu_gpr[rA(ctx->opcode)], uimm << sh);
 }
-#define _GEN_OP_SPE_STWWE_LE(suffix)                                          \
-static always_inline void gen_op_spe_stwwe_le_##suffix (void)                 \
-{                                                                             \
-    gen_op_srli32_T1_64();                                                    \
-    gen_op_spe_stwwo_le_##suffix();                                           \
+
+static always_inline void gen_op_evldd(DisasContext *ctx, TCGv addr)
+{
+#if defined(TARGET_PPC64)
+    gen_qemu_ld64(cpu_gpr[rD(ctx->opcode)], addr, ctx->mem_idx);
+#else
+    TCGv_i64 t0 = tcg_temp_new_i64();
+    gen_qemu_ld64(t0, addr, ctx->mem_idx);
+    tcg_gen_trunc_i64_i32(cpu_gpr[rD(ctx->opcode)], t0);
+    tcg_gen_shri_i64(t0, t0, 32);
+    tcg_gen_trunc_i64_i32(cpu_gprh[rD(ctx->opcode)], t0);
+    tcg_temp_free_i64(t0);
+#endif
 }
+
+static always_inline void gen_op_evldw(DisasContext *ctx, TCGv addr)
+{
 #if defined(TARGET_PPC64)
-#define GEN_OP_SPE_STWWE(suffix)                                              \
-_GEN_OP_SPE_STWWE(suffix);                                                    \
-_GEN_OP_SPE_STWWE_LE(suffix);                                                 \
-static always_inline void gen_op_spe_stwwe_64_##suffix (void)                 \
-{                                                                             \
-    gen_op_srli32_T1_64();                                                    \
-    gen_op_spe_stwwo_64_##suffix();                                           \
-}                                                                             \
-static always_inline void gen_op_spe_stwwe_le_64_##suffix (void)              \
-{                                                                             \
-    gen_op_srli32_T1_64();                                                    \
-    gen_op_spe_stwwo_le_64_##suffix();                                        \
+    TCGv t0 = tcg_temp_new();
+    gen_qemu_ld32u(t0, addr, ctx->mem_idx);
+    tcg_gen_shli_tl(cpu_gpr[rD(ctx->opcode)], t0, 32);
+    tcg_gen_addi_tl(addr, addr, 4);
+    gen_qemu_ld32u(t0, addr, ctx->mem_idx);
+    tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)], t0);
+    tcg_temp_free(t0);
+#else
+    gen_qemu_ld32u(cpu_gprh[rD(ctx->opcode)], addr, ctx->mem_idx);
+    tcg_gen_addi_tl(addr, addr, 4);
+    gen_qemu_ld32u(cpu_gpr[rD(ctx->opcode)], addr, ctx->mem_idx);
+#endif
 }
+
+static always_inline void gen_op_evldh(DisasContext *ctx, TCGv addr)
+{
+    TCGv t0 = tcg_temp_new();
+#if defined(TARGET_PPC64)
+    gen_qemu_ld16u(t0, addr, ctx->mem_idx);
+    tcg_gen_shli_tl(cpu_gpr[rD(ctx->opcode)], t0, 48);
+    tcg_gen_addi_tl(addr, addr, 2);
+    gen_qemu_ld16u(t0, addr, ctx->mem_idx);
+    tcg_gen_shli_tl(t0, t0, 32);
+    tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)], t0);
+    tcg_gen_addi_tl(addr, addr, 2);
+    gen_qemu_ld16u(t0, addr, ctx->mem_idx);
+    tcg_gen_shli_tl(t0, t0, 16);
+    tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)], t0);
+    tcg_gen_addi_tl(addr, addr, 2);
+    gen_qemu_ld16u(t0, addr, ctx->mem_idx);
+    tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)], t0);
 #else
-#define GEN_OP_SPE_STWWE(suffix)                                              \
-_GEN_OP_SPE_STWWE(suffix);                                                    \
-_GEN_OP_SPE_STWWE_LE(suffix)
+    gen_qemu_ld16u(t0, addr, ctx->mem_idx);
+    tcg_gen_shli_tl(cpu_gprh[rD(ctx->opcode)], t0, 16);
+    tcg_gen_addi_tl(addr, addr, 2);
+    gen_qemu_ld16u(t0, addr, ctx->mem_idx);
+    tcg_gen_or_tl(cpu_gprh[rD(ctx->opcode)], cpu_gprh[rD(ctx->opcode)], t0);
+    tcg_gen_addi_tl(addr, addr, 2);
+    gen_qemu_ld16u(t0, addr, ctx->mem_idx);
+    tcg_gen_shli_tl(cpu_gprh[rD(ctx->opcode)], t0, 16);
+    tcg_gen_addi_tl(addr, addr, 2);
+    gen_qemu_ld16u(t0, addr, ctx->mem_idx);
+    tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)], t0);
 #endif
-#if defined(CONFIG_USER_ONLY)
-GEN_OP_SPE_STWWE(raw);
-#else /* defined(CONFIG_USER_ONLY) */
-GEN_OP_SPE_STWWE(user);
-GEN_OP_SPE_STWWE(kernel);
-GEN_OP_SPE_STWWE(hypv);
-#endif /* defined(CONFIG_USER_ONLY) */
-GEN_SPEOP_ST(wwe, 2);
-GEN_SPEOP_ST(wwo, 2);
+    tcg_temp_free(t0);
+}
 
-#define GEN_SPE_LDSPLAT(name, op, suffix)                                     \
-static always_inline void gen_op_spe_l##name##_##suffix (void)                \
-{                                                                             \
-    gen_op_##op##_##suffix();                                                 \
-    gen_op_splatw_T1_64();                                                    \
+static always_inline void gen_op_evlhhesplat(DisasContext *ctx, TCGv addr)
+{
+    TCGv t0 = tcg_temp_new();
+    gen_qemu_ld16u(t0, addr, ctx->mem_idx);
+#if defined(TARGET_PPC64)
+    tcg_gen_shli_tl(cpu_gpr[rD(ctx->opcode)], t0, 48);
+    tcg_gen_shli_tl(t0, t0, 16);
+    tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)], t0);
+#else
+    tcg_gen_shli_tl(t0, t0, 16);
+    tcg_gen_mov_tl(cpu_gprh[rD(ctx->opcode)], t0);
+    tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], t0);
+#endif
+    tcg_temp_free(t0);
 }
 
-#define GEN_OP_SPE_LHE(suffix)                                                \
-static always_inline void gen_op_spe_lhe_##suffix (void)                      \
-{                                                                             \
-    gen_op_spe_lh_##suffix();                                                 \
-    gen_op_sli16_T1_64();                                                     \
+static always_inline void gen_op_evlhhousplat(DisasContext *ctx, TCGv addr)
+{
+    TCGv t0 = tcg_temp_new();
+    gen_qemu_ld16u(t0, addr, ctx->mem_idx);
+#if defined(TARGET_PPC64)
+    tcg_gen_shli_tl(cpu_gpr[rD(ctx->opcode)], t0, 32);
+    tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)], t0);
+#else
+    tcg_gen_mov_tl(cpu_gprh[rD(ctx->opcode)], t0);
+    tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], t0);
+#endif
+    tcg_temp_free(t0);
 }
 
-#define GEN_OP_SPE_LHX(suffix)                                                \
-static always_inline void gen_op_spe_lhx_##suffix (void)                      \
-{                                                                             \
-    gen_op_spe_lh_##suffix();                                                 \
-    gen_op_extsh_T1_64();                                                     \
+static always_inline void gen_op_evlhhossplat(DisasContext *ctx, TCGv addr)
+{
+    TCGv t0 = tcg_temp_new();
+    gen_qemu_ld16s(t0, addr, ctx->mem_idx);
+#if defined(TARGET_PPC64)
+    tcg_gen_shli_tl(cpu_gpr[rD(ctx->opcode)], t0, 32);
+    tcg_gen_ext32u_tl(t0, t0);
+    tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)], t0);
+#else
+    tcg_gen_mov_tl(cpu_gprh[rD(ctx->opcode)], t0);
+    tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], t0);
+#endif
+    tcg_temp_free(t0);
 }
 
-#if defined(CONFIG_USER_ONLY)
-GEN_OP_SPE_LHE(raw);
-GEN_SPE_LDSPLAT(hhesplat, spe_lhe, raw);
-GEN_OP_SPE_LHE(le_raw);
-GEN_SPE_LDSPLAT(hhesplat, spe_lhe, le_raw);
-GEN_SPE_LDSPLAT(hhousplat, spe_lh, raw);
-GEN_SPE_LDSPLAT(hhousplat, spe_lh, le_raw);
-GEN_OP_SPE_LHX(raw);
-GEN_SPE_LDSPLAT(hhossplat, spe_lhx, raw);
-GEN_OP_SPE_LHX(le_raw);
-GEN_SPE_LDSPLAT(hhossplat, spe_lhx, le_raw);
+static always_inline void gen_op_evlwhe(DisasContext *ctx, TCGv addr)
+{
+    TCGv t0 = tcg_temp_new();
 #if defined(TARGET_PPC64)
-GEN_OP_SPE_LHE(64_raw);
-GEN_SPE_LDSPLAT(hhesplat, spe_lhe, 64_raw);
-GEN_OP_SPE_LHE(le_64_raw);
-GEN_SPE_LDSPLAT(hhesplat, spe_lhe, le_64_raw);
-GEN_SPE_LDSPLAT(hhousplat, spe_lh, 64_raw);
-GEN_SPE_LDSPLAT(hhousplat, spe_lh, le_64_raw);
-GEN_OP_SPE_LHX(64_raw);
-GEN_SPE_LDSPLAT(hhossplat, spe_lhx, 64_raw);
-GEN_OP_SPE_LHX(le_64_raw);
-GEN_SPE_LDSPLAT(hhossplat, spe_lhx, le_64_raw);
+    gen_qemu_ld16u(t0, addr, ctx->mem_idx);
+    tcg_gen_shli_tl(cpu_gpr[rD(ctx->opcode)], t0, 48);
+    gen_qemu_ld16u(t0, addr, ctx->mem_idx);
+    tcg_gen_shli_tl(t0, t0, 16);
+    tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)], t0);
+#else
+    gen_qemu_ld16u(t0, addr, ctx->mem_idx);
+    tcg_gen_shli_tl(cpu_gprh[rD(ctx->opcode)], t0, 16);
+    tcg_gen_addi_tl(addr, addr, 2);
+    gen_qemu_ld16u(t0, addr, ctx->mem_idx);
+    tcg_gen_shli_tl(cpu_gpr[rD(ctx->opcode)], t0, 16);
 #endif
+    tcg_temp_free(t0);
+}
+
+static always_inline void gen_op_evlwhou(DisasContext *ctx, TCGv addr)
+{
+#if defined(TARGET_PPC64)
+    TCGv t0 = tcg_temp_new();
+    gen_qemu_ld16u(cpu_gpr[rD(ctx->opcode)], addr, ctx->mem_idx);
+    tcg_gen_addi_tl(addr, addr, 2);
+    gen_qemu_ld16u(t0, addr, ctx->mem_idx);
+    tcg_gen_shli_tl(t0, t0, 32);
+    tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)], t0);
+    tcg_temp_free(t0);
 #else
-GEN_OP_SPE_LHE(user);
-GEN_OP_SPE_LHE(kernel);
-GEN_OP_SPE_LHE(hypv);
-GEN_SPE_LDSPLAT(hhesplat, spe_lhe, user);
-GEN_SPE_LDSPLAT(hhesplat, spe_lhe, kernel);
-GEN_SPE_LDSPLAT(hhesplat, spe_lhe, hypv);
-GEN_OP_SPE_LHE(le_user);
-GEN_OP_SPE_LHE(le_kernel);
-GEN_OP_SPE_LHE(le_hypv);
-GEN_SPE_LDSPLAT(hhesplat, spe_lhe, le_user);
-GEN_SPE_LDSPLAT(hhesplat, spe_lhe, le_kernel);
-GEN_SPE_LDSPLAT(hhesplat, spe_lhe, le_hypv);
-GEN_SPE_LDSPLAT(hhousplat, spe_lh, user);
-GEN_SPE_LDSPLAT(hhousplat, spe_lh, kernel);
-GEN_SPE_LDSPLAT(hhousplat, spe_lh, hypv);
-GEN_SPE_LDSPLAT(hhousplat, spe_lh, le_user);
-GEN_SPE_LDSPLAT(hhousplat, spe_lh, le_kernel);
-GEN_SPE_LDSPLAT(hhousplat, spe_lh, le_hypv);
-GEN_OP_SPE_LHX(user);
-GEN_OP_SPE_LHX(kernel);
-GEN_OP_SPE_LHX(hypv);
-GEN_SPE_LDSPLAT(hhossplat, spe_lhx, user);
-GEN_SPE_LDSPLAT(hhossplat, spe_lhx, kernel);
-GEN_SPE_LDSPLAT(hhossplat, spe_lhx, hypv);
-GEN_OP_SPE_LHX(le_user);
-GEN_OP_SPE_LHX(le_kernel);
-GEN_OP_SPE_LHX(le_hypv);
-GEN_SPE_LDSPLAT(hhossplat, spe_lhx, le_user);
-GEN_SPE_LDSPLAT(hhossplat, spe_lhx, le_kernel);
-GEN_SPE_LDSPLAT(hhossplat, spe_lhx, le_hypv);
+    gen_qemu_ld16u(cpu_gprh[rD(ctx->opcode)], addr, ctx->mem_idx);
+    tcg_gen_addi_tl(addr, addr, 2);
+    gen_qemu_ld16u(cpu_gpr[rD(ctx->opcode)], addr, ctx->mem_idx);
+#endif
+}
+
+static always_inline void gen_op_evlwhos(DisasContext *ctx, TCGv addr)
+{
 #if defined(TARGET_PPC64)
-GEN_OP_SPE_LHE(64_user);
-GEN_OP_SPE_LHE(64_kernel);
-GEN_OP_SPE_LHE(64_hypv);
-GEN_SPE_LDSPLAT(hhesplat, spe_lhe, 64_user);
-GEN_SPE_LDSPLAT(hhesplat, spe_lhe, 64_kernel);
-GEN_SPE_LDSPLAT(hhesplat, spe_lhe, 64_hypv);
-GEN_OP_SPE_LHE(le_64_user);
-GEN_OP_SPE_LHE(le_64_kernel);
-GEN_OP_SPE_LHE(le_64_hypv);
-GEN_SPE_LDSPLAT(hhesplat, spe_lhe, le_64_user);
-GEN_SPE_LDSPLAT(hhesplat, spe_lhe, le_64_kernel);
-GEN_SPE_LDSPLAT(hhesplat, spe_lhe, le_64_hypv);
-GEN_SPE_LDSPLAT(hhousplat, spe_lh, 64_user);
-GEN_SPE_LDSPLAT(hhousplat, spe_lh, 64_kernel);
-GEN_SPE_LDSPLAT(hhousplat, spe_lh, 64_hypv);
-GEN_SPE_LDSPLAT(hhousplat, spe_lh, le_64_user);
-GEN_SPE_LDSPLAT(hhousplat, spe_lh, le_64_kernel);
-GEN_SPE_LDSPLAT(hhousplat, spe_lh, le_64_hypv);
-GEN_OP_SPE_LHX(64_user);
-GEN_OP_SPE_LHX(64_kernel);
-GEN_OP_SPE_LHX(64_hypv);
-GEN_SPE_LDSPLAT(hhossplat, spe_lhx, 64_user);
-GEN_SPE_LDSPLAT(hhossplat, spe_lhx, 64_kernel);
-GEN_SPE_LDSPLAT(hhossplat, spe_lhx, 64_hypv);
-GEN_OP_SPE_LHX(le_64_user);
-GEN_OP_SPE_LHX(le_64_kernel);
-GEN_OP_SPE_LHX(le_64_hypv);
-GEN_SPE_LDSPLAT(hhossplat, spe_lhx, le_64_user);
-GEN_SPE_LDSPLAT(hhossplat, spe_lhx, le_64_kernel);
-GEN_SPE_LDSPLAT(hhossplat, spe_lhx, le_64_hypv);
+    TCGv t0 = tcg_temp_new();
+    gen_qemu_ld16s(t0, addr, ctx->mem_idx);
+    tcg_gen_ext32u_tl(cpu_gpr[rD(ctx->opcode)], t0);
+    tcg_gen_addi_tl(addr, addr, 2);
+    gen_qemu_ld16s(t0, addr, ctx->mem_idx);
+    tcg_gen_shli_tl(t0, t0, 32);
+    tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)], t0);
+    tcg_temp_free(t0);
+#else
+    gen_qemu_ld16s(cpu_gprh[rD(ctx->opcode)], addr, ctx->mem_idx);
+    tcg_gen_addi_tl(addr, addr, 2);
+    gen_qemu_ld16s(cpu_gpr[rD(ctx->opcode)], addr, ctx->mem_idx);
 #endif
+}
+
+static always_inline void gen_op_evlwwsplat(DisasContext *ctx, TCGv addr)
+{
+    TCGv t0 = tcg_temp_new();
+    gen_qemu_ld32u(t0, addr, ctx->mem_idx);
+#if defined(TARGET_PPC64)
+    tcg_gen_shli_tl(cpu_gpr[rD(ctx->opcode)], t0, 32);
+    tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)], t0);
+#else
+    tcg_gen_mov_tl(cpu_gprh[rD(ctx->opcode)], t0);
+    tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], t0);
 #endif
-GEN_SPEOP_LD(hhesplat, 1);
-GEN_SPEOP_LD(hhousplat, 1);
-GEN_SPEOP_LD(hhossplat, 1);
-GEN_SPEOP_LD(wwsplat, 2);
-GEN_SPEOP_LD(whsplat, 2);
+    tcg_temp_free(t0);
+}
 
-GEN_SPE(evlddx,         evldd,         0x00, 0x0C, 0x00000000, PPC_SPE); //
-GEN_SPE(evldwx,         evldw,         0x01, 0x0C, 0x00000000, PPC_SPE); //
-GEN_SPE(evldhx,         evldh,         0x02, 0x0C, 0x00000000, PPC_SPE); //
-GEN_SPE(evlhhesplatx,   evlhhesplat,   0x04, 0x0C, 0x00000000, PPC_SPE); //
-GEN_SPE(evlhhousplatx,  evlhhousplat,  0x06, 0x0C, 0x00000000, PPC_SPE); //
-GEN_SPE(evlhhossplatx,  evlhhossplat,  0x07, 0x0C, 0x00000000, PPC_SPE); //
-GEN_SPE(evlwhex,        evlwhe,        0x08, 0x0C, 0x00000000, PPC_SPE); //
-GEN_SPE(evlwhoux,       evlwhou,       0x0A, 0x0C, 0x00000000, PPC_SPE); //
-GEN_SPE(evlwhosx,       evlwhos,       0x0B, 0x0C, 0x00000000, PPC_SPE); //
-GEN_SPE(evlwwsplatx,    evlwwsplat,    0x0C, 0x0C, 0x00000000, PPC_SPE); //
-GEN_SPE(evlwhsplatx,    evlwhsplat,    0x0E, 0x0C, 0x00000000, PPC_SPE); //
-GEN_SPE(evstddx,        evstdd,        0x10, 0x0C, 0x00000000, PPC_SPE); //
-GEN_SPE(evstdwx,        evstdw,        0x11, 0x0C, 0x00000000, PPC_SPE); //
-GEN_SPE(evstdhx,        evstdh,        0x12, 0x0C, 0x00000000, PPC_SPE); //
-GEN_SPE(evstwhex,       evstwhe,       0x18, 0x0C, 0x00000000, PPC_SPE); //
-GEN_SPE(evstwhox,       evstwho,       0x1A, 0x0C, 0x00000000, PPC_SPE); //
-GEN_SPE(evstwwex,       evstwwe,       0x1C, 0x0C, 0x00000000, PPC_SPE); //
-GEN_SPE(evstwwox,       evstwwo,       0x1E, 0x0C, 0x00000000, PPC_SPE); //
+static always_inline void gen_op_evlwhsplat(DisasContext *ctx, TCGv addr)
+{
+    TCGv t0 = tcg_temp_new();
+#if defined(TARGET_PPC64)
+    gen_qemu_ld16u(t0, addr, ctx->mem_idx);
+    tcg_gen_shli_tl(cpu_gpr[rD(ctx->opcode)], t0, 48);
+    tcg_gen_shli_tl(t0, t0, 32);
+    tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)], t0);
+    tcg_gen_addi_tl(addr, addr, 2);
+    gen_qemu_ld16u(t0, addr, ctx->mem_idx);
+    tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)], t0);
+    tcg_gen_shli_tl(t0, t0, 16);
+    tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)], t0);
+#else
+    gen_qemu_ld16u(t0, addr, ctx->mem_idx);
+    tcg_gen_shli_tl(cpu_gprh[rD(ctx->opcode)], t0, 16);
+    tcg_gen_or_tl(cpu_gprh[rD(ctx->opcode)], cpu_gprh[rD(ctx->opcode)], t0);
+    tcg_gen_addi_tl(addr, addr, 2);
+    gen_qemu_ld16u(t0, addr, ctx->mem_idx);
+    tcg_gen_shli_tl(cpu_gpr[rD(ctx->opcode)], t0, 16);
+    tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], cpu_gprh[rD(ctx->opcode)], t0);
+#endif
+    tcg_temp_free(t0);
+}
 
+static always_inline void gen_op_evstdd(DisasContext *ctx, TCGv addr)
+{
+#if defined(TARGET_PPC64)
+    gen_qemu_st64(cpu_gpr[rS(ctx->opcode)], addr, ctx->mem_idx);
+#else
+    TCGv_i64 t0 = tcg_temp_new_i64();
+    tcg_gen_concat_i32_i64(t0, cpu_gpr[rS(ctx->opcode)], cpu_gprh[rS(ctx->opcode)]);
+    gen_qemu_st64(t0, addr, ctx->mem_idx);
+    tcg_temp_free_i64(t0);
+#endif
+}
+
+static always_inline void gen_op_evstdw(DisasContext *ctx, TCGv addr)
+{
+#if defined(TARGET_PPC64)
+    TCGv t0 = tcg_temp_new();
+    tcg_gen_shri_tl(t0, cpu_gpr[rS(ctx->opcode)], 32);
+    gen_qemu_st32(t0, addr, ctx->mem_idx);
+    tcg_temp_free(t0);
+#else
+    gen_qemu_st32(cpu_gprh[rS(ctx->opcode)], addr, ctx->mem_idx);
+#endif
+    tcg_gen_addi_tl(addr, addr, 4);
+    gen_qemu_st32(cpu_gpr[rS(ctx->opcode)], addr, ctx->mem_idx);
+}
+
+static always_inline void gen_op_evstdh(DisasContext *ctx, TCGv addr)
+{
+    TCGv t0 = tcg_temp_new();
+#if defined(TARGET_PPC64)
+    tcg_gen_shri_tl(t0, cpu_gpr[rS(ctx->opcode)], 48);
+#else
+    tcg_gen_shri_tl(t0, cpu_gprh[rS(ctx->opcode)], 16);
+#endif
+    gen_qemu_st16(t0, addr, ctx->mem_idx);
+    tcg_gen_addi_tl(addr, addr, 2);
+#if defined(TARGET_PPC64)
+    tcg_gen_shri_tl(t0, cpu_gpr[rS(ctx->opcode)], 32);
+    gen_qemu_st16(t0, addr, ctx->mem_idx);
+#else
+    gen_qemu_st16(cpu_gprh[rS(ctx->opcode)], addr, ctx->mem_idx);
+#endif
+    tcg_gen_addi_tl(addr, addr, 2);
+    tcg_gen_shri_tl(t0, cpu_gpr[rS(ctx->opcode)], 16);
+    gen_qemu_st16(t0, addr, ctx->mem_idx);
+    tcg_temp_free(t0);
+    tcg_gen_addi_tl(addr, addr, 2);
+    gen_qemu_st16(cpu_gpr[rS(ctx->opcode)], addr, ctx->mem_idx);
+}
+
+static always_inline void gen_op_evstwhe(DisasContext *ctx, TCGv addr)
+{
+    TCGv t0 = tcg_temp_new();
+#if defined(TARGET_PPC64)
+    tcg_gen_shri_tl(t0, cpu_gpr[rS(ctx->opcode)], 48);
+#else
+    tcg_gen_shri_tl(t0, cpu_gprh[rS(ctx->opcode)], 16);
+#endif
+    gen_qemu_st16(t0, addr, ctx->mem_idx);
+    tcg_gen_addi_tl(addr, addr, 2);
+    tcg_gen_shri_tl(t0, cpu_gpr[rS(ctx->opcode)], 16);
+    gen_qemu_st16(t0, addr, ctx->mem_idx);
+    tcg_temp_free(t0);
+}
+
+static always_inline void gen_op_evstwho(DisasContext *ctx, TCGv addr)
+{
+#if defined(TARGET_PPC64)
+    TCGv t0 = tcg_temp_new();
+    tcg_gen_shri_tl(t0, cpu_gpr[rS(ctx->opcode)], 32);
+    gen_qemu_st16(t0, addr, ctx->mem_idx);
+    tcg_temp_free(t0);
+#else
+    gen_qemu_st16(cpu_gprh[rS(ctx->opcode)], addr, ctx->mem_idx);
+#endif
+    tcg_gen_addi_tl(addr, addr, 2);
+    gen_qemu_st16(cpu_gpr[rS(ctx->opcode)], addr, ctx->mem_idx);
+}
+
+static always_inline void gen_op_evstwwe(DisasContext *ctx, TCGv addr)
+{
+#if defined(TARGET_PPC64)
+    TCGv t0 = tcg_temp_new();
+    tcg_gen_shri_tl(t0, cpu_gpr[rS(ctx->opcode)], 32);
+    gen_qemu_st32(t0, addr, ctx->mem_idx);
+    tcg_temp_free(t0);
+#else
+    gen_qemu_st32(cpu_gprh[rS(ctx->opcode)], addr, ctx->mem_idx);
+#endif
+}
+
+static always_inline void gen_op_evstwwo(DisasContext *ctx, TCGv addr)
+{
+    gen_qemu_st32(cpu_gpr[rS(ctx->opcode)], addr, ctx->mem_idx);
+}
+
+#define GEN_SPEOP_LDST(name, opc2, sh)                                        \
+GEN_HANDLER(gen_##name, 0x04, opc2, 0x0C, 0x00000000, PPC_SPE)                \
+{                                                                             \
+    TCGv t0;                                                                  \
+    if (unlikely(!ctx->spe_enabled)) {                                        \
+        GEN_EXCP_NO_AP(ctx);                                                  \
+        return;                                                               \
+    }                                                                         \
+    t0 = tcg_temp_new();                                                      \
+    if (Rc(ctx->opcode)) {                                                    \
+        gen_addr_spe_imm_index(t0, ctx, sh);                                  \
+    } else {                                                                  \
+        gen_addr_reg_index(t0, ctx);                                          \
+    }                                                                         \
+    gen_op_##name(ctx, t0);                                                   \
+    tcg_temp_free(t0);                                                        \
+}
+
+GEN_SPEOP_LDST(evldd, 0x00, 3);
+GEN_SPEOP_LDST(evldw, 0x01, 3);
+GEN_SPEOP_LDST(evldh, 0x02, 3);
+GEN_SPEOP_LDST(evlhhesplat, 0x04, 1);
+GEN_SPEOP_LDST(evlhhousplat, 0x06, 1);
+GEN_SPEOP_LDST(evlhhossplat, 0x07, 1);
+GEN_SPEOP_LDST(evlwhe, 0x08, 2);
+GEN_SPEOP_LDST(evlwhou, 0x0A, 2);
+GEN_SPEOP_LDST(evlwhos, 0x0B, 2);
+GEN_SPEOP_LDST(evlwwsplat, 0x0C, 2);
+GEN_SPEOP_LDST(evlwhsplat, 0x0E, 2);
+
+GEN_SPEOP_LDST(evstdd, 0x10, 3);
+GEN_SPEOP_LDST(evstdw, 0x11, 3);
+GEN_SPEOP_LDST(evstdh, 0x12, 3);
+GEN_SPEOP_LDST(evstwhe, 0x18, 2);
+GEN_SPEOP_LDST(evstwho, 0x1A, 2);
+GEN_SPEOP_LDST(evstwwe, 0x1C, 2);
+GEN_SPEOP_LDST(evstwwo, 0x1E, 2);
+
 /* Multiply and add - TODO */
 #if 0
 GEN_SPE(speundef,       evmhessf,      0x01, 0x10, 0x00000000, PPC_SPE);
