From: Kirill Batuzov
Subject: [Qemu-devel] [PATCH RFC 4/7] tcg: add add_i32x4 opcode
Date: Thu, 16 Oct 2014 12:56:51 +0400

Introduce the INDEX_op_add_i32x4 opcode, which adds two 128-bit variables
as vectors of four 32-bit integers.
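
For reference, the lane-wise semantics of the new opcode match this
plain-C model (illustrative only, not part of the patch):

    /* Model of INDEX_op_add_i32x4: four independent 32-bit additions
       across a 128-bit value; each lane wraps modulo 2^32. */
    #include <stdint.h>

    static void add_i32x4_model(uint32_t res[4],
                                const uint32_t a[4], const uint32_t b[4])
    {
        int i;

        for (i = 0; i < 4; i++) {
            res[i] = a[i] + b[i];
        }
    }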

Add a tcg_gen_add_i32x4 wrapper function that generates this opcode. If a
TCG target does not support it, the wrapper falls back to emulating the
vector operation as a series of scalar ones. Wrapper arguments should be
globals unless the frontend is sure that the backend has at least some
support for vector operations (by "some support" I mean loads, stores and
moves).

Note that the emulation of a vector operation with scalar ones is done
inline. An attempt to do it in a helper resulted in a serious performance
degradation.
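
For comparison, the rejected approach would have routed the fallback
through a helper along these lines (hypothetical sketch; the helper name
is made up for the example):

    /* Hypothetical helper-based fallback (the rejected variant).
       A TCG helper call spills live globals to memory and pays a real
       call/return per vector add, which is the usual cost of helpers. */
    #include <stdint.h>

    void helper_add_i32x4(void *d, void *a, void *b)
    {
        uint32_t *dp = d;
        uint32_t *ap = a;
        uint32_t *bp = b;
        int i;

        for (i = 0; i < 4; i++) {
            dp[i] = ap[i] + bp[i];
        }
    }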

Signed-off-by: Kirill Batuzov <address@hidden>
---
 tcg/tcg-op.h  |  108 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 tcg/tcg-opc.h |   12 +++++++
 tcg/tcg.h     |    5 +++
 3 files changed, 125 insertions(+)

diff --git a/tcg/tcg-op.h b/tcg/tcg-op.h
index ea2b14f..c5f777d 100644
--- a/tcg/tcg-op.h
+++ b/tcg/tcg-op.h
@@ -139,6 +139,15 @@ static inline void tcg_gen_ldst_op_i64(TCGOpcode opc, TCGv_i64 val,
     *tcg_ctx.gen_opparam_ptr++ = offset;
 }
 
+static inline void tcg_gen_ldst_op_v128(TCGOpcode opc, TCGv_v128 val,
+                                       TCGv_ptr base, TCGArg offset)
+{
+    *tcg_ctx.gen_opc_ptr++ = opc;
+    *tcg_ctx.gen_opparam_ptr++ = GET_TCGV_V128(val);
+    *tcg_ctx.gen_opparam_ptr++ = GET_TCGV_PTR(base);
+    *tcg_ctx.gen_opparam_ptr++ = offset;
+}
+
 static inline void tcg_gen_op4_i32(TCGOpcode opc, TCGv_i32 arg1, TCGv_i32 arg2,
                                    TCGv_i32 arg3, TCGv_i32 arg4)
 {
@@ -1069,6 +1078,11 @@ static inline void tcg_gen_ld_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long o
     tcg_gen_ldst_op_i64(INDEX_op_ld_i64, ret, arg2, offset);
 }
 
+static inline void tcg_gen_ld_v128(TCGv_v128 ret, TCGv_ptr arg2, tcg_target_long offset)
+{
+    tcg_gen_ldst_op_v128(INDEX_op_ld_v128, ret, arg2, offset);
+}
+
 static inline void tcg_gen_st8_i64(TCGv_i64 arg1, TCGv_ptr arg2,
                                    tcg_target_long offset)
 {
@@ -1092,6 +1106,11 @@ static inline void tcg_gen_st_i64(TCGv_i64 arg1, TCGv_ptr arg2, tcg_target_long
     tcg_gen_ldst_op_i64(INDEX_op_st_i64, arg1, arg2, offset);
 }
 
+static inline void tcg_gen_st_v128(TCGv_v128 arg1, TCGv_ptr arg2, tcg_target_long offset)
+{
+    tcg_gen_ldst_op_v128(INDEX_op_st_v128, arg1, arg2, offset);
+}
+
 static inline void tcg_gen_add_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
 {
     tcg_gen_op3_i64(INDEX_op_add_i64, ret, arg1, arg2);
@@ -2780,6 +2799,8 @@ static inline void tcg_gen_qemu_st64(TCGv_i64 arg, TCGv addr, int mem_index)
     tcg_gen_add_i32(TCGV_PTR_TO_NAT(R), TCGV_PTR_TO_NAT(A), TCGV_PTR_TO_NAT(B))
 # define tcg_gen_addi_ptr(R, A, B) \
     tcg_gen_addi_i32(TCGV_PTR_TO_NAT(R), TCGV_PTR_TO_NAT(A), (B))
+# define tcg_gen_movi_ptr(R, B) \
+    tcg_gen_movi_i32(TCGV_PTR_TO_NAT(R), (B))
 # define tcg_gen_ext_i32_ptr(R, A) \
     tcg_gen_mov_i32(TCGV_PTR_TO_NAT(R), (A))
 #else
@@ -2791,6 +2812,93 @@ static inline void tcg_gen_qemu_st64(TCGv_i64 arg, TCGv addr, int mem_index)
     tcg_gen_add_i64(TCGV_PTR_TO_NAT(R), TCGV_PTR_TO_NAT(A), TCGV_PTR_TO_NAT(B))
 # define tcg_gen_addi_ptr(R, A, B) \
     tcg_gen_addi_i64(TCGV_PTR_TO_NAT(R), TCGV_PTR_TO_NAT(A), (B))
+# define tcg_gen_movi_ptr(R, B) \
+    tcg_gen_movi_i64(TCGV_PTR_TO_NAT(R), (B))
 # define tcg_gen_ext_i32_ptr(R, A) \
     tcg_gen_ext_i32_i64(TCGV_PTR_TO_NAT(R), (A))
 #endif /* UINTPTR_MAX == UINT32_MAX */
+
+/***************************************/
+/* 128-bit vector arithmetic.          */
+
+static inline void *tcg_v128_swap_slot(int n)
+{
+    return &tcg_ctx.v128_swap[n * 16];
+}
+
+/* Find a memory location for 128-bit TCG variable. */
+static inline void tcg_v128_to_ptr(TCGv_v128 tmp, TCGv_ptr base, int slot,
+                                   TCGv_ptr *real_base, intptr_t *real_offset,
+                                   int is_read)
+{
+    int idx = GET_TCGV_V128(tmp);
+    assert(idx >= 0 && idx < tcg_ctx.nb_temps);
+    if (idx < tcg_ctx.nb_globals) {
+        /* Globals use their locations within CPUArchState. */
+        int env = GET_TCGV_PTR(tcg_ctx.cpu_env);
+        TCGTemp *ts_env = &tcg_ctx.temps[env];
+        TCGTemp *ts_arg = &tcg_ctx.temps[idx];
+
+        /* Sanity check: the global's memory location must be addressed
+           relative to ENV. */
+        assert(ts_env->val_type == TEMP_VAL_REG &&
+               ts_env->reg == ts_arg->mem_reg &&
+               ts_arg->mem_allocated);
+
+        *real_base = tcg_ctx.cpu_env;
+        *real_offset = ts_arg->mem_offset;
+
+        if (is_read) {
+            tcg_gen_sync_temp_v128(tmp);
+        } else {
+            tcg_gen_discard_v128(tmp);
+        }
+    } else {
+        /* Temporaries use swap space in TCGContext. Since we already have
+           a 128-bit temporary we'll assume that the target supports 128-bit
+           loads and stores. */
+        *real_base = base;
+        *real_offset = slot * 16;
+        if (is_read) {
+            tcg_gen_st_v128(tmp, base, slot * 16);
+        }
+    }
+}
+
+static inline void tcg_gen_add_i32x4(TCGv_v128 res, TCGv_v128 arg1,
+                                     TCGv_v128 arg2)
+{
+    if (TCG_TARGET_HAS_add_i32x4) {
+        tcg_gen_op3_v128(INDEX_op_add_i32x4, res, arg1, arg2);
+    } else {
+        TCGv_ptr base = tcg_temp_new_ptr();
+        TCGv_ptr arg1p, arg2p, resp;
+        intptr_t arg1of, arg2of, resof;
+        int i;
+        TCGv_i32 tmp1, tmp2;
+
+        tcg_gen_movi_ptr(base, (uintptr_t)&tcg_ctx.v128_swap[0]);
+
+        tcg_v128_to_ptr(arg1, base, 1, &arg1p, &arg1of, 1);
+        tcg_v128_to_ptr(arg2, base, 2, &arg2p, &arg2of, 1);
+        tcg_v128_to_ptr(res, base, 0, &resp, &resof, 0);
+
+        tmp1 = tcg_temp_new_i32();
+        tmp2 = tcg_temp_new_i32();
+
+        for (i = 0; i < 4; i++) {
+            tcg_gen_ld_i32(tmp1, arg1p, arg1of + i * 4);
+            tcg_gen_ld_i32(tmp2, arg2p, arg2of + i * 4);
+            tcg_gen_add_i32(tmp1, tmp1, tmp2);
+            tcg_gen_st_i32(tmp1, resp, resof + i * 4);
+        }
+
+        if (GET_TCGV_V128(res) >= tcg_ctx.nb_globals) {
+            tcg_gen_ld_v128(res, base, 0);
+        }
+
+        tcg_temp_free_i32(tmp1);
+        tcg_temp_free_i32(tmp2);
+        tcg_temp_free_ptr(base);
+    }
+}
diff --git a/tcg/tcg-opc.h b/tcg/tcg-opc.h
index 0916d83..51d72d9 100644
--- a/tcg/tcg-opc.h
+++ b/tcg/tcg-opc.h
@@ -52,6 +52,12 @@ DEF(br, 0, 0, 1, TCG_OPF_BB_END)
 # define IMPL64  TCG_OPF_64BIT
 #endif
 
+#ifdef TCG_TARGET_HAS_REG128
+# define IMPL128 0
+#else
+# define IMPL128 TCG_OPF_NOT_PRESENT
+#endif
+
 DEF(mov_i32, 1, 1, 0, TCG_OPF_NOT_PRESENT)
 DEF(movi_i32, 1, 0, 1, TCG_OPF_NOT_PRESENT)
 DEF(setcond_i32, 1, 2, 1, 0)
@@ -177,6 +183,12 @@ DEF(muls2_i64, 2, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_muls2_i64))
 DEF(muluh_i64, 1, 2, 0, IMPL(TCG_TARGET_HAS_muluh_i64))
 DEF(mulsh_i64, 1, 2, 0, IMPL(TCG_TARGET_HAS_mulsh_i64))
 
+/* load/store */
+DEF(st_v128, 0, 2, 1, IMPL128)
+DEF(ld_v128, 1, 1, 1, IMPL128)
+/* 128-bit vector arith */
+DEF(add_i32x4, 1, 2, 0, IMPL128 | IMPL(TCG_TARGET_HAS_add_i32x4))
+
 /* QEMU specific */
 #if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
 DEF(debug_insn_start, 0, 0, 2, TCG_OPF_NOT_PRESENT)
diff --git a/tcg/tcg.h b/tcg/tcg.h
index 83fb0d3..75ab2e4 100644
--- a/tcg/tcg.h
+++ b/tcg/tcg.h
@@ -119,6 +119,10 @@ typedef uint64_t TCGRegSet;
 #define TCG_TARGET_HAS_rem_i64          0
 #endif
 
+#ifndef TCG_TARGET_HAS_add_i32x4
+#define TCG_TARGET_HAS_add_i32x4        0
+#endif
+
 /* For 32-bit targets, some sort of unsigned widening multiply is required.  */
 #if TCG_TARGET_REG_BITS == 32 \
     && !(defined(TCG_TARGET_HAS_mulu2_i32) \
@@ -544,6 +548,7 @@ struct TCGContext {
     /* threshold to flush the translated code buffer */
     size_t code_gen_buffer_max_size;
     void *code_gen_ptr;
+    uint8_t v128_swap[16 * 3] __attribute__((aligned(16)));
 
     TBContext tb_ctx;
 
-- 
1.7.10.4