[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[RFC PATCH 05/43] target/loongarch: Implement vadd/vsub
From: Song Gao
Subject: [RFC PATCH 05/43] target/loongarch: Implement vadd/vsub
Date: Sat, 24 Dec 2022 16:15:55 +0800
This patch includes:
- VADD.{B/H/W/D/Q};
- VSUB.{B/H/W/D/Q}.
Signed-off-by: Song Gao <gaosong@loongson.cn>
---
target/loongarch/disas.c | 23 ++++++
target/loongarch/helper.h | 12 +++
target/loongarch/insn_trans/trans_lsx.c.inc | 23 ++++++
target/loongarch/insns.decode | 22 ++++++
target/loongarch/lsx_helper.c | 81 +++++++++++++++++++++
5 files changed, 161 insertions(+)
diff --git a/target/loongarch/disas.c b/target/loongarch/disas.c
index 858dfcc53a..51c597603e 100644
--- a/target/loongarch/disas.c
+++ b/target/loongarch/disas.c
@@ -755,3 +755,26 @@ static bool trans_fcmp_cond_##suffix(DisasContext *ctx, \
FCMP_INSN(s)
FCMP_INSN(d)
+
+/*
+ * INSN_LSX(insn, type): generate the disassembler entry for an LSX
+ * instruction.  trans_<insn> simply prints the mnemonic through the
+ * output_<type> formatter that matches the decoded argument set.
+ */
+#define INSN_LSX(insn, type) \
+static bool trans_##insn(DisasContext *ctx, arg_##type * a) \
+{ \
+ output_##type(ctx, a, #insn); \
+ return true; \
+}
+
+/* Print a three-vector-register operand set as "vd, vj, vk". */
+static void output_vvv(DisasContext *ctx, arg_vvv *a, const char *mnemonic)
+{
+ output(ctx, mnemonic, "v%d, v%d, v%d", a->vd, a->vj, a->vk);
+}
+
+/* Disassembly entries for VADD.{B/H/W/D/Q} and VSUB.{B/H/W/D/Q}. */
+INSN_LSX(vadd_b, vvv)
+INSN_LSX(vadd_h, vvv)
+INSN_LSX(vadd_w, vvv)
+INSN_LSX(vadd_d, vvv)
+INSN_LSX(vadd_q, vvv)
+INSN_LSX(vsub_b, vvv)
+INSN_LSX(vsub_h, vvv)
+INSN_LSX(vsub_w, vvv)
+INSN_LSX(vsub_d, vvv)
+INSN_LSX(vsub_q, vvv)
diff --git a/target/loongarch/helper.h b/target/loongarch/helper.h
index 9c01823a26..465bc36cb8 100644
--- a/target/loongarch/helper.h
+++ b/target/loongarch/helper.h
@@ -130,3 +130,15 @@ DEF_HELPER_4(ldpte, void, env, tl, tl, i32)
DEF_HELPER_1(ertn, void, env)
DEF_HELPER_1(idle, void, env)
#endif
+
+/* LoongArch LSX */
+/* vadd.<fmt>/vsub.<fmt> helpers: arguments are (env, vd, vj, vk),
+ * where vd/vj/vk are vector register indices passed as i32. */
+DEF_HELPER_4(vadd_b, void, env, i32, i32, i32)
+DEF_HELPER_4(vadd_h, void, env, i32, i32, i32)
+DEF_HELPER_4(vadd_w, void, env, i32, i32, i32)
+DEF_HELPER_4(vadd_d, void, env, i32, i32, i32)
+DEF_HELPER_4(vadd_q, void, env, i32, i32, i32)
+DEF_HELPER_4(vsub_b, void, env, i32, i32, i32)
+DEF_HELPER_4(vsub_h, void, env, i32, i32, i32)
+DEF_HELPER_4(vsub_w, void, env, i32, i32, i32)
+DEF_HELPER_4(vsub_d, void, env, i32, i32, i32)
+DEF_HELPER_4(vsub_q, void, env, i32, i32, i32)
diff --git a/target/loongarch/insn_trans/trans_lsx.c.inc b/target/loongarch/insn_trans/trans_lsx.c.inc
index d0bc9f561e..b2276ae688 100644
--- a/target/loongarch/insn_trans/trans_lsx.c.inc
+++ b/target/loongarch/insn_trans/trans_lsx.c.inc
@@ -14,3 +14,26 @@
#else
#define CHECK_SXE
#endif
+
+/*
+ * Common translation path for three-register LSX operations.
+ * Verifies LSX is usable (CHECK_SXE) and emits a call to the
+ * per-instruction helper, passing the three vector register
+ * indices as i32 constants.
+ */
+static bool gen_vvv(DisasContext *ctx, arg_vvv *a,
+ void (*func)(TCGv_ptr, TCGv_i32, TCGv_i32, TCGv_i32))
+{
+ TCGv_i32 vd = tcg_constant_i32(a->vd);
+ TCGv_i32 vj = tcg_constant_i32(a->vj);
+ TCGv_i32 vk = tcg_constant_i32(a->vk);
+
+ CHECK_SXE;
+ func(cpu_env, vd, vj, vk);
+ return true;
+}
+
+/* Bind each VADD/VSUB pattern to gen_vvv with its width-specific helper. */
+TRANS(vadd_b, gen_vvv, gen_helper_vadd_b)
+TRANS(vadd_h, gen_vvv, gen_helper_vadd_h)
+TRANS(vadd_w, gen_vvv, gen_helper_vadd_w)
+TRANS(vadd_d, gen_vvv, gen_helper_vadd_d)
+TRANS(vadd_q, gen_vvv, gen_helper_vadd_q)
+TRANS(vsub_b, gen_vvv, gen_helper_vsub_b)
+TRANS(vsub_h, gen_vvv, gen_helper_vsub_h)
+TRANS(vsub_w, gen_vvv, gen_helper_vsub_w)
+TRANS(vsub_d, gen_vvv, gen_helper_vsub_d)
+TRANS(vsub_q, gen_vvv, gen_helper_vsub_q)
diff --git a/target/loongarch/insns.decode b/target/loongarch/insns.decode
index 3fdc6e148c..0dd6ab20a2 100644
--- a/target/loongarch/insns.decode
+++ b/target/loongarch/insns.decode
@@ -484,3 +484,25 @@ ldpte 0000 01100100 01 ........ ..... 00000 @j_i
ertn 0000 01100100 10000 01110 00000 00000 @empty
idle 0000 01100100 10001 ............... @i15
dbcl 0000 00000010 10101 ............... @i15
+
+#
+# LSX Argument sets
+#
+
+&vvv vd vj vk
+
+#
+# LSX Formats
+#
+@vvv .... ........ ..... vk:5 vj:5 vd:5 &vvv
+
+# Three-register add/sub.  Note the 128-bit forms (vadd.q/vsub.q) sit in
+# a different major opcode group (0111 0001...) than the B/H/W/D forms.
+vadd_b 0111 00000000 10100 ..... ..... ..... @vvv
+vadd_h 0111 00000000 10101 ..... ..... ..... @vvv
+vadd_w 0111 00000000 10110 ..... ..... ..... @vvv
+vadd_d 0111 00000000 10111 ..... ..... ..... @vvv
+vadd_q 0111 00010010 11010 ..... ..... ..... @vvv
+vsub_b 0111 00000000 11000 ..... ..... ..... @vvv
+vsub_h 0111 00000000 11001 ..... ..... ..... @vvv
+vsub_w 0111 00000000 11010 ..... ..... ..... @vvv
+vsub_d 0111 00000000 11011 ..... ..... ..... @vvv
+vsub_q 0111 00010010 11011 ..... ..... ..... @vvv
diff --git a/target/loongarch/lsx_helper.c b/target/loongarch/lsx_helper.c
index 325574a026..195b2ffa8d 100644
--- a/target/loongarch/lsx_helper.c
+++ b/target/loongarch/lsx_helper.c
@@ -4,3 +4,84 @@
*
* Copyright (c) 2022 Loongson Technology Corporation Limited
*/
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "exec/helper-proto.h"
+
+/*
+ * DO_HELPER_VVV(NAME, BIT, FUNC, ...): define helper_<NAME>, which
+ * forwards the three vector-register indices, the element width BIT,
+ * and the per-element callback (the variadic argument) to the generic
+ * dispatcher FUNC.
+ */
+#define DO_HELPER_VVV(NAME, BIT, FUNC, ...) \
+ void helper_##NAME(CPULoongArchState *env, \
+ uint32_t vd, uint32_t vj, uint32_t vk) \
+ { FUNC(env, vd, vj, vk, BIT, __VA_ARGS__); }
+
+/*
+ * Generic three-operand vector loop: applies <func> to each of the
+ * LSX_LEN/bit elements, writing Vd[i] = func(Vj[i], Vk[i]).  The LSX
+ * vector registers overlay the FP register file (env->fpr[].vec).
+ * Element-wise ops are safe even when vd aliases vj or vk, since each
+ * lane reads its inputs before writing its own output.
+ */
+static void helper_vvv(CPULoongArchState *env,
+ uint32_t vd, uint32_t vj, uint32_t vk, int bit,
+ void (*func)(vec_t*, vec_t*, vec_t*, int, int))
+{
+ int i;
+ vec_t *Vd = &(env->fpr[vd].vec);
+ vec_t *Vj = &(env->fpr[vj].vec);
+ vec_t *Vk = &(env->fpr[vk].vec);
+
+ for (i = 0; i < LSX_LEN/bit; i++) {
+ func(Vd, Vj, Vk, bit, i);
+ }
+}
+
+/*
+ * Per-lane add: selects the lane array (B/H/W/D/Q) by element width
+ * <bit> and computes Vd[n] = Vj[n] + Vk[n].  For bit == 128 the whole
+ * register is one Q lane (presumably an __int128-backed field -- see
+ * the vec_t definition; TODO(review) confirm).
+ */
+static void do_vadd(vec_t *Vd, vec_t *Vj, vec_t *Vk, int bit, int n)
+{
+ switch (bit) {
+ case 8:
+ Vd->B[n] = Vj->B[n] + Vk->B[n];
+ break;
+ case 16:
+ Vd->H[n] = Vj->H[n] + Vk->H[n];
+ break;
+ case 32:
+ Vd->W[n] = Vj->W[n] + Vk->W[n];
+ break;
+ case 64:
+ Vd->D[n] = Vj->D[n] + Vk->D[n];
+ break;
+ case 128:
+ Vd->Q[n] = Vj->Q[n] + Vk->Q[n];
+ break;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+/*
+ * Per-lane subtract: mirror of do_vadd with '-' in place of '+';
+ * computes Vd[n] = Vj[n] - Vk[n] for the lane width selected by <bit>.
+ */
+static void do_vsub(vec_t *Vd, vec_t *Vj, vec_t *Vk, int bit, int n)
+{
+ switch (bit) {
+ case 8:
+ Vd->B[n] = Vj->B[n] - Vk->B[n];
+ break;
+ case 16:
+ Vd->H[n] = Vj->H[n] - Vk->H[n];
+ break;
+ case 32:
+ Vd->W[n] = Vj->W[n] - Vk->W[n];
+ break;
+ case 64:
+ Vd->D[n] = Vj->D[n] - Vk->D[n];
+ break;
+ case 128:
+ Vd->Q[n] = Vj->Q[n] - Vk->Q[n];
+ break;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+/* Instantiate the vadd/vsub helpers for 8/16/32/64/128-bit elements. */
+DO_HELPER_VVV(vadd_b, 8, helper_vvv, do_vadd)
+DO_HELPER_VVV(vadd_h, 16, helper_vvv, do_vadd)
+DO_HELPER_VVV(vadd_w, 32, helper_vvv, do_vadd)
+DO_HELPER_VVV(vadd_d, 64, helper_vvv, do_vadd)
+DO_HELPER_VVV(vadd_q, 128, helper_vvv, do_vadd)
+DO_HELPER_VVV(vsub_b, 8, helper_vvv, do_vsub)
+DO_HELPER_VVV(vsub_h, 16, helper_vvv, do_vsub)
+DO_HELPER_VVV(vsub_w, 32, helper_vvv, do_vsub)
+DO_HELPER_VVV(vsub_d, 64, helper_vvv, do_vsub)
+DO_HELPER_VVV(vsub_q, 128, helper_vvv, do_vsub)
--
2.31.1
- Re: [RFC PATCH 01/43] target/loongarch: Add vector data type vec_t, (continued)
- [RFC PATCH 07/43] target/loongarch: Implement vneg, Song Gao, 2022/12/24
- [RFC PATCH 17/43] target/loongarch: Implement vdiv/vmod, Song Gao, 2022/12/24
- [RFC PATCH 09/43] target/loongarch: Implement vhaddw/vhsubw, Song Gao, 2022/12/24
- [RFC PATCH 21/43] target/loongarch: Implement vmskltz/vmskgez/vmsknz, Song Gao, 2022/12/24
- [RFC PATCH 05/43] target/loongarch: Implement vadd/vsub, Song Gao <=
- [RFC PATCH 06/43] target/loongarch: Implement vaddi/vsubi, Song Gao, 2022/12/24
- [RFC PATCH 29/43] target/loongarch: Implement vssrlrn vssrarn, Song Gao, 2022/12/24
- [RFC PATCH 33/43] target/loongarch: Implement vfrstp, Song Gao, 2022/12/24
- [RFC PATCH 26/43] target/loongarch: Implement vsrln vsran, Song Gao, 2022/12/24
- [RFC PATCH 23/43] target/loongarch: Implement vsll vsrl vsra vrotr, Song Gao, 2022/12/24
- [RFC PATCH 27/43] target/loongarch: Implement vsrlrn vsrarn, Song Gao, 2022/12/24
- [RFC PATCH 30/43] target/loongarch: Implement vclo vclz, Song Gao, 2022/12/24