[PATCH v3 29/47] target/loongarch: Implement xvsrln xvsran
From: Song Gao
Subject: [PATCH v3 29/47] target/loongarch: Implement xvsrln xvsran
Date: Fri, 14 Jul 2023 16:45:57 +0800
This patch includes:
- XVSRLN.{B.H/H.W/W.D};
- XVSRAN.{B.H/H.W/W.D};
- XVSRLNI.{B.H/H.W/W.D/D.Q};
- XVSRANI.{B.H/H.W/W.D/D.Q}.
Signed-off-by: Song Gao <gaosong@loongson.cn>
---
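As a reading aid (not part of the patch), a minimal scalar sketch of what one 128-bit lane of XVSRLN.B.H computes, matching the helpers below; the function name is hypothetical, for illustration only:

#include <stdint.h>

/*
 * Illustration only: one 128-bit lane of XVSRLN.B.H. Each unsigned
 * 16-bit element of vj is shifted right logically by the matching
 * element of vk modulo 16, then truncated to 8 bits; the upper half
 * of the destination lane is zeroed. XVSRAN.B.H does the same with
 * an arithmetic shift of signed (int16_t) elements.
 */
static void xvsrln_b_h_lane(uint8_t d[16], const uint16_t vj[8],
                            const uint16_t vk[8])
{
    for (int n = 0; n < 8; n++) {
        d[n] = (uint8_t)(vj[n] >> (vk[n] % 16)); /* shift, then narrow */
    }
    for (int n = 8; n < 16; n++) {
        d[n] = 0; /* matches Vd->D(2 * i + 1) = 0 in the helper */
    }
}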
target/loongarch/disas.c | 16 ++
target/loongarch/insn_trans/trans_lasx.c.inc | 16 ++
target/loongarch/insns.decode | 16 ++
target/loongarch/vec.h | 2 +
target/loongarch/vec_helper.c | 168 ++++++++++---------
5 files changed, 141 insertions(+), 77 deletions(-)
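The reworked helpers process one 128-bit lane per outer-loop iteration (oprsz / 16 lanes), with ofs = LSX_LEN / BIT elements per lane. A self-contained sketch of the resulting index mapping for xvsrln.b.h on a 256-bit register (names here are illustrative, not from the patch):

#include <stdio.h>

int main(void)
{
    /* xvsrln.b.h: oprsz = 32 bytes, BIT = 16, ofs = LSX_LEN / BIT = 8 */
    const int oprsz = 32, bit = 16, ofs = 128 / bit;

    for (int i = 0; i < oprsz / 16; i++) {        /* one 128-bit lane */
        for (int j = 0; j < ofs; j++) {
            /* byte B(j + ofs*2*i) receives halfword H(j + ofs*i) */
            printf("lane %d: B(%2d) = H(%2d) >> s\n",
                   i, j + ofs * 2 * i, j + ofs * i);
        }
        /* the upper 64 bits of each lane are cleared */
        printf("lane %d: D(%d) = 0\n", i, 2 * i + 1);
    }
    return 0;
}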
diff --git a/target/loongarch/disas.c b/target/loongarch/disas.c
index 9109203a05..14b526abd6 100644
--- a/target/loongarch/disas.c
+++ b/target/loongarch/disas.c
@@ -2104,6 +2104,22 @@ INSN_LASX(xvsrari_h, vv_i)
INSN_LASX(xvsrari_w, vv_i)
INSN_LASX(xvsrari_d, vv_i)
+INSN_LASX(xvsrln_b_h, vvv)
+INSN_LASX(xvsrln_h_w, vvv)
+INSN_LASX(xvsrln_w_d, vvv)
+INSN_LASX(xvsran_b_h, vvv)
+INSN_LASX(xvsran_h_w, vvv)
+INSN_LASX(xvsran_w_d, vvv)
+
+INSN_LASX(xvsrlni_b_h, vv_i)
+INSN_LASX(xvsrlni_h_w, vv_i)
+INSN_LASX(xvsrlni_w_d, vv_i)
+INSN_LASX(xvsrlni_d_q, vv_i)
+INSN_LASX(xvsrani_b_h, vv_i)
+INSN_LASX(xvsrani_h_w, vv_i)
+INSN_LASX(xvsrani_w_d, vv_i)
+INSN_LASX(xvsrani_d_q, vv_i)
+
INSN_LASX(xvreplgr2vr_b, vr)
INSN_LASX(xvreplgr2vr_h, vr)
INSN_LASX(xvreplgr2vr_w, vr)
diff --git a/target/loongarch/insn_trans/trans_lasx.c.inc b/target/loongarch/insn_trans/trans_lasx.c.inc
index aebe384220..43ff9b188a 100644
--- a/target/loongarch/insn_trans/trans_lasx.c.inc
+++ b/target/loongarch/insn_trans/trans_lasx.c.inc
@@ -423,6 +423,22 @@ TRANS(xvsrari_h, gen_vv_i, 32, gen_helper_vsrari_h)
TRANS(xvsrari_w, gen_vv_i, 32, gen_helper_vsrari_w)
TRANS(xvsrari_d, gen_vv_i, 32, gen_helper_vsrari_d)
+TRANS(xvsrln_b_h, gen_vvv, 32, gen_helper_vsrln_b_h)
+TRANS(xvsrln_h_w, gen_vvv, 32, gen_helper_vsrln_h_w)
+TRANS(xvsrln_w_d, gen_vvv, 32, gen_helper_vsrln_w_d)
+TRANS(xvsran_b_h, gen_vvv, 32, gen_helper_vsran_b_h)
+TRANS(xvsran_h_w, gen_vvv, 32, gen_helper_vsran_h_w)
+TRANS(xvsran_w_d, gen_vvv, 32, gen_helper_vsran_w_d)
+
+TRANS(xvsrlni_b_h, gen_vv_i, 32, gen_helper_vsrlni_b_h)
+TRANS(xvsrlni_h_w, gen_vv_i, 32, gen_helper_vsrlni_h_w)
+TRANS(xvsrlni_w_d, gen_vv_i, 32, gen_helper_vsrlni_w_d)
+TRANS(xvsrlni_d_q, gen_vv_i, 32, gen_helper_vsrlni_d_q)
+TRANS(xvsrani_b_h, gen_vv_i, 32, gen_helper_vsrani_b_h)
+TRANS(xvsrani_h_w, gen_vv_i, 32, gen_helper_vsrani_h_w)
+TRANS(xvsrani_w_d, gen_vv_i, 32, gen_helper_vsrani_w_d)
+TRANS(xvsrani_d_q, gen_vv_i, 32, gen_helper_vsrani_d_q)
+
TRANS(xvreplgr2vr_b, gvec_dup, 32, MO_8)
TRANS(xvreplgr2vr_h, gvec_dup, 32, MO_16)
TRANS(xvreplgr2vr_w, gvec_dup, 32, MO_32)
diff --git a/target/loongarch/insns.decode b/target/loongarch/insns.decode
index ca0951e1cc..204dcfa075 100644
--- a/target/loongarch/insns.decode
+++ b/target/loongarch/insns.decode
@@ -1678,6 +1678,22 @@ xvsrari_h 0111 01101010 10000 1 .... ..... ..... @vv_ui4
xvsrari_w 0111 01101010 10001 ..... ..... ..... @vv_ui5
xvsrari_d 0111 01101010 1001 ...... ..... ..... @vv_ui6
+xvsrln_b_h 0111 01001111 01001 ..... ..... ..... @vvv
+xvsrln_h_w 0111 01001111 01010 ..... ..... ..... @vvv
+xvsrln_w_d 0111 01001111 01011 ..... ..... ..... @vvv
+xvsran_b_h 0111 01001111 01101 ..... ..... ..... @vvv
+xvsran_h_w 0111 01001111 01110 ..... ..... ..... @vvv
+xvsran_w_d 0111 01001111 01111 ..... ..... ..... @vvv
+
+xvsrlni_b_h 0111 01110100 00000 1 .... ..... ..... @vv_ui4
+xvsrlni_h_w 0111 01110100 00001 ..... ..... ..... @vv_ui5
+xvsrlni_w_d 0111 01110100 0001 ...... ..... ..... @vv_ui6
+xvsrlni_d_q 0111 01110100 001 ....... ..... ..... @vv_ui7
+xvsrani_b_h 0111 01110101 10000 1 .... ..... ..... @vv_ui4
+xvsrani_h_w 0111 01110101 10001 ..... ..... ..... @vv_ui5
+xvsrani_w_d 0111 01110101 1001 ...... ..... ..... @vv_ui6
+xvsrani_d_q 0111 01110101 101 ....... ..... ..... @vv_ui7
+
xvreplgr2vr_b 0111 01101001 11110 00000 ..... ..... @vr
xvreplgr2vr_h 0111 01101001 11110 00001 ..... ..... @vr
xvreplgr2vr_w 0111 01101001 11110 00010 ..... ..... @vr
diff --git a/target/loongarch/vec.h b/target/loongarch/vec.h
index 681afd842f..67d829f9da 100644
--- a/target/loongarch/vec.h
+++ b/target/loongarch/vec.h
@@ -74,4 +74,6 @@
#define DO_SIGNCOV(a, b) (a == 0 ? 0 : a < 0 ? -b : b)
+#define R_SHIFT(a, b) (a >> b)
+
#endif /* LOONGARCH_VEC_H */
diff --git a/target/loongarch/vec_helper.c b/target/loongarch/vec_helper.c
index 38b55e00ca..dacedc4363 100644
--- a/target/loongarch/vec_helper.c
+++ b/target/loongarch/vec_helper.c
@@ -1079,107 +1079,121 @@ VSRARI(vsrari_h, 16, H)
VSRARI(vsrari_w, 32, W)
VSRARI(vsrari_d, 64, D)
-#define R_SHIFT(a, b) (a >> b)
-
-#define VSRLN(NAME, BIT, T, E1, E2) \
-void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc) \
-{ \
- int i; \
- VReg *Vd = (VReg *)vd; \
- VReg *Vj = (VReg *)vj; \
- VReg *Vk = (VReg *)vk; \
- \
- for (i = 0; i < LSX_LEN/BIT; i++) { \
- Vd->E1(i) = R_SHIFT((T)Vj->E2(i),((T)Vk->E2(i)) % BIT); \
- } \
- Vd->D(1) = 0; \
+#define VSRLN(NAME, BIT, E1, E2) \
+void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc) \
+{ \
+ int i, j, ofs; \
+ VReg *Vd = (VReg *)vd; \
+ VReg *Vj = (VReg *)vj; \
+ VReg *Vk = (VReg *)vk; \
+ int oprsz = simd_oprsz(desc); \
+ \
+ ofs = LSX_LEN / BIT; \
+ for (i = 0; i < oprsz / 16; i++) { \
+ for (j = 0; j < ofs; j++) { \
+ Vd->E1(j + ofs * 2 * i) = R_SHIFT(Vj->E2(j + ofs * i), \
+ Vk->E2(j + ofs * i) % BIT); \
+ } \
+ Vd->D(2 * i + 1) = 0; \
+ } \
}
-VSRLN(vsrln_b_h, 16, uint16_t, B, H)
-VSRLN(vsrln_h_w, 32, uint32_t, H, W)
-VSRLN(vsrln_w_d, 64, uint64_t, W, D)
+VSRLN(vsrln_b_h, 16, B, UH)
+VSRLN(vsrln_h_w, 32, H, UW)
+VSRLN(vsrln_w_d, 64, W, UD)
-#define VSRAN(NAME, BIT, T, E1, E2) \
-void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc) \
-{ \
- int i; \
- VReg *Vd = (VReg *)vd; \
- VReg *Vj = (VReg *)vj; \
- VReg *Vk = (VReg *)vk; \
- \
- for (i = 0; i < LSX_LEN/BIT; i++) { \
- Vd->E1(i) = R_SHIFT(Vj->E2(i), ((T)Vk->E2(i)) % BIT); \
- } \
- Vd->D(1) = 0; \
+#define VSRAN(NAME, BIT, E1, E2, E3) \
+void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc) \
+{ \
+ int i, j, ofs; \
+ VReg *Vd = (VReg *)vd; \
+ VReg *Vj = (VReg *)vj; \
+ VReg *Vk = (VReg *)vk; \
+ int oprsz = simd_oprsz(desc); \
+ \
+ ofs = LSX_LEN / BIT; \
+ for (i = 0; i < oprsz / 16; i++) { \
+ for (j = 0; j < ofs; j++) { \
+ Vd->E1(j + ofs * 2 * i) = R_SHIFT(Vj->E2(j + ofs * i), \
+ Vk->E3(j + ofs * i) % BIT); \
+ } \
+ Vd->D(2 * i + 1) = 0; \
+ } \
}
-VSRAN(vsran_b_h, 16, uint16_t, B, H)
-VSRAN(vsran_h_w, 32, uint32_t, H, W)
-VSRAN(vsran_w_d, 64, uint64_t, W, D)
+VSRAN(vsran_b_h, 16, B, H, UH)
+VSRAN(vsran_h_w, 32, H, W, UW)
+VSRAN(vsran_w_d, 64, W, D, UD)
-#define VSRLNI(NAME, BIT, T, E1, E2) \
-void HELPER(NAME)(void *vd, void *vj, uint64_t imm, uint32_t desc) \
-{ \
- int i, max; \
- VReg temp; \
- VReg *Vd = (VReg *)vd; \
- VReg *Vj = (VReg *)vj; \
- \
- temp.D(0) = 0; \
- temp.D(1) = 0; \
- max = LSX_LEN/BIT; \
- for (i = 0; i < max; i++) { \
- temp.E1(i) = R_SHIFT((T)Vj->E2(i), imm); \
- temp.E1(i + max) = R_SHIFT((T)Vd->E2(i), imm); \
- } \
- *Vd = temp; \
+#define VSRLNI(NAME, BIT, E1, E2) \
+void HELPER(NAME)(void *vd, void *vj, uint64_t imm, uint32_t desc) \
+{ \
+ int i, j, ofs; \
+ VReg temp = {}; \
+ VReg *Vd = (VReg *)vd; \
+ VReg *Vj = (VReg *)vj; \
+ int oprsz = simd_oprsz(desc); \
+ \
+ ofs = LSX_LEN / BIT; \
+ for (i = 0; i < oprsz / 16; i++) { \
+ for (j = 0; j < ofs; j++) { \
+ temp.E1(j + ofs * 2 * i) = R_SHIFT(Vj->E2(j + ofs * i), imm); \
+ temp.E1(j + ofs * (2 * i + 1)) = R_SHIFT(Vd->E2(j + ofs * i), \
+ imm); \
+ } \
+ } \
+ *Vd = temp; \
}
void HELPER(vsrlni_d_q)(void *vd, void *vj, uint64_t imm, uint32_t desc)
{
- VReg temp;
+ int i;
+ VReg temp = {};
VReg *Vd = (VReg *)vd;
VReg *Vj = (VReg *)vj;
- temp.D(0) = 0;
- temp.D(1) = 0;
- temp.D(0) = int128_getlo(int128_urshift(Vj->Q(0), imm % 128));
- temp.D(1) = int128_getlo(int128_urshift(Vd->Q(0), imm % 128));
+ for (i = 0; i < 2; i++) {
+ temp.D(2 * i) = int128_getlo(int128_urshift(Vj->Q(i), imm % 128));
+ temp.D(2 * i + 1) = int128_getlo(int128_urshift(Vd->Q(i), imm % 128));
+ }
*Vd = temp;
}
-VSRLNI(vsrlni_b_h, 16, uint16_t, B, H)
-VSRLNI(vsrlni_h_w, 32, uint32_t, H, W)
-VSRLNI(vsrlni_w_d, 64, uint64_t, W, D)
+VSRLNI(vsrlni_b_h, 16, B, UH)
+VSRLNI(vsrlni_h_w, 32, H, UW)
+VSRLNI(vsrlni_w_d, 64, W, UD)
-#define VSRANI(NAME, BIT, E1, E2) \
-void HELPER(NAME)(void *vd, void *vj, uint64_t imm, uint32_t desc) \
-{ \
- int i, max; \
- VReg temp; \
- VReg *Vd = (VReg *)vd; \
- VReg *Vj = (VReg *)vj; \
- \
- temp.D(0) = 0; \
- temp.D(1) = 0; \
- max = LSX_LEN/BIT; \
- for (i = 0; i < max; i++) { \
- temp.E1(i) = R_SHIFT(Vj->E2(i), imm); \
- temp.E1(i + max) = R_SHIFT(Vd->E2(i), imm); \
- } \
- *Vd = temp; \
+#define VSRANI(NAME, BIT, E1, E2) \
+void HELPER(NAME)(void *vd, void *vj, uint64_t imm, uint32_t desc) \
+{ \
+ int i, j, ofs; \
+ VReg temp = {}; \
+ VReg *Vd = (VReg *)vd; \
+ VReg *Vj = (VReg *)vj; \
+ int oprsz = simd_oprsz(desc); \
+ \
+ ofs = LSX_LEN / BIT; \
+ for (i = 0; i < oprsz / 16; i++) { \
+ for (j = 0; j < ofs; j++) { \
+ temp.E1(j + ofs * 2 * i) = R_SHIFT(Vj->E2(j + ofs * i), imm); \
+ temp.E1(j + ofs * (2 * i + 1)) = R_SHIFT(Vd->E2(j + ofs * i), \
+ imm); \
+ } \
+ } \
+ *Vd = temp; \
}
void HELPER(vsrani_d_q)(void *vd, void *vj, uint64_t imm, uint32_t desc)
{
- VReg temp;
+ int i;
+ VReg temp = {};
VReg *Vd = (VReg *)vd;
VReg *Vj = (VReg *)vj;
- temp.D(0) = 0;
- temp.D(1) = 0;
- temp.D(0) = int128_getlo(int128_rshift(Vj->Q(0), imm % 128));
- temp.D(1) = int128_getlo(int128_rshift(Vd->Q(0), imm % 128));
+ for (i = 0; i < 2; i++) {
+ temp.D(2 * i) = int128_getlo(int128_rshift(Vj->Q(i), imm % 128));
+ temp.D(2 * i + 1) = int128_getlo(int128_rshift(Vd->Q(i), imm % 128));
+ }
*Vd = temp;
}
--
2.39.1
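The *_d_q variants above shift a full 128-bit quantity (Q) by imm % 128 and keep only its low 64 bits, so Vd ends up with the narrowed Vj results in the even doublewords and the narrowed old Vd contents in the odd ones. A self-contained sketch of the logical case used by vsrlni_d_q (vsrani_d_q is the arithmetic analogue via int128_rshift); the helper name is hypothetical:

#include <stdint.h>

/* Low 64 bits of a logical right shift of the 128-bit value hi:lo,
 * with the shift count taken modulo 128 as in the helper. */
static uint64_t urshift128_lo(uint64_t hi, uint64_t lo, unsigned imm)
{
    imm %= 128;
    if (imm == 0) {
        return lo;
    } else if (imm < 64) {
        return (lo >> imm) | (hi << (64 - imm));
    } else {
        return hi >> (imm - 64);
    }
}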