[PATCH v4 28/78] target/arm: Implement SVE2 UQSHRN, UQRSHRN
From: Richard Henderson
Subject: [PATCH v4 28/78] target/arm: Implement SVE2 UQSHRN, UQRSHRN
Date: Tue, 9 Mar 2021 08:19:51 -0800
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
target/arm/helper-sve.h | 16 +++++++
target/arm/sve.decode | 4 ++
target/arm/sve_helper.c | 24 ++++++++++
target/arm/translate-sve.c | 93 ++++++++++++++++++++++++++++++++++++++
4 files changed, 137 insertions(+)
diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h
index 2e80d9d27b..ba6a24fc8b 100644
--- a/target/arm/helper-sve.h
+++ b/target/arm/helper-sve.h
@@ -2476,6 +2476,22 @@ DEF_HELPER_FLAGS_3(sve2_sqrshrunt_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
DEF_HELPER_FLAGS_3(sve2_sqrshrunt_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
DEF_HELPER_FLAGS_3(sve2_sqrshrunt_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_uqshrnb_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_uqshrnb_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_uqshrnb_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sve2_uqshrnt_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_uqshrnt_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_uqshrnt_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sve2_uqrshrnb_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_uqrshrnb_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_uqrshrnb_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sve2_uqrshrnt_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_uqrshrnt_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_uqrshrnt_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
DEF_HELPER_FLAGS_6(sve2_faddp_zpzz_h, TCG_CALL_NO_RWG,
                   void, ptr, ptr, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_6(sve2_faddp_zpzz_s, TCG_CALL_NO_RWG,
diff --git a/target/arm/sve.decode b/target/arm/sve.decode
index 18faa900ca..13b5da0856 100644
--- a/target/arm/sve.decode
+++ b/target/arm/sve.decode
@@ -1296,6 +1296,10 @@ SHRNB 01000101 .. 1 ..... 00 0100 ..... ..... @rd_rn_tszimm_shr
SHRNT 01000101 .. 1 ..... 00 0101 ..... ..... @rd_rn_tszimm_shr
RSHRNB 01000101 .. 1 ..... 00 0110 ..... ..... @rd_rn_tszimm_shr
RSHRNT 01000101 .. 1 ..... 00 0111 ..... ..... @rd_rn_tszimm_shr
+UQSHRNB 01000101 .. 1 ..... 00 1100 ..... ..... @rd_rn_tszimm_shr
+UQSHRNT 01000101 .. 1 ..... 00 1101 ..... ..... @rd_rn_tszimm_shr
+UQRSHRNB 01000101 .. 1 ..... 00 1110 ..... ..... @rd_rn_tszimm_shr
+UQRSHRNT 01000101 .. 1 ..... 00 1111 ..... ..... @rd_rn_tszimm_shr
## SVE2 floating-point pairwise operations
diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c
index 96a4c76e94..83d3547f67 100644
--- a/target/arm/sve_helper.c
+++ b/target/arm/sve_helper.c
@@ -1971,6 +1971,30 @@ DO_SHRNT(sve2_sqrshrunt_h, int16_t, uint8_t, H1_2, H1, DO_SQRSHRUN_H)
DO_SHRNT(sve2_sqrshrunt_s, int32_t, uint16_t, H1_4, H1_2, DO_SQRSHRUN_S)
DO_SHRNT(sve2_sqrshrunt_d, int64_t, uint32_t, , H1_4, DO_SQRSHRUN_D)
+#define DO_UQSHRN_H(x, sh) MIN(x >> sh, UINT8_MAX)
+#define DO_UQSHRN_S(x, sh) MIN(x >> sh, UINT16_MAX)
+#define DO_UQSHRN_D(x, sh) MIN(x >> sh, UINT32_MAX)
+
+DO_SHRNB(sve2_uqshrnb_h, uint16_t, uint8_t, DO_UQSHRN_H)
+DO_SHRNB(sve2_uqshrnb_s, uint32_t, uint16_t, DO_UQSHRN_S)
+DO_SHRNB(sve2_uqshrnb_d, uint64_t, uint32_t, DO_UQSHRN_D)
+
+DO_SHRNT(sve2_uqshrnt_h, uint16_t, uint8_t, H1_2, H1, DO_UQSHRN_H)
+DO_SHRNT(sve2_uqshrnt_s, uint32_t, uint16_t, H1_4, H1_2, DO_UQSHRN_S)
+DO_SHRNT(sve2_uqshrnt_d, uint64_t, uint32_t, , H1_4, DO_UQSHRN_D)
+
+#define DO_UQRSHRN_H(x, sh) MIN(do_urshr(x, sh), UINT8_MAX)
+#define DO_UQRSHRN_S(x, sh) MIN(do_urshr(x, sh), UINT16_MAX)
+#define DO_UQRSHRN_D(x, sh) MIN(do_urshr(x, sh), UINT32_MAX)
+
+DO_SHRNB(sve2_uqrshrnb_h, uint16_t, uint8_t, DO_UQRSHRN_H)
+DO_SHRNB(sve2_uqrshrnb_s, uint32_t, uint16_t, DO_UQRSHRN_S)
+DO_SHRNB(sve2_uqrshrnb_d, uint64_t, uint32_t, DO_UQRSHRN_D)
+
+DO_SHRNT(sve2_uqrshrnt_h, uint16_t, uint8_t, H1_2, H1, DO_UQRSHRN_H)
+DO_SHRNT(sve2_uqrshrnt_s, uint32_t, uint16_t, H1_4, H1_2, DO_UQRSHRN_S)
+DO_SHRNT(sve2_uqrshrnt_d, uint64_t, uint32_t, , H1_4, DO_UQRSHRN_D)
+
#undef DO_SHRNB
#undef DO_SHRNT
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
index fe247d758c..6a3c34a4fc 100644
--- a/target/arm/translate-sve.c
+++ b/target/arm/translate-sve.c
@@ -6957,6 +6957,99 @@ static bool trans_SQRSHRUNT(DisasContext *s, arg_rri_esz *a)
return do_sve2_shr_narrow(s, a, ops);
}
+static void gen_uqshrnb_vec(unsigned vece, TCGv_vec d,
+ TCGv_vec n, int64_t shr)
+{
+ TCGv_vec t = tcg_temp_new_vec_matching(d);
+ int halfbits = 4 << vece;
+
+ tcg_gen_shri_vec(vece, n, n, shr);
+ tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(0, halfbits));
+ tcg_gen_umin_vec(vece, d, n, t);
+ tcg_temp_free_vec(t);
+}
+
+static bool trans_UQSHRNB(DisasContext *s, arg_rri_esz *a)
+{
+ static const TCGOpcode vec_list[] = {
+ INDEX_op_shri_vec, INDEX_op_umin_vec, 0
+ };
+ static const GVecGen2i ops[3] = {
+ { .fniv = gen_uqshrnb_vec,
+ .opt_opc = vec_list,
+ .fno = gen_helper_sve2_uqshrnb_h,
+ .vece = MO_16 },
+ { .fniv = gen_uqshrnb_vec,
+ .opt_opc = vec_list,
+ .fno = gen_helper_sve2_uqshrnb_s,
+ .vece = MO_32 },
+ { .fniv = gen_uqshrnb_vec,
+ .opt_opc = vec_list,
+ .fno = gen_helper_sve2_uqshrnb_d,
+ .vece = MO_64 },
+ };
+ return do_sve2_shr_narrow(s, a, ops);
+}
+
+static void gen_uqshrnt_vec(unsigned vece, TCGv_vec d,
+ TCGv_vec n, int64_t shr)
+{
+ TCGv_vec t = tcg_temp_new_vec_matching(d);
+ int halfbits = 4 << vece;
+
+ tcg_gen_shri_vec(vece, n, n, shr);
+ tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(0, halfbits));
+ tcg_gen_umin_vec(vece, n, n, t);
+ tcg_gen_shli_vec(vece, n, n, halfbits);
+ tcg_gen_bitsel_vec(vece, d, t, d, n);
+ tcg_temp_free_vec(t);
+}
+
+static bool trans_UQSHRNT(DisasContext *s, arg_rri_esz *a)
+{
+ static const TCGOpcode vec_list[] = {
+ INDEX_op_shli_vec, INDEX_op_shri_vec, INDEX_op_umin_vec, 0
+ };
+ static const GVecGen2i ops[3] = {
+ { .fniv = gen_uqshrnt_vec,
+ .opt_opc = vec_list,
+ .load_dest = true,
+ .fno = gen_helper_sve2_uqshrnt_h,
+ .vece = MO_16 },
+ { .fniv = gen_uqshrnt_vec,
+ .opt_opc = vec_list,
+ .load_dest = true,
+ .fno = gen_helper_sve2_uqshrnt_s,
+ .vece = MO_32 },
+ { .fniv = gen_uqshrnt_vec,
+ .opt_opc = vec_list,
+ .load_dest = true,
+ .fno = gen_helper_sve2_uqshrnt_d,
+ .vece = MO_64 },
+ };
+ return do_sve2_shr_narrow(s, a, ops);
+}
+
+static bool trans_UQRSHRNB(DisasContext *s, arg_rri_esz *a)
+{
+ static const GVecGen2i ops[3] = {
+ { .fno = gen_helper_sve2_uqrshrnb_h },
+ { .fno = gen_helper_sve2_uqrshrnb_s },
+ { .fno = gen_helper_sve2_uqrshrnb_d },
+ };
+ return do_sve2_shr_narrow(s, a, ops);
+}
+
+static bool trans_UQRSHRNT(DisasContext *s, arg_rri_esz *a)
+{
+ static const GVecGen2i ops[3] = {
+ { .fno = gen_helper_sve2_uqrshrnt_h },
+ { .fno = gen_helper_sve2_uqrshrnt_s },
+ { .fno = gen_helper_sve2_uqrshrnt_d },
+ };
+ return do_sve2_shr_narrow(s, a, ops);
+}
+
static bool do_sve2_zpzz_fp(DisasContext *s, arg_rprr_esz *a,
gen_helper_gvec_4_ptr *fn)
{
--
2.25.1
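
[Editor's note, not part of the patch: the following is a minimal standalone C sketch of the scalar semantics that the DO_UQSHRN_H / DO_UQRSHRN_H macros above implement for one .h element. It assumes a shift amount in [1, 15]; urshr16 here is a simplified stand-in for the existing do_urshr() rounding helper in sve_helper.c, not a function from the patch.]

/* Illustration only: unsigned saturating shift-right-narrow, .h -> .b. */
#include <stdint.h>
#include <stdio.h>

/* Rounding right shift: add back the last bit shifted out (assumes 1 <= sh <= 15). */
static uint16_t urshr16(uint16_t x, unsigned sh)
{
    return (x >> sh) + ((x >> (sh - 1)) & 1);
}

/* UQSHRN, .h element: truncating shift, then saturate to the 8-bit unsigned range. */
static uint8_t uqshrn_h(uint16_t x, unsigned sh)
{
    uint16_t v = x >> sh;
    return v > UINT8_MAX ? UINT8_MAX : v;
}

/* UQRSHRN, .h element: rounding shift, then the same unsigned saturation. */
static uint8_t uqrshrn_h(uint16_t x, unsigned sh)
{
    uint16_t v = urshr16(x, sh);
    return v > UINT8_MAX ? UINT8_MAX : v;
}

int main(void)
{
    /* 0x0fff >> 3 = 0x1ff, which exceeds UINT8_MAX and saturates to 0xff. */
    printf("uqshrn  0x0fff >> 3 = 0x%02x\n", uqshrn_h(0x0fff, 3));
    /* 0x0124 >> 3 = 0x24 truncating; rounding adds the dropped bit, giving 0x25. */
    printf("uqshrn  0x0124 >> 3 = 0x%02x\n", uqshrn_h(0x0124, 3));
    printf("uqrshrn 0x0124 >> 3 = 0x%02x\n", uqrshrn_h(0x0124, 3));
    return 0;
}

The gen_uqshrnb_vec/gen_uqshrnt_vec expansions in the patch do the same thing vector-wide: shift right, then clamp with umin against the all-ones mask of the narrow element width, with the "T" forms additionally shifting the result into the top half and merging it into the destination via bitsel.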