[Qemu-devel] [PULL 34/45] target/arm: Use gvec for VSRA
From: Peter Maydell
Subject: [Qemu-devel] [PULL 34/45] target/arm: Use gvec for VSRA
Date: Fri, 19 Oct 2018 17:57:24 +0100
From: Richard Henderson <address@hidden>
Move ssra_op and usra_op expanders from translate-a64.c.
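For context: SSRA and USRA are the signed/unsigned "shift right and
accumulate" operations; each element of the source is shifted right
(arithmetically for SSRA, logically for USRA) and added into the
corresponding element of the destination. A minimal C reference model
of the 8-bit-lane case, with hypothetical helper names; the lane loop
stands in for the carry-safe masking the generic tcg_gen_vec_* helpers
use on a packed i64:

#include <stdint.h>
#include <stdio.h>

/* USRA on eight unsigned byte lanes packed into a uint64_t:
 * logical shift right each lane, then accumulate into d. */
static uint64_t usra8_ref(uint64_t d, uint64_t a, int shift)
{
    uint64_t r = 0;
    for (int i = 0; i < 64; i += 8) {
        uint8_t lane = (uint8_t)(a >> i) >> shift;
        uint8_t sum = (uint8_t)(d >> i) + lane;   /* wraps mod 256 */
        r |= (uint64_t)sum << i;
    }
    return r;
}

/* SSRA is the same with an arithmetic shift (assumes '>>' on a
 * negative value is arithmetic, which is true of mainstream
 * compilers though implementation-defined in ISO C). */
static uint64_t ssra8_ref(uint64_t d, uint64_t a, int shift)
{
    uint64_t r = 0;
    for (int i = 0; i < 64; i += 8) {
        int8_t lane = (int8_t)(a >> i);
        uint8_t sum = (uint8_t)(d >> i) + (uint8_t)(lane >> shift);
        r |= (uint64_t)sum << i;
    }
    return r;
}

int main(void)
{
    uint64_t a = 0x80f0200480f02004ull;   /* mixed sign lanes */
    printf("usra8: %016llx\n", (unsigned long long)usra8_ref(0, a, 4));
    printf("ssra8: %016llx\n", (unsigned long long)ssra8_ref(0, a, 4));
    return 0;
}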
Signed-off-by: Richard Henderson <address@hidden>
Message-id: address@hidden
Reviewed-by: Peter Maydell <address@hidden>
Signed-off-by: Peter Maydell <address@hidden>
---
target/arm/translate.h | 2 +
target/arm/translate-a64.c | 106 ----------------------------
target/arm/translate.c | 139 ++++++++++++++++++++++++++++++++++---
3 files changed, 130 insertions(+), 117 deletions(-)
diff --git a/target/arm/translate.h b/target/arm/translate.h
index dea59c7214d..5e13571b362 100644
--- a/target/arm/translate.h
+++ b/target/arm/translate.h
@@ -196,6 +196,8 @@ static inline TCGv_i32 get_ahp_flag(void)
extern const GVecGen3 bsl_op;
extern const GVecGen3 bit_op;
extern const GVecGen3 bif_op;
+extern const GVecGen2i ssra_op[4];
+extern const GVecGen2i usra_op[4];
/*
* Forward to the isar_feature_* tests given a DisasContext pointer.
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index 384bcbbb00c..6d11e384898 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -9392,66 +9392,6 @@ static void disas_simd_scalar_two_reg_misc(DisasContext *s, uint32_t insn)
}
}
-static void gen_ssra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
-{
- tcg_gen_vec_sar8i_i64(a, a, shift);
- tcg_gen_vec_add8_i64(d, d, a);
-}
-
-static void gen_ssra16_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
-{
- tcg_gen_vec_sar16i_i64(a, a, shift);
- tcg_gen_vec_add16_i64(d, d, a);
-}
-
-static void gen_ssra32_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
-{
- tcg_gen_sari_i32(a, a, shift);
- tcg_gen_add_i32(d, d, a);
-}
-
-static void gen_ssra64_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
-{
- tcg_gen_sari_i64(a, a, shift);
- tcg_gen_add_i64(d, d, a);
-}
-
-static void gen_ssra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
-{
- tcg_gen_sari_vec(vece, a, a, sh);
- tcg_gen_add_vec(vece, d, d, a);
-}
-
-static void gen_usra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
-{
- tcg_gen_vec_shr8i_i64(a, a, shift);
- tcg_gen_vec_add8_i64(d, d, a);
-}
-
-static void gen_usra16_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
-{
- tcg_gen_vec_shr16i_i64(a, a, shift);
- tcg_gen_vec_add16_i64(d, d, a);
-}
-
-static void gen_usra32_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
-{
- tcg_gen_shri_i32(a, a, shift);
- tcg_gen_add_i32(d, d, a);
-}
-
-static void gen_usra64_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
-{
- tcg_gen_shri_i64(a, a, shift);
- tcg_gen_add_i64(d, d, a);
-}
-
-static void gen_usra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
-{
- tcg_gen_shri_vec(vece, a, a, sh);
- tcg_gen_add_vec(vece, d, d, a);
-}
-
static void gen_shr8_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
uint64_t mask = dup_const(MO_8, 0xff >> shift);
@@ -9507,52 +9447,6 @@ static void gen_shr_ins_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
static void handle_vec_simd_shri(DisasContext *s, bool is_q, bool is_u,
                                 int immh, int immb, int opcode, int rn, int rd)
{
- static const GVecGen2i ssra_op[4] = {
- { .fni8 = gen_ssra8_i64,
- .fniv = gen_ssra_vec,
- .load_dest = true,
- .opc = INDEX_op_sari_vec,
- .vece = MO_8 },
- { .fni8 = gen_ssra16_i64,
- .fniv = gen_ssra_vec,
- .load_dest = true,
- .opc = INDEX_op_sari_vec,
- .vece = MO_16 },
- { .fni4 = gen_ssra32_i32,
- .fniv = gen_ssra_vec,
- .load_dest = true,
- .opc = INDEX_op_sari_vec,
- .vece = MO_32 },
- { .fni8 = gen_ssra64_i64,
- .fniv = gen_ssra_vec,
- .prefer_i64 = TCG_TARGET_REG_BITS == 64,
- .load_dest = true,
- .opc = INDEX_op_sari_vec,
- .vece = MO_64 },
- };
- static const GVecGen2i usra_op[4] = {
- { .fni8 = gen_usra8_i64,
- .fniv = gen_usra_vec,
- .load_dest = true,
- .opc = INDEX_op_shri_vec,
- .vece = MO_8, },
- { .fni8 = gen_usra16_i64,
- .fniv = gen_usra_vec,
- .load_dest = true,
- .opc = INDEX_op_shri_vec,
- .vece = MO_16, },
- { .fni4 = gen_usra32_i32,
- .fniv = gen_usra_vec,
- .load_dest = true,
- .opc = INDEX_op_shri_vec,
- .vece = MO_32, },
- { .fni8 = gen_usra64_i64,
- .fniv = gen_usra_vec,
- .prefer_i64 = TCG_TARGET_REG_BITS == 64,
- .load_dest = true,
- .opc = INDEX_op_shri_vec,
- .vece = MO_64, },
- };
static const GVecGen2i sri_op[4] = {
{ .fni8 = gen_shr8_ins_i64,
.fniv = gen_shr_ins_vec,
diff --git a/target/arm/translate.c b/target/arm/translate.c
index 2d715d9b47b..b3b2ef93f4d 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -5770,6 +5770,113 @@ const GVecGen3 bif_op = {
.load_dest = true
};
+static void gen_ssra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
+{
+ tcg_gen_vec_sar8i_i64(a, a, shift);
+ tcg_gen_vec_add8_i64(d, d, a);
+}
+
+static void gen_ssra16_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
+{
+ tcg_gen_vec_sar16i_i64(a, a, shift);
+ tcg_gen_vec_add16_i64(d, d, a);
+}
+
+static void gen_ssra32_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
+{
+ tcg_gen_sari_i32(a, a, shift);
+ tcg_gen_add_i32(d, d, a);
+}
+
+static void gen_ssra64_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
+{
+ tcg_gen_sari_i64(a, a, shift);
+ tcg_gen_add_i64(d, d, a);
+}
+
+static void gen_ssra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
+{
+ tcg_gen_sari_vec(vece, a, a, sh);
+ tcg_gen_add_vec(vece, d, d, a);
+}
+
+const GVecGen2i ssra_op[4] = {
+ { .fni8 = gen_ssra8_i64,
+ .fniv = gen_ssra_vec,
+ .load_dest = true,
+ .opc = INDEX_op_sari_vec,
+ .vece = MO_8 },
+ { .fni8 = gen_ssra16_i64,
+ .fniv = gen_ssra_vec,
+ .load_dest = true,
+ .opc = INDEX_op_sari_vec,
+ .vece = MO_16 },
+ { .fni4 = gen_ssra32_i32,
+ .fniv = gen_ssra_vec,
+ .load_dest = true,
+ .opc = INDEX_op_sari_vec,
+ .vece = MO_32 },
+ { .fni8 = gen_ssra64_i64,
+ .fniv = gen_ssra_vec,
+ .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+ .load_dest = true,
+ .opc = INDEX_op_sari_vec,
+ .vece = MO_64 },
+};
+
+static void gen_usra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
+{
+ tcg_gen_vec_shr8i_i64(a, a, shift);
+ tcg_gen_vec_add8_i64(d, d, a);
+}
+
+static void gen_usra16_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
+{
+ tcg_gen_vec_shr16i_i64(a, a, shift);
+ tcg_gen_vec_add16_i64(d, d, a);
+}
+
+static void gen_usra32_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
+{
+ tcg_gen_shri_i32(a, a, shift);
+ tcg_gen_add_i32(d, d, a);
+}
+
+static void gen_usra64_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
+{
+ tcg_gen_shri_i64(a, a, shift);
+ tcg_gen_add_i64(d, d, a);
+}
+
+static void gen_usra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
+{
+ tcg_gen_shri_vec(vece, a, a, sh);
+ tcg_gen_add_vec(vece, d, d, a);
+}
+
+const GVecGen2i usra_op[4] = {
+ { .fni8 = gen_usra8_i64,
+ .fniv = gen_usra_vec,
+ .load_dest = true,
+ .opc = INDEX_op_shri_vec,
+ .vece = MO_8, },
+ { .fni8 = gen_usra16_i64,
+ .fniv = gen_usra_vec,
+ .load_dest = true,
+ .opc = INDEX_op_shri_vec,
+ .vece = MO_16, },
+ { .fni4 = gen_usra32_i32,
+ .fniv = gen_usra_vec,
+ .load_dest = true,
+ .opc = INDEX_op_shri_vec,
+ .vece = MO_32, },
+ { .fni8 = gen_usra64_i64,
+ .fniv = gen_usra_vec,
+ .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+ .load_dest = true,
+ .opc = INDEX_op_shri_vec,
+ .vece = MO_64, },
+};
/* Translate a NEON data processing instruction. Return nonzero if the
instruction is invalid.
@@ -6408,6 +6515,25 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
}
return 0;
+ case 1: /* VSRA */
+ /* Right shift comes here negative. */
+ shift = -shift;
+ /* Shifts larger than the element size are architecturally
+ * valid. Unsigned results in all zeros; signed results
+ * in all sign bits.
+ */
+ if (!u) {
+ tcg_gen_gvec_2i(rd_ofs, rm_ofs, vec_size, vec_size,
+ MIN(shift, (8 << size) - 1),
+ &ssra_op[size]);
+ } else if (shift >= 8 << size) {
+ /* rd += 0 */
+ } else {
+ tcg_gen_gvec_2i(rd_ofs, rm_ofs, vec_size, vec_size,
+ shift, &usra_op[size]);
+ }
+ return 0;
+
case 5: /* VSHL, VSLI */
if (!u) { /* VSHL */
/* Shifts larger than the element size are
@@ -6440,12 +6566,6 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
neon_load_reg64(cpu_V0, rm + pass);
tcg_gen_movi_i64(cpu_V1, imm);
switch (op) {
- case 1: /* VSRA */
- if (u)
- gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
- else
- gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
- break;
case 2: /* VRSHR */
case 3: /* VRSRA */
if (u)
@@ -6473,7 +6593,7 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
default:
g_assert_not_reached();
}
- if (op == 1 || op == 3) {
+ if (op == 3) {
/* Accumulate. */
neon_load_reg64(cpu_V1, rd + pass);
tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
@@ -6500,9 +6620,6 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
tmp2 = tcg_temp_new_i32();
tcg_gen_movi_i32(tmp2, imm);
switch (op) {
- case 1: /* VSRA */
- GEN_NEON_INTEGER_OP(shl);
- break;
case 2: /* VRSHR */
case 3: /* VRSRA */
GEN_NEON_INTEGER_OP(rshl);
@@ -6542,7 +6659,7 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
}
tcg_temp_free_i32(tmp2);
- if (op == 1 || op == 3) {
+ if (op == 3) {
/* Accumulate. */
tmp2 = neon_load_reg(rd, pass);
gen_neon_add(size, tmp, tmp2);
--
2.19.1
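A note on the shift-count handling in the new gvec path above: the
aarch32 decode delivers right shifts as negated values, and shift
counts up to the element size are architecturally valid. For SSRA the
count is clamped to esize - 1 (a maximal arithmetic shift still yields
all sign bits, so the accumulate is preserved), while for USRA a shift
of esize or more produces all-zero lanes and the accumulate can be
skipped entirely. A sketch of that rule, using a hypothetical helper
name, with 'size' the log2 element size as in the decoder:

#include <stdbool.h>

static int normalize_vsra_shift(int shift, int size, bool is_unsigned,
                                bool *is_nop)
{
    int esize = 8 << size;   /* element width in bits */

    shift = -shift;          /* decode hands right shifts in negated */
    *is_nop = false;
    if (is_unsigned) {
        if (shift >= esize) {
            *is_nop = true;  /* every lane shifts to zero: rd += 0 */
        }
        return shift;
    }
    /* Signed: clamp so each lane becomes its replicated sign bit. */
    return shift < esize ? shift : esize - 1;
}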