From: Richard Henderson <richard.henderson@linaro.org>
Subject: [PATCH 09/17] target/arm: Convert MOVI, FMOV, ORR, BIC (vector immediate) to decodetree
Date: Wed, 17 Jul 2024 16:08:55 +1000
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
target/arm/tcg/translate-a64.c | 117 ++++++++++++++-------------------
target/arm/tcg/a64.decode | 9 +++
2 files changed, 59 insertions(+), 67 deletions(-)
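
A note on the cmode/op handling in the new trans_Vimm() below: the predicate
"(a->cmode & 1) && a->cmode < 12" selects the ORR/BIC encodings, replacing the
old mask test "(cmode & 0x9) == 0x1 || (cmode & 0xd) == 0x9" from
disas_simd_mod_imm(). Both pick cmode in {1, 3, 5, 7, 9, 11}. A minimal
standalone check of that equivalence, for illustration only and not part of
the patch:

#include <assert.h>
#include <stdbool.h>

/* Old ORR/BIC selection test, as removed from disas_simd_mod_imm(). */
static bool is_orr_bic_old(int cmode)
{
    return (cmode & 0x9) == 0x1 || (cmode & 0xd) == 0x9;
}

/* New ORR/BIC selection test, as used by trans_Vimm(). */
static bool is_orr_bic_new(int cmode)
{
    return (cmode & 1) && cmode < 12;
}

int main(void)
{
    /* Walk all 16 cmode values and confirm the two tests agree. */
    for (int cmode = 0; cmode < 16; cmode++) {
        assert(is_orr_bic_old(cmode) == is_orr_bic_new(cmode));
    }
    return 0;
}
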
diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c
index 6582816e4e..1fa9dc3172 100644
--- a/target/arm/tcg/translate-a64.c
+++ b/target/arm/tcg/translate-a64.c
@@ -6872,6 +6872,52 @@ static bool trans_FMOVI_s(DisasContext *s, arg_FMOVI_s *a)
     return true;
 }
 
+/*
+ * Advanced SIMD Modified Immediate
+ */
+
+static bool trans_FMOVI_v_h(DisasContext *s, arg_FMOVI_v_h *a)
+{
+    if (!dc_isar_feature(aa64_fp16, s)) {
+        return false;
+    }
+    if (fp_access_check(s)) {
+        tcg_gen_gvec_dup_imm(MO_16, vec_full_reg_offset(s, a->rd),
+                             a->q ? 16 : 8, vec_full_reg_size(s),
+                             vfp_expand_imm(MO_16, a->abcdefgh));
+    }
+    return true;
+}
+
+static void gen_movi(unsigned vece, uint32_t dofs, uint32_t aofs,
+                     int64_t c, uint32_t oprsz, uint32_t maxsz)
+{
+    tcg_gen_gvec_dup_imm(MO_64, dofs, oprsz, maxsz, c);
+}
+
+static bool trans_Vimm(DisasContext *s, arg_Vimm *a)
+{
+    GVecGen2iFn *fn;
+
+    /* Handle decode of cmode/op here between ORR/BIC/MOVI */
+    if ((a->cmode & 1) && a->cmode < 12) {
+        /* For op=1, the imm will be inverted, so BIC becomes AND. */
+        fn = a->op ? tcg_gen_gvec_andi : tcg_gen_gvec_ori;
+    } else {
+        /* There is one unallocated cmode/op combination in this space */
+        if (a->cmode == 15 && a->op == 1 && a->q == 0) {
+            return false;
+        }
+        fn = gen_movi;
+    }
+
+    if (fp_access_check(s)) {
+        uint64_t imm = asimd_imm_const(a->abcdefgh, a->cmode, a->op);
+        gen_gvec_fn2i(s, a->q, a->rd, a->rd, imm, fn, MO_64);
+    }
+    return true;
+}
+
 /* Shift a TCGv src by TCGv shift_amount, put result in dst.
  * Note that it is the caller's responsibility to ensure that the
  * shift amount is in range (ie 0..31 or 0..63) and provide the ARM
@@ -9051,69 +9097,6 @@ static void disas_data_proc_fp(DisasContext *s, uint32_t insn)
     }
 }
 
-/* AdvSIMD modified immediate
- *  31  30   29  28                 19 18 16 15   12  11  10  9     5 4    0
- * +---+---+----+---------------------+-----+-------+----+---+-------+------+
- * | 0 | Q | op | 0 1 1 1 1 0 0 0 0 0 | abc | cmode | o2 | 1 | defgh |  Rd  |
- * +---+---+----+---------------------+-----+-------+----+---+-------+------+
- *
- * There are a number of operations that can be carried out here:
- *   MOVI - move (shifted) imm into register
- *   MVNI - move inverted (shifted) imm into register
- *   ORR  - bitwise OR of (shifted) imm with register
- *   BIC  - bitwise clear of (shifted) imm with register
- * With ARMv8.2 we also have:
- *   FMOV half-precision
- */
-static void disas_simd_mod_imm(DisasContext *s, uint32_t insn)
-{
-    int rd = extract32(insn, 0, 5);
-    int cmode = extract32(insn, 12, 4);
-    int o2 = extract32(insn, 11, 1);
-    uint64_t abcdefgh = extract32(insn, 5, 5) | (extract32(insn, 16, 3) << 5);
-    bool is_neg = extract32(insn, 29, 1);
-    bool is_q = extract32(insn, 30, 1);
-    uint64_t imm = 0;
-
-    if (o2) {
-        if (cmode != 0xf || is_neg) {
-            unallocated_encoding(s);
-            return;
-        }
-        /* FMOV (vector, immediate) - half-precision */
-        if (!dc_isar_feature(aa64_fp16, s)) {
-            unallocated_encoding(s);
-            return;
-        }
-        imm = vfp_expand_imm(MO_16, abcdefgh);
-        /* now duplicate across the lanes */
-        imm = dup_const(MO_16, imm);
-    } else {
-        if (cmode == 0xf && is_neg && !is_q) {
-            unallocated_encoding(s);
-            return;
-        }
-        imm = asimd_imm_const(abcdefgh, cmode, is_neg);
-    }
-
-    if (!fp_access_check(s)) {
-        return;
-    }
-
-    if (!((cmode & 0x9) == 0x1 || (cmode & 0xd) == 0x9)) {
-        /* MOVI or MVNI, with MVNI negation handled above. */
-        tcg_gen_gvec_dup_imm(MO_64, vec_full_reg_offset(s, rd), is_q ? 16 : 8,
-                             vec_full_reg_size(s), imm);
-    } else {
-        /* ORR or BIC, with BIC negation to AND handled above. */
-        if (is_neg) {
-            gen_gvec_fn2i(s, is_q, rd, rd, imm, tcg_gen_gvec_andi, MO_64);
-        } else {
-            gen_gvec_fn2i(s, is_q, rd, rd, imm, tcg_gen_gvec_ori, MO_64);
-        }
-    }
-}
-
 /*
  * Common SSHR[RA]/USHR[RA] - Shift right (optional rounding/accumulate)
  *
@@ -10593,8 +10576,10 @@ static void disas_simd_shift_imm(DisasContext *s, uint32_t insn)
     bool is_u = extract32(insn, 29, 1);
     bool is_q = extract32(insn, 30, 1);
 
-    /* data_proc_simd[] has sent immh == 0 to disas_simd_mod_imm. */
-    assert(immh != 0);
+    if (immh == 0) {
+        unallocated_encoding(s);
+        return;
+    }
 
     switch (opcode) {
     case 0x08: /* SRI */
@@ -11602,8 +11587,6 @@ static void disas_simd_two_reg_misc_fp16(DisasContext *s, uint32_t insn)
 static const AArch64DecodeTable data_proc_simd[] = {
     /* pattern  ,  mask     ,  fn                        */
     { 0x0e200800, 0x9f3e0c00, disas_simd_two_reg_misc },
-    /* simd_mod_imm decode is a subset of simd_shift_imm, so must precede it */
-    { 0x0f000400, 0x9ff80400, disas_simd_mod_imm },
     { 0x0f000400, 0x9f800400, disas_simd_shift_imm },
     { 0x5e200800, 0xdf3e0c00, disas_simd_scalar_two_reg_misc },
     { 0x5f000400, 0xdf800400, disas_simd_scalar_shift_imm },
diff --git a/target/arm/tcg/a64.decode b/target/arm/tcg/a64.decode
index de763d3f12..d4dfc5f772 100644
--- a/target/arm/tcg/a64.decode
+++ b/target/arm/tcg/a64.decode
@@ -1184,3 +1184,12 @@ FMINV_s         0110 1110 10 11000 01111 10 ..... ..... @rr_q1e2
 # Floating-point Immediate
 
 FMOVI_s         0001 1110 .. 1 imm:8 100 00000 rd:5     esz=%esz_hsd
+
+# Advanced SIMD Modified Immediate
+
+%abcdefgh       16:3 5:5
+
+FMOVI_v_h       0 q:1 00 1111 00000 ... 1111 11 ..... rd:5        %abcdefgh
+
+# MOVI, MVNI, ORR, BIC, FMOV are all intermixed via cmode.
+Vimm            0 q:1 op:1 0 1111 00000 ... cmode:4 01 ..... rd:5 %abcdefgh
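
For reference, the %abcdefgh field above concatenates abc (insn bits [18:16])
with defgh (insn bits [9:5]) into the 8-bit immediate, matching the
hand-written extraction removed from disas_simd_mod_imm(). A minimal sketch of
that composition, for illustration only and not part of the patch:

#include <stdint.h>

/* Equivalent to: extract32(insn, 5, 5) | (extract32(insn, 16, 3) << 5) */
static inline uint32_t compose_abcdefgh(uint32_t insn)
{
    uint32_t abc   = (insn >> 16) & 0x7;   /* bits [18:16] */
    uint32_t defgh = (insn >> 5) & 0x1f;   /* bits [9:5]   */
    return (abc << 5) | defgh;
}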
--
2.43.0