[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[PULL 24/38] target/arm: Convert handle_scalar_simd_shli to decodetree
From: Peter Maydell
Subject: [PULL 24/38] target/arm: Convert handle_scalar_simd_shli to decodetree
Date: Thu, 19 Sep 2024 14:10:52 +0100
From: Richard Henderson <richard.henderson@linaro.org>
This includes SHL and SLI.
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20240912024114.1097832-25-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
target/arm/tcg/a64.decode | 4 ++++
target/arm/tcg/translate-a64.c | 44 +++++++---------------------------
2 files changed, 13 insertions(+), 35 deletions(-)
diff --git a/target/arm/tcg/a64.decode b/target/arm/tcg/a64.decode
index 6c2362b3bbc..96803fe6e4c 100644
--- a/target/arm/tcg/a64.decode
+++ b/target/arm/tcg/a64.decode
@@ -1291,6 +1291,7 @@ RSHRN_v 0.00 11110 .... ... 10001 1 ..... .....
@q_shri_s
@shri_d .... ..... 1 ...... ..... . rn:5 rd:5 \
&rri_e esz=3 imm=%neon_rshift_i6
+@shli_d .... ..... 1 imm:6 ..... . rn:5 rd:5 &rri_e esz=3
SSHR_s 0101 11110 .... ... 00000 1 ..... ..... @shri_d
USHR_s 0111 11110 .... ... 00000 1 ..... ..... @shri_d
@@ -1301,3 +1302,6 @@ URSHR_s 0111 11110 .... ... 00100 1 ..... .....
@shri_d
SRSRA_s 0101 11110 .... ... 00110 1 ..... ..... @shri_d
URSRA_s 0111 11110 .... ... 00110 1 ..... ..... @shri_d
SRI_s 0111 11110 .... ... 01000 1 ..... ..... @shri_d
+
+SHL_s 0101 11110 .... ... 01010 1 ..... ..... @shli_d
+SLI_s 0111 11110 .... ... 01010 1 ..... ..... @shli_d
diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c
index efd93a7f234..934746d2f2c 100644
--- a/target/arm/tcg/translate-a64.c
+++ b/target/arm/tcg/translate-a64.c
@@ -7124,6 +7124,11 @@ static void gen_sri_d(TCGv_i64 dst, TCGv_i64 src,
int64_t shift)
}
}
+static void gen_sli_d(TCGv_i64 dst, TCGv_i64 src, int64_t shift)
+{
+ tcg_gen_deposit_i64(dst, dst, src, shift, 64 - shift);
+}
+
static bool do_vec_shift_imm_narrow(DisasContext *s, arg_qrri_e *a,
WideShiftImmFn * const fns[3], MemOp sign)
{
@@ -7201,6 +7206,9 @@ TRANS(SRSRA_s, do_scalar_shift_imm, a, gen_srsra_d, true,
0)
TRANS(URSRA_s, do_scalar_shift_imm, a, gen_ursra_d, true, 0)
TRANS(SRI_s, do_scalar_shift_imm, a, gen_sri_d, true, 0)
+TRANS(SHL_s, do_scalar_shift_imm, a, tcg_gen_shli_i64, false, 0)
+TRANS(SLI_s, do_scalar_shift_imm, a, gen_sli_d, true, 0)
+
/* Shift a TCGv src by TCGv shift_amount, put result in dst.
* Note that it is the caller's responsibility to ensure that the
* shift amount is in range (ie 0..31 or 0..63) and provide the ARM
@@ -9417,38 +9425,6 @@ static void handle_shri_with_rndacc(TCGv_i64 tcg_res,
TCGv_i64 tcg_src,
}
}
-/* SHL/SLI - Scalar shift left */
-static void handle_scalar_simd_shli(DisasContext *s, bool insert,
- int immh, int immb, int opcode,
- int rn, int rd)
-{
- int size = 32 - clz32(immh) - 1;
- int immhb = immh << 3 | immb;
- int shift = immhb - (8 << size);
- TCGv_i64 tcg_rn;
- TCGv_i64 tcg_rd;
-
- if (!extract32(immh, 3, 1)) {
- unallocated_encoding(s);
- return;
- }
-
- if (!fp_access_check(s)) {
- return;
- }
-
- tcg_rn = read_fp_dreg(s, rn);
- tcg_rd = insert ? read_fp_dreg(s, rd) : tcg_temp_new_i64();
-
- if (insert) {
- tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_rn, shift, 64 - shift);
- } else {
- tcg_gen_shli_i64(tcg_rd, tcg_rn, shift);
- }
-
- write_fp_dreg(s, rd, tcg_rd);
-}
-
/* SQSHRN/SQSHRUN - Saturating (signed/unsigned) shift right with
* (signed/unsigned) narrowing */
static void handle_vec_simd_sqshrn(DisasContext *s, bool is_scalar, bool is_q,
@@ -9900,9 +9876,6 @@ static void disas_simd_scalar_shift_imm(DisasContext *s,
uint32_t insn)
}
switch (opcode) {
- case 0x0a: /* SHL / SLI */
- handle_scalar_simd_shli(s, is_u, immh, immb, opcode, rn, rd);
- break;
case 0x1c: /* SCVTF, UCVTF */
handle_simd_shift_intfp_conv(s, true, false, is_u, immh, immb,
opcode, rn, rd);
@@ -9940,6 +9913,7 @@ static void disas_simd_scalar_shift_imm(DisasContext *s,
uint32_t insn)
case 0x04: /* SRSHR / URSHR */
case 0x06: /* SRSRA / URSRA */
case 0x08: /* SRI */
+ case 0x0a: /* SHL / SLI */
unallocated_encoding(s);
break;
}
--
2.34.1
- [PULL 15/38] target/arm: Fix whitespace near gen_srshr64_i64, (continued)
- [PULL 15/38] target/arm: Fix whitespace near gen_srshr64_i64, Peter Maydell, 2024/09/19
- [PULL 18/38] target/arm: Use {, s}extract in handle_vec_simd_wshli, Peter Maydell, 2024/09/19
- [PULL 20/38] target/arm: Push tcg_rnd into handle_shri_with_rndacc, Peter Maydell, 2024/09/19
- [PULL 19/38] target/arm: Convert SSHLL, USHLL to decodetree, Peter Maydell, 2024/09/19
- [PULL 28/38] target/arm: Convert vector [US]QSHRN, [US]QRSHRN, SQSHRUN to decodetree, Peter Maydell, 2024/09/19
- [PULL 25/38] target/arm: Convert VQSHL, VQSHLU to gvec, Peter Maydell, 2024/09/19
- [PULL 30/38] hw/char/stm32l4x5_usart.c: Enable USART ACK bit response, Peter Maydell, 2024/09/19
- [PULL 27/38] target/arm: Convert SQSHL, UQSHL, SQSHLU (immediate) to decodetree, Peter Maydell, 2024/09/19
- [PULL 23/38] target/arm: Convert handle_scalar_simd_shri to decodetree, Peter Maydell, 2024/09/19
- [PULL 22/38] target/arm: Convert SHRN, RSHRN to decodetree, Peter Maydell, 2024/09/19
- [PULL 24/38] target/arm: Convert handle_scalar_simd_shli to decodetree,
Peter Maydell <=
- [PULL 26/38] target/arm: Widen NeonGenNarrowEnvFn return to 64 bits, Peter Maydell, 2024/09/19
- [PULL 35/38] kvm: Make 'mmap_size' be 'int' in kvm_init_vcpu(), do_kvm_destroy_vcpu(), Peter Maydell, 2024/09/19
- [PULL 33/38] tests: expand timeout information for aarch64/sbsa-ref, Peter Maydell, 2024/09/19
- [PULL 34/38] tests: drop OpenBSD tests for aarch64/sbsa-ref, Peter Maydell, 2024/09/19
- [PULL 38/38] docs/devel: Remove nested-papr.txt, Peter Maydell, 2024/09/19
- [PULL 31/38] tests: use default cpu for aarch64/sbsa-ref, Peter Maydell, 2024/09/19
- [PULL 32/38] tests: add FreeBSD tests for aarch64/sbsa-ref, Peter Maydell, 2024/09/19
- [PULL 37/38] target/arm: Correct ID_AA64ISAR1_EL1 value for neoverse-v1, Peter Maydell, 2024/09/19
- Re: [PULL 00/38] target-arm queue, Peter Maydell, 2024/09/19