Subject: [PATCH 62/65] target/riscv: Add vector slide instructions for XTheadVector
From: Huang Tao
Date: Fri, 12 Apr 2024 15:37:32 +0800
These instructions have the same function as their RVV1.0 counterparts;
only the general differences between XTheadVector and RVV1.0, such as the
mask layout (mlen) and the zeroing of tail elements, apply here.
Signed-off-by: Huang Tao <eric.huang@linux.alibaba.com>
---
target/riscv/helper.h | 17 +++
.../riscv/insn_trans/trans_xtheadvector.c.inc | 25 +++-
target/riscv/xtheadvector_helper.c | 123 ++++++++++++++++++
3 files changed, 159 insertions(+), 6 deletions(-)
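
For reference, the element movement implemented by the new helpers can be
summarised by the following scalar model (an illustrative sketch only:
masking, vstart handling and tail zeroing are omitted, and these names are
not part of the patch). It also shows why slideup_check_th() below requires
rd != rs2: vslideup writes vd[i] from vs2[i - offset], so with overlapping
registers an ascending loop would read elements it has already overwritten.

    #include <stdint.h>
    #include <stddef.h>

    /* vslideup.vx: vd[i + offset] = vs2[i]; elements below offset keep
     * their previous value. */
    static void ref_vslideup(uint8_t *vd, const uint8_t *vs2,
                             size_t offset, size_t vl)
    {
        for (size_t i = offset; i < vl; i++) {
            vd[i] = vs2[i - offset];
        }
    }

    /* vslidedown.vx: vd[i] = vs2[i + offset]; reads past vlmax yield 0. */
    static void ref_vslidedown(uint8_t *vd, const uint8_t *vs2,
                               size_t offset, size_t vl, size_t vlmax)
    {
        for (size_t i = 0; i < vl; i++) {
            vd[i] = (i + offset < vlmax) ? vs2[i + offset] : 0;
        }
    }

    /* vslide1up.vx: vd[0] = x[rs1], vd[i] = vs2[i - 1] for i > 0. */
    static void ref_vslide1up(uint8_t *vd, const uint8_t *vs2,
                              uint8_t rs1, size_t vl)
    {
        for (size_t i = 0; i < vl; i++) {
            vd[i] = (i == 0) ? rs1 : vs2[i - 1];
        }
    }

    /* vslide1down.vx: vd[i] = vs2[i + 1], vd[vl - 1] = x[rs1]. */
    static void ref_vslide1down(uint8_t *vd, const uint8_t *vs2,
                                uint8_t rs1, size_t vl)
    {
        for (size_t i = 0; i < vl; i++) {
            vd[i] = (i == vl - 1) ? rs1 : vs2[i + 1];
        }
    }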
diff --git a/target/riscv/helper.h b/target/riscv/helper.h
index fe264621ff..6ce0bcbba7 100644
--- a/target/riscv/helper.h
+++ b/target/riscv/helper.h
@@ -2316,3 +2316,20 @@ DEF_HELPER_4(th_vid_v_b, void, ptr, ptr, env, i32)
DEF_HELPER_4(th_vid_v_h, void, ptr, ptr, env, i32)
DEF_HELPER_4(th_vid_v_w, void, ptr, ptr, env, i32)
DEF_HELPER_4(th_vid_v_d, void, ptr, ptr, env, i32)
+
+DEF_HELPER_6(th_vslideup_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(th_vslideup_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(th_vslideup_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(th_vslideup_vx_d, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(th_vslidedown_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(th_vslidedown_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(th_vslidedown_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(th_vslidedown_vx_d, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(th_vslide1up_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(th_vslide1up_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(th_vslide1up_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(th_vslide1up_vx_d, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(th_vslide1down_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(th_vslide1down_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(th_vslide1down_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(th_vslide1down_vx_d, void, ptr, ptr, tl, ptr, env, i32)
diff --git a/target/riscv/insn_trans/trans_xtheadvector.c.inc b/target/riscv/insn_trans/trans_xtheadvector.c.inc
index 54ccd933c0..46cfc51690 100644
--- a/target/riscv/insn_trans/trans_xtheadvector.c.inc
+++ b/target/riscv/insn_trans/trans_xtheadvector.c.inc
@@ -2797,18 +2797,31 @@ static bool trans_th_vfmv_s_f(DisasContext *s, arg_th_vfmv_s_f *a)
return false;
}
+/* Vector Slide Instructions */
+static bool slideup_check_th(DisasContext *s, arg_rmrr *a)
+{
+ return (require_xtheadvector(s) &&
+ vext_check_isa_ill(s) &&
+ th_check_overlap_mask(s, a->rd, a->vm, true) &&
+ th_check_reg(s, a->rd, false) &&
+ th_check_reg(s, a->rs2, false) &&
+ (a->rd != a->rs2));
+}
+
+GEN_OPIVX_TRANS_TH(th_vslideup_vx, slideup_check_th)
+GEN_OPIVX_TRANS_TH(th_vslide1up_vx, slideup_check_th)
+GEN_OPIVI_TRANS_TH(th_vslideup_vi, IMM_ZX, th_vslideup_vx, slideup_check_th)
+
+GEN_OPIVX_TRANS_TH(th_vslidedown_vx, opivx_check_th)
+GEN_OPIVX_TRANS_TH(th_vslide1down_vx, opivx_check_th)
+GEN_OPIVI_TRANS_TH(th_vslidedown_vi, IMM_ZX, th_vslidedown_vx, opivx_check_th)
+
#define TH_TRANS_STUB(NAME) \
static bool trans_##NAME(DisasContext *s, arg_##NAME *a) \
{ \
return require_xtheadvector(s); \
}
-TH_TRANS_STUB(th_vslideup_vx)
-TH_TRANS_STUB(th_vslideup_vi)
-TH_TRANS_STUB(th_vslide1up_vx)
-TH_TRANS_STUB(th_vslidedown_vx)
-TH_TRANS_STUB(th_vslidedown_vi)
-TH_TRANS_STUB(th_vslide1down_vx)
TH_TRANS_STUB(th_vrgather_vv)
TH_TRANS_STUB(th_vrgather_vx)
TH_TRANS_STUB(th_vrgather_vi)
diff --git a/target/riscv/xtheadvector_helper.c b/target/riscv/xtheadvector_helper.c
index 0743d57b12..73a15eb070 100644
--- a/target/riscv/xtheadvector_helper.c
+++ b/target/riscv/xtheadvector_helper.c
@@ -3678,3 +3678,126 @@ GEN_TH_VID_V(th_vid_v_b, uint8_t, H1, clearb_th)
GEN_TH_VID_V(th_vid_v_h, uint16_t, H2, clearh_th)
GEN_TH_VID_V(th_vid_v_w, uint32_t, H4, clearl_th)
GEN_TH_VID_V(th_vid_v_d, uint64_t, H8, clearq_th)
+
+/*
+ * Vector Permutation Instructions
+ */
+
+/* Vector Slide Instructions */
+#define GEN_TH_VSLIDEUP_VX(NAME, ETYPE, H, CLEAR_FN) \
+void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
+ CPURISCVState *env, uint32_t desc) \
+{ \
+ uint32_t mlen = th_mlen(desc); \
+ uint32_t vlmax = (env_archcpu(env)->cfg.vlenb << 3) / mlen; \
+ uint32_t vm = th_vm(desc); \
+ uint32_t vl = env->vl; \
+ target_ulong offset = s1, i_min, i; \
+ \
+ VSTART_CHECK_EARLY_EXIT(env); \
+ i_min = MAX(env->vstart, offset); \
+ for (i = i_min; i < vl; i++) { \
+ if (!vm && !th_elem_mask(v0, mlen, i)) { \
+ continue; \
+ } \
+ *((ETYPE *)vd + H(i)) = *((ETYPE *)vs2 + H(i - offset)); \
+ } \
+ env->vstart = 0; \
+ CLEAR_FN(vd, vl, vl * sizeof(ETYPE), vlmax * sizeof(ETYPE)); \
+}
+
+/* vslideup.vx vd, vs2, rs1, vm # vd[i+rs1] = vs2[i] */
+GEN_TH_VSLIDEUP_VX(th_vslideup_vx_b, uint8_t, H1, clearb_th)
+GEN_TH_VSLIDEUP_VX(th_vslideup_vx_h, uint16_t, H2, clearh_th)
+GEN_TH_VSLIDEUP_VX(th_vslideup_vx_w, uint32_t, H4, clearl_th)
+GEN_TH_VSLIDEUP_VX(th_vslideup_vx_d, uint64_t, H8, clearq_th)
+
+#define GEN_TH_VSLIDEDOWN_VX(NAME, ETYPE, H, CLEAR_FN) \
+void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
+ CPURISCVState *env, uint32_t desc) \
+{ \
+ uint32_t mlen = th_mlen(desc); \
+ uint32_t vlmax = (env_archcpu(env)->cfg.vlenb << 3) / mlen; \
+ uint32_t vm = th_vm(desc); \
+ uint32_t vl = env->vl; \
+ target_ulong offset = s1, i; \
+ \
+ VSTART_CHECK_EARLY_EXIT(env); \
+ for (i = env->vstart; i < vl; ++i) { \
+ target_ulong j = i + offset; \
+ if (!vm && !th_elem_mask(v0, mlen, i)) { \
+ continue; \
+ } \
+ *((ETYPE *)vd + H(i)) = j >= vlmax ? 0 : *((ETYPE *)vs2 + H(j)); \
+ } \
+ env->vstart = 0; \
+ CLEAR_FN(vd, vl, vl * sizeof(ETYPE), vlmax * sizeof(ETYPE)); \
+}
+
+/* vslidedown.vx vd, vs2, rs1, vm # vd[i] = vs2[i+rs1] */
+GEN_TH_VSLIDEDOWN_VX(th_vslidedown_vx_b, uint8_t, H1, clearb_th)
+GEN_TH_VSLIDEDOWN_VX(th_vslidedown_vx_h, uint16_t, H2, clearh_th)
+GEN_TH_VSLIDEDOWN_VX(th_vslidedown_vx_w, uint32_t, H4, clearl_th)
+GEN_TH_VSLIDEDOWN_VX(th_vslidedown_vx_d, uint64_t, H8, clearq_th)
+
+#define GEN_TH_VSLIDE1UP_VX(NAME, ETYPE, H, CLEAR_FN) \
+void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
+ CPURISCVState *env, uint32_t desc) \
+{ \
+ uint32_t mlen = th_mlen(desc); \
+ uint32_t vlmax = (env_archcpu(env)->cfg.vlenb << 3) / mlen; \
+ uint32_t vm = th_vm(desc); \
+ uint32_t vl = env->vl; \
+ uint32_t i; \
+ \
+ VSTART_CHECK_EARLY_EXIT(env); \
+ for (i = env->vstart; i < vl; i++) { \
+ if (!vm && !th_elem_mask(v0, mlen, i)) { \
+ continue; \
+ } \
+ if (i == 0) { \
+ *((ETYPE *)vd + H(i)) = s1; \
+ } else { \
+ *((ETYPE *)vd + H(i)) = *((ETYPE *)vs2 + H(i - 1)); \
+ } \
+ } \
+ env->vstart = 0; \
+ CLEAR_FN(vd, vl, vl * sizeof(ETYPE), vlmax * sizeof(ETYPE)); \
+}
+
+/* vslide1up.vx vd, vs2, rs1, vm # vd[0]=x[rs1], vd[i+1] = vs2[i] */
+GEN_TH_VSLIDE1UP_VX(th_vslide1up_vx_b, uint8_t, H1, clearb_th)
+GEN_TH_VSLIDE1UP_VX(th_vslide1up_vx_h, uint16_t, H2, clearh_th)
+GEN_TH_VSLIDE1UP_VX(th_vslide1up_vx_w, uint32_t, H4, clearl_th)
+GEN_TH_VSLIDE1UP_VX(th_vslide1up_vx_d, uint64_t, H8, clearq_th)
+
+#define GEN_TH_VSLIDE1DOWN_VX(NAME, ETYPE, H, CLEAR_FN) \
+void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
+ CPURISCVState *env, uint32_t desc) \
+{ \
+ uint32_t mlen = th_mlen(desc); \
+ uint32_t vlmax = (env_archcpu(env)->cfg.vlenb << 3) / mlen; \
+ uint32_t vm = th_vm(desc); \
+ uint32_t vl = env->vl; \
+ uint32_t i; \
+ \
+ VSTART_CHECK_EARLY_EXIT(env); \
+ for (i = env->vstart; i < vl; i++) { \
+ if (!vm && !th_elem_mask(v0, mlen, i)) { \
+ continue; \
+ } \
+ if (i == vl - 1) { \
+ *((ETYPE *)vd + H(i)) = s1; \
+ } else { \
+ *((ETYPE *)vd + H(i)) = *((ETYPE *)vs2 + H(i + 1)); \
+ } \
+ } \
+ env->vstart = 0; \
+ CLEAR_FN(vd, vl, vl * sizeof(ETYPE), vlmax * sizeof(ETYPE)); \
+}
+
+/* vslide1down.vx vd, vs2, rs1, vm # vd[i] = vs2[i+1], vd[vl-1]=x[rs1] */
+GEN_TH_VSLIDE1DOWN_VX(th_vslide1down_vx_b, uint8_t, H1, clearb_th)
+GEN_TH_VSLIDE1DOWN_VX(th_vslide1down_vx_h, uint16_t, H2, clearh_th)
+GEN_TH_VSLIDE1DOWN_VX(th_vslide1down_vx_w, uint32_t, H4, clearl_th)
+GEN_TH_VSLIDE1DOWN_VX(th_vslide1down_vx_d, uint64_t, H8, clearq_th)
--
2.44.0
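
One general difference from RVV1.0 that is visible in these helpers: the
CLEAR_FN calls zero the tail elements from vl up to vlmax, following the
pre-1.0 vector drafts, whereas RVV1.0 allows tail-agnostic handling. A
minimal sketch of the clearing step (illustrative only; the real clearb_th
etc. callbacks also take the starting element index as a separate argument):

    #include <stdint.h>
    #include <string.h>

    /* Zero destination bytes from the end of the active body (cnt bytes)
     * up to the end of the register group (tot bytes). */
    static void clear_tail_sketch(void *vd, uint32_t cnt, uint32_t tot)
    {
        if (tot > cnt) {
            memset((uint8_t *)vd + cnt, 0, tot - cnt);
        }
    }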