qemu-devel
[Top][All Lists]
Advanced

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[RFC PATCH 25/43] target/loongarch: Implement vsrlr vsrar


From: Song Gao
Subject: [RFC PATCH 25/43] target/loongarch: Implement vsrlr vsrar
Date: Sat, 24 Dec 2022 16:16:15 +0800

This patch includes:
- VSRLR[I].{B/H/W/D};
- VSRAR[I].{B/H/W/D}.

Signed-off-by: Song Gao <gaosong@loongson.cn>
---
 target/loongarch/disas.c                    |  18 +++
 target/loongarch/helper.h                   |  18 +++
 target/loongarch/insn_trans/trans_lsx.c.inc |  18 +++
 target/loongarch/insns.decode               |  18 +++
 target/loongarch/lsx_helper.c               | 124 ++++++++++++++++++++
 5 files changed, 196 insertions(+)

diff --git a/target/loongarch/disas.c b/target/loongarch/disas.c
index 18c4fd521a..766d934705 100644
--- a/target/loongarch/disas.c
+++ b/target/loongarch/disas.c
@@ -1119,3 +1119,21 @@ INSN_LSX(vsllwil_hu_bu,    vv_i)
 INSN_LSX(vsllwil_wu_hu,    vv_i)
 INSN_LSX(vsllwil_du_wu,    vv_i)
 INSN_LSX(vextl_qu_du,      vv)
+
+INSN_LSX(vsrlr_b,          vvv)
+INSN_LSX(vsrlr_h,          vvv)
+INSN_LSX(vsrlr_w,          vvv)
+INSN_LSX(vsrlr_d,          vvv)
+INSN_LSX(vsrlri_b,         vv_i)
+INSN_LSX(vsrlri_h,         vv_i)
+INSN_LSX(vsrlri_w,         vv_i)
+INSN_LSX(vsrlri_d,         vv_i)
+
+INSN_LSX(vsrar_b,          vvv)
+INSN_LSX(vsrar_h,          vvv)
+INSN_LSX(vsrar_w,          vvv)
+INSN_LSX(vsrar_d,          vvv)
+INSN_LSX(vsrari_b,         vv_i)
+INSN_LSX(vsrari_h,         vv_i)
+INSN_LSX(vsrari_w,         vv_i)
+INSN_LSX(vsrari_d,         vv_i)
diff --git a/target/loongarch/helper.h b/target/loongarch/helper.h
index e3ec216b14..65438c00f1 100644
--- a/target/loongarch/helper.h
+++ b/target/loongarch/helper.h
@@ -474,3 +474,21 @@ DEF_HELPER_4(vsllwil_hu_bu, void, env, i32, i32, i32)
 DEF_HELPER_4(vsllwil_wu_hu, void, env, i32, i32, i32)
 DEF_HELPER_4(vsllwil_du_wu, void, env, i32, i32, i32)
 DEF_HELPER_3(vextl_qu_du, void, env, i32, i32)
+
+DEF_HELPER_4(vsrlr_b, void, env, i32, i32, i32)
+DEF_HELPER_4(vsrlr_h, void, env, i32, i32, i32)
+DEF_HELPER_4(vsrlr_w, void, env, i32, i32, i32)
+DEF_HELPER_4(vsrlr_d, void, env, i32, i32, i32)
+DEF_HELPER_4(vsrlri_b, void, env, i32, i32, i32)
+DEF_HELPER_4(vsrlri_h, void, env, i32, i32, i32)
+DEF_HELPER_4(vsrlri_w, void, env, i32, i32, i32)
+DEF_HELPER_4(vsrlri_d, void, env, i32, i32, i32)
+
+DEF_HELPER_4(vsrar_b, void, env, i32, i32, i32)
+DEF_HELPER_4(vsrar_h, void, env, i32, i32, i32)
+DEF_HELPER_4(vsrar_w, void, env, i32, i32, i32)
+DEF_HELPER_4(vsrar_d, void, env, i32, i32, i32)
+DEF_HELPER_4(vsrari_b, void, env, i32, i32, i32)
+DEF_HELPER_4(vsrari_h, void, env, i32, i32, i32)
+DEF_HELPER_4(vsrari_w, void, env, i32, i32, i32)
+DEF_HELPER_4(vsrari_d, void, env, i32, i32, i32)
diff --git a/target/loongarch/insn_trans/trans_lsx.c.inc 
b/target/loongarch/insn_trans/trans_lsx.c.inc
index 8193e66fff..9196ec3ed7 100644
--- a/target/loongarch/insn_trans/trans_lsx.c.inc
+++ b/target/loongarch/insn_trans/trans_lsx.c.inc
@@ -390,3 +390,21 @@ TRANS(vsllwil_hu_bu, gen_vv_i, gen_helper_vsllwil_hu_bu)
 TRANS(vsllwil_wu_hu, gen_vv_i, gen_helper_vsllwil_wu_hu)
 TRANS(vsllwil_du_wu, gen_vv_i, gen_helper_vsllwil_du_wu)
 TRANS(vextl_qu_du, gen_vv, gen_helper_vextl_qu_du)
+
+TRANS(vsrlr_b, gen_vvv, gen_helper_vsrlr_b)
+TRANS(vsrlr_h, gen_vvv, gen_helper_vsrlr_h)
+TRANS(vsrlr_w, gen_vvv, gen_helper_vsrlr_w)
+TRANS(vsrlr_d, gen_vvv, gen_helper_vsrlr_d)
+TRANS(vsrlri_b, gen_vv_i, gen_helper_vsrlri_b)
+TRANS(vsrlri_h, gen_vv_i, gen_helper_vsrlri_h)
+TRANS(vsrlri_w, gen_vv_i, gen_helper_vsrlri_w)
+TRANS(vsrlri_d, gen_vv_i, gen_helper_vsrlri_d)
+
+TRANS(vsrar_b, gen_vvv, gen_helper_vsrar_b)
+TRANS(vsrar_h, gen_vvv, gen_helper_vsrar_h)
+TRANS(vsrar_w, gen_vvv, gen_helper_vsrar_w)
+TRANS(vsrar_d, gen_vvv, gen_helper_vsrar_d)
+TRANS(vsrari_b, gen_vv_i, gen_helper_vsrari_b)
+TRANS(vsrari_h, gen_vv_i, gen_helper_vsrari_h)
+TRANS(vsrari_w, gen_vv_i, gen_helper_vsrari_w)
+TRANS(vsrari_d, gen_vv_i, gen_helper_vsrari_d)
diff --git a/target/loongarch/insns.decode b/target/loongarch/insns.decode
index 29609b834e..eef25e2eef 100644
--- a/target/loongarch/insns.decode
+++ b/target/loongarch/insns.decode
@@ -847,3 +847,21 @@ vsllwil_hu_bu    0111 00110000 11000 01 ... ..... .....   
@vv_ui3
 vsllwil_wu_hu    0111 00110000 11000 1 .... ..... .....   @vv_ui4
 vsllwil_du_wu    0111 00110000 11001 ..... ..... .....    @vv_ui5
 vextl_qu_du      0111 00110000 11010 00000 ..... .....    @vv
+
+vsrlr_b          0111 00001111 00000 ..... ..... .....    @vvv
+vsrlr_h          0111 00001111 00001 ..... ..... .....    @vvv
+vsrlr_w          0111 00001111 00010 ..... ..... .....    @vvv
+vsrlr_d          0111 00001111 00011 ..... ..... .....    @vvv
+vsrlri_b         0111 00101010 01000 01 ... ..... .....   @vv_ui3
+vsrlri_h         0111 00101010 01000 1 .... ..... .....   @vv_ui4
+vsrlri_w         0111 00101010 01001 ..... ..... .....    @vv_ui5
+vsrlri_d         0111 00101010 0101 ...... ..... .....    @vv_ui6
+
+vsrar_b          0111 00001111 00100 ..... ..... .....    @vvv
+vsrar_h          0111 00001111 00101 ..... ..... .....    @vvv
+vsrar_w          0111 00001111 00110 ..... ..... .....    @vvv
+vsrar_d          0111 00001111 00111 ..... ..... .....    @vvv
+vsrari_b         0111 00101010 10000 01 ... ..... .....   @vv_ui3
+vsrari_h         0111 00101010 10000 1 .... ..... .....   @vv_ui4
+vsrari_w         0111 00101010 10001 ..... ..... .....    @vv_ui5
+vsrari_d         0111 00101010 1001 ...... ..... .....    @vv_ui6
diff --git a/target/loongarch/lsx_helper.c b/target/loongarch/lsx_helper.c
index 91c1964d81..529a81372b 100644
--- a/target/loongarch/lsx_helper.c
+++ b/target/loongarch/lsx_helper.c
@@ -2258,3 +2258,127 @@ DO_HELPER_VV_I(vsllwil_hu_bu, 16, helper_vv_i_c, 
do_vsllwil_u)
 DO_HELPER_VV_I(vsllwil_wu_hu, 32, helper_vv_i_c, do_vsllwil_u)
 DO_HELPER_VV_I(vsllwil_du_wu, 64, helper_vv_i_c, do_vsllwil_u)
 DO_HELPER_VV(vextl_qu_du, 128, helper_vv, do_vextl_qu_du)
+
+/* Rounding logical right shift of the low @bit bits of @s1 by @s2. */
+static int64_t vsrlr(int64_t s1, int64_t s2, int bit)
+{
+    uint64_t umax = MAKE_64BIT_MASK(0, bit);
+    uint64_t u1 = s1 & umax;
+    /* Shift count is the low log2(bit) bits of the (unsigned) element.
+     * Convert to unsigned BEFORE the modulo: a signed modulo of a
+     * negative element yields a negative n, and shifting by a negative
+     * count is undefined behavior. */
+    int32_t n = ((uint64_t)s2) % bit;
+
+    if (n == 0) {
+        return u1;
+    } else {
+        uint64_t r_bit = (u1 >> (n - 1)) & 1; /* round: add bit shifted out last */
+        return (u1 >> n) + r_bit;
+    }
+}
+
+/* Per-element dispatch: Vd[n] = vsrlr(Vj[n], Vk[n]) at element width @bit. */
+static void do_vsrlr(vec_t *Vd, vec_t *Vj, vec_t *Vk, int bit, int n)
+{
+    switch (bit) {
+    case 8:
+        Vd->B[n] = vsrlr(Vj->B[n], Vk->B[n], bit);
+        break;
+    case 16:
+        Vd->H[n] = vsrlr(Vj->H[n], Vk->H[n], bit);
+        break;
+    case 32:
+        Vd->W[n] = vsrlr(Vj->W[n], Vk->W[n], bit);
+        break;
+    case 64:
+        Vd->D[n] = vsrlr(Vj->D[n], Vk->D[n], bit);
+        break;
+    default:
+        g_assert_not_reached();
+    }
+}
+
+/* Immediate form: Vd[n] = vsrlr(Vj[n], @imm) at element width @bit.
+ * @imm is decoded as ui3/ui4/ui5/ui6, so it is already < bit. */
+static void do_vsrlri(vec_t *Vd, vec_t *Vj, uint32_t imm, int bit, int n)
+{
+    switch (bit) {
+    case 8:
+        Vd->B[n] = vsrlr(Vj->B[n], imm, bit);
+        break;
+    case 16:
+        Vd->H[n] = vsrlr(Vj->H[n], imm, bit);
+        break;
+    case 32:
+        Vd->W[n] = vsrlr(Vj->W[n], imm, bit);
+        break;
+    case 64:
+        Vd->D[n] = vsrlr(Vj->D[n], imm, bit);
+        break;
+    default:
+        g_assert_not_reached();
+    }
+}
+
+DO_HELPER_VVV(vsrlr_b, 8, helper_vvv, do_vsrlr)
+DO_HELPER_VVV(vsrlr_h, 16, helper_vvv, do_vsrlr)
+DO_HELPER_VVV(vsrlr_w, 32, helper_vvv, do_vsrlr)
+DO_HELPER_VVV(vsrlr_d, 64, helper_vvv, do_vsrlr)
+DO_HELPER_VVV(vsrlri_b, 8, helper_vv_i, do_vsrlri)
+DO_HELPER_VVV(vsrlri_h, 16, helper_vv_i, do_vsrlri)
+DO_HELPER_VVV(vsrlri_w, 32, helper_vv_i, do_vsrlri)
+DO_HELPER_VVV(vsrlri_d, 64, helper_vv_i, do_vsrlri)
+
+/* Rounding arithmetic right shift of @s1 (already sign-extended) by @s2. */
+static int64_t vsrar(int64_t s1, int64_t s2, int bit)
+{
+    /* Shift count is the low log2(bit) bits of the (unsigned) element.
+     * Convert to unsigned BEFORE the modulo: a signed modulo of a
+     * negative element yields a negative n, and shifting by a negative
+     * count is undefined behavior. */
+    int32_t n = ((uint64_t)s2) % bit;
+
+    if (n == 0) {
+        return s1;
+    } else {
+        uint64_t r_bit = (s1 >> (n - 1)) & 1; /* round: add bit shifted out last */
+        return (s1 >> n) + r_bit;
+    }
+}
+
+/* Per-element dispatch: Vd[n] = vsrar(Vj[n], Vk[n]) at element width @bit. */
+static void do_vsrar(vec_t *Vd, vec_t *Vj, vec_t *Vk, int bit, int n)
+{
+    switch (bit) {
+    case 8:
+        Vd->B[n] = vsrar(Vj->B[n], Vk->B[n], bit);
+        break;
+    case 16:
+        Vd->H[n] = vsrar(Vj->H[n], Vk->H[n], bit);
+        break;
+    case 32:
+        Vd->W[n] = vsrar(Vj->W[n], Vk->W[n], bit);
+        break;
+    case 64:
+        Vd->D[n] = vsrar(Vj->D[n], Vk->D[n], bit);
+        break;
+    default:
+        g_assert_not_reached();
+    }
+}
+
+/* Immediate form: Vd[n] = vsrar(Vj[n], @imm) at element width @bit.
+ * @imm is decoded as ui3/ui4/ui5/ui6, so it is already < bit. */
+static void do_vsrari(vec_t *Vd, vec_t *Vj, uint32_t imm, int bit, int n)
+{
+    switch (bit) {
+    case 8:
+        Vd->B[n] = vsrar(Vj->B[n], imm, bit);
+        break;
+    case 16:
+        Vd->H[n] = vsrar(Vj->H[n], imm, bit);
+        break;
+    case 32:
+        Vd->W[n] = vsrar(Vj->W[n], imm, bit);
+        break;
+    case 64:
+        Vd->D[n] = vsrar(Vj->D[n], imm, bit);
+        break;
+    default:
+        g_assert_not_reached();
+    }
+}
+
+DO_HELPER_VVV(vsrar_b, 8, helper_vvv, do_vsrar)
+DO_HELPER_VVV(vsrar_h, 16, helper_vvv, do_vsrar)
+DO_HELPER_VVV(vsrar_w, 32, helper_vvv, do_vsrar)
+DO_HELPER_VVV(vsrar_d, 64, helper_vvv, do_vsrar)
+DO_HELPER_VVV(vsrari_b, 8, helper_vv_i, do_vsrari)
+DO_HELPER_VVV(vsrari_h, 16, helper_vv_i, do_vsrari)
+DO_HELPER_VVV(vsrari_w, 32, helper_vv_i, do_vsrari)
+DO_HELPER_VVV(vsrari_d, 64, helper_vv_i, do_vsrari)
-- 
2.31.1




reply via email to

[Prev in Thread] Current Thread [Next in Thread]