[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[PATCH 24/65] target/riscv: Add single-width integer multiply instructions
From: |
Huang Tao |
Subject: |
[PATCH 24/65] target/riscv: Add single-width integer multiply instructions for XTheadVector |
Date: |
Fri, 12 Apr 2024 15:36:54 +0800 |
The instructions have the same function as RVV1.0. Overall, there are only
the general differences between XTheadVector and RVV1.0.
Signed-off-by: Huang Tao <eric.huang@linux.alibaba.com>
---
target/riscv/helper.h | 33 +++++++++
.../riscv/insn_trans/trans_xtheadvector.c.inc | 18 ++---
target/riscv/vector_helper.c | 28 ++++----
target/riscv/vector_internals.h | 17 +++++
target/riscv/xtheadvector_helper.c | 69 +++++++++++++++++++
5 files changed, 141 insertions(+), 24 deletions(-)
diff --git a/target/riscv/helper.h b/target/riscv/helper.h
index f3e4ab0f1f..e678dd5385 100644
--- a/target/riscv/helper.h
+++ b/target/riscv/helper.h
@@ -1728,3 +1728,36 @@ DEF_HELPER_6(th_vmax_vx_b, void, ptr, ptr, tl, ptr, env,
i32)
DEF_HELPER_6(th_vmax_vx_h, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(th_vmax_vx_w, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(th_vmax_vx_d, void, ptr, ptr, tl, ptr, env, i32)
+
+DEF_HELPER_6(th_vmul_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(th_vmul_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(th_vmul_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(th_vmul_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(th_vmulh_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(th_vmulh_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(th_vmulh_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(th_vmulh_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(th_vmulhu_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(th_vmulhu_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(th_vmulhu_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(th_vmulhu_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(th_vmulhsu_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(th_vmulhsu_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(th_vmulhsu_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(th_vmulhsu_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(th_vmul_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(th_vmul_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(th_vmul_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(th_vmul_vx_d, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(th_vmulh_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(th_vmulh_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(th_vmulh_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(th_vmulh_vx_d, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(th_vmulhu_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(th_vmulhu_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(th_vmulhu_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(th_vmulhu_vx_d, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(th_vmulhsu_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(th_vmulhsu_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(th_vmulhsu_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(th_vmulhsu_vx_d, void, ptr, ptr, tl, ptr, env, i32)
diff --git a/target/riscv/insn_trans/trans_xtheadvector.c.inc
b/target/riscv/insn_trans/trans_xtheadvector.c.inc
index f19a771b61..15f36ba98a 100644
--- a/target/riscv/insn_trans/trans_xtheadvector.c.inc
+++ b/target/riscv/insn_trans/trans_xtheadvector.c.inc
@@ -1529,20 +1529,22 @@ GEN_OPIVX_TRANS_TH(th_vmin_vx, opivx_check_th)
GEN_OPIVX_TRANS_TH(th_vmaxu_vx, opivx_check_th)
GEN_OPIVX_TRANS_TH(th_vmax_vx, opivx_check_th)
+/* Vector Single-Width Integer Multiply Instructions */
+GEN_OPIVV_GVEC_TRANS_TH(th_vmul_vv, mul)
+GEN_OPIVV_TRANS_TH(th_vmulh_vv, opivv_check_th)
+GEN_OPIVV_TRANS_TH(th_vmulhu_vv, opivv_check_th)
+GEN_OPIVV_TRANS_TH(th_vmulhsu_vv, opivv_check_th)
+GEN_OPIVX_GVEC_TRANS_TH(th_vmul_vx, muls)
+GEN_OPIVX_TRANS_TH(th_vmulh_vx, opivx_check_th)
+GEN_OPIVX_TRANS_TH(th_vmulhu_vx, opivx_check_th)
+GEN_OPIVX_TRANS_TH(th_vmulhsu_vx, opivx_check_th)
+
#define TH_TRANS_STUB(NAME) \
static bool trans_##NAME(DisasContext *s, arg_##NAME *a) \
{ \
return require_xtheadvector(s); \
}
-TH_TRANS_STUB(th_vmul_vv)
-TH_TRANS_STUB(th_vmul_vx)
-TH_TRANS_STUB(th_vmulh_vv)
-TH_TRANS_STUB(th_vmulh_vx)
-TH_TRANS_STUB(th_vmulhu_vv)
-TH_TRANS_STUB(th_vmulhu_vx)
-TH_TRANS_STUB(th_vmulhsu_vv)
-TH_TRANS_STUB(th_vmulhsu_vx)
TH_TRANS_STUB(th_vdivu_vv)
TH_TRANS_STUB(th_vdivu_vx)
TH_TRANS_STUB(th_vdiv_vv)
diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
index 9774fc62c3..5aba3f238f 100644
--- a/target/riscv/vector_helper.c
+++ b/target/riscv/vector_helper.c
@@ -647,10 +647,6 @@ GEN_VEXT_ST_WHOLE(vs8r_v, int8_t, ste_b)
*/
/* (TD, T1, T2, TX1, TX2) */
-#define OP_SUS_B int8_t, uint8_t, int8_t, uint8_t, int8_t
-#define OP_SUS_H int16_t, uint16_t, int16_t, uint16_t, int16_t
-#define OP_SUS_W int32_t, uint32_t, int32_t, uint32_t, int32_t
-#define OP_SUS_D int64_t, uint64_t, int64_t, uint64_t, int64_t
#define WOP_SUS_B int16_t, uint8_t, int8_t, uint16_t, int16_t
#define WOP_SUS_H int32_t, uint16_t, int16_t, uint32_t, int32_t
#define WOP_SUS_W int64_t, uint32_t, int32_t, uint64_t, int64_t
@@ -1399,22 +1395,22 @@ GEN_VEXT_VV(vmul_vv_h, 2)
GEN_VEXT_VV(vmul_vv_w, 4)
GEN_VEXT_VV(vmul_vv_d, 8)
-static int8_t do_mulh_b(int8_t s2, int8_t s1)
+int8_t do_mulh_b(int8_t s2, int8_t s1)
{
return (int16_t)s2 * (int16_t)s1 >> 8;
}
-static int16_t do_mulh_h(int16_t s2, int16_t s1)
+int16_t do_mulh_h(int16_t s2, int16_t s1)
{
return (int32_t)s2 * (int32_t)s1 >> 16;
}
-static int32_t do_mulh_w(int32_t s2, int32_t s1)
+int32_t do_mulh_w(int32_t s2, int32_t s1)
{
return (int64_t)s2 * (int64_t)s1 >> 32;
}
-static int64_t do_mulh_d(int64_t s2, int64_t s1)
+int64_t do_mulh_d(int64_t s2, int64_t s1)
{
uint64_t hi_64, lo_64;
@@ -1422,22 +1418,22 @@ static int64_t do_mulh_d(int64_t s2, int64_t s1)
return hi_64;
}
-static uint8_t do_mulhu_b(uint8_t s2, uint8_t s1)
+uint8_t do_mulhu_b(uint8_t s2, uint8_t s1)
{
return (uint16_t)s2 * (uint16_t)s1 >> 8;
}
-static uint16_t do_mulhu_h(uint16_t s2, uint16_t s1)
+uint16_t do_mulhu_h(uint16_t s2, uint16_t s1)
{
return (uint32_t)s2 * (uint32_t)s1 >> 16;
}
-static uint32_t do_mulhu_w(uint32_t s2, uint32_t s1)
+uint32_t do_mulhu_w(uint32_t s2, uint32_t s1)
{
return (uint64_t)s2 * (uint64_t)s1 >> 32;
}
-static uint64_t do_mulhu_d(uint64_t s2, uint64_t s1)
+uint64_t do_mulhu_d(uint64_t s2, uint64_t s1)
{
uint64_t hi_64, lo_64;
@@ -1445,17 +1441,17 @@ static uint64_t do_mulhu_d(uint64_t s2, uint64_t s1)
return hi_64;
}
-static int8_t do_mulhsu_b(int8_t s2, uint8_t s1)
+int8_t do_mulhsu_b(int8_t s2, uint8_t s1)
{
return (int16_t)s2 * (uint16_t)s1 >> 8;
}
-static int16_t do_mulhsu_h(int16_t s2, uint16_t s1)
+int16_t do_mulhsu_h(int16_t s2, uint16_t s1)
{
return (int32_t)s2 * (uint32_t)s1 >> 16;
}
-static int32_t do_mulhsu_w(int32_t s2, uint32_t s1)
+int32_t do_mulhsu_w(int32_t s2, uint32_t s1)
{
return (int64_t)s2 * (uint64_t)s1 >> 32;
}
@@ -1479,7 +1475,7 @@ static int32_t do_mulhsu_w(int32_t s2, uint32_t s1)
* HI_P -= (A < 0 ? B : 0)
*/
-static int64_t do_mulhsu_d(int64_t s2, uint64_t s1)
+int64_t do_mulhsu_d(int64_t s2, uint64_t s1)
{
uint64_t hi_64, lo_64;
diff --git a/target/riscv/vector_internals.h b/target/riscv/vector_internals.h
index 24e64c37d4..4cbd7f972a 100644
--- a/target/riscv/vector_internals.h
+++ b/target/riscv/vector_internals.h
@@ -142,6 +142,10 @@ void vext_set_elems_1s(void *base, uint32_t is_agnostic,
uint32_t cnt,
#define OP_SSS_H int16_t, int16_t, int16_t, int16_t, int16_t
#define OP_SSS_W int32_t, int32_t, int32_t, int32_t, int32_t
#define OP_SSS_D int64_t, int64_t, int64_t, int64_t, int64_t
+#define OP_SUS_B int8_t, uint8_t, int8_t, uint8_t, int8_t
+#define OP_SUS_H int16_t, uint16_t, int16_t, uint16_t, int16_t
+#define OP_SUS_W int32_t, uint32_t, int32_t, uint32_t, int32_t
+#define OP_SUS_D int64_t, uint64_t, int64_t, uint64_t, int64_t
#define OPIVV1(NAME, TD, T2, TX2, HD, HS2, OP) \
static void do_##NAME(void *vd, void *vs2, int i) \
@@ -261,4 +265,17 @@ void probe_pages(CPURISCVState *env, target_ulong addr,
target_ulong len, uintptr_t ra,
MMUAccessType access_type);
+int8_t do_mulh_b(int8_t s2, int8_t s1);
+int16_t do_mulh_h(int16_t s2, int16_t s1);
+int32_t do_mulh_w(int32_t s2, int32_t s1);
+int64_t do_mulh_d(int64_t s2, int64_t s1);
+uint8_t do_mulhu_b(uint8_t s2, uint8_t s1);
+uint16_t do_mulhu_h(uint16_t s2, uint16_t s1);
+uint32_t do_mulhu_w(uint32_t s2, uint32_t s1);
+uint64_t do_mulhu_d(uint64_t s2, uint64_t s1);
+int8_t do_mulhsu_b(int8_t s2, uint8_t s1);
+int16_t do_mulhsu_h(int16_t s2, uint16_t s1);
+int32_t do_mulhsu_w(int32_t s2, uint32_t s1);
+int64_t do_mulhsu_d(int64_t s2, uint64_t s1);
+
#endif /* TARGET_RISCV_VECTOR_INTERNALS_H */
diff --git a/target/riscv/xtheadvector_helper.c
b/target/riscv/xtheadvector_helper.c
index da869e1069..9d8129750c 100644
--- a/target/riscv/xtheadvector_helper.c
+++ b/target/riscv/xtheadvector_helper.c
@@ -1609,3 +1609,72 @@ GEN_TH_VX(th_vmax_vx_b, 1, 1, clearb_th)
GEN_TH_VX(th_vmax_vx_h, 2, 2, clearh_th)
GEN_TH_VX(th_vmax_vx_w, 4, 4, clearl_th)
GEN_TH_VX(th_vmax_vx_d, 8, 8, clearq_th)
+
+/* Vector Single-Width Integer Multiply Instructions */
+#define TH_MUL(N, M) (N * M)
+THCALL(TH_OPIVV2, th_vmul_vv_b, OP_SSS_B, H1, H1, H1, TH_MUL)
+THCALL(TH_OPIVV2, th_vmul_vv_h, OP_SSS_H, H2, H2, H2, TH_MUL)
+THCALL(TH_OPIVV2, th_vmul_vv_w, OP_SSS_W, H4, H4, H4, TH_MUL)
+THCALL(TH_OPIVV2, th_vmul_vv_d, OP_SSS_D, H8, H8, H8, TH_MUL)
+GEN_TH_VV(th_vmul_vv_b, 1, 1, clearb_th)
+GEN_TH_VV(th_vmul_vv_h, 2, 2, clearh_th)
+GEN_TH_VV(th_vmul_vv_w, 4, 4, clearl_th)
+GEN_TH_VV(th_vmul_vv_d, 8, 8, clearq_th)
+
+THCALL(TH_OPIVV2, th_vmulh_vv_b, OP_SSS_B, H1, H1, H1, do_mulh_b)
+THCALL(TH_OPIVV2, th_vmulh_vv_h, OP_SSS_H, H2, H2, H2, do_mulh_h)
+THCALL(TH_OPIVV2, th_vmulh_vv_w, OP_SSS_W, H4, H4, H4, do_mulh_w)
+THCALL(TH_OPIVV2, th_vmulh_vv_d, OP_SSS_D, H8, H8, H8, do_mulh_d)
+THCALL(TH_OPIVV2, th_vmulhu_vv_b, OP_UUU_B, H1, H1, H1, do_mulhu_b)
+THCALL(TH_OPIVV2, th_vmulhu_vv_h, OP_UUU_H, H2, H2, H2, do_mulhu_h)
+THCALL(TH_OPIVV2, th_vmulhu_vv_w, OP_UUU_W, H4, H4, H4, do_mulhu_w)
+THCALL(TH_OPIVV2, th_vmulhu_vv_d, OP_UUU_D, H8, H8, H8, do_mulhu_d)
+THCALL(TH_OPIVV2, th_vmulhsu_vv_b, OP_SUS_B, H1, H1, H1, do_mulhsu_b)
+THCALL(TH_OPIVV2, th_vmulhsu_vv_h, OP_SUS_H, H2, H2, H2, do_mulhsu_h)
+THCALL(TH_OPIVV2, th_vmulhsu_vv_w, OP_SUS_W, H4, H4, H4, do_mulhsu_w)
+THCALL(TH_OPIVV2, th_vmulhsu_vv_d, OP_SUS_D, H8, H8, H8, do_mulhsu_d)
+GEN_TH_VV(th_vmulh_vv_b, 1, 1, clearb_th)
+GEN_TH_VV(th_vmulh_vv_h, 2, 2, clearh_th)
+GEN_TH_VV(th_vmulh_vv_w, 4, 4, clearl_th)
+GEN_TH_VV(th_vmulh_vv_d, 8, 8, clearq_th)
+GEN_TH_VV(th_vmulhu_vv_b, 1, 1, clearb_th)
+GEN_TH_VV(th_vmulhu_vv_h, 2, 2, clearh_th)
+GEN_TH_VV(th_vmulhu_vv_w, 4, 4, clearl_th)
+GEN_TH_VV(th_vmulhu_vv_d, 8, 8, clearq_th)
+GEN_TH_VV(th_vmulhsu_vv_b, 1, 1, clearb_th)
+GEN_TH_VV(th_vmulhsu_vv_h, 2, 2, clearh_th)
+GEN_TH_VV(th_vmulhsu_vv_w, 4, 4, clearl_th)
+GEN_TH_VV(th_vmulhsu_vv_d, 8, 8, clearq_th)
+
+THCALL(TH_OPIVX2, th_vmul_vx_b, OP_SSS_B, H1, H1, TH_MUL)
+THCALL(TH_OPIVX2, th_vmul_vx_h, OP_SSS_H, H2, H2, TH_MUL)
+THCALL(TH_OPIVX2, th_vmul_vx_w, OP_SSS_W, H4, H4, TH_MUL)
+THCALL(TH_OPIVX2, th_vmul_vx_d, OP_SSS_D, H8, H8, TH_MUL)
+THCALL(TH_OPIVX2, th_vmulh_vx_b, OP_SSS_B, H1, H1, do_mulh_b)
+THCALL(TH_OPIVX2, th_vmulh_vx_h, OP_SSS_H, H2, H2, do_mulh_h)
+THCALL(TH_OPIVX2, th_vmulh_vx_w, OP_SSS_W, H4, H4, do_mulh_w)
+THCALL(TH_OPIVX2, th_vmulh_vx_d, OP_SSS_D, H8, H8, do_mulh_d)
+THCALL(TH_OPIVX2, th_vmulhu_vx_b, OP_UUU_B, H1, H1, do_mulhu_b)
+THCALL(TH_OPIVX2, th_vmulhu_vx_h, OP_UUU_H, H2, H2, do_mulhu_h)
+THCALL(TH_OPIVX2, th_vmulhu_vx_w, OP_UUU_W, H4, H4, do_mulhu_w)
+THCALL(TH_OPIVX2, th_vmulhu_vx_d, OP_UUU_D, H8, H8, do_mulhu_d)
+THCALL(TH_OPIVX2, th_vmulhsu_vx_b, OP_SUS_B, H1, H1, do_mulhsu_b)
+THCALL(TH_OPIVX2, th_vmulhsu_vx_h, OP_SUS_H, H2, H2, do_mulhsu_h)
+THCALL(TH_OPIVX2, th_vmulhsu_vx_w, OP_SUS_W, H4, H4, do_mulhsu_w)
+THCALL(TH_OPIVX2, th_vmulhsu_vx_d, OP_SUS_D, H8, H8, do_mulhsu_d)
+GEN_TH_VX(th_vmul_vx_b, 1, 1, clearb_th)
+GEN_TH_VX(th_vmul_vx_h, 2, 2, clearh_th)
+GEN_TH_VX(th_vmul_vx_w, 4, 4, clearl_th)
+GEN_TH_VX(th_vmul_vx_d, 8, 8, clearq_th)
+GEN_TH_VX(th_vmulh_vx_b, 1, 1, clearb_th)
+GEN_TH_VX(th_vmulh_vx_h, 2, 2, clearh_th)
+GEN_TH_VX(th_vmulh_vx_w, 4, 4, clearl_th)
+GEN_TH_VX(th_vmulh_vx_d, 8, 8, clearq_th)
+GEN_TH_VX(th_vmulhu_vx_b, 1, 1, clearb_th)
+GEN_TH_VX(th_vmulhu_vx_h, 2, 2, clearh_th)
+GEN_TH_VX(th_vmulhu_vx_w, 4, 4, clearl_th)
+GEN_TH_VX(th_vmulhu_vx_d, 8, 8, clearq_th)
+GEN_TH_VX(th_vmulhsu_vx_b, 1, 1, clearb_th)
+GEN_TH_VX(th_vmulhsu_vx_h, 2, 2, clearh_th)
+GEN_TH_VX(th_vmulhsu_vx_w, 4, 4, clearl_th)
+GEN_TH_VX(th_vmulhsu_vx_d, 8, 8, clearq_th)
--
2.44.0
- [PATCH 14/65] target/riscv: Add unit-stride fault-only-first instructions for XTheadVector, (continued)
- [PATCH 14/65] target/riscv: Add unit-stride fault-only-first instructions for XTheadVector, Huang Tao, 2024/04/12
- [PATCH 15/65] target/riscv: Add vector amo operations for XTheadVector, Huang Tao, 2024/04/12
- [PATCH 16/65] target/riscv: Add single-width integer add and subtract instructions for XTheadVector, Huang Tao, 2024/04/12
- [PATCH 17/65] target/riscv: Add widening integer add/subtract instructions for XTheadVector, Huang Tao, 2024/04/12
- [PATCH 18/65] target/riscv: Add integer add-with-carry/sub-with-borrow instructions for XTheadVector, Huang Tao, 2024/04/12
- [PATCH 19/65] target/riscv: Add bitwise logical instructions for XTheadVector, Huang Tao, 2024/04/12
- [PATCH 20/65] target/riscv: Add single-width bit shift instructions for XTheadVector, Huang Tao, 2024/04/12
- [PATCH 21/65] target/riscv: Add narrowing integer right shift instructions for XTheadVector, Huang Tao, 2024/04/12
- [PATCH 22/65] target/riscv: Add integer compare instructions for XTheadVector, Huang Tao, 2024/04/12
- [PATCH 23/65] target/riscv: Add integer min/max instructions for XTheadVector, Huang Tao, 2024/04/12
- [PATCH 24/65] target/riscv: Add single-width integer multiply instructions for XTheadVector,
Huang Tao <=
- [PATCH 25/65] target/riscv: Add integer divide instructions for XTheadVector, Huang Tao, 2024/04/12
- [PATCH 26/65] target/riscv: Add widening integer multiply instructions for XTheadVector, Huang Tao, 2024/04/12
- [PATCH 27/65] target/riscv: Add single-width integer multiply-add instructions for XTheadVector, Huang Tao, 2024/04/12
- [PATCH 28/65] target/riscv: Add widening integer multiply-add instructions for XTheadVector, Huang Tao, 2024/04/12
- [PATCH 29/65] target/riscv: Add integer merge and move instructions for XTheadVector, Huang Tao, 2024/04/12
- [PATCH 30/65] target/riscv: Add single-width saturating add and sub instructions for XTheadVector, Huang Tao, 2024/04/12
- [PATCH 31/65] target/riscv: Add single-width average add and sub instructions for XTheadVector, Huang Tao, 2024/04/12
- [PATCH 32/65] target/riscv: Add single-width fractional mul with rounding and saturation for XTheadVector, Huang Tao, 2024/04/12
- [PATCH 33/65] target/riscv: Add widening saturating scaled multiply-add instructions for XTheadVector, Huang Tao, 2024/04/12
- [PATCH 34/65] target/riscv: Add single-width scaling shift instructions for XTheadVector, Huang Tao, 2024/04/12