From: frank.chang
Subject: [RFC v4 24/70] target/riscv: rvv-1.0: update vext_max_elems() for load/store insns
Date: Mon, 17 Aug 2020 16:49:09 +0800
From: Frank Chang <frank.chang@sifive.com>
Unlike other vector instructions, vector load/store instructions compute
their maximum number of elements from EMUL, which is derived from the EEW
encoded in the instruction rather than from vtype's LMUL. All other vector
instructions keep returning VLMAX as the maximum vector size.
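As a standalone sketch of the arithmetic (assumed helper name, not code
from this patch): EMUL = (EEW / SEW) * LMUL, carried in log2 form:

    #include <assert.h>
    #include <stdint.h>

    /* eew and sew are element widths in bits; lmul is log2(LMUL). */
    static int8_t emul_log2(uint32_t eew, uint32_t sew, int8_t lmul)
    {
        return (int8_t)(__builtin_ctz(eew) - __builtin_ctz(sew) + lmul);
    }

    int main(void)
    {
        /*
         * vle32.v with SEW=16, LMUL=2: log2(EMUL) = 5 - 4 + 1 = 2, EMUL = 4.
         * With VLEN=128 the load covers VLEN * EMUL / EEW = 128 * 4 / 32
         * = 16 = VLMAX elements; deriving it from vtype's LMUL and the
         * 32-bit access size instead (128 * 2 / 32 = 8) would be off by
         * the EEW/SEW ratio.
         */
        assert(emul_log2(32, 16, 1) == 2);
        return 0;
    }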
Signed-off-by: Frank Chang <frank.chang@sifive.com>
---
target/riscv/insn_trans/trans_rvv.inc.c | 57 +++++++++++-----
target/riscv/vector_helper.c | 90 ++++++++++++++-----------
2 files changed, 88 insertions(+), 59 deletions(-)
diff --git a/target/riscv/insn_trans/trans_rvv.inc.c b/target/riscv/insn_trans/trans_rvv.inc.c
index 6a2f175b50a..334e1fc123b 100644
--- a/target/riscv/insn_trans/trans_rvv.inc.c
+++ b/target/riscv/insn_trans/trans_rvv.inc.c
@@ -583,11 +583,17 @@ static bool vext_check_isa_ill(DisasContext *s)
static bool trans_##NAME(DisasContext *s, arg_##ARGTYPE * a) \
{ \
if (CHECK(s, a, EEW)) { \
- return OP(s, a, SEQ); \
+ return OP(s, a, EEW, SEQ); \
} \
return false; \
}
+static uint8_t vext_get_emul(DisasContext *s, uint8_t eew)
+{
+ int8_t emul = ctzl(eew) - (s->sew + 3) + s->lmul;
+ return emul < 0 ? 0 : emul;
+}
+
/*
*** unit stride load and store
*/
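A note on vext_get_emul() above: a negative result corresponds to a
fractional EMUL, which this patch clamps to 0 (i.e. EMUL = 1); fractional
LMUL is taken into the max-elements calculation by patch 25/70 of this
series. A small hypothetical check of the behaviour (standalone, with the
compiler builtin standing in for ctzl()):

    #include <assert.h>
    #include <stdint.h>

    /* Mirror of vext_get_emul(): sew_field = log2(SEW/8), lmul = log2(LMUL). */
    static uint8_t get_emul(uint8_t sew_field, int8_t lmul, uint32_t eew)
    {
        int8_t emul = (int8_t)__builtin_ctz(eew) - (sew_field + 3) + lmul;
        return emul < 0 ? 0 : (uint8_t)emul;
    }

    int main(void)
    {
        assert(get_emul(0 /* SEW=8 */, 1 /* LMUL=2 */, 16) == 2);  /* EMUL = 4 */
        assert(get_emul(2 /* SEW=32 */, 0 /* LMUL=1 */, 8) == 0);  /* 1/4, clamped */
        return 0;
    }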
@@ -611,7 +617,7 @@ static bool ldst_us_trans(uint32_t vd, uint32_t rs1, uint32_t data,
/*
* As simd_desc supports at most 256 bytes, and in this implementation,
- * the max vector group length is 2048 bytes. So split it into two parts.
+ * the max vector group length is 1024 bytes. So split it into two parts.
*
* The first part is vlen in bytes, encoded in maxsz of simd_desc.
* The second part is lmul, encoded in data of simd_desc.
@@ -635,7 +641,7 @@ static bool ldst_us_trans(uint32_t vd, uint32_t rs1, uint32_t data,
return true;
}
-static bool ld_us_op(DisasContext *s, arg_r2nfvm *a, uint8_t seq)
+static bool ld_us_op(DisasContext *s, arg_r2nfvm *a, uint8_t eew, uint8_t seq)
{
uint32_t data = 0;
gen_helper_ldst_us *fn;
@@ -653,8 +659,14 @@ static bool ld_us_op(DisasContext *s, arg_r2nfvm *a, uint8_t seq)
return false;
}
+ /*
+ * Vector load/store instructions have the EEW encoded
+ * directly in the instructions. The maximum vector size is
+ * calculated with EMUL rather than LMUL.
+ */
+ uint8_t emul = vext_get_emul(s, eew);
data = FIELD_DP32(data, VDATA, VM, a->vm);
- data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
+ data = FIELD_DP32(data, VDATA, LMUL, emul);
data = FIELD_DP32(data, VDATA, NF, a->nf);
return ldst_us_trans(a->rd, a->rs1, data, fn, s, false);
}
@@ -671,7 +683,7 @@ GEN_VEXT_TRANS(vle16_v, 16, 1, r2nfvm, ld_us_op, ld_us_check)
GEN_VEXT_TRANS(vle32_v, 32, 2, r2nfvm, ld_us_op, ld_us_check)
GEN_VEXT_TRANS(vle64_v, 64, 3, r2nfvm, ld_us_op, ld_us_check)
-static bool st_us_op(DisasContext *s, arg_r2nfvm *a, uint8_t seq)
+static bool st_us_op(DisasContext *s, arg_r2nfvm *a, uint8_t eew, uint8_t seq)
{
uint32_t data = 0;
gen_helper_ldst_us *fn;
@@ -689,8 +701,9 @@ static bool st_us_op(DisasContext *s, arg_r2nfvm *a, uint8_t seq)
return false;
}
+ uint8_t emul = vext_get_emul(s, eew);
data = FIELD_DP32(data, VDATA, VM, a->vm);
- data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
+ data = FIELD_DP32(data, VDATA, LMUL, emul);
data = FIELD_DP32(data, VDATA, NF, a->nf);
return ldst_us_trans(a->rd, a->rs1, data, fn, s, true);
}
@@ -749,7 +762,8 @@ static bool ldst_stride_trans(uint32_t vd, uint32_t rs1, uint32_t rs2,
return true;
}
-static bool ld_stride_op(DisasContext *s, arg_rnfvm *a, uint8_t seq)
+static bool ld_stride_op(DisasContext *s, arg_rnfvm *a, uint8_t eew,
+ uint8_t seq)
{
uint32_t data = 0;
gen_helper_ldst_stride *fn;
@@ -763,8 +777,9 @@ static bool ld_stride_op(DisasContext *s, arg_rnfvm *a, uint8_t seq)
return false;
}
+ uint8_t emul = vext_get_emul(s, eew);
data = FIELD_DP32(data, VDATA, VM, a->vm);
- data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
+ data = FIELD_DP32(data, VDATA, LMUL, emul);
data = FIELD_DP32(data, VDATA, NF, a->nf);
return ldst_stride_trans(a->rd, a->rs1, a->rs2, data, fn, s, false);
}
@@ -781,7 +796,8 @@ GEN_VEXT_TRANS(vlse16_v, 16, 1, rnfvm, ld_stride_op, ld_stride_check)
GEN_VEXT_TRANS(vlse32_v, 32, 2, rnfvm, ld_stride_op, ld_stride_check)
GEN_VEXT_TRANS(vlse64_v, 64, 3, rnfvm, ld_stride_op, ld_stride_check)
-static bool st_stride_op(DisasContext *s, arg_rnfvm *a, uint8_t seq)
+static bool st_stride_op(DisasContext *s, arg_rnfvm *a, uint8_t eew,
+ uint8_t seq)
{
uint32_t data = 0;
gen_helper_ldst_stride *fn;
@@ -791,8 +807,9 @@ static bool st_stride_op(DisasContext *s, arg_rnfvm *a, uint8_t seq)
gen_helper_vsse32_v, gen_helper_vsse64_v
};
+ uint8_t emul = vext_get_emul(s, eew);
data = FIELD_DP32(data, VDATA, VM, a->vm);
- data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
+ data = FIELD_DP32(data, VDATA, LMUL, emul);
data = FIELD_DP32(data, VDATA, NF, a->nf);
fn = fns[seq];
if (fn == NULL) {
@@ -856,7 +873,7 @@ static bool ldst_index_trans(uint32_t vd, uint32_t rs1, uint32_t vs2,
return true;
}
-static bool ld_index_op(DisasContext *s, arg_rnfvm *a, uint8_t seq)
+static bool ld_index_op(DisasContext *s, arg_rnfvm *a, uint8_t eew, uint8_t seq)
{
uint32_t data = 0;
gen_helper_ldst_index *fn;
@@ -889,8 +906,9 @@ static bool ld_index_op(DisasContext *s, arg_rnfvm *a, uint8_t seq)
fn = fns[seq][s->sew];
+ uint8_t emul = vext_get_emul(s, 1 << (s->sew + 3));
data = FIELD_DP32(data, VDATA, VM, a->vm);
- data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
+ data = FIELD_DP32(data, VDATA, LMUL, emul);
data = FIELD_DP32(data, VDATA, NF, a->nf);
return ldst_index_trans(a->rd, a->rs1, a->rs2, data, fn, s, false);
}
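A note on the indexed forms above: the EEW encoded in a vlxei/vsxei
instruction describes the index vector, while the data register group is
accessed with EEW = SEW; that is why the EMUL written into VDATA is derived
from 1 << (s->sew + 3) rather than from eew. Purely for illustration (the
index-side value is hypothetical and not needed by this patch):

    uint8_t data_emul  = vext_get_emul(s, 1 << (s->sew + 3)); /* data EEW = SEW */
    uint8_t index_emul = vext_get_emul(s, eew);  /* index EEW from the insn */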
@@ -907,7 +925,7 @@ GEN_VEXT_TRANS(vlxei16_v, 16, 1, rnfvm, ld_index_op, ld_index_check)
GEN_VEXT_TRANS(vlxei32_v, 32, 2, rnfvm, ld_index_op, ld_index_check)
GEN_VEXT_TRANS(vlxei64_v, 64, 3, rnfvm, ld_index_op, ld_index_check)
-static bool st_index_op(DisasContext *s, arg_rnfvm *a, uint8_t seq)
+static bool st_index_op(DisasContext *s, arg_rnfvm *a, uint8_t eew, uint8_t seq)
{
uint32_t data = 0;
gen_helper_ldst_index *fn;
@@ -940,8 +958,9 @@ static bool st_index_op(DisasContext *s, arg_rnfvm *a, uint8_t seq)
fn = fns[seq][s->sew];
+ uint8_t emul = vext_get_emul(s, 1 << (s->sew + 3));
data = FIELD_DP32(data, VDATA, VM, a->vm);
- data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
+ data = FIELD_DP32(data, VDATA, LMUL, emul);
data = FIELD_DP32(data, VDATA, NF, a->nf);
return ldst_index_trans(a->rd, a->rs1, a->rs2, data, fn, s, true);
}
@@ -991,7 +1010,7 @@ static bool ldff_trans(uint32_t vd, uint32_t rs1, uint32_t data,
return true;
}
-static bool ldff_op(DisasContext *s, arg_r2nfvm *a, uint8_t seq)
+static bool ldff_op(DisasContext *s, arg_r2nfvm *a, uint8_t eew, uint8_t seq)
{
uint32_t data = 0;
gen_helper_ldst_us *fn;
@@ -1005,8 +1024,9 @@ static bool ldff_op(DisasContext *s, arg_r2nfvm *a, uint8_t seq)
return false;
}
+ uint8_t emul = vext_get_emul(s, eew);
data = FIELD_DP32(data, VDATA, VM, a->vm);
- data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
+ data = FIELD_DP32(data, VDATA, LMUL, emul);
data = FIELD_DP32(data, VDATA, NF, a->nf);
return ldff_trans(a->rd, a->rs1, data, fn, s);
}
@@ -1127,7 +1147,7 @@ static bool amo_trans(uint32_t vd, uint32_t rs1, uint32_t vs2,
return true;
}
-static bool amo_op(DisasContext *s, arg_rwdvm *a, uint8_t seq)
+static bool amo_op(DisasContext *s, arg_rwdvm *a, uint8_t eew, uint8_t seq)
{
uint32_t data = 0;
gen_helper_amo *fn;
@@ -1187,8 +1207,9 @@ static bool amo_op(DisasContext *s, arg_rwdvm *a, uint8_t seq)
return false;
}
+ uint8_t emul = vext_get_emul(s, eew);
data = FIELD_DP32(data, VDATA, VM, a->vm);
- data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
+ data = FIELD_DP32(data, VDATA, LMUL, emul);
data = FIELD_DP32(data, VDATA, WD, a->wd);
return amo_trans(a->rd, a->rs1, a->rs2, data, fn, s);
}
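In the vector_helper.c changes below, esz switches from the element size in
bytes to its log2 (ctzl(sizeof(ETYPE)), using ctzl() from the newly included
qemu/host-utils.h), so byte counts become shifts: nf * esz -> nf << esz,
k * esz -> k << esz. A trivial standalone check of the equivalence (with the
compiler builtin standing in for QEMU's ctzl()):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t nf = 3;
        /* Multiplying by sizeof(ETYPE) == shifting left by its log2. */
        assert((nf << __builtin_ctzl(sizeof(uint32_t))) == nf * sizeof(uint32_t));
        return 0;
    }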
diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
index f3da2e7fbbd..f802e8c9c05 100644
--- a/target/riscv/vector_helper.c
+++ b/target/riscv/vector_helper.c
@@ -17,6 +17,7 @@
*/
#include "qemu/osdep.h"
+#include "qemu/host-utils.h"
#include "cpu.h"
#include "exec/memop.h"
#include "exec/exec-all.h"
@@ -121,14 +122,21 @@ static uint32_t vext_wd(uint32_t desc)
}
/*
- * Get vector group length in bytes. Its range is [64, 2048].
+ * Get the maximum number of elements that can be operated on.
*
- * As simd_desc support at most 256, the max vlen is 512 bits.
- * So vlen in bytes is encoded as maxsz.
+ * esz: log2 of element size in bytes.
*/
-static inline uint32_t vext_maxsz(uint32_t desc)
+static inline uint32_t vext_max_elems(uint32_t desc, uint32_t esz)
{
- return simd_maxsz(desc) << vext_lmul(desc);
+ /*
+ * As simd_desc supports at most 256 bytes, the max vlen is 256 bits,
+ * so vlen in bytes (vlenb) is encoded as maxsz.
+ */
+ uint32_t vlenb = simd_maxsz(desc);
+
+ /* Return VLMAX */
+ int scale = vext_lmul(desc) - esz;
+ return scale < 0 ? vlenb >> -scale : vlenb << scale;
}
/*
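For reference, a standalone sketch of the vext_max_elems() computation
above, with assumed numbers: VLEN=128 gives vlenb = 16, so an encoded
lmul of 2 (EMUL = 4) with esz = 2 (4-byte elements) yields scale = 0 and
max_elems = 16 = VLEN * EMUL / EEW:

    #include <assert.h>
    #include <stdint.h>

    static uint32_t max_elems(uint32_t vlenb, int32_t lmul, uint32_t esz)
    {
        int32_t scale = lmul - (int32_t)esz;
        return scale < 0 ? vlenb >> -scale : vlenb << scale;
    }

    int main(void)
    {
        assert(max_elems(16, 2, 2) == 16);  /* VLEN=128, EMUL=4, EEW=32 */
        assert(max_elems(16, 0, 3) == 2);   /* VLEN=128, EMUL=1, EEW=64 */
        return 0;
    }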
@@ -223,14 +231,14 @@ vext_ldst_stride(void *vd, void *v0, target_ulong base,
{
uint32_t i, k;
uint32_t nf = vext_nf(desc);
- uint32_t vlmax = vext_maxsz(desc) / esz;
+ uint32_t max_elems = vext_max_elems(desc, esz);
/* probe every access*/
for (i = 0; i < env->vl; i++) {
if (!vm && !vext_elem_mask(v0, i)) {
continue;
}
- probe_pages(env, base + stride * i, nf * esz, ra, access_type);
+ probe_pages(env, base + stride * i, nf << esz, ra, access_type);
}
/* do real access */
for (i = 0; i < env->vl; i++) {
@@ -239,8 +247,8 @@ vext_ldst_stride(void *vd, void *v0, target_ulong base,
continue;
}
while (k < nf) {
- target_ulong addr = base + stride * i + k * esz;
- ldst_elem(env, addr, i + k * vlmax, vd, ra);
+ target_ulong addr = base + stride * i + (k << esz);
+ ldst_elem(env, addr, i + k * max_elems, vd, ra);
k++;
}
}
@@ -253,7 +261,7 @@ void HELPER(NAME)(void *vd, void * v0, target_ulong base, \
{ \
uint32_t vm = vext_vm(desc); \
vext_ldst_stride(vd, v0, base, stride, env, desc, vm, LOAD_FN, \
- sizeof(ETYPE), GETPC(), MMU_DATA_LOAD); \
+ ctzl(sizeof(ETYPE)), GETPC(), MMU_DATA_LOAD); \
}
GEN_VEXT_LD_STRIDE(vlse8_v, int8_t, lde_b)
@@ -268,7 +276,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong base, \
{ \
uint32_t vm = vext_vm(desc); \
vext_ldst_stride(vd, v0, base, stride, env, desc, vm, STORE_FN, \
- sizeof(ETYPE), GETPC(), MMU_DATA_STORE); \
+ ctzl(sizeof(ETYPE)), GETPC(), MMU_DATA_STORE); \
}
GEN_VEXT_ST_STRIDE(vsse8_v, int8_t, ste_b)
@@ -288,16 +296,16 @@ vext_ldst_us(void *vd, target_ulong base, CPURISCVState *env, uint32_t desc,
{
uint32_t i, k;
uint32_t nf = vext_nf(desc);
- uint32_t vlmax = vext_maxsz(desc) / esz;
+ uint32_t max_elems = vext_max_elems(desc, esz);
/* probe every access */
- probe_pages(env, base, env->vl * nf * esz, ra, access_type);
+ probe_pages(env, base, env->vl * (nf << esz), ra, access_type);
/* load bytes from guest memory */
for (i = 0; i < env->vl; i++) {
k = 0;
while (k < nf) {
- target_ulong addr = base + (i * nf + k) * esz;
- ldst_elem(env, addr, i + k * vlmax, vd, ra);
+ target_ulong addr = base + ((i * nf + k) << esz);
+ ldst_elem(env, addr, i + k * max_elems, vd, ra);
k++;
}
}
@@ -312,16 +320,16 @@ vext_ldst_us(void *vd, target_ulong base, CPURISCVState *env, uint32_t desc,
void HELPER(NAME##_mask)(void *vd, void *v0, target_ulong base, \
CPURISCVState *env, uint32_t desc) \
{ \
- uint32_t stride = vext_nf(desc) * sizeof(ETYPE); \
+ uint32_t stride = vext_nf(desc) << ctzl(sizeof(ETYPE)); \
vext_ldst_stride(vd, v0, base, stride, env, desc, false, LOAD_FN, \
- sizeof(ETYPE), GETPC(), MMU_DATA_LOAD); \
+ ctzl(sizeof(ETYPE)), GETPC(), MMU_DATA_LOAD); \
} \
\
void HELPER(NAME)(void *vd, void *v0, target_ulong base, \
CPURISCVState *env, uint32_t desc) \
{ \
vext_ldst_us(vd, base, env, desc, LOAD_FN, \
- sizeof(ETYPE), GETPC(), MMU_DATA_LOAD); \
+ ctzl(sizeof(ETYPE)), GETPC(), MMU_DATA_LOAD); \
}
GEN_VEXT_LD_US(vle8_v, int8_t, lde_b)
@@ -333,16 +341,16 @@ GEN_VEXT_LD_US(vle64_v, int64_t, lde_d)
void HELPER(NAME##_mask)(void *vd, void *v0, target_ulong base, \
CPURISCVState *env, uint32_t desc) \
{ \
- uint32_t stride = vext_nf(desc) * sizeof(ETYPE); \
+ uint32_t stride = vext_nf(desc) << ctzl(sizeof(ETYPE)); \
vext_ldst_stride(vd, v0, base, stride, env, desc, false, STORE_FN, \
- sizeof(ETYPE), GETPC(), MMU_DATA_STORE); \
+ ctzl(sizeof(ETYPE)), GETPC(), MMU_DATA_STORE); \
} \
\
void HELPER(NAME)(void *vd, void *v0, target_ulong base, \
CPURISCVState *env, uint32_t desc) \
{ \
vext_ldst_us(vd, base, env, desc, STORE_FN, \
- sizeof(ETYPE), GETPC(), MMU_DATA_STORE); \
+ ctzl(sizeof(ETYPE)), GETPC(), MMU_DATA_STORE); \
}
GEN_VEXT_ST_US(vse8_v, int8_t, ste_b)
@@ -378,14 +386,14 @@ vext_ldst_index(void *vd, void *v0, target_ulong base,
uint32_t i, k;
uint32_t nf = vext_nf(desc);
uint32_t vm = vext_vm(desc);
- uint32_t vlmax = vext_maxsz(desc) / esz;
+ uint32_t max_elems = vext_max_elems(desc, esz);
/* probe every access*/
for (i = 0; i < env->vl; i++) {
if (!vm && !vext_elem_mask(v0, i)) {
continue;
}
- probe_pages(env, get_index_addr(base, i, vs2), nf * esz, ra,
+ probe_pages(env, get_index_addr(base, i, vs2), nf << esz, ra,
access_type);
}
/* load bytes from guest memory */
@@ -395,8 +403,8 @@ vext_ldst_index(void *vd, void *v0, target_ulong base,
continue;
}
while (k < nf) {
- abi_ptr addr = get_index_addr(base, i, vs2) + k * esz;
- ldst_elem(env, addr, i + k * vlmax, vd, ra);
+ abi_ptr addr = get_index_addr(base, i, vs2) + (k << esz);
+ ldst_elem(env, addr, i + k * max_elems, vd, ra);
k++;
}
}
@@ -407,7 +415,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong base, \
void *vs2, CPURISCVState *env, uint32_t desc) \
{ \
vext_ldst_index(vd, v0, base, vs2, env, desc, INDEX_FN, \
- LOAD_FN, sizeof(ETYPE), GETPC(), MMU_DATA_LOAD); \
+ LOAD_FN, ctzl(sizeof(ETYPE)), GETPC(), MMU_DATA_LOAD); \
}
GEN_VEXT_LD_INDEX(vlxei8_8_v, int8_t, idx_b, lde_b)
@@ -432,7 +440,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong base, \
void *vs2, CPURISCVState *env, uint32_t desc) \
{ \
vext_ldst_index(vd, v0, base, vs2, env, desc, INDEX_FN, \
- STORE_FN, sizeof(ETYPE), \
+ STORE_FN, ctzl(sizeof(ETYPE)), \
GETPC(), MMU_DATA_STORE); \
}
@@ -466,7 +474,7 @@ vext_ldff(void *vd, void *v0, target_ulong base,
uint32_t i, k, vl = 0;
uint32_t nf = vext_nf(desc);
uint32_t vm = vext_vm(desc);
- uint32_t vlmax = vext_maxsz(desc) / esz;
+ uint32_t max_elems = vext_max_elems(desc, esz);
target_ulong addr, offset, remain;
/* probe every access*/
@@ -474,24 +482,24 @@ vext_ldff(void *vd, void *v0, target_ulong base,
if (!vm && !vext_elem_mask(v0, i)) {
continue;
}
- addr = base + nf * i * esz;
+ addr = base + i * (nf << esz);
if (i == 0) {
- probe_pages(env, addr, nf * esz, ra, MMU_DATA_LOAD);
+ probe_pages(env, addr, nf << esz, ra, MMU_DATA_LOAD);
} else {
/* if it triggers an exception, no need to check watchpoint */
- remain = nf * esz;
+ remain = nf << esz;
while (remain > 0) {
offset = -(addr | TARGET_PAGE_MASK);
host = tlb_vaddr_to_host(env, addr, MMU_DATA_LOAD,
cpu_mmu_index(env, false));
if (host) {
#ifdef CONFIG_USER_ONLY
- if (page_check_range(addr, nf * esz, PAGE_READ) < 0) {
+ if (page_check_range(addr, nf << esz, PAGE_READ) < 0) {
vl = i;
goto ProbeSuccess;
}
#else
- probe_pages(env, addr, nf * esz, ra, MMU_DATA_LOAD);
+ probe_pages(env, addr, nf << esz, ra, MMU_DATA_LOAD);
#endif
} else {
vl = i;
@@ -516,8 +524,8 @@ ProbeSuccess:
continue;
}
while (k < nf) {
- target_ulong addr = base + (i * nf + k) * esz;
- ldst_elem(env, addr, i + k * vlmax, vd, ra);
+ target_ulong addr = base + ((i * nf + k) << esz);
+ ldst_elem(env, addr, i + k * max_elems, vd, ra);
k++;
}
}
@@ -528,7 +536,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong base, \
CPURISCVState *env, uint32_t desc) \
{ \
vext_ldff(vd, v0, base, env, desc, LOAD_FN, \
- sizeof(ETYPE), GETPC()); \
+ ctzl(sizeof(ETYPE)), GETPC()); \
}
GEN_VEXT_LDFF(vle8ff_v, int8_t, lde_b)
@@ -741,7 +749,7 @@ void HELPER(NAME)(void *vs3, void *v0, target_ulong base, \
{ \
vext_amo_noatomic(vs3, v0, base, vs2, env, desc, \
INDEX_FN, vext_##NAME##_noatomic_op, \
- sizeof(ETYPE), GETPC()); \
+ ctzl(sizeof(ETYPE)), GETPC()); \
}
GEN_VEXT_AMO(vamoswapei8_32_v, int32_t, idx_b)
@@ -1227,7 +1235,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, \
void *vs2, CPURISCVState *env, uint32_t desc) \
{ \
uint32_t vl = env->vl; \
- uint32_t vlmax = vext_maxsz(desc) / sizeof(ETYPE); \
+ uint32_t vlmax = vext_max_elems(desc, ctzl(sizeof(ETYPE))); \
uint32_t i; \
\
for (i = 0; i < vl; i++) { \
@@ -3888,7 +3896,7 @@ void HELPER(NAME)(void *vd, void *v0, uint64_t s1, void *vs2, \
{ \
uint32_t vm = vext_vm(desc); \
uint32_t vl = env->vl; \
- uint32_t vlmax = vext_maxsz(desc) / sizeof(ETYPE); \
+ uint32_t vlmax = vext_max_elems(desc, ctzl(sizeof(ETYPE))); \
uint32_t i; \
\
for (i = 0; i < vl; i++) { \
@@ -4693,7 +4701,7 @@ GEN_VEXT_VSLIDE1DOWN_VX(vslide1down_vx_d, uint64_t, H8)
void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
CPURISCVState *env, uint32_t desc) \
{ \
- uint32_t vlmax = env_archcpu(env)->cfg.vlen; \
+ uint32_t vlmax = vext_max_elems(desc, ctzl(sizeof(ETYPE))); \
uint32_t vm = vext_vm(desc); \
uint32_t vl = env->vl; \
uint32_t index, i; \
@@ -4721,7 +4729,7 @@ GEN_VEXT_VRGATHER_VV(vrgather_vv_d, uint64_t, H8)
void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
CPURISCVState *env, uint32_t desc) \
{ \
- uint32_t vlmax = env_archcpu(env)->cfg.vlen; \
+ uint32_t vlmax = vext_max_elems(desc, ctzl(sizeof(ETYPE))); \
uint32_t vm = vext_vm(desc); \
uint32_t vl = env->vl; \
uint32_t index = s1, i; \
--
2.17.1