Re: [PATCH v4 5/5] target/riscv: add vector amo operations


From: Richard Henderson
Subject: Re: [PATCH v4 5/5] target/riscv: add vector amo operations
Date: Thu, 27 Feb 2020 21:38:10 -0800
User-agent: Mozilla/5.0 (X11; Linux x86_64; rv:68.0) Gecko/20100101 Thunderbird/68.4.1

On 2/25/20 2:35 AM, LIU Zhiwei wrote:
> +    if (s->sew < 2) {
> +        return false;
> +    }

This could just as easily be in amo_check?

> +
> +    if (tb_cflags(s->base.tb) & CF_PARALLEL) {
> +#ifdef CONFIG_ATOMIC64
> +        fn = fns[0][seq][s->sew - 2];
> +#else
> +        gen_helper_exit_atomic(cpu_env);
> +        s->base.is_jmp = DISAS_NORETURN;
> +        return true;
> +#endif

Why are you raising exit_atomic without first checking that s->sew == 3?  We
can do 32-bit atomic operations always.
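Something along these lines would only fall back when the host lacks 64-bit
atomics (a sketch against the structure quoted above, untested):

    if (tb_cflags(s->base.tb) & CF_PARALLEL) {
#ifndef CONFIG_ATOMIC64
        if (s->sew == 3) {
            /* No 64-bit host atomics; force the serial path. */
            gen_helper_exit_atomic(cpu_env);
            s->base.is_jmp = DISAS_NORETURN;
            return true;
        }
#endif
        fn = fns[0][seq][s->sew - 2];
    } else {
        fn = fns[1][seq][s->sew - 2];
    }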

> +    } else {
> +        fn = fns[1][seq][s->sew - 2];
> +    }
> +    if (fn == NULL) {
> +        return false;
> +    }
> +
> +    return amo_trans(a->rd, a->rs1, a->rs2, data, fn, s);
> +}
> +
> +static bool amo_check(DisasContext *s, arg_rwdvm* a)
> +{
> +    return (vext_check_isa_ill(s, RVV | RVA) &&
> +            (a->wd ? vext_check_overlap_mask(s, a->rd, a->vm) : 1) &&
> +            vext_check_reg(s, a->rd, false) &&
> +            vext_check_reg(s, a->rs2, false));
> +}

I guess the "If SEW is greater than XLEN, an illegal instruction exception is
raised" requirement is currently handled by the column of NULLs in the
!CONFIG_RISCV64 block.  But it might be better to make that check explicit and
drop the column of NULLs.

It makes sense to me to do both sew checks together, whether in amo_check or in
amo_op.
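Something like this keeps the patch's checks and adds both SEW bounds (a
sketch, not tested; the (1 << s->sew) spelling of the SEW/XLEN comparison is
just one way to write it):

static bool amo_check(DisasContext *s, arg_rwdvm* a)
{
    return (vext_check_isa_ill(s, RVV | RVA) &&
            (a->wd ? vext_check_overlap_mask(s, a->rd, a->vm) : 1) &&
            vext_check_reg(s, a->rd, false) &&
            vext_check_reg(s, a->rs2, false) &&
            ((1 << s->sew) >= 4) &&                    /* SEW >= 32 */
            ((1 << s->sew) <= sizeof(target_ulong)));  /* SEW <= XLEN */
}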

> +#define GEN_VEXT_AMO_NOATOMIC_OP(NAME, ETYPE, MTYPE, H, DO_OP, SUF)      \
> +static void vext_##NAME##_noatomic_op(void *vs3, target_ulong addr,      \
> +        uint32_t wd, uint32_t idx, CPURISCVState *env, uintptr_t retaddr)\
> +{                                                                        \
> +    ETYPE ret;                                                           \
> +    target_ulong tmp;                                                    \
> +    int mmu_idx = cpu_mmu_index(env, false);                             \
> +    tmp = cpu_ld##SUF##_mmuidx_ra(env, addr, mmu_idx, retaddr);          \
> +    ret = DO_OP((ETYPE)(MTYPE)tmp, *((ETYPE *)vs3 + H(idx)));            \
> +    cpu_st##SUF##_mmuidx_ra(env, addr, ret, mmu_idx, retaddr);           \
> +    if (wd) {                                                            \
> +        *((ETYPE *)vs3 + H(idx)) = (target_long)(MTYPE)tmp;              \

The target_long cast is wrong; should be ETYPE.

You can use cpu_ldX/stX_data (no mmu_idx or retaddr argument).  There should be
no faults, since you've already checked for read+write.
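I.e. roughly this, otherwise identical to the quoted macro (sketch):

#define GEN_VEXT_AMO_NOATOMIC_OP(NAME, ETYPE, MTYPE, H, DO_OP, SUF)      \
static void vext_##NAME##_noatomic_op(void *vs3, target_ulong addr,      \
        uint32_t wd, uint32_t idx, CPURISCVState *env)                   \
{                                                                        \
    ETYPE ret;                                                           \
    target_ulong tmp;                                                    \
    tmp = cpu_ld##SUF##_data(env, addr);                                 \
    ret = DO_OP((ETYPE)(MTYPE)tmp, *((ETYPE *)vs3 + H(idx)));            \
    cpu_st##SUF##_data(env, addr, ret);                                  \
    if (wd) {                                                            \
        *((ETYPE *)vs3 + H(idx)) = (ETYPE)(MTYPE)tmp;                    \
    }                                                                    \
}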

> +/* atomic operation for vector atomic instructions */
> +#ifndef CONFIG_USER_ONLY
> +#define GEN_VEXT_ATOMIC_OP(NAME, ETYPE, MTYPE, MOFLAG, H, AMO)           \
> +static void vext_##NAME##_atomic_op(void *vs3, target_ulong addr,        \
> +        uint32_t wd, uint32_t idx, CPURISCVState *env)                   \
> +{                                                                        \
> +    target_ulong tmp;                                                    \
> +    int mem_idx = cpu_mmu_index(env, false);                             \
> +    tmp = helper_atomic_##AMO##_le(env, addr, *((ETYPE *)vs3 + H(idx)),  \
> +            make_memop_idx(MO_ALIGN | MOFLAG, mem_idx));                 \
> +    if (wd) {                                                            \
> +        *((ETYPE *)vs3 + H(idx)) = (target_long)(MTYPE)tmp;              \
> +    }                                                                    \
> +}
> +#else
> +#define GEN_VEXT_ATOMIC_OP(NAME, ETYPE, MTYPE, MOFLAG, H, AMO)           \
> +static void vext_##NAME##_atomic_op(void *vs3, target_ulong addr,        \
> +        uint32_t wd, uint32_t idx, CPURISCVState *env)                   \
> +{                                                                        \
> +    target_ulong tmp;                                                    \
> +    tmp = helper_atomic_##AMO##_le(env, addr, *((ETYPE *)vs3 + H(idx))); \
> +    if (wd) {                                                            \
> +        *((ETYPE *)vs3 + H(idx)) = (target_long)(MTYPE)tmp;              \
> +    }                                                                    \
> +}
> +#endif

This is not right.  It is not legal to call these helpers from another helper
-- they will use the wrong GETPC() and will not unwind properly.

> +static inline void vext_amo_atomic(void *vs3, void *v0, target_ulong base,
> +        void *vs2, CPURISCVState *env, uint32_t desc,
> +        vext_get_index_addr get_index_addr,
> +        vext_amo_atomic_fn atomic_op,
> +        vext_ld_clear_elem clear_elem,
> +        uint32_t esz, uint32_t msz, uintptr_t ra)
> +{
> +    uint32_t i;
> +    target_long addr;
> +    uint32_t wd = vext_wd(desc);
> +    uint32_t vm = vext_vm(desc);
> +    uint32_t mlen = vext_mlen(desc);
> +    uint32_t vlmax = vext_maxsz(desc) / esz;
> +
> +    for (i = 0; i < env->vl; i++) {
> +        if (!vm && !vext_elem_mask(v0, mlen, i)) {
> +            continue;
> +        }
> +        probe_read_access(env, get_index_addr(base, i, vs2), msz, ra);
> +        probe_write_access(env, get_index_addr(base, i, vs2), msz, ra);
> +    }

You probably need to check for an aligned address here too, and do it first so
that an unaligned fault has priority over an invalid page fault.
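E.g. at the top of the loop body, before the probes (a sketch; I'd expect the
store/AMO misaligned exception here, but double-check the priority against the
spec):

        addr = get_index_addr(base, i, vs2);
        if (addr & (msz - 1)) {
            /* Misaligned AMO: raise before any page fault. */
            riscv_raise_exception(env, RISCV_EXCP_STORE_AMO_ADDR_MIS, ra);
        }
        probe_read_access(env, addr, msz, ra);
        probe_write_access(env, addr, msz, ra);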

The missing aligned address check is the only remaining exception that the
helper_atomic_* functions would raise, since you have properly checked for
read+write.  So it might be possible to get away with using the helpers, but I
don't like it.

But I do think it would be better to write your own helpers for the atomic
paths.  They need not check quite so much, since we have already done the
validation above.  You pretty much only need to use tlb_vaddr_to_host.
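For a 32-bit add that might look like this (a rough sketch with an
illustrative name; it assumes the probe/alignment loop has already run, uses
the H4 index macro from the series, and a real version still has to handle
tlb_vaddr_to_host returning NULL, e.g. for MMIO):

static void vext_amoaddw_atomic_op(void *vs3, target_ulong addr,
                                   uint32_t wd, uint32_t idx,
                                   CPURISCVState *env)
{
    int mmu_idx = cpu_mmu_index(env, false);
    /* Page is already probed for read+write, and addr is aligned. */
    uint32_t *host = tlb_vaddr_to_host(env, addr, MMU_DATA_STORE, mmu_idx);
    uint32_t old = atomic_fetch_add(host, *((uint32_t *)vs3 + H4(idx)));
    if (wd) {
        *((uint32_t *)vs3 + H4(idx)) = old;
    }
}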

If that gets too ugly, we can talk about rearranging
accel/tcg/atomic_template.h so that it could be reused.

Alternately, we could simply *always* use the non-atomic helpers, and raise
exit_atomic if PARALLEL.
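That would collapse the translator-side selection to something like (sketch):

    if (tb_cflags(s->base.tb) & CF_PARALLEL) {
        gen_helper_exit_atomic(cpu_env);
        s->base.is_jmp = DISAS_NORETURN;
        return true;
    }
    fn = fns[seq][s->sew - 2];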

> +static inline void vext_amo_noatomic(void *vs3, void *v0, target_ulong base,
> +        void *vs2, CPURISCVState *env, uint32_t desc,
> +        vext_get_index_addr get_index_addr,
> +        vext_amo_noatomic_fn noatomic_op,
> +        vext_ld_clear_elem clear_elem,
> +        uint32_t esz, uint32_t msz, uintptr_t ra)

Without the retaddr argument to the noatomic_fn, as described above,
vext_amo_noatomic and vext_amo_atomic are identical.


r~


