From: Alex Bennée
Subject: Re: [Qemu-devel] [RFC v6 07/14] target-arm: translate: Use ld/st excl for atomic insns
Date: Wed, 06 Jan 2016 17:11:02 +0000
User-agent: mu4e 0.9.15; emacs 25.1.50.8

Alvise Rigo <address@hidden> writes:

> Use the new LL/SC runtime helpers to handle the ARM atomic
> instructions in softmmu_llsc_template.h.
>
> In general, the helper generators
> gen_helper_{ldlink,stcond}_aa32_i{8,16,32,64}() call the functions
> helper_{le,be}_{ldlink,stcond}{ub,uw,ul,q}_mmu() implemented in
> softmmu_llsc_template.h.
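
I assume the gen_helper_* generators above come from DEF_HELPER
declarations earlier in the series. Inferring the signatures from the
call sites below, the i32 pair would look something like this (a
sketch only, not checked against the other patches):

    DEF_HELPER_3(ldlink_aa32_i32, i32, env, i32, i32)
    DEF_HELPER_4(stcond_aa32_i32, i32, env, i32, i32, i32)

with the generated gen_helper_ldlink_aa32_i32(val, cpu_env, addr,
mem_idx) then dispatching to the matching helper_*_mmu() routine in
softmmu_llsc_template.h.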
>
> Suggested-by: Jani Kokkonen <address@hidden>
> Suggested-by: Claudio Fontana <address@hidden>
> Signed-off-by: Alvise Rigo <address@hidden>
> ---
>  target-arm/translate.c | 101 +++++++++++++++++++++++++++++++++++++++++++++++--
>  1 file changed, 97 insertions(+), 4 deletions(-)
>
> diff --git a/target-arm/translate.c b/target-arm/translate.c
> index 5d22879..e88d8a3 100644
> --- a/target-arm/translate.c
> +++ b/target-arm/translate.c
> @@ -64,8 +64,10 @@ TCGv_ptr cpu_env;
>  static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
>  static TCGv_i32 cpu_R[16];
>  TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
> +#ifndef CONFIG_TCG_USE_LDST_EXCL
>  TCGv_i64 cpu_exclusive_addr;
>  TCGv_i64 cpu_exclusive_val;
> +#endif
>  #ifdef CONFIG_USER_ONLY
>  TCGv_i64 cpu_exclusive_test;
>  TCGv_i32 cpu_exclusive_info;
> @@ -98,10 +100,12 @@ void arm_translate_init(void)
>      cpu_VF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, VF), "VF");
>      cpu_ZF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, ZF), "ZF");
>
> +#ifndef CONFIG_TCG_USE_LDST_EXCL
>      cpu_exclusive_addr = tcg_global_mem_new_i64(TCG_AREG0,
>          offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
>      cpu_exclusive_val = tcg_global_mem_new_i64(TCG_AREG0,
>          offsetof(CPUARMState, exclusive_val), "exclusive_val");
> +#endif
>  #ifdef CONFIG_USER_ONLY
>      cpu_exclusive_test = tcg_global_mem_new_i64(TCG_AREG0,
>          offsetof(CPUARMState, exclusive_test), "exclusive_test");
> @@ -7414,15 +7418,59 @@ static void gen_logicq_cc(TCGv_i32 lo, TCGv_i32 hi)
>      tcg_gen_or_i32(cpu_ZF, lo, hi);
>  }
>
> -/* Load/Store exclusive instructions are implemented by remembering
> +/* If the softmmu is enabled, the translation of Load/Store exclusive
> + * instructions will rely on the gen_helper_{ldlink,stcond} helpers,
> + * offloading most of the work to the softmmu_llsc_template.h functions.
> +
> +   Otherwise, these instructions are implemented by remembering
>     the value/address loaded, and seeing if these are the same
>     when the store is performed. This should be sufficient to implement
>     the architecturally mandated semantics, and avoids having to monitor
>     regular stores.
>
> -   In system emulation mode only one CPU will be running at once, so
> -   this sequence is effectively atomic.  In user emulation mode we
> -   throw an exception and handle the atomic operation elsewhere.  */
> +   In user emulation mode we throw an exception and handle the atomic
> +   operation elsewhere.  */
> +#ifdef CONFIG_TCG_USE_LDST_EXCL
> +static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
> +                               TCGv_i32 addr, int size)
> +{
> +    TCGv_i32 tmp = tcg_temp_new_i32();
> +    TCGv_i32 mem_idx = tcg_temp_new_i32();
> +
> +    tcg_gen_movi_i32(mem_idx, get_mem_index(s));
> +
> +    if (size != 3) {
> +        switch (size) {
> +        case 0:
> +            gen_helper_ldlink_aa32_i8(tmp, cpu_env, addr, mem_idx);
> +            break;
> +        case 1:
> +            gen_helper_ldlink_aa32_i16(tmp, cpu_env, addr, mem_idx);
> +            break;
> +        case 2:
> +            gen_helper_ldlink_aa32_i32(tmp, cpu_env, addr, mem_idx);
> +            break;
> +        default:
> +            abort();
> +        }
> +
> +        store_reg(s, rt, tmp);
> +    } else {
> +        TCGv_i64 tmp64 = tcg_temp_new_i64();
> +        TCGv_i32 tmph = tcg_temp_new_i32();
> +
> +        gen_helper_ldlink_aa32_i64(tmp64, cpu_env, addr, mem_idx);
> +        tcg_gen_extr_i64_i32(tmp, tmph, tmp64);
> +
> +        store_reg(s, rt, tmp);
> +        store_reg(s, rt2, tmph);
> +
> +        tcg_temp_free_i64(tmp64);
> +    }
> +
> +    tcg_temp_free_i32(mem_idx);
> +}
> +#else
>  static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
>                                 TCGv_i32 addr, int size)
>  {
> @@ -7461,10 +7509,14 @@ static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
>      store_reg(s, rt, tmp);
>      tcg_gen_extu_i32_i64(cpu_exclusive_addr, addr);
>  }
> +#endif
>
>  static void gen_clrex(DisasContext *s)
>  {
> +#ifdef CONFIG_TCG_USE_LDST_EXCL

I don't think it would be correct to ignore clrex in softmmu mode.
If the code path has used it, we may well be creating slow-path
transitions for no reason.
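
Something that actually drops the CPU's exclusive state would seem
more in keeping with the rest of the series, e.g. (hypothetical helper
name, untested sketch; it would need a matching implementation on the
LL/SC side):

    static void gen_clrex(DisasContext *s)
    {
    #ifdef CONFIG_TCG_USE_LDST_EXCL
        gen_helper_clrex_aa32(cpu_env);
    #else
        tcg_gen_movi_i64(cpu_exclusive_addr, -1);
    #endif
    }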

> +#else
>      tcg_gen_movi_i64(cpu_exclusive_addr, -1);
> +#endif
>  }
>
>  #ifdef CONFIG_USER_ONLY
> @@ -7476,6 +7528,47 @@ static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
>                       size | (rd << 4) | (rt << 8) | (rt2 << 12));
>      gen_exception_internal_insn(s, 4, EXCP_STREX);
>  }
> +#elif defined CONFIG_TCG_USE_LDST_EXCL
> +static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
> +                                TCGv_i32 addr, int size)
> +{
> +    TCGv_i32 tmp, mem_idx;
> +
> +    mem_idx = tcg_temp_new_i32();
> +
> +    tcg_gen_movi_i32(mem_idx, get_mem_index(s));
> +    tmp = load_reg(s, rt);
> +
> +    if (size != 3) {
> +        switch (size) {
> +        case 0:
> +            gen_helper_stcond_aa32_i8(cpu_R[rd], cpu_env, addr, tmp, mem_idx);
> +            break;
> +        case 1:
> +            gen_helper_stcond_aa32_i16(cpu_R[rd], cpu_env, addr, tmp, mem_idx);
> +            break;
> +        case 2:
> +            gen_helper_stcond_aa32_i32(cpu_R[rd], cpu_env, addr, tmp, mem_idx);
> +            break;
> +            break;
> +        default:
> +            abort();
> +        }
> +    } else {
> +        TCGv_i64 tmp64;
> +        TCGv_i32 tmp2;
> +
> +        tmp64 = tcg_temp_new_i64();
> +        tmp2 = load_reg(s, rt2);
> +        tcg_gen_concat_i32_i64(tmp64, tmp, tmp2);
> +        gen_helper_stcond_aa32_i64(cpu_R[rd], cpu_env, addr, tmp64, mem_idx);
> +
> +        tcg_temp_free_i32(tmp2);
> +        tcg_temp_free_i64(tmp64);
> +    }
> +
> +    tcg_temp_free_i32(tmp);
> +    tcg_temp_free_i32(mem_idx);
> +}
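
Minor nit: the tcg_temp_new_i32() + tcg_gen_movi_i32() pair for
mem_idx here (and in gen_load_exclusive above) could be collapsed into
the usual idiom for passing a constant to a helper:

    mem_idx = tcg_const_i32(get_mem_index(s));

(untested, but tcg_const_i32 is what translate.c uses elsewhere for
constant helper arguments).
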
>  #else
>  static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
>                                  TCGv_i32 addr, int size)


--
Alex Bennée


