
Re: [Qemu-devel] [PATCH v3 15/34] tcg: Add CONFIG_ATOMIC64


From: Alex Bennée
Subject: Re: [Qemu-devel] [PATCH v3 15/34] tcg: Add CONFIG_ATOMIC64
Date: Wed, 14 Sep 2016 11:12:01 +0100
User-agent: mu4e 0.9.17; emacs 25.1.12

Richard Henderson <address@hidden> writes:

> Allow qemu to build on 32-bit hosts without 64-bit atomic ops.
>
> Even if we only allow 32-bit hosts to multi-thread emulate 32-bit
> guests, we still need some way to handle the 32-bit guest using a
> 64-bit atomic operation.  Do so by dropping back to single-step.
>
> Signed-off-by: Richard Henderson <address@hidden>
> ---
>  configure         | 33 +++++++++++++++++++++++++++++++++
>  cputlb.c          |  4 ++++
>  tcg-runtime.c     |  7 +++++++
>  tcg/tcg-op.c      | 22 ++++++++++++++++++----
>  tcg/tcg-runtime.h | 46 ++++++++++++++++++++++++++++++++++++++++------
>  tcg/tcg.h         | 15 ++++++++++++---
>  6 files changed, 114 insertions(+), 13 deletions(-)
>
> diff --git a/configure b/configure
> index e500652..519de5d 100755
> --- a/configure
> +++ b/configure
> @@ -4462,6 +4462,35 @@ EOF
>    fi
>  fi
>
> +#########################################
> +# See if 64-bit atomic operations are supported.
> +# Note that without __atomic builtins, we can only
> +# assume atomic loads/stores max at pointer size.
> +
> +cat > $TMPC << EOF
> +#include <stdint.h>
> +int main(void)
> +{
> +  uint64_t x = 0, y = 0;
> +#ifdef __ATOMIC_RELAXED
> +  y = __atomic_load_8(&x, 0);
> +  __atomic_store_8(&x, y, 0);
> +  __atomic_compare_exchange_8(&x, &y, x, 0, 0, 0);
> +  __atomic_exchange_8(&x, y, 0);
> +  __atomic_fetch_add_8(&x, y, 0);
> +#else
> +  char is_host64[sizeof(void *) >= sizeof(uint64_t) ? 1 : -1];

I missed this piece of magic when I first read the patch :-/
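For anyone else who skims past it: it's the usual negative-array-size
compile-time assert. Without the __atomic builtins we can only assume
pointer-sized loads and stores are atomic, so if the host's pointers are
narrower than uint64_t the array size evaluates to -1, the probe fails to
compile, and atomic64 stays unset. A rough sketch of the idiom (made-up
names, not the actual configure test):

  /* illustrative only: if cond is false the array gets size -1, which is
   * a hard compile error, so the surrounding test program fails to build */
  #define COMPILE_ASSERT_SKETCH(cond, name)  typedef char name[(cond) ? 1 : -1]

  /* only compiles on hosts whose pointers are at least 64 bits wide,
   * i.e. where plain 64-bit loads/stores can be assumed atomic */
  COMPILE_ASSERT_SKETCH(sizeof(void *) >= 8, host_pointer_fits_uint64);

  int main(void) { return 0; }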

> +  __sync_lock_test_and_set(&x, y);
> +  __sync_val_compare_and_swap(&x, y, 0);
> +  __sync_fetch_and_add(&x, y);
> +#endif
> +  return 0;
> +}
> +EOF
> +if compile_prog "" "" ; then
> +  atomic64=yes
> +fi
> +
>  ########################################
>  # check if getauxval is available.
>
> @@ -5415,6 +5444,10 @@ if test "$atomic128" = "yes" ; then
>    echo "CONFIG_ATOMIC128=y" >> $config_host_mak
>  fi
>
> +if test "$atomic64" = "yes" ; then
> +  echo "CONFIG_ATOMIC64=y" >> $config_host_mak
> +fi
> +
>  if test "$getauxval" = "yes" ; then
>    echo "CONFIG_GETAUXVAL=y" >> $config_host_mak
>  fi
> diff --git a/cputlb.c b/cputlb.c
> index 773cc85..8cdbf6c 100644
> --- a/cputlb.c
> +++ b/cputlb.c
> @@ -687,8 +687,10 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
>  #define DATA_SIZE 4
>  #include "atomic_template.h"
>
> +#ifdef CONFIG_ATOMIC64
>  #define DATA_SIZE 8
>  #include "atomic_template.h"
> +#endif
>
>  #ifdef CONFIG_ATOMIC128
>  #define DATA_SIZE 16
> @@ -713,8 +715,10 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
>  #define DATA_SIZE 4
>  #include "atomic_template.h"
>
> +#ifdef CONFIG_ATOMIC64
>  #define DATA_SIZE 8
>  #include "atomic_template.h"
> +#endif
>
>  /* Code access functions.  */
>
> diff --git a/tcg-runtime.c b/tcg-runtime.c
> index 0c97cdf..d7704d4 100644
> --- a/tcg-runtime.c
> +++ b/tcg-runtime.c
> @@ -101,6 +101,11 @@ int64_t HELPER(mulsh_i64)(int64_t arg1, int64_t arg2)
>      return h;
>  }
>
> +void HELPER(exit_atomic)(CPUArchState *env)
> +{
> +    cpu_loop_exit_atomic(ENV_GET_CPU(env), GETRA());
> +}
> +
>  #ifndef CONFIG_SOFTMMU
>  /* The softmmu versions of these helpers are in cputlb.c.  */
>
> @@ -130,8 +135,10 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
>  #define DATA_SIZE 4
>  #include "atomic_template.h"
>
> +#ifdef CONFIG_ATOMIC64
>  #define DATA_SIZE 8
>  #include "atomic_template.h"
> +#endif
>
>  /* The following is only callable from other helpers, and matches up
>     with the softmmu version.  */
> diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
> index e146ad4..1c09025 100644
> --- a/tcg/tcg-op.c
> +++ b/tcg/tcg-op.c
> @@ -2023,14 +2023,20 @@ typedef void (*gen_atomic_op_i32)(TCGv_i32, TCGv_env, TCGv, TCGv_i32);
>  typedef void (*gen_atomic_op_i64)(TCGv_i64, TCGv_env, TCGv, TCGv_i64);
>  #endif
>
> +#ifdef CONFIG_ATOMIC64
> +# define WITH_ATOMIC64(X) X,
> +#else
> +# define WITH_ATOMIC64(X)
> +#endif
> +
>  static void * const table_cmpxchg[16] = {
>      [MO_8] = gen_helper_atomic_cmpxchgb,
>      [MO_16 | MO_LE] = gen_helper_atomic_cmpxchgw_le,
>      [MO_16 | MO_BE] = gen_helper_atomic_cmpxchgw_be,
>      [MO_32 | MO_LE] = gen_helper_atomic_cmpxchgl_le,
>      [MO_32 | MO_BE] = gen_helper_atomic_cmpxchgl_be,
> -    [MO_64 | MO_LE] = gen_helper_atomic_cmpxchgq_le,
> -    [MO_64 | MO_BE] = gen_helper_atomic_cmpxchgq_be,
> +    WITH_ATOMIC64([MO_64 | MO_LE] = gen_helper_atomic_cmpxchgq_le)
> +    WITH_ATOMIC64([MO_64 | MO_BE] = gen_helper_atomic_cmpxchgq_be)
>  };
>
>  void tcg_gen_atomic_cmpxchg_i32(TCGv_i32 retv, TCGv addr, TCGv_i32 cmpv,
> @@ -2100,6 +2106,7 @@ void tcg_gen_atomic_cmpxchg_i64(TCGv_i64 retv, TCGv addr, TCGv_i64 cmpv,
>          }
>          tcg_temp_free_i64(t1);
>      } else if ((memop & MO_SIZE) == MO_64) {
> +#ifdef CONFIG_ATOMIC64
>          gen_atomic_cx_i64 gen;
>
>          gen = table_cmpxchg[memop & (MO_SIZE | MO_BSWAP)];
> @@ -2114,6 +2121,9 @@ void tcg_gen_atomic_cmpxchg_i64(TCGv_i64 retv, TCGv addr, TCGv_i64 cmpv,
>  #else
>          gen(retv, tcg_ctx.tcg_env, addr, cmpv, newv);
>  #endif
> +#else
> +        gen_helper_exit_atomic(tcg_ctx.tcg_env);
> +#endif /* CONFIG_ATOMIC64 */
>      } else {
>          TCGv_i32 c32 = tcg_temp_new_i32();
>          TCGv_i32 n32 = tcg_temp_new_i32();
> @@ -2201,6 +2211,7 @@ static void do_atomic_op_i64(TCGv_i64 ret, TCGv addr, TCGv_i64 val,
>      memop = tcg_canonicalize_memop(memop, 1, 0);
>
>      if ((memop & MO_SIZE) == MO_64) {
> +#ifdef CONFIG_ATOMIC64
>          gen_atomic_op_i64 gen;
>
>          gen = table[memop & (MO_SIZE | MO_BSWAP)];
> @@ -2215,6 +2226,9 @@ static void do_atomic_op_i64(TCGv_i64 ret, TCGv addr, TCGv_i64 val,
>  #else
>          gen(ret, tcg_ctx.tcg_env, addr, val);
>  #endif
> +#else
> +        gen_helper_exit_atomic(tcg_ctx.tcg_env);
> +#endif /* CONFIG_ATOMIC64 */
>      } else {
>          TCGv_i32 v32 = tcg_temp_new_i32();
>          TCGv_i32 r32 = tcg_temp_new_i32();
> @@ -2239,8 +2253,8 @@ static void * const table_##NAME[16] = {                                \
>      [MO_16 | MO_BE] = gen_helper_atomic_##NAME##w_be,                   \
>      [MO_32 | MO_LE] = gen_helper_atomic_##NAME##l_le,                   \
>      [MO_32 | MO_BE] = gen_helper_atomic_##NAME##l_be,                   \
> -    [MO_64 | MO_LE] = gen_helper_atomic_##NAME##q_le,                   \
> -    [MO_64 | MO_BE] = gen_helper_atomic_##NAME##q_be,                   \
> +    WITH_ATOMIC64([MO_64 | MO_LE] = gen_helper_atomic_##NAME##q_le)     \
> +    WITH_ATOMIC64([MO_64 | MO_BE] = gen_helper_atomic_##NAME##q_be)     \
>  };                                                                      \
>  void tcg_gen_atomic_##NAME##_i32                                        \
>      (TCGv_i32 ret, TCGv addr, TCGv_i32 val, TCGArg idx, TCGMemOp memop) \
> diff --git a/tcg/tcg-runtime.h b/tcg/tcg-runtime.h
> index 22367aa..1deb86a 100644
> --- a/tcg/tcg-runtime.h
> +++ b/tcg/tcg-runtime.h
> @@ -15,23 +15,28 @@ DEF_HELPER_FLAGS_2(sar_i64, TCG_CALL_NO_RWG_SE, s64, s64, s64)
>  DEF_HELPER_FLAGS_2(mulsh_i64, TCG_CALL_NO_RWG_SE, s64, s64, s64)
>  DEF_HELPER_FLAGS_2(muluh_i64, TCG_CALL_NO_RWG_SE, i64, i64, i64)
>
> +DEF_HELPER_FLAGS_1(exit_atomic, TCG_CALL_NO_WG, noreturn, env)
> +
>  #ifdef CONFIG_SOFTMMU
>
>  DEF_HELPER_FLAGS_5(atomic_cmpxchgb, TCG_CALL_NO_WG,
>                     i32, env, tl, i32, i32, i32)
>  DEF_HELPER_FLAGS_5(atomic_cmpxchgw_be, TCG_CALL_NO_WG,
>                     i32, env, tl, i32, i32, i32)
> -DEF_HELPER_FLAGS_5(atomic_cmpxchgl_be, TCG_CALL_NO_WG,
> -                   i32, env, tl, i32, i32, i32)
> -DEF_HELPER_FLAGS_5(atomic_cmpxchgq_be, TCG_CALL_NO_WG,
> -                   i64, env, tl, i64, i64, i32)
>  DEF_HELPER_FLAGS_5(atomic_cmpxchgw_le, TCG_CALL_NO_WG,
>                     i32, env, tl, i32, i32, i32)
> +DEF_HELPER_FLAGS_5(atomic_cmpxchgl_be, TCG_CALL_NO_WG,
> +                   i32, env, tl, i32, i32, i32)
>  DEF_HELPER_FLAGS_5(atomic_cmpxchgl_le, TCG_CALL_NO_WG,
>                     i32, env, tl, i32, i32, i32)
> +#ifdef CONFIG_ATOMIC64
> +DEF_HELPER_FLAGS_5(atomic_cmpxchgq_be, TCG_CALL_NO_WG,
> +                   i64, env, tl, i64, i64, i32)
>  DEF_HELPER_FLAGS_5(atomic_cmpxchgq_le, TCG_CALL_NO_WG,
>                     i64, env, tl, i64, i64, i32)
> +#endif
>
> +#ifdef CONFIG_ATOMIC64
>  #define GEN_ATOMIC_HELPERS(NAME)                                  \
>      DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), b),              \
>                         TCG_CALL_NO_WG, i32, env, tl, i32, i32)    \
> @@ -47,17 +52,33 @@ DEF_HELPER_FLAGS_5(atomic_cmpxchgq_le, TCG_CALL_NO_WG,
>                         TCG_CALL_NO_WG, i64, env, tl, i64, i32)    \
>      DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), q_be),           \
>                         TCG_CALL_NO_WG, i64, env, tl, i64, i32)
> +#else
> +#define GEN_ATOMIC_HELPERS(NAME)                                  \
> +    DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), b),              \
> +                       TCG_CALL_NO_WG, i32, env, tl, i32, i32)    \
> +    DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), w_le),           \
> +                       TCG_CALL_NO_WG, i32, env, tl, i32, i32)    \
> +    DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), w_be),           \
> +                       TCG_CALL_NO_WG, i32, env, tl, i32, i32)    \
> +    DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), l_le),           \
> +                       TCG_CALL_NO_WG, i32, env, tl, i32, i32)    \
> +    DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), l_be),           \
> +                       TCG_CALL_NO_WG, i32, env, tl, i32, i32)
> +#endif /* CONFIG_ATOMIC64 */
>
>  #else
>
>  DEF_HELPER_FLAGS_4(atomic_cmpxchgb, TCG_CALL_NO_WG, i32, env, tl, i32, i32)
>  DEF_HELPER_FLAGS_4(atomic_cmpxchgw_be, TCG_CALL_NO_WG, i32, env, tl, i32, i32)
> -DEF_HELPER_FLAGS_4(atomic_cmpxchgl_be, TCG_CALL_NO_WG, i32, env, tl, i32, i32)
> -DEF_HELPER_FLAGS_4(atomic_cmpxchgq_be, TCG_CALL_NO_WG, i64, env, tl, i64, i64)
>  DEF_HELPER_FLAGS_4(atomic_cmpxchgw_le, TCG_CALL_NO_WG, i32, env, tl, i32, i32)
> +DEF_HELPER_FLAGS_4(atomic_cmpxchgl_be, TCG_CALL_NO_WG, i32, env, tl, i32, i32)
>  DEF_HELPER_FLAGS_4(atomic_cmpxchgl_le, TCG_CALL_NO_WG, i32, env, tl, i32, i32)
> +#ifdef CONFIG_ATOMIC64
> +DEF_HELPER_FLAGS_4(atomic_cmpxchgq_be, TCG_CALL_NO_WG, i64, env, tl, i64, i64)
>  DEF_HELPER_FLAGS_4(atomic_cmpxchgq_le, TCG_CALL_NO_WG, i64, env, tl, i64, i64)
> +#endif
>
> +#ifdef CONFIG_ATOMIC64
>  #define GEN_ATOMIC_HELPERS(NAME)                             \
>      DEF_HELPER_FLAGS_3(glue(glue(atomic_, NAME), b),         \
>                         TCG_CALL_NO_WG, i32, env, tl, i32)    \
> @@ -73,6 +94,19 @@ DEF_HELPER_FLAGS_4(atomic_cmpxchgq_le, TCG_CALL_NO_WG, i64, env, tl, i64, i64)
>                         TCG_CALL_NO_WG, i64, env, tl, i64)    \
>      DEF_HELPER_FLAGS_3(glue(glue(atomic_, NAME), q_be),      \
>                         TCG_CALL_NO_WG, i64, env, tl, i64)
> +#else
> +#define GEN_ATOMIC_HELPERS(NAME)                             \
> +    DEF_HELPER_FLAGS_3(glue(glue(atomic_, NAME), b),         \
> +                       TCG_CALL_NO_WG, i32, env, tl, i32)    \
> +    DEF_HELPER_FLAGS_3(glue(glue(atomic_, NAME), w_le),      \
> +                       TCG_CALL_NO_WG, i32, env, tl, i32)    \
> +    DEF_HELPER_FLAGS_3(glue(glue(atomic_, NAME), w_be),      \
> +                       TCG_CALL_NO_WG, i32, env, tl, i32)    \
> +    DEF_HELPER_FLAGS_3(glue(glue(atomic_, NAME), l_le),      \
> +                       TCG_CALL_NO_WG, i32, env, tl, i32)    \
> +    DEF_HELPER_FLAGS_3(glue(glue(atomic_, NAME), l_be),      \
> +                       TCG_CALL_NO_WG, i32, env, tl, i32)
> +#endif /* CONFIG_ATOMIC64 */
>
>  #endif /* CONFIG_SOFTMMU */
>
> diff --git a/tcg/tcg.h b/tcg/tcg.h
> index 5a94cec..8bcf32a 100644
> --- a/tcg/tcg.h
> +++ b/tcg/tcg.h
> @@ -1202,14 +1202,23 @@ TYPE helper_atomic_ ## NAME ## SUFFIX ## _mmu         \
>      (CPUArchState *env, target_ulong addr, TYPE val,  \
>       TCGMemOpIdx oi, uintptr_t retaddr);
>
> +#ifdef CONFIG_ATOMIC64
>  #define GEN_ATOMIC_HELPER_ALL(NAME)          \
> -    GEN_ATOMIC_HELPER(NAME, uint32_t, b)      \
> +    GEN_ATOMIC_HELPER(NAME, uint32_t, b)     \
>      GEN_ATOMIC_HELPER(NAME, uint32_t, w_le)  \
> -    GEN_ATOMIC_HELPER(NAME, uint32_t, l_le)  \
> -    GEN_ATOMIC_HELPER(NAME, uint64_t, q_le)  \
>      GEN_ATOMIC_HELPER(NAME, uint32_t, w_be)  \
> +    GEN_ATOMIC_HELPER(NAME, uint32_t, l_le)  \
>      GEN_ATOMIC_HELPER(NAME, uint32_t, l_be)  \
> +    GEN_ATOMIC_HELPER(NAME, uint64_t, q_le)  \
>      GEN_ATOMIC_HELPER(NAME, uint64_t, q_be)
> +#else
> +#define GEN_ATOMIC_HELPER_ALL(NAME)          \
> +    GEN_ATOMIC_HELPER(NAME, uint32_t, b)     \
> +    GEN_ATOMIC_HELPER(NAME, uint32_t, w_le)  \
> +    GEN_ATOMIC_HELPER(NAME, uint32_t, w_be)  \
> +    GEN_ATOMIC_HELPER(NAME, uint32_t, l_le)  \
> +    GEN_ATOMIC_HELPER(NAME, uint32_t, l_be)
> +#endif
>
>  GEN_ATOMIC_HELPER_ALL(fetch_add)
>  GEN_ATOMIC_HELPER_ALL(fetch_sub)

I find the nesting here makes it a bit harder to follow all the various
permutations of CONFIG_SOFTMMU/CONFIG_ATOMIC64, but I can't see a nicer
way of doing it with plain old C macros, so *shrug*.
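
To keep the permutations straight for myself, the resulting shape of
tcg/tcg-runtime.h is roughly this (heavily condensed, with comments
standing in for the actual DEF_HELPER blocks):

  #ifdef CONFIG_SOFTMMU
  # ifdef CONFIG_ATOMIC64
      /* FLAGS_5 cmpxchg incl. q_le/q_be; GEN_ATOMIC_HELPERS covers b..q_be */
  # else
      /* FLAGS_5 cmpxchg up to l_be; GEN_ATOMIC_HELPERS stops at l_be */
  # endif
  #else   /* user-mode: same layout, minus the TCGMemOpIdx argument */
  # ifdef CONFIG_ATOMIC64
      /* FLAGS_4 cmpxchg incl. q_le/q_be; GEN_ATOMIC_HELPERS covers b..q_be */
  # else
      /* FLAGS_4 cmpxchg up to l_be; GEN_ATOMIC_HELPERS stops at l_be */
  # endif
  #endif

i.e. CONFIG_SOFTMMU only changes the helper signatures (the extra
TCGMemOpIdx argument) while CONFIG_ATOMIC64 decides whether the q_le/q_be
entries exist at all, with each of the four combinations spelling
GEN_ATOMIC_HELPERS out by hand.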

Reviewed-by: Alex Bennée <address@hidden>


--
Alex Bennée


