From: Mark Cave-Ayland
Subject: [RFC PATCH 08/11] target/ppc: implement address swizzle for gen_ld_atomic()
Date: Thu, 12 Dec 2024 15:14:09 +0000
The gen_ld_atomic() function uses a number of TCG atomic primitives in its
implementation. Update gen_ld_atomic() so that it applies the address swizzle
to the effective address where required.
Signed-off-by: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk>
---
target/ppc/translate.c | 81 +++++++++++++++++++++++++++++++++++++-----
1 file changed, 72 insertions(+), 9 deletions(-)
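
Note: need_addrswizzle_le() and gen_addr_swizzle_le() come from patches 05/11
and 06/11 in this series. For readers without those patches to hand, a minimal
sketch of the classic PowerPC little-endian address modification that such a
helper would perform is shown below; the helper name, the use of memop_size()
and the XOR constant are assumptions for illustration, not the actual code
from patch 06/11:

/* Hypothetical sketch, not the implementation from patch 06/11:
 * the traditional PowerPC "true little-endian" mode modifies the
 * address by XORing it with (8 - access size), so a 1-byte access
 * XORs with 7, a 2-byte access with 6, a 4-byte access with 4 and
 * an 8-byte access is left unchanged.
 */
static void sketch_addr_swizzle_le(TCGv dst, TCGv ea, MemOp memop)
{
    int size = memop_size(memop);    /* 1 << (memop & MO_SIZE) */

    tcg_gen_xori_tl(dst, ea, 8 - size);
}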
diff --git a/target/ppc/translate.c b/target/ppc/translate.c
index 74aa398f25..b549525bb6 100644
--- a/target/ppc/translate.c
+++ b/target/ppc/translate.c
@@ -3039,31 +3039,94 @@ static void gen_ld_atomic(DisasContext *ctx, MemOp memop)
     memop |= MO_ALIGN;
     switch (gpr_FC) {
     case 0: /* Fetch and add */
-        tcg_gen_atomic_fetch_add_tl(dst, EA, src, ctx->mem_idx, memop);
+        if (need_addrswizzle_le(ctx)) {
+            TCGv ta = tcg_temp_new();
+
+            gen_addr_swizzle_le(ta, EA, memop);
+            tcg_gen_atomic_fetch_add_tl(dst, ta, src, ctx->mem_idx, memop);
+        } else {
+            tcg_gen_atomic_fetch_add_tl(dst, EA, src, ctx->mem_idx, memop);
+        }
         break;
     case 1: /* Fetch and xor */
-        tcg_gen_atomic_fetch_xor_tl(dst, EA, src, ctx->mem_idx, memop);
+        if (need_addrswizzle_le(ctx)) {
+            TCGv ta = tcg_temp_new();
+
+            gen_addr_swizzle_le(ta, EA, memop);
+            tcg_gen_atomic_fetch_xor_tl(dst, ta, src, ctx->mem_idx, memop);
+        } else {
+            tcg_gen_atomic_fetch_xor_tl(dst, EA, src, ctx->mem_idx, memop);
+        }
         break;
     case 2: /* Fetch and or */
-        tcg_gen_atomic_fetch_or_tl(dst, EA, src, ctx->mem_idx, memop);
+        if (need_addrswizzle_le(ctx)) {
+            TCGv ta = tcg_temp_new();
+
+            gen_addr_swizzle_le(ta, EA, memop);
+            tcg_gen_atomic_fetch_or_tl(dst, ta, src, ctx->mem_idx, memop);
+        } else {
+            tcg_gen_atomic_fetch_or_tl(dst, EA, src, ctx->mem_idx, memop);
+        }
         break;
     case 3: /* Fetch and 'and' */
-        tcg_gen_atomic_fetch_and_tl(dst, EA, src, ctx->mem_idx, memop);
+        if (need_addrswizzle_le(ctx)) {
+            TCGv ta = tcg_temp_new();
+
+            gen_addr_swizzle_le(ta, EA, memop);
+            tcg_gen_atomic_fetch_and_tl(dst, ta, src, ctx->mem_idx, memop);
+        } else {
+            tcg_gen_atomic_fetch_and_tl(dst, EA, src, ctx->mem_idx, memop);
+        }
         break;
     case 4: /* Fetch and max unsigned */
-        tcg_gen_atomic_fetch_umax_tl(dst, EA, src, ctx->mem_idx, memop);
+        if (need_addrswizzle_le(ctx)) {
+            TCGv ta = tcg_temp_new();
+
+            gen_addr_swizzle_le(ta, EA, memop);
+            tcg_gen_atomic_fetch_umax_tl(dst, ta, src, ctx->mem_idx, memop);
+        } else {
+            tcg_gen_atomic_fetch_umax_tl(dst, EA, src, ctx->mem_idx, memop);
+        }
         break;
     case 5: /* Fetch and max signed */
-        tcg_gen_atomic_fetch_smax_tl(dst, EA, src, ctx->mem_idx, memop);
+        if (need_addrswizzle_le(ctx)) {
+            TCGv ta = tcg_temp_new();
+
+            gen_addr_swizzle_le(ta, EA, memop);
+            tcg_gen_atomic_fetch_smax_tl(dst, ta, src, ctx->mem_idx, memop);
+        } else {
+            tcg_gen_atomic_fetch_smax_tl(dst, EA, src, ctx->mem_idx, memop);
+        }
         break;
     case 6: /* Fetch and min unsigned */
-        tcg_gen_atomic_fetch_umin_tl(dst, EA, src, ctx->mem_idx, memop);
+        if (need_addrswizzle_le(ctx)) {
+            TCGv ta = tcg_temp_new();
+
+            gen_addr_swizzle_le(ta, EA, memop);
+            tcg_gen_atomic_fetch_umin_tl(dst, ta, src, ctx->mem_idx, memop);
+        } else {
+            tcg_gen_atomic_fetch_umin_tl(dst, EA, src, ctx->mem_idx, memop);
+        }
         break;
     case 7: /* Fetch and min signed */
-        tcg_gen_atomic_fetch_smin_tl(dst, EA, src, ctx->mem_idx, memop);
+        if (need_addrswizzle_le(ctx)) {
+            TCGv ta = tcg_temp_new();
+
+            gen_addr_swizzle_le(ta, EA, memop);
+            tcg_gen_atomic_fetch_smin_tl(dst, ta, src, ctx->mem_idx, memop);
+        } else {
+            tcg_gen_atomic_fetch_smin_tl(dst, EA, src, ctx->mem_idx, memop);
+        }
         break;
     case 8: /* Swap */
-        tcg_gen_atomic_xchg_tl(dst, EA, src, ctx->mem_idx, memop);
+        if (need_addrswizzle_le(ctx)) {
+            TCGv ta = tcg_temp_new();
+
+            gen_addr_swizzle_le(ta, EA, memop);
+            tcg_gen_atomic_xchg_tl(dst, ta, src, ctx->mem_idx, memop);
+        } else {
+            tcg_gen_atomic_xchg_tl(dst, EA, src, ctx->mem_idx, memop);
+        }
         break;
     case 16: /* Compare and swap not equal */
--
2.39.5
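
One observation on the shape of this change: every case in the switch applies
the identical swizzle sequence, so the duplication could be folded into a
single address computation ahead of the switch. A sketch of that alternative
(not what this patch does):

/* Sketch of a deduplicated alternative: compute the (possibly
 * swizzled) address once, then let every case use it directly.
 */
TCGv addr = EA;

if (need_addrswizzle_le(ctx)) {
    addr = tcg_temp_new();
    gen_addr_swizzle_le(addr, EA, memop);
}

switch (gpr_FC) {
case 0: /* Fetch and add */
    tcg_gen_atomic_fetch_add_tl(dst, addr, src, ctx->mem_idx, memop);
    break;
case 1: /* Fetch and xor */
    tcg_gen_atomic_fetch_xor_tl(dst, addr, src, ctx->mem_idx, memop);
    break;
/* ... remaining cases likewise use addr instead of EA ... */
}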