From: Richard Henderson
Subject: [PATCH v2 19/24] target/arm: Enforce alignment for aa64 load-acq/store-rel
Date: Tue, 8 Dec 2020 12:01:13 -0600

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
target/arm/translate-a64.c | 23 ++++++++++++++---------
1 file changed, 14 insertions(+), 9 deletions(-)
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index 152a0a37ab..67a9b3bb09 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -2669,7 +2669,8 @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn)
         tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
         clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn),
                                     true, rn != 31, size);
-        do_gpr_st(s, cpu_reg(s, rt), clean_addr, size, true, rt,
+        /* TODO: ARMv8.4-LSE SCTLR.nAA */
+        do_gpr_st(s, cpu_reg(s, rt), clean_addr, size | MO_ALIGN, true, rt,
                   disas_ldst_compute_iss_sf(size, false, 0), is_lasr);
         return;
@@ -2686,8 +2687,9 @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn)
         }
         clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn),
                                     false, rn != 31, size);
-        do_gpr_ld(s, cpu_reg(s, rt), clean_addr, size, false, true, rt,
-                  disas_ldst_compute_iss_sf(size, false, 0), is_lasr);
+        /* TODO: ARMv8.4-LSE SCTLR.nAA */
+        do_gpr_ld(s, cpu_reg(s, rt), clean_addr, size | MO_ALIGN, false, true,
+                  rt, disas_ldst_compute_iss_sf(size, false, 0), is_lasr);
         tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
         return;
@@ -3476,15 +3478,18 @@ static void disas_ldst_ldapr_stlr(DisasContext *s, uint32_t insn)
     int size = extract32(insn, 30, 2);
     TCGv_i64 clean_addr, dirty_addr;
     bool is_store = false;
-    bool is_signed = false;
     bool extend = false;
     bool iss_sf;
+    MemOp mop;
 
     if (!dc_isar_feature(aa64_rcpc_8_4, s)) {
         unallocated_encoding(s);
         return;
     }
 
+    /* TODO: ARMv8.4-LSE SCTLR.nAA */
+    mop = size | MO_ALIGN;
+
     switch (opc) {
     case 0: /* STLURB */
         is_store = true;
         break;
@@ -3496,21 +3501,21 @@ static void disas_ldst_ldapr_stlr(DisasContext *s, uint32_t insn)
             unallocated_encoding(s);
             return;
         }
-        is_signed = true;
+        mop |= MO_SIGN;
         break;
     case 3: /* LDAPURS* 32-bit variant */
         if (size > 1) {
             unallocated_encoding(s);
             return;
         }
-        is_signed = true;
+        mop |= MO_SIGN;
         extend = true; /* zero-extend 32->64 after signed load */
         break;
     default:
         g_assert_not_reached();
     }
 
-    iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc);
+    iss_sf = disas_ldst_compute_iss_sf(size, (mop & MO_SIGN) != 0, opc);
 
     if (rn == 31) {
         gen_check_sp_alignment(s);
@@ -3523,13 +3528,13 @@ static void disas_ldst_ldapr_stlr(DisasContext *s, uint32_t insn)
     if (is_store) {
         /* Store-Release semantics */
         tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
-        do_gpr_st(s, cpu_reg(s, rt), clean_addr, size, true, rt, iss_sf, true);
+        do_gpr_st(s, cpu_reg(s, rt), clean_addr, mop, true, rt, iss_sf, true);
     } else {
         /*
          * Load-AcquirePC semantics; we implement as the slightly more
          * restrictive Load-Acquire.
          */
-        do_gpr_ld(s, cpu_reg(s, rt), clean_addr, size + is_signed * MO_SIGN,
+        do_gpr_ld(s, cpu_reg(s, rt), clean_addr, mop,
                   extend, true, rt, iss_sf, true);
         tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
     }
--
2.25.1
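
The pattern throughout the hunks above is to fold MO_ALIGN (and, where relevant, MO_SIGN) into the single MemOp value handed to do_gpr_ld/do_gpr_st, so the TCG memory subsystem enforces alignment rather than each decoder open-coding a check. As a minimal standalone sketch of that idea (the enum values below are simplified stand-ins, not QEMU's actual MemOp bit layout):

/*
 * Illustrative sketch only: these values are simplified stand-ins
 * and do not match QEMU's real MemOp encoding.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

typedef int MemOp;

enum {
    MO_8     = 0,       /* log2 of the access size in bytes */
    MO_16    = 1,
    MO_32    = 2,
    MO_64    = 3,
    MO_SIZE  = 3,       /* mask covering the size field */
    MO_SIGN  = 1 << 2,  /* sign-extend the loaded value */
    MO_ALIGN = 1 << 3,  /* require natural alignment */
};

/* True when the access must raise an alignment fault: MO_ALIGN is
 * set and the address is not a multiple of the access size. */
static bool alignment_fault(uint64_t addr, MemOp mop)
{
    uint64_t bytes = UINT64_C(1) << (mop & MO_SIZE);
    return (mop & MO_ALIGN) && (addr & (bytes - 1)) != 0;
}

int main(void)
{
    int size = MO_32;            /* as decoded from insn bits [31:30] */
    MemOp mop = size | MO_ALIGN; /* the pattern added by this patch */

    assert(!alignment_fault(0x1000, mop)); /* 4-byte aligned: ok */
    assert(alignment_fault(0x1002, mop));  /* misaligned: faults */

    /* Sign information travels in the same value, which is why the
     * patch can recover it later with (mop & MO_SIGN) != 0. */
    mop |= MO_SIGN;
    assert((mop & MO_SIGN) != 0);
    return 0;
}

Carrying size, signedness, and alignment in one value is also what lets the final do_gpr_ld call above collapse "size + is_signed * MO_SIGN" down to just "mop".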