[PATCH v3 23/30] target/arm: Enforce alignment for VLDn/VSTn (single)
From: Richard Henderson
Subject: [PATCH v3 23/30] target/arm: Enforce alignment for VLDn/VSTn (single)
Date: Mon, 11 Jan 2021 09:01:06 -1000
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
target/arm/translate-neon.c.inc | 48 ++++++++++++++++++++++++++++-----
1 file changed, 42 insertions(+), 6 deletions(-)
diff --git a/target/arm/translate-neon.c.inc b/target/arm/translate-neon.c.inc
index e706c37c80..a02b8369a1 100644
--- a/target/arm/translate-neon.c.inc
+++ b/target/arm/translate-neon.c.inc
@@ -629,6 +629,7 @@ static bool trans_VLDST_single(DisasContext *s, arg_VLDST_single *a)
     int nregs = a->n + 1;
     int vd = a->vd;
     TCGv_i32 addr, tmp;
+    MemOp mop;
 
     if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
         return false;
@@ -678,23 +679,58 @@ static bool trans_VLDST_single(DisasContext *s, arg_VLDST_single *a)
         return true;
     }
 
+    /* Pick up SCTLR settings */
+    mop = finalize_memop(s, a->size);
+
+    if (a->align) {
+        MemOp align_op;
+
+        switch (nregs) {
+        case 1:
+            /* For VLD1, use natural alignment. */
+            align_op = MO_ALIGN;
+            break;
+        case 2:
+            /* For VLD2, use double alignment. */
+            align_op = pow2_align(a->size + 1);
+            break;
+        case 4:
+            if (a->size == MO_32) {
+                /*
+                 * For VLD4.32, align = 1 is double alignment, align = 2 is
+                 * quad alignment; align = 3 is rejected above.
+                 */
+                align_op = pow2_align(a->size + a->align);
+            } else {
+                /* For VLD4.8 and VLD.16, we want quad alignment. */
+                align_op = pow2_align(a->size + 2);
+            }
+            break;
+        default:
+            /* For VLD3, the alignment field is zero and rejected above. */
+            g_assert_not_reached();
+        }
+
+        mop = (mop & ~MO_AMASK) | align_op;
+    }
+
     tmp = tcg_temp_new_i32();
     addr = tcg_temp_new_i32();
     load_reg_var(s, addr, a->rn);
-    /*
-     * TODO: if we implemented alignment exceptions, we should check
-     * addr against the alignment encoded in a->align here.
-     */
+
     for (reg = 0; reg < nregs; reg++) {
         if (a->l) {
-            gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), a->size);
+            gen_aa32_ld_internal_i32(s, tmp, addr, get_mem_index(s), mop);
             neon_store_element(vd, a->reg_idx, a->size, tmp);
         } else { /* Store */
             neon_load_element(tmp, vd, a->reg_idx, a->size);
-            gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), a->size);
+            gen_aa32_st_internal_i32(s, tmp, addr, get_mem_index(s), mop);
         }
         vd += a->stride;
         tcg_gen_addi_i32(addr, addr, 1 << a->size);
+
+        /* Subsequent memory operations inherit alignment */
+        mop &= ~MO_AMASK;
     }
     tcg_temp_free_i32(addr);
     tcg_temp_free_i32(tmp);
--
2.25.1
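
For reference, the alignment requirement that the new code enforces can be
summarized with a small standalone C sketch. This is illustrative only: it does
not reuse QEMU's MemOp or pow2_align definitions, but mirrors the selection in
the switch above, where pow2_align(n) stands for a 2^n-byte boundary.

#include <assert.h>
#include <stdio.h>

/* Element size encoding as in the patch: MO_8 = 0, MO_16 = 1, MO_32 = 2. */
enum { MO_8 = 0, MO_16 = 1, MO_32 = 2 };

/*
 * Required alignment in bytes for a single-lane VLDn/VSTn access,
 * mirroring the switch in trans_VLDST_single().  Returns 1 (no check)
 * when the instruction's align field is zero.
 */
static unsigned single_lane_alignment(int nregs, int size, int align)
{
    if (align == 0) {
        return 1;
    }
    switch (nregs) {
    case 1:
        /* VLD1/VST1: natural alignment, 1 << size bytes. */
        return 1u << size;
    case 2:
        /* VLD2/VST2: double alignment, pow2_align(size + 1). */
        return 1u << (size + 1);
    case 4:
        if (size == MO_32) {
            /* align = 1 -> 8 bytes, align = 2 -> 16 bytes. */
            return 1u << (size + align);
        }
        /* VLD4.8 / VLD4.16: quad alignment, pow2_align(size + 2). */
        return 1u << (size + 2);
    default:
        /* VLD3 single-lane never has a nonzero align field. */
        assert(0);
        return 1;
    }
}

int main(void)
{
    /* VLD2.16 {d0[0],d1[0]}, [r0:32]  -> 4-byte check */
    printf("VLD2.16 align=1 -> %u-byte check\n",
           single_lane_alignment(2, MO_16, 1));
    /* VLD4.32 {d0[1],d1[1],d2[1],d3[1]}, [r0:128]  -> 16-byte check */
    printf("VLD4.32 align=2 -> %u-byte check\n",
           single_lane_alignment(4, MO_32, 2));
    return 0;
}

Note that the patch applies the alignment check only to the first element
access and then clears the MO_AMASK bits ("Subsequent memory operations inherit
alignment"): once the base address has passed the check, the remaining accesses
within the contiguous structure need no further check.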