From: Richard Henderson
Subject: [PATCH v2 20/24] target/arm: Use MemOp for size + endian in aa64 vector ld/st
Date: Tue, 8 Dec 2020 12:01:14 -0600

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/arm/translate-a64.c | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)

diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index 67a9b3bb09..4395721446 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -1140,24 +1140,24 @@ static void write_vec_element_i32(DisasContext *s, TCGv_i32 tcg_src,
 /* Store from vector register to memory */
 static void do_vec_st(DisasContext *s, int srcidx, int element,
-                      TCGv_i64 tcg_addr, int size, MemOp endian)
+                      TCGv_i64 tcg_addr, MemOp mop)
 {
     TCGv_i64 tcg_tmp = tcg_temp_new_i64();
-    read_vec_element(s, tcg_tmp, srcidx, element, size);
-    tcg_gen_qemu_st_i64(tcg_tmp, tcg_addr, get_mem_index(s), endian | size);
+    read_vec_element(s, tcg_tmp, srcidx, element, mop & MO_SIZE);
+    tcg_gen_qemu_st_i64(tcg_tmp, tcg_addr, get_mem_index(s), mop);
     tcg_temp_free_i64(tcg_tmp);
 }

 /* Load from memory to vector register */
 static void do_vec_ld(DisasContext *s, int destidx, int element,
-                      TCGv_i64 tcg_addr, int size, MemOp endian)
+                      TCGv_i64 tcg_addr, MemOp mop)
 {
     TCGv_i64 tcg_tmp = tcg_temp_new_i64();
-    tcg_gen_qemu_ld_i64(tcg_tmp, tcg_addr, get_mem_index(s), endian | size);
-    write_vec_element(s, tcg_tmp, destidx, element, size);
+    tcg_gen_qemu_ld_i64(tcg_tmp, tcg_addr, get_mem_index(s), mop);
+    write_vec_element(s, tcg_tmp, destidx, element, mop & MO_SIZE);
     tcg_temp_free_i64(tcg_tmp);
 }
@@ -3705,9 +3705,9 @@ static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn)
             for (xs = 0; xs < selem; xs++) {
                 int tt = (rt + r + xs) % 32;
                 if (is_store) {
-                    do_vec_st(s, tt, e, clean_addr, size, endian);
+                    do_vec_st(s, tt, e, clean_addr, size | endian);
                 } else {
-                    do_vec_ld(s, tt, e, clean_addr, size, endian);
+                    do_vec_ld(s, tt, e, clean_addr, size | endian);
                 }
                 tcg_gen_add_i64(clean_addr, clean_addr, tcg_ebytes);
             }
@@ -3856,9 +3856,9 @@ static void disas_ldst_single_struct(DisasContext *s, uint32_t insn)
         } else {
             /* Load/store one element per register */
             if (is_load) {
-                do_vec_ld(s, rt, index, clean_addr, scale, s->be_data);
+                do_vec_ld(s, rt, index, clean_addr, scale | s->be_data);
             } else {
-                do_vec_st(s, rt, index, clean_addr, scale, s->be_data);
+                do_vec_st(s, rt, index, clean_addr, scale | s->be_data);
             }
         }
         tcg_gen_add_i64(clean_addr, clean_addr, tcg_ebytes);
--
2.25.1
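
For readers skimming the series: the interface change means callers now compose the element size and the target endianness into a single MemOp value (size | endian), and the do_vec_ld()/do_vec_st() helpers mask the size back out with MO_SIZE where they need it. The standalone sketch below illustrates that composition pattern; the XMO_* names, their numeric values, and the store_element() helper are invented for illustration and are not QEMU's actual MemOp definitions (see include/exec/memop.h for the real ones).

/* Illustrative sketch only: a flag word whose low bits hold log2(size)
 * and whose high bit requests a byte swap, in the spirit of MemOp.
 * None of these names or values come from QEMU itself. */
#include <stdio.h>

typedef enum {
    XMO_8    = 0,       /* log2(element size) in the low two bits */
    XMO_16   = 1,
    XMO_32   = 2,
    XMO_64   = 3,
    XMO_SIZE = 3,       /* mask that recovers the size field */
    XMO_BE   = 1 << 3,  /* hypothetical "big-endian / byte swap" flag */
} XMemOp;

/* Analogue of the patched do_vec_st(): one combined operand in,
 * size recovered by masking where the element size alone is needed. */
static void store_element(XMemOp mop)
{
    unsigned bytes = 1u << (mop & XMO_SIZE);
    printf("store %u byte(s), %s-endian\n",
           bytes, (mop & XMO_BE) ? "big" : "little");
}

int main(void)
{
    XMemOp be_data = XMO_BE;  /* stands in for s->be_data */
    int size = XMO_32;        /* stands in for the decoded element size */

    /* The caller composes size | endianness once, as the patched
     * disas_ldst_multiple_struct() now does. */
    store_element(size | be_data);
    return 0;
}

Built with a plain C compiler, this sketch prints "store 4 byte(s), big-endian", which is exactly the information the single MemOp argument carries into the load/store helpers.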