[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[PULL 25/47] tcg: Allocate TCGTemp pairs in host memory order
From: Richard Henderson
Subject: [PULL 25/47] tcg: Allocate TCGTemp pairs in host memory order
Date: Thu, 29 Dec 2022 16:01:59 -0800
Allocate the first of a pair at the lower address, and the
second of a pair at the higher address. This will make it
easier to find the beginning of the larger memory block.
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/tcg-internal.h | 4 ++--
tcg/tcg.c | 58 ++++++++++++++++++++++------------------------
2 files changed, 30 insertions(+), 32 deletions(-)
diff --git a/tcg/tcg-internal.h b/tcg/tcg-internal.h
index a9ea27f67a..2c06b5116a 100644
--- a/tcg/tcg-internal.h
+++ b/tcg/tcg-internal.h
@@ -62,11 +62,11 @@ static inline unsigned tcg_call_flags(TCGOp *op)
#if TCG_TARGET_REG_BITS == 32
static inline TCGv_i32 TCGV_LOW(TCGv_i64 t)
{
- return temp_tcgv_i32(tcgv_i64_temp(t));
+ return temp_tcgv_i32(tcgv_i64_temp(t) + HOST_BIG_ENDIAN);
}
static inline TCGv_i32 TCGV_HIGH(TCGv_i64 t)
{
- return temp_tcgv_i32(tcgv_i64_temp(t) + 1);
+ return temp_tcgv_i32(tcgv_i64_temp(t) + !HOST_BIG_ENDIAN);
}
#else
extern TCGv_i32 TCGV_LOW(TCGv_i64) QEMU_ERROR("32-bit code path is reachable");
diff --git a/tcg/tcg.c b/tcg/tcg.c
index 28ab174f1b..c830b3756d 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -887,10 +887,7 @@ TCGTemp *tcg_global_mem_new_internal(TCGType type, TCGv_ptr base,
TCGContext *s = tcg_ctx;
TCGTemp *base_ts = tcgv_ptr_temp(base);
TCGTemp *ts = tcg_global_alloc(s);
- int indirect_reg = 0, bigendian = 0;
-#if HOST_BIG_ENDIAN
- bigendian = 1;
-#endif
+ int indirect_reg = 0;
switch (base_ts->kind) {
case TEMP_FIXED:
@@ -916,7 +913,7 @@ TCGTemp *tcg_global_mem_new_internal(TCGType type, TCGv_ptr base,
ts->indirect_reg = indirect_reg;
ts->mem_allocated = 1;
ts->mem_base = base_ts;
- ts->mem_offset = offset + bigendian * 4;
+ ts->mem_offset = offset;
pstrcpy(buf, sizeof(buf), name);
pstrcat(buf, sizeof(buf), "_0");
ts->name = strdup(buf);
@@ -927,7 +924,7 @@ TCGTemp *tcg_global_mem_new_internal(TCGType type, TCGv_ptr base,
ts2->indirect_reg = indirect_reg;
ts2->mem_allocated = 1;
ts2->mem_base = base_ts;
- ts2->mem_offset = offset + (1 - bigendian) * 4;
+ ts2->mem_offset = offset + 4;
ts2->temp_subindex = 1;
pstrcpy(buf, sizeof(buf), name);
pstrcat(buf, sizeof(buf), "_1");
@@ -1073,37 +1070,43 @@ TCGTemp *tcg_constant_internal(TCGType type, int64_t val)
ts = g_hash_table_lookup(h, &val);
if (ts == NULL) {
+ int64_t *val_ptr;
+
ts = tcg_temp_alloc(s);
if (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64) {
TCGTemp *ts2 = tcg_temp_alloc(s);
+ tcg_debug_assert(ts2 == ts + 1);
+
ts->base_type = TCG_TYPE_I64;
ts->type = TCG_TYPE_I32;
ts->kind = TEMP_CONST;
ts->temp_allocated = 1;
- /*
- * Retain the full value of the 64-bit constant in the low
- * part, so that the hash table works. Actual uses will
- * truncate the value to the low part.
- */
- ts->val = val;
- tcg_debug_assert(ts2 == ts + 1);
ts2->base_type = TCG_TYPE_I64;
ts2->type = TCG_TYPE_I32;
ts2->kind = TEMP_CONST;
ts2->temp_allocated = 1;
ts2->temp_subindex = 1;
- ts2->val = val >> 32;
+
+ /*
+ * Retain the full value of the 64-bit constant in the low
+ * part, so that the hash table works. Actual uses will
+ * truncate the value to the low part.
+ */
+ ts[HOST_BIG_ENDIAN].val = val;
+ ts[!HOST_BIG_ENDIAN].val = val >> 32;
+ val_ptr = &ts[HOST_BIG_ENDIAN].val;
} else {
ts->base_type = type;
ts->type = type;
ts->kind = TEMP_CONST;
ts->temp_allocated = 1;
ts->val = val;
+ val_ptr = &ts->val;
}
- g_hash_table_insert(h, &ts->val, ts);
+ g_hash_table_insert(h, val_ptr, ts);
}
return ts;
@@ -1515,13 +1518,8 @@ void tcg_gen_callN(void *func, TCGTemp *ret, int nargs, TCGTemp **args)
pi = 0;
if (ret != NULL) {
if (TCG_TARGET_REG_BITS < 64 && (typemask & 6) == dh_typecode_i64) {
-#if HOST_BIG_ENDIAN
- op->args[pi++] = temp_arg(ret + 1);
- op->args[pi++] = temp_arg(ret);
-#else
op->args[pi++] = temp_arg(ret);
op->args[pi++] = temp_arg(ret + 1);
-#endif
nb_rets = 2;
} else {
op->args[pi++] = temp_arg(ret);
@@ -1555,8 +1553,8 @@ void tcg_gen_callN(void *func, TCGTemp *ret, int nargs, TCGTemp **args)
}
if (TCG_TARGET_REG_BITS < 64 && is_64bit) {
- op->args[pi++] = temp_arg(args[i] + HOST_BIG_ENDIAN);
- op->args[pi++] = temp_arg(args[i] + !HOST_BIG_ENDIAN);
+ op->args[pi++] = temp_arg(args[i]);
+ op->args[pi++] = temp_arg(args[i] + 1);
real_args += 2;
continue;
}
@@ -4077,14 +4075,14 @@ static bool tcg_reg_alloc_dup2(TCGContext *s, const TCGOp *op)
}
/* If the two inputs form one 64-bit value, try dupm_vec. */
- if (itsl + 1 == itsh && itsl->base_type == TCG_TYPE_I64) {
- temp_sync(s, itsl, s->reserved_regs, 0, 0);
- temp_sync(s, itsh, s->reserved_regs, 0, 0);
-#if HOST_BIG_ENDIAN
- TCGTemp *its = itsh;
-#else
- TCGTemp *its = itsl;
-#endif
+ if (itsl->temp_subindex == HOST_BIG_ENDIAN &&
+ itsh->temp_subindex == !HOST_BIG_ENDIAN &&
+ itsl == itsh + (HOST_BIG_ENDIAN ? 1 : -1)) {
+ TCGTemp *its = itsl - HOST_BIG_ENDIAN;
+
+ temp_sync(s, its + 0, s->reserved_regs, 0, 0);
+ temp_sync(s, its + 1, s->reserved_regs, 0, 0);
+
if (tcg_out_dupm_vec(s, vtype, MO_64, ots->reg,
its->mem_base->reg, its->mem_offset)) {
goto done;
--
2.34.1
- [PULL 19/47] tcg: Introduce paired register allocation, (continued)
- [PULL 19/47] tcg: Introduce paired register allocation, Richard Henderson, 2022/12/29
- [PULL 21/47] target/sparc: Avoid TCGV_{LOW,HIGH}, Richard Henderson, 2022/12/29
- [PULL 22/47] tcg: Move TCG_{LOW,HIGH} to tcg-internal.h, Richard Henderson, 2022/12/29
- [PULL 36/47] tcg: Vary the allocation size for TCGOp, Richard Henderson, 2022/12/29
- [PULL 26/47] tcg: Move TCG_TYPE_COUNT outside enum, Richard Henderson, 2022/12/29
- [PULL 33/47] accel/tcg/plugin: Avoid duplicate copy in copy_call, Richard Henderson, 2022/12/29
- [PULL 34/47] accel/tcg/plugin: Use copy_op in append_{udata,mem}_cb, Richard Henderson, 2022/12/29
- [PULL 23/47] tcg: Add temp_subindex to TCGTemp, Richard Henderson, 2022/12/29
- [PULL 35/47] tcg: Pass number of arguments to tcg_emit_op() / tcg_op_insert_*(), Richard Henderson, 2022/12/29
- [PULL 37/47] tcg: Use output_pref wrapper function, Richard Henderson, 2022/12/29
- [PULL 25/47] tcg: Allocate TCGTemp pairs in host memory order, Richard Henderson <=
- [PULL 27/47] tcg: Introduce tcg_type_size, Richard Henderson, 2022/12/29
- [PULL 31/47] tcg: Use TCG_CALL_ARG_EVEN for TCI special case, Richard Henderson, 2022/12/29
- [PULL 29/47] tcg: Replace TCG_TARGET_CALL_ALIGN_ARGS with TCG_TARGET_CALL_ARG_I64, Richard Henderson, 2022/12/29
- [PULL 39/47] tcg: Convert typecode_to_ffi from array to function, Richard Henderson, 2022/12/29
- [PULL 28/47] tcg: Introduce TCGCallReturnKind and TCGCallArgumentKind, Richard Henderson, 2022/12/29
- [PULL 32/47] accel/tcg/plugin: Don't search for the function pointer index, Richard Henderson, 2022/12/29
- [PULL 24/47] tcg: Simplify calls to temp_sync vs mem_coherent, Richard Henderson, 2022/12/29
- [PULL 30/47] tcg: Replace TCG_TARGET_EXTEND_ARGS with TCG_TARGET_CALL_ARG_I32, Richard Henderson, 2022/12/29
- [PULL 38/47] tcg: Reorg function calls, Richard Henderson, 2022/12/29
- [PULL 42/47] tcg/aarch64: Merge tcg_out_callr into tcg_out_call, Richard Henderson, 2022/12/29