[Qemu-devel] [PATCH 03/14] Rename spe_status to vec_status
From: Nathan Froyd
Subject: [Qemu-devel] [PATCH 03/14] Rename spe_status to vec_status
Date: Thu, 22 Jan 2009 12:44:03 -0800
Only one of Altivec and SPE will be available on a given chip.
Signed-off-by: Nathan Froyd <address@hidden>
---
target-ppc/cpu.h | 4 +-
target-ppc/op_helper.c | 112 ++++++++++++++++++++++++------------------------
2 files changed, 59 insertions(+), 57 deletions(-)
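[Note for reviewers: a minimal sketch, not part of the patch, of what the shared
field enables. The helper names below are invented for illustration, env is passed
explicitly (in op_helper.c it is the implicit global CPU state pointer), and the
Altivec example assumes the ppc_avr_t f field added in patch 01/14 of this series.

/* Illustrative sketch only -- not part of this patch. */
static uint32_t sketch_spe_efsadd(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;
    u1.l = op1;
    u2.l = op2;
    /* SPE single-precision add goes through the shared softfloat status */
    u1.f = float32_add(u1.f, u2.f, &env->vec_status);
    return u1.l;
}

/* Hypothetical Altivec helper: element-wise add over the four float lanes,
 * reusing the same float_status.  There is no conflict, because a given CPU
 * implements at most one of SPE and Altivec. */
static void sketch_altivec_vaddfp(CPUPPCState *env, ppc_avr_t *r,
                                  ppc_avr_t *a, ppc_avr_t *b)
{
    int i;
    for (i = 0; i < 4; i++) {
        r->f[i] = float32_add(a->f[i], b->f[i], &env->vec_status);
    }
}

Since the two units are mutually exclusive on real silicon, a single shared
float_status saves space in CPUPPCState and keeps the softfloat rounding and
exception state in one place.]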
diff --git a/target-ppc/cpu.h b/target-ppc/cpu.h
index dafe7f3..006f58d 100644
--- a/target-ppc/cpu.h
+++ b/target-ppc/cpu.h
@@ -611,8 +611,10 @@ struct CPUPPCState {
uint32_t vscr;
/* SPE registers */
uint64_t spe_acc;
- float_status spe_status;
uint32_t spe_fscr;
+ /* SPE and Altivec can share a status since they will never be used
+ * simultaneously */
+ float_status vec_status;
/* Internal devices resources */
/* Time base and decrementer */
diff --git a/target-ppc/op_helper.c b/target-ppc/op_helper.c
index 9820040..3086bfd 100644
--- a/target-ppc/op_helper.c
+++ b/target-ppc/op_helper.c
@@ -2882,7 +2882,7 @@ static always_inline uint32_t efscfsi (uint32_t val)
{
CPU_FloatU u;
- u.f = int32_to_float32(val, &env->spe_status);
+ u.f = int32_to_float32(val, &env->vec_status);
return u.l;
}
@@ -2891,7 +2891,7 @@ static always_inline uint32_t efscfui (uint32_t val)
{
CPU_FloatU u;
- u.f = uint32_to_float32(val, &env->spe_status);
+ u.f = uint32_to_float32(val, &env->vec_status);
return u.l;
}
@@ -2905,7 +2905,7 @@ static always_inline int32_t efsctsi (uint32_t val)
if (unlikely(float32_is_nan(u.f)))
return 0;
- return float32_to_int32(u.f, &env->spe_status);
+ return float32_to_int32(u.f, &env->vec_status);
}
static always_inline uint32_t efsctui (uint32_t val)
@@ -2917,7 +2917,7 @@ static always_inline uint32_t efsctui (uint32_t val)
if (unlikely(float32_is_nan(u.f)))
return 0;
- return float32_to_uint32(u.f, &env->spe_status);
+ return float32_to_uint32(u.f, &env->vec_status);
}
static always_inline uint32_t efsctsiz (uint32_t val)
@@ -2929,7 +2929,7 @@ static always_inline uint32_t efsctsiz (uint32_t val)
if (unlikely(float32_is_nan(u.f)))
return 0;
- return float32_to_int32_round_to_zero(u.f, &env->spe_status);
+ return float32_to_int32_round_to_zero(u.f, &env->vec_status);
}
static always_inline uint32_t efsctuiz (uint32_t val)
@@ -2941,7 +2941,7 @@ static always_inline uint32_t efsctuiz (uint32_t val)
if (unlikely(float32_is_nan(u.f)))
return 0;
- return float32_to_uint32_round_to_zero(u.f, &env->spe_status);
+ return float32_to_uint32_round_to_zero(u.f, &env->vec_status);
}
static always_inline uint32_t efscfsf (uint32_t val)
@@ -2949,9 +2949,9 @@ static always_inline uint32_t efscfsf (uint32_t val)
CPU_FloatU u;
float32 tmp;
- u.f = int32_to_float32(val, &env->spe_status);
- tmp = int64_to_float32(1ULL << 32, &env->spe_status);
- u.f = float32_div(u.f, tmp, &env->spe_status);
+ u.f = int32_to_float32(val, &env->vec_status);
+ tmp = int64_to_float32(1ULL << 32, &env->vec_status);
+ u.f = float32_div(u.f, tmp, &env->vec_status);
return u.l;
}
@@ -2961,9 +2961,9 @@ static always_inline uint32_t efscfuf (uint32_t val)
CPU_FloatU u;
float32 tmp;
- u.f = uint32_to_float32(val, &env->spe_status);
- tmp = uint64_to_float32(1ULL << 32, &env->spe_status);
- u.f = float32_div(u.f, tmp, &env->spe_status);
+ u.f = uint32_to_float32(val, &env->vec_status);
+ tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
+ u.f = float32_div(u.f, tmp, &env->vec_status);
return u.l;
}
@@ -2977,10 +2977,10 @@ static always_inline uint32_t efsctsf (uint32_t val)
/* NaN are not treated the same way IEEE 754 does */
if (unlikely(float32_is_nan(u.f)))
return 0;
- tmp = uint64_to_float32(1ULL << 32, &env->spe_status);
- u.f = float32_mul(u.f, tmp, &env->spe_status);
+ tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
+ u.f = float32_mul(u.f, tmp, &env->vec_status);
- return float32_to_int32(u.f, &env->spe_status);
+ return float32_to_int32(u.f, &env->vec_status);
}
static always_inline uint32_t efsctuf (uint32_t val)
@@ -2992,10 +2992,10 @@ static always_inline uint32_t efsctuf (uint32_t val)
/* NaN are not treated the same way IEEE 754 does */
if (unlikely(float32_is_nan(u.f)))
return 0;
- tmp = uint64_to_float32(1ULL << 32, &env->spe_status);
- u.f = float32_mul(u.f, tmp, &env->spe_status);
+ tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
+ u.f = float32_mul(u.f, tmp, &env->vec_status);
- return float32_to_uint32(u.f, &env->spe_status);
+ return float32_to_uint32(u.f, &env->vec_status);
}
#define HELPER_SPE_SINGLE_CONV(name) \
@@ -3057,7 +3057,7 @@ static always_inline uint32_t efsadd (uint32_t op1, uint32_t op2)
CPU_FloatU u1, u2;
u1.l = op1;
u2.l = op2;
- u1.f = float32_add(u1.f, u2.f, &env->spe_status);
+ u1.f = float32_add(u1.f, u2.f, &env->vec_status);
return u1.l;
}
@@ -3066,7 +3066,7 @@ static always_inline uint32_t efssub (uint32_t op1, uint32_t op2)
CPU_FloatU u1, u2;
u1.l = op1;
u2.l = op2;
- u1.f = float32_sub(u1.f, u2.f, &env->spe_status);
+ u1.f = float32_sub(u1.f, u2.f, &env->vec_status);
return u1.l;
}
@@ -3075,7 +3075,7 @@ static always_inline uint32_t efsmul (uint32_t op1, uint32_t op2)
CPU_FloatU u1, u2;
u1.l = op1;
u2.l = op2;
- u1.f = float32_mul(u1.f, u2.f, &env->spe_status);
+ u1.f = float32_mul(u1.f, u2.f, &env->vec_status);
return u1.l;
}
@@ -3084,7 +3084,7 @@ static always_inline uint32_t efsdiv (uint32_t op1, uint32_t op2)
CPU_FloatU u1, u2;
u1.l = op1;
u2.l = op2;
- u1.f = float32_div(u1.f, u2.f, &env->spe_status);
+ u1.f = float32_div(u1.f, u2.f, &env->vec_status);
return u1.l;
}
@@ -3123,7 +3123,7 @@ static always_inline uint32_t efststlt (uint32_t op1, uint32_t op2)
CPU_FloatU u1, u2;
u1.l = op1;
u2.l = op2;
- return float32_lt(u1.f, u2.f, &env->spe_status) ? 4 : 0;
+ return float32_lt(u1.f, u2.f, &env->vec_status) ? 4 : 0;
}
static always_inline uint32_t efststgt (uint32_t op1, uint32_t op2)
@@ -3131,7 +3131,7 @@ static always_inline uint32_t efststgt (uint32_t op1, uint32_t op2)
CPU_FloatU u1, u2;
u1.l = op1;
u2.l = op2;
- return float32_le(u1.f, u2.f, &env->spe_status) ? 0 : 4;
+ return float32_le(u1.f, u2.f, &env->vec_status) ? 0 : 4;
}
static always_inline uint32_t efststeq (uint32_t op1, uint32_t op2)
@@ -3139,7 +3139,7 @@ static always_inline uint32_t efststeq (uint32_t op1, uint32_t op2)
CPU_FloatU u1, u2;
u1.l = op1;
u2.l = op2;
- return float32_eq(u1.f, u2.f, &env->spe_status) ? 4 : 0;
+ return float32_eq(u1.f, u2.f, &env->vec_status) ? 4 : 0;
}
static always_inline uint32_t efscmplt (uint32_t op1, uint32_t op2)
@@ -3206,7 +3206,7 @@ uint64_t helper_efdcfsi (uint32_t val)
{
CPU_DoubleU u;
- u.d = int32_to_float64(val, &env->spe_status);
+ u.d = int32_to_float64(val, &env->vec_status);
return u.ll;
}
@@ -3215,7 +3215,7 @@ uint64_t helper_efdcfsid (uint64_t val)
{
CPU_DoubleU u;
- u.d = int64_to_float64(val, &env->spe_status);
+ u.d = int64_to_float64(val, &env->vec_status);
return u.ll;
}
@@ -3224,7 +3224,7 @@ uint64_t helper_efdcfui (uint32_t val)
{
CPU_DoubleU u;
- u.d = uint32_to_float64(val, &env->spe_status);
+ u.d = uint32_to_float64(val, &env->vec_status);
return u.ll;
}
@@ -3233,7 +3233,7 @@ uint64_t helper_efdcfuid (uint64_t val)
{
CPU_DoubleU u;
- u.d = uint64_to_float64(val, &env->spe_status);
+ u.d = uint64_to_float64(val, &env->vec_status);
return u.ll;
}
@@ -3247,7 +3247,7 @@ uint32_t helper_efdctsi (uint64_t val)
if (unlikely(float64_is_nan(u.d)))
return 0;
- return float64_to_int32(u.d, &env->spe_status);
+ return float64_to_int32(u.d, &env->vec_status);
}
uint32_t helper_efdctui (uint64_t val)
@@ -3259,7 +3259,7 @@ uint32_t helper_efdctui (uint64_t val)
if (unlikely(float64_is_nan(u.d)))
return 0;
- return float64_to_uint32(u.d, &env->spe_status);
+ return float64_to_uint32(u.d, &env->vec_status);
}
uint32_t helper_efdctsiz (uint64_t val)
@@ -3271,7 +3271,7 @@ uint32_t helper_efdctsiz (uint64_t val)
if (unlikely(float64_is_nan(u.d)))
return 0;
- return float64_to_int32_round_to_zero(u.d, &env->spe_status);
+ return float64_to_int32_round_to_zero(u.d, &env->vec_status);
}
uint64_t helper_efdctsidz (uint64_t val)
@@ -3283,7 +3283,7 @@ uint64_t helper_efdctsidz (uint64_t val)
if (unlikely(float64_is_nan(u.d)))
return 0;
- return float64_to_int64_round_to_zero(u.d, &env->spe_status);
+ return float64_to_int64_round_to_zero(u.d, &env->vec_status);
}
uint32_t helper_efdctuiz (uint64_t val)
@@ -3295,7 +3295,7 @@ uint32_t helper_efdctuiz (uint64_t val)
if (unlikely(float64_is_nan(u.d)))
return 0;
- return float64_to_uint32_round_to_zero(u.d, &env->spe_status);
+ return float64_to_uint32_round_to_zero(u.d, &env->vec_status);
}
uint64_t helper_efdctuidz (uint64_t val)
@@ -3307,7 +3307,7 @@ uint64_t helper_efdctuidz (uint64_t val)
if (unlikely(float64_is_nan(u.d)))
return 0;
- return float64_to_uint64_round_to_zero(u.d, &env->spe_status);
+ return float64_to_uint64_round_to_zero(u.d, &env->vec_status);
}
uint64_t helper_efdcfsf (uint32_t val)
@@ -3315,9 +3315,9 @@ uint64_t helper_efdcfsf (uint32_t val)
CPU_DoubleU u;
float64 tmp;
- u.d = int32_to_float64(val, &env->spe_status);
- tmp = int64_to_float64(1ULL << 32, &env->spe_status);
- u.d = float64_div(u.d, tmp, &env->spe_status);
+ u.d = int32_to_float64(val, &env->vec_status);
+ tmp = int64_to_float64(1ULL << 32, &env->vec_status);
+ u.d = float64_div(u.d, tmp, &env->vec_status);
return u.ll;
}
@@ -3327,9 +3327,9 @@ uint64_t helper_efdcfuf (uint32_t val)
CPU_DoubleU u;
float64 tmp;
- u.d = uint32_to_float64(val, &env->spe_status);
- tmp = int64_to_float64(1ULL << 32, &env->spe_status);
- u.d = float64_div(u.d, tmp, &env->spe_status);
+ u.d = uint32_to_float64(val, &env->vec_status);
+ tmp = int64_to_float64(1ULL << 32, &env->vec_status);
+ u.d = float64_div(u.d, tmp, &env->vec_status);
return u.ll;
}
@@ -3343,10 +3343,10 @@ uint32_t helper_efdctsf (uint64_t val)
/* NaN are not treated the same way IEEE 754 does */
if (unlikely(float64_is_nan(u.d)))
return 0;
- tmp = uint64_to_float64(1ULL << 32, &env->spe_status);
- u.d = float64_mul(u.d, tmp, &env->spe_status);
+ tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
+ u.d = float64_mul(u.d, tmp, &env->vec_status);
- return float64_to_int32(u.d, &env->spe_status);
+ return float64_to_int32(u.d, &env->vec_status);
}
uint32_t helper_efdctuf (uint64_t val)
@@ -3358,10 +3358,10 @@ uint32_t helper_efdctuf (uint64_t val)
/* NaN are not treated the same way IEEE 754 does */
if (unlikely(float64_is_nan(u.d)))
return 0;
- tmp = uint64_to_float64(1ULL << 32, &env->spe_status);
- u.d = float64_mul(u.d, tmp, &env->spe_status);
+ tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
+ u.d = float64_mul(u.d, tmp, &env->vec_status);
- return float64_to_uint32(u.d, &env->spe_status);
+ return float64_to_uint32(u.d, &env->vec_status);
}
uint32_t helper_efscfd (uint64_t val)
@@ -3370,7 +3370,7 @@ uint32_t helper_efscfd (uint64_t val)
CPU_FloatU u2;
u1.ll = val;
- u2.f = float64_to_float32(u1.d, &env->spe_status);
+ u2.f = float64_to_float32(u1.d, &env->vec_status);
return u2.l;
}
@@ -3381,7 +3381,7 @@ uint64_t helper_efdcfs (uint32_t val)
CPU_FloatU u1;
u1.l = val;
- u2.d = float32_to_float64(u1.f, &env->spe_status);
+ u2.d = float32_to_float64(u1.f, &env->vec_status);
return u2.ll;
}
@@ -3392,7 +3392,7 @@ uint64_t helper_efdadd (uint64_t op1, uint64_t op2)
CPU_DoubleU u1, u2;
u1.ll = op1;
u2.ll = op2;
- u1.d = float64_add(u1.d, u2.d, &env->spe_status);
+ u1.d = float64_add(u1.d, u2.d, &env->vec_status);
return u1.ll;
}
@@ -3401,7 +3401,7 @@ uint64_t helper_efdsub (uint64_t op1, uint64_t op2)
CPU_DoubleU u1, u2;
u1.ll = op1;
u2.ll = op2;
- u1.d = float64_sub(u1.d, u2.d, &env->spe_status);
+ u1.d = float64_sub(u1.d, u2.d, &env->vec_status);
return u1.ll;
}
@@ -3410,7 +3410,7 @@ uint64_t helper_efdmul (uint64_t op1, uint64_t op2)
CPU_DoubleU u1, u2;
u1.ll = op1;
u2.ll = op2;
- u1.d = float64_mul(u1.d, u2.d, &env->spe_status);
+ u1.d = float64_mul(u1.d, u2.d, &env->vec_status);
return u1.ll;
}
@@ -3419,7 +3419,7 @@ uint64_t helper_efddiv (uint64_t op1, uint64_t op2)
CPU_DoubleU u1, u2;
u1.ll = op1;
u2.ll = op2;
- u1.d = float64_div(u1.d, u2.d, &env->spe_status);
+ u1.d = float64_div(u1.d, u2.d, &env->vec_status);
return u1.ll;
}
@@ -3429,7 +3429,7 @@ uint32_t helper_efdtstlt (uint64_t op1, uint64_t op2)
CPU_DoubleU u1, u2;
u1.ll = op1;
u2.ll = op2;
- return float64_lt(u1.d, u2.d, &env->spe_status) ? 4 : 0;
+ return float64_lt(u1.d, u2.d, &env->vec_status) ? 4 : 0;
}
uint32_t helper_efdtstgt (uint64_t op1, uint64_t op2)
@@ -3437,7 +3437,7 @@ uint32_t helper_efdtstgt (uint64_t op1, uint64_t op2)
CPU_DoubleU u1, u2;
u1.ll = op1;
u2.ll = op2;
- return float64_le(u1.d, u2.d, &env->spe_status) ? 0 : 4;
+ return float64_le(u1.d, u2.d, &env->vec_status) ? 0 : 4;
}
uint32_t helper_efdtsteq (uint64_t op1, uint64_t op2)
@@ -3445,7 +3445,7 @@ uint32_t helper_efdtsteq (uint64_t op1, uint64_t op2)
CPU_DoubleU u1, u2;
u1.ll = op1;
u2.ll = op2;
- return float64_eq(u1.d, u2.d, &env->spe_status) ? 4 : 0;
+ return float64_eq(u1.d, u2.d, &env->vec_status) ? 4 : 0;
}
uint32_t helper_efdcmplt (uint64_t op1, uint64_t op2)
--
1.6.0.5