From: Tom Musta
Subject: [Qemu-devel] [V4 PATCH (resend) 2/6] target-ppc: Allow little-endian user mode.
Date: Thu, 29 May 2014 09:12:20 -0500

From: Doug Kwan <address@hidden>

This allows running PPC64 little-endian in user mode if the target is
configured that way.  In PPC64 LE user mode we set MSR.LE during
initialization.

Signed-off-by: Doug Kwan <address@hidden>
Signed-off-by: Tom Musta <address@hidden>

---
V2: Overhaul handling of byteswapping in code generation and mem helpers.

V3: Eliminated the MSR[LE] check in user mode per Alex Graf's review.
Addressed checkpatch violations.

V4: Fixed endianness problem in user-mode emulation of stqcx (spotted by Peter
Maydell)
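
For reference, the rule applied throughout the patch: an access needs a
byteswap exactly when the mode the guest is executing in (MSR[LE]) disagrees
with the endianness the target was configured for (TARGET_WORDS_BIGENDIAN).
A minimal standalone sketch of that predicate, mirroring the needs_byteswap()
helpers introduced below (swap_needed is an illustrative name, not from the
patch):

    #include <stdbool.h>

    static inline bool swap_needed(bool guest_is_le)
    {
    #if defined(TARGET_WORDS_BIGENDIAN)
        return guest_is_le;      /* BE-configured target, guest in LE mode */
    #else
        return !guest_is_le;     /* LE-configured target, guest in BE mode */
    #endif
    }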

 linux-user/main.c           |    8 ++-
 target-ppc/mem_helper.c     |   26 ++++++-
 target-ppc/translate.c      |  151 +++++++++++++++++--------------------------
 target-ppc/translate_init.c |    3 +
 4 files changed, 92 insertions(+), 96 deletions(-)

diff --git a/linux-user/main.c b/linux-user/main.c
index 882186e..c5668af 100644
--- a/linux-user/main.c
+++ b/linux-user/main.c
@@ -1484,7 +1484,7 @@ static int do_store_exclusive(CPUPPCState *env)
 {
     target_ulong addr;
     target_ulong page_addr;
-    target_ulong val, val2 __attribute__((unused));
+    target_ulong val, val2 __attribute__((unused)) = 0;
     int flags;
     int segv = 0;
 
@@ -1527,6 +1527,12 @@ static int do_store_exclusive(CPUPPCState *env)
                 case 8: segv = put_user_u64(val, addr); break;
                 case 16: {
                     if (val2 == env->reserve_val2) {
+                        if (msr_le) {
+                            val2 = val;
+                            val = env->gpr[reg+1];
+                        } else {
+                            val2 = env->gpr[reg+1];
+                        }
                         segv = put_user_u64(val, addr);
                         if (!segv) {
                             segv = put_user_u64(val2, addr + 8);
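
The V4 fix above deserves a note: for the 16-byte stqcx. store, the two
doublewords of the register pair trade places in little-endian mode, so
gpr[reg+1] lands at the lower address.  A hedged sketch of just that
ordering (store_quad_pair and mem[] are illustrative, not QEMU code):

    #include <stdbool.h>
    #include <stdint.h>

    /* How the pair (r, r+1) maps onto the two doublewords at
       addr (mem[0]) and addr + 8 (mem[1]), by guest endianness. */
    static void store_quad_pair(const uint64_t *gpr, int r,
                                uint64_t mem[2], bool le)
    {
        if (le) {
            mem[0] = gpr[r + 1];   /* LE: second register at lower address */
            mem[1] = gpr[r];
        } else {
            mem[0] = gpr[r];       /* BE: first register at lower address */
            mem[1] = gpr[r + 1];
        }
    }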
diff --git a/target-ppc/mem_helper.c b/target-ppc/mem_helper.c
index d9c8c36..5275f90 100644
--- a/target-ppc/mem_helper.c
+++ b/target-ppc/mem_helper.c
@@ -28,6 +28,15 @@
 
 //#define DEBUG_OP
 
+static inline bool needs_byteswap(const CPUPPCState *env)
+{
+#if defined(TARGET_WORDS_BIGENDIAN)
+  return msr_le;
+#else
+  return !msr_le;
+#endif
+}
+
 /*****************************************************************************/
 /* Memory load and stores */
 
@@ -47,7 +56,7 @@ static inline target_ulong addr_add(CPUPPCState *env, target_ulong addr,
 void helper_lmw(CPUPPCState *env, target_ulong addr, uint32_t reg)
 {
     for (; reg < 32; reg++) {
-        if (msr_le) {
+        if (needs_byteswap(env)) {
             env->gpr[reg] = bswap32(cpu_ldl_data(env, addr));
         } else {
             env->gpr[reg] = cpu_ldl_data(env, addr);
@@ -59,7 +68,7 @@ void helper_lmw(CPUPPCState *env, target_ulong addr, uint32_t reg)
 void helper_stmw(CPUPPCState *env, target_ulong addr, uint32_t reg)
 {
     for (; reg < 32; reg++) {
-        if (msr_le) {
+        if (needs_byteswap(env)) {
             cpu_stl_data(env, addr, bswap32((uint32_t)env->gpr[reg]));
         } else {
             cpu_stl_data(env, addr, (uint32_t)env->gpr[reg]);
@@ -202,6 +211,11 @@ target_ulong helper_lscbx(CPUPPCState *env, target_ulong addr, uint32_t reg,
 #define LO_IDX 0
 #endif
 
+/* We use msr_le to determine index ordering in a vector.  However,
+   byteswapping is not simply controlled by msr_le.  We also need to take
+   the endianness of the target into account.  This is done for the
+   little-endian PPC64 user-mode target. */
+
 #define LVE(name, access, swap, element)                        \
     void helper_##name(CPUPPCState *env, ppc_avr_t *r,          \
                        target_ulong addr)                       \
@@ -210,9 +224,11 @@ target_ulong helper_lscbx(CPUPPCState *env, target_ulong addr, uint32_t reg,
         int adjust = HI_IDX*(n_elems - 1);                      \
         int sh = sizeof(r->element[0]) >> 1;                    \
         int index = (addr & 0xf) >> sh;                         \
-                                                                \
         if (msr_le) {                                           \
             index = n_elems - index - 1;                        \
+        }                                                       \
+                                                                \
+        if (needs_byteswap(env)) {                              \
             r->element[LO_IDX ? index : (adjust - index)] =     \
                 swap(access(env, addr));                        \
         } else {                                                \
@@ -235,9 +251,11 @@ LVE(lvewx, cpu_ldl_data, bswap32, u32)
         int adjust = HI_IDX * (n_elems - 1);                            \
         int sh = sizeof(r->element[0]) >> 1;                            \
         int index = (addr & 0xf) >> sh;                                 \
-                                                                        \
         if (msr_le) {                                                   \
             index = n_elems - index - 1;                                \
+        }                                                               \
+                                                                        \
+        if (needs_byteswap(env)) {                                      \
             access(env, addr, swap(r->element[LO_IDX ? index :          \
                                               (adjust - index)]));      \
         } else {                                                        \
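
Note how the LVE/STVE macros now separate two concerns: MSR[LE] alone
decides which element of the 16-byte vector an address selects (the index
is mirrored), while needs_byteswap() decides whether that element's bytes
are swapped.  A standalone sketch of the index step (vector_elem_index is
an illustrative helper, not in the patch):

    /* Index of the element of size elem_size (1, 2 or 4) selected by
       addr within a 16-byte vector; LE mode mirrors the index. */
    static int vector_elem_index(unsigned long addr, int elem_size,
                                 int msr_le)
    {
        int n_elems = 16 / elem_size;
        int index = (int)(addr & 0xf) / elem_size;

        return msr_le ? n_elems - index - 1 : index;
    }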
diff --git a/target-ppc/translate.c b/target-ppc/translate.c
index 6283b2c..87ddf87 100644
--- a/target-ppc/translate.c
+++ b/target-ppc/translate.c
@@ -195,6 +195,7 @@ typedef struct DisasContext {
     int access_type;
     /* Translation flags */
     int le_mode;
+    TCGMemOp default_tcg_memop_mask;
 #if defined(TARGET_PPC64)
     int sf_mode;
     int has_cfar;
@@ -209,6 +210,16 @@ typedef struct DisasContext {
     uint64_t insns_flags2;
 } DisasContext;
 
+/* Return true iff byteswap is needed in a scalar memop */
+static inline bool need_byteswap(const DisasContext *ctx)
+{
+#if defined(TARGET_WORDS_BIGENDIAN)
+     return ctx->le_mode;
+#else
+     return !ctx->le_mode;
+#endif
+}
+
 /* True when active word size < size of target_long.  */
 #ifdef TARGET_PPC64
 # define NARROW_MODE(C)  (!(C)->sf_mode)
@@ -2651,29 +2662,20 @@ static inline void gen_qemu_ld8s(DisasContext *ctx, TCGv arg1, TCGv arg2)
 
 static inline void gen_qemu_ld16u(DisasContext *ctx, TCGv arg1, TCGv arg2)
 {
-    tcg_gen_qemu_ld16u(arg1, arg2, ctx->mem_idx);
-    if (unlikely(ctx->le_mode)) {
-        tcg_gen_bswap16_tl(arg1, arg1);
-    }
+    TCGMemOp op = MO_UW | ctx->default_tcg_memop_mask;
+    tcg_gen_qemu_ld_tl(arg1, arg2, ctx->mem_idx, op);
 }
 
 static inline void gen_qemu_ld16s(DisasContext *ctx, TCGv arg1, TCGv arg2)
 {
-    if (unlikely(ctx->le_mode)) {
-        tcg_gen_qemu_ld16u(arg1, arg2, ctx->mem_idx);
-        tcg_gen_bswap16_tl(arg1, arg1);
-        tcg_gen_ext16s_tl(arg1, arg1);
-    } else {
-        tcg_gen_qemu_ld16s(arg1, arg2, ctx->mem_idx);
-    }
+    TCGMemOp op = MO_SW | ctx->default_tcg_memop_mask;
+    tcg_gen_qemu_ld_tl(arg1, arg2, ctx->mem_idx, op);
 }
 
 static inline void gen_qemu_ld32u(DisasContext *ctx, TCGv arg1, TCGv arg2)
 {
-    tcg_gen_qemu_ld32u(arg1, arg2, ctx->mem_idx);
-    if (unlikely(ctx->le_mode)) {
-        tcg_gen_bswap32_tl(arg1, arg1);
-    }
+    TCGMemOp op = MO_UL | ctx->default_tcg_memop_mask;
+    tcg_gen_qemu_ld_tl(arg1, arg2, ctx->mem_idx, op);
 }
 
 static void gen_qemu_ld32u_i64(DisasContext *ctx, TCGv_i64 val, TCGv addr)
@@ -2686,12 +2688,8 @@ static void gen_qemu_ld32u_i64(DisasContext *ctx, TCGv_i64 val, TCGv addr)
 
 static inline void gen_qemu_ld32s(DisasContext *ctx, TCGv arg1, TCGv arg2)
 {
-    if (unlikely(ctx->le_mode)) {
-        tcg_gen_qemu_ld32u(arg1, arg2, ctx->mem_idx);
-        tcg_gen_bswap32_tl(arg1, arg1);
-        tcg_gen_ext32s_tl(arg1, arg1);
-    } else
-        tcg_gen_qemu_ld32s(arg1, arg2, ctx->mem_idx);
+    TCGMemOp op = MO_SL | ctx->default_tcg_memop_mask;
+    tcg_gen_qemu_ld_tl(arg1, arg2, ctx->mem_idx, op);
 }
 
 static void gen_qemu_ld32s_i64(DisasContext *ctx, TCGv_i64 val, TCGv addr)
@@ -2704,10 +2702,8 @@ static void gen_qemu_ld32s_i64(DisasContext *ctx, TCGv_i64 val, TCGv addr)
 
 static inline void gen_qemu_ld64(DisasContext *ctx, TCGv_i64 arg1, TCGv arg2)
 {
-    tcg_gen_qemu_ld64(arg1, arg2, ctx->mem_idx);
-    if (unlikely(ctx->le_mode)) {
-        tcg_gen_bswap64_i64(arg1, arg1);
-    }
+    TCGMemOp op = MO_Q | ctx->default_tcg_memop_mask;
+    tcg_gen_qemu_ld_i64(arg1, arg2, ctx->mem_idx, op);
 }
 
 static inline void gen_qemu_st8(DisasContext *ctx, TCGv arg1, TCGv arg2)
@@ -2717,28 +2713,14 @@ static inline void gen_qemu_st8(DisasContext *ctx, TCGv arg1, TCGv arg2)
 
 static inline void gen_qemu_st16(DisasContext *ctx, TCGv arg1, TCGv arg2)
 {
-    if (unlikely(ctx->le_mode)) {
-        TCGv t0 = tcg_temp_new();
-        tcg_gen_ext16u_tl(t0, arg1);
-        tcg_gen_bswap16_tl(t0, t0);
-        tcg_gen_qemu_st16(t0, arg2, ctx->mem_idx);
-        tcg_temp_free(t0);
-    } else {
-        tcg_gen_qemu_st16(arg1, arg2, ctx->mem_idx);
-    }
+    TCGMemOp op = MO_UW | ctx->default_tcg_memop_mask;
+    tcg_gen_qemu_st_tl(arg1, arg2, ctx->mem_idx, op);
 }
 
 static inline void gen_qemu_st32(DisasContext *ctx, TCGv arg1, TCGv arg2)
 {
-    if (unlikely(ctx->le_mode)) {
-        TCGv t0 = tcg_temp_new();
-        tcg_gen_ext32u_tl(t0, arg1);
-        tcg_gen_bswap32_tl(t0, t0);
-        tcg_gen_qemu_st32(t0, arg2, ctx->mem_idx);
-        tcg_temp_free(t0);
-    } else {
-        tcg_gen_qemu_st32(arg1, arg2, ctx->mem_idx);
-    }
+    TCGMemOp op = MO_UL | ctx->default_tcg_memop_mask;
+    tcg_gen_qemu_st_tl(arg1, arg2, ctx->mem_idx, op);
 }
 
 static void gen_qemu_st32_i64(DisasContext *ctx, TCGv_i64 val, TCGv addr)
@@ -2751,13 +2733,8 @@ static void gen_qemu_st32_i64(DisasContext *ctx, TCGv_i64 val, TCGv addr)
 
 static inline void gen_qemu_st64(DisasContext *ctx, TCGv_i64 arg1, TCGv arg2)
 {
-    if (unlikely(ctx->le_mode)) {
-        TCGv_i64 t0 = tcg_temp_new_i64();
-        tcg_gen_bswap64_i64(t0, arg1);
-        tcg_gen_qemu_st64(t0, arg2, ctx->mem_idx);
-        tcg_temp_free_i64(t0);
-    } else
-        tcg_gen_qemu_st64(arg1, arg2, ctx->mem_idx);
+    TCGMemOp op = MO_Q | ctx->default_tcg_memop_mask;
+    tcg_gen_qemu_st_i64(arg1, arg2, ctx->mem_idx, op);
 }
 
 #define GEN_LD(name, ldop, opc, type)                                         \
@@ -2901,6 +2878,8 @@ static void gen_lq(DisasContext *ctx)
     EA = tcg_temp_new();
     gen_addr_imm_index(ctx, EA, 0x0F);
 
+    /* We only need to swap high and low halves. gen_qemu_ld64 does the
+       necessary 64-bit byteswap already. */
     if (unlikely(ctx->le_mode)) {
         gen_qemu_ld64(ctx, cpu_gpr[rd+1], EA);
         gen_addr_add(ctx, EA, EA, 8);
@@ -3019,6 +2998,8 @@ static void gen_std(DisasContext *ctx)
         EA = tcg_temp_new();
         gen_addr_imm_index(ctx, EA, 0x03);
 
+        /* We only need to swap high and low halves. gen_qemu_st64 does
+           the necessary 64-bit byteswap already. */
         if (unlikely(ctx->le_mode)) {
             gen_qemu_st64(ctx, cpu_gpr[rs+1], EA);
             gen_addr_add(ctx, EA, EA, 8);
@@ -3048,23 +3029,20 @@ static void gen_std(DisasContext *ctx)
 }
 #endif
 /***                Integer load and store with byte reverse               ***/
+
 /* lhbrx */
 static inline void gen_qemu_ld16ur(DisasContext *ctx, TCGv arg1, TCGv arg2)
 {
-    tcg_gen_qemu_ld16u(arg1, arg2, ctx->mem_idx);
-    if (likely(!ctx->le_mode)) {
-        tcg_gen_bswap16_tl(arg1, arg1);
-    }
+    TCGMemOp op = MO_UW | (ctx->default_tcg_memop_mask ^ MO_BSWAP);
+    tcg_gen_qemu_ld_tl(arg1, arg2, ctx->mem_idx, op);
 }
 GEN_LDX(lhbr, ld16ur, 0x16, 0x18, PPC_INTEGER);
 
 /* lwbrx */
 static inline void gen_qemu_ld32ur(DisasContext *ctx, TCGv arg1, TCGv arg2)
 {
-    tcg_gen_qemu_ld32u(arg1, arg2, ctx->mem_idx);
-    if (likely(!ctx->le_mode)) {
-        tcg_gen_bswap32_tl(arg1, arg1);
-    }
+    TCGMemOp op = MO_UL | (ctx->default_tcg_memop_mask ^ MO_BSWAP);
+    tcg_gen_qemu_ld_tl(arg1, arg2, ctx->mem_idx, op);
 }
 GEN_LDX(lwbr, ld32ur, 0x16, 0x10, PPC_INTEGER);
 
@@ -3072,10 +3050,8 @@ GEN_LDX(lwbr, ld32ur, 0x16, 0x10, PPC_INTEGER);
 /* ldbrx */
 static inline void gen_qemu_ld64ur(DisasContext *ctx, TCGv arg1, TCGv arg2)
 {
-    tcg_gen_qemu_ld64(arg1, arg2, ctx->mem_idx);
-    if (likely(!ctx->le_mode)) {
-        tcg_gen_bswap64_tl(arg1, arg1);
-    }
+    TCGMemOp op = MO_Q | (ctx->default_tcg_memop_mask ^ MO_BSWAP);
+    tcg_gen_qemu_ld_i64(arg1, arg2, ctx->mem_idx, op);
 }
 GEN_LDX_E(ldbr, ld64ur, 0x14, 0x10, PPC_NONE, PPC2_DBRX);
 #endif  /* TARGET_PPC64 */
@@ -3083,30 +3059,16 @@ GEN_LDX_E(ldbr, ld64ur, 0x14, 0x10, PPC_NONE, PPC2_DBRX);
 /* sthbrx */
 static inline void gen_qemu_st16r(DisasContext *ctx, TCGv arg1, TCGv arg2)
 {
-    if (likely(!ctx->le_mode)) {
-        TCGv t0 = tcg_temp_new();
-        tcg_gen_ext16u_tl(t0, arg1);
-        tcg_gen_bswap16_tl(t0, t0);
-        tcg_gen_qemu_st16(t0, arg2, ctx->mem_idx);
-        tcg_temp_free(t0);
-    } else {
-        tcg_gen_qemu_st16(arg1, arg2, ctx->mem_idx);
-    }
+    TCGMemOp op = MO_UW | (ctx->default_tcg_memop_mask ^ MO_BSWAP);
+    tcg_gen_qemu_st_tl(arg1, arg2, ctx->mem_idx, op);
 }
 GEN_STX(sthbr, st16r, 0x16, 0x1C, PPC_INTEGER);
 
 /* stwbrx */
 static inline void gen_qemu_st32r(DisasContext *ctx, TCGv arg1, TCGv arg2)
 {
-    if (likely(!ctx->le_mode)) {
-        TCGv t0 = tcg_temp_new();
-        tcg_gen_ext32u_tl(t0, arg1);
-        tcg_gen_bswap32_tl(t0, t0);
-        tcg_gen_qemu_st32(t0, arg2, ctx->mem_idx);
-        tcg_temp_free(t0);
-    } else {
-        tcg_gen_qemu_st32(arg1, arg2, ctx->mem_idx);
-    }
+    TCGMemOp op = MO_UL | (ctx->default_tcg_memop_mask ^ MO_BSWAP);
+    tcg_gen_qemu_st_tl(arg1, arg2, ctx->mem_idx, op);
 }
 GEN_STX(stwbr, st32r, 0x16, 0x14, PPC_INTEGER);
 
@@ -3114,14 +3076,8 @@ GEN_STX(stwbr, st32r, 0x16, 0x14, PPC_INTEGER);
 /* stdbrx */
 static inline void gen_qemu_st64r(DisasContext *ctx, TCGv arg1, TCGv arg2)
 {
-    if (likely(!ctx->le_mode)) {
-        TCGv t0 = tcg_temp_new();
-        tcg_gen_bswap64_tl(t0, arg1);
-        tcg_gen_qemu_st64(t0, arg2, ctx->mem_idx);
-        tcg_temp_free(t0);
-    } else {
-        tcg_gen_qemu_st64(arg1, arg2, ctx->mem_idx);
-    }
+    TCGMemOp op = MO_Q | (ctx->default_tcg_memop_mask ^ MO_BSWAP);
+    tcg_gen_qemu_st_i64(arg1, arg2, ctx->mem_idx, op);
 }
 GEN_STX_E(stdbr, st64r, 0x14, 0x14, PPC_NONE, PPC2_DBRX);
 #endif  /* TARGET_PPC64 */
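
The byte-reverse conversions above rely on a property of the TCGMemOp
encoding: MO_LE and MO_BE differ only in the MO_BSWAP bit, so XORing the
context's default mask with MO_BSWAP yields the opposite endianness.  A
small self-contained check of the identity (constant values shown for a
little-endian host; on a big-endian host MO_LE and MO_BE trade places):

    #include <assert.h>

    enum { MO_BSWAP = 8, MO_LE = 0, MO_BE = MO_BSWAP };

    int main(void)
    {
        assert((MO_LE ^ MO_BSWAP) == MO_BE);   /* brx op in LE mode */
        assert((MO_BE ^ MO_BSWAP) == MO_LE);   /* brx op in BE mode */
        return 0;
    }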
@@ -3541,7 +3497,9 @@ static void gen_lfdp(DisasContext *ctx)
     }
     gen_set_access_type(ctx, ACCESS_FLOAT);
     EA = tcg_temp_new();
-    gen_addr_imm_index(ctx, EA, 0);                                           \
+    gen_addr_imm_index(ctx, EA, 0);
+    /* We only need to swap high and low halves. gen_qemu_ld64 does the
+       necessary 64-bit byteswap already. */
     if (unlikely(ctx->le_mode)) {
         gen_qemu_ld64(ctx, cpu_fpr[rD(ctx->opcode) + 1], EA);
         tcg_gen_addi_tl(EA, EA, 8);
@@ -3565,6 +3523,8 @@ static void gen_lfdpx(DisasContext *ctx)
     gen_set_access_type(ctx, ACCESS_FLOAT);
     EA = tcg_temp_new();
     gen_addr_reg_index(ctx, EA);
+    /* We only need to swap high and low halves. gen_qemu_ld64 does the
+       necessary 64-bit byteswap already. */
     if (unlikely(ctx->le_mode)) {
         gen_qemu_ld64(ctx, cpu_fpr[rD(ctx->opcode) + 1], EA);
         tcg_gen_addi_tl(EA, EA, 8);
@@ -3713,7 +3673,9 @@ static void gen_stfdp(DisasContext *ctx)
     }
     gen_set_access_type(ctx, ACCESS_FLOAT);
     EA = tcg_temp_new();
-    gen_addr_imm_index(ctx, EA, 0);                                           \
+    gen_addr_imm_index(ctx, EA, 0);
+    /* We only need to swap high and low halves. gen_qemu_st64 does the
+       necessary 64-bit byteswap already. */
     if (unlikely(ctx->le_mode)) {
         gen_qemu_st64(ctx, cpu_fpr[rD(ctx->opcode) + 1], EA);
         tcg_gen_addi_tl(EA, EA, 8);
@@ -3737,6 +3699,8 @@ static void gen_stfdpx(DisasContext *ctx)
     gen_set_access_type(ctx, ACCESS_FLOAT);
     EA = tcg_temp_new();
     gen_addr_reg_index(ctx, EA);
+    /* We only need to swap high and low halves. gen_qemu_st64 does the
+       necessary 64-bit byteswap already. */
     if (unlikely(ctx->le_mode)) {
         gen_qemu_st64(ctx, cpu_fpr[rD(ctx->opcode) + 1], EA);
         tcg_gen_addi_tl(EA, EA, 8);
@@ -6690,6 +6654,8 @@ static void glue(gen_, name)(DisasContext *ctx)
     EA = tcg_temp_new();                                                      \
     gen_addr_reg_index(ctx, EA);                                              \
     tcg_gen_andi_tl(EA, EA, ~0xf);                                            \
+    /* We only need to swap high and low halves. gen_qemu_ld64 does the     \
+       necessary 64-bit byteswap already. */                                \
     if (ctx->le_mode) {                                                       \
         gen_qemu_ld64(ctx, cpu_avrl[rD(ctx->opcode)], EA);                    \
         tcg_gen_addi_tl(EA, EA, 8);                                           \
@@ -6714,6 +6680,8 @@ static void gen_st##name(DisasContext *ctx) \
     EA = tcg_temp_new();                                                      \
     gen_addr_reg_index(ctx, EA);                                              \
     tcg_gen_andi_tl(EA, EA, ~0xf);                                            \
+    /* We only need to swap high and low halves. gen_qemu_st64 does the     \
+       necessary 64-bit byteswap already. */                                \
     if (ctx->le_mode) {                                                       \
         gen_qemu_st64(ctx, cpu_avrl[rD(ctx->opcode)], EA);                    \
         tcg_gen_addi_tl(EA, EA, 8);                                           \
@@ -11336,6 +11304,7 @@ static inline void gen_intermediate_code_internal(PowerPCCPU *cpu,
     ctx.insns_flags2 = env->insns_flags2;
     ctx.access_type = -1;
     ctx.le_mode = env->hflags & (1 << MSR_LE) ? 1 : 0;
+    ctx.default_tcg_memop_mask = ctx.le_mode ? MO_LE : MO_BE;
 #if defined(TARGET_PPC64)
     ctx.sf_mode = msr_is_64bit(env, env->msr);
     ctx.has_cfar = !!(env->flags & POWERPC_FLAG_CFAR);
@@ -11400,7 +11369,7 @@ static inline void gen_intermediate_code_internal(PowerPCCPU *cpu,
                   ctx.nip, ctx.mem_idx, (int)msr_ir);
         if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
             gen_io_start();
-        if (unlikely(ctx.le_mode)) {
+        if (unlikely(need_byteswap(&ctx))) {
             ctx.opcode = bswap32(cpu_ldl_code(env, ctx.nip));
         } else {
             ctx.opcode = cpu_ldl_code(env, ctx.nip);
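
Instruction fetch follows the same rule as data accesses: cpu_ldl_code()
returns a word in the configured target byte order, so the translator swaps
the opcode exactly when need_byteswap() reports that execution mode and
target configuration disagree.  A hedged sketch of that step (fetch32 and
the GCC/Clang bswap builtin are stand-ins, not QEMU APIs):

    #include <stdbool.h>
    #include <stdint.h>

    static uint32_t fetch_opcode(uint32_t (*fetch32)(uint64_t nip),
                                 uint64_t nip, bool swap)
    {
        uint32_t insn = fetch32(nip);   /* word in target byte order */
        return swap ? __builtin_bswap32(insn) : insn;
    }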
diff --git a/target-ppc/translate_init.c b/target-ppc/translate_init.c
index 4d94015..4a9f5b8 100644
--- a/target-ppc/translate_init.c
+++ b/target-ppc/translate_init.c
@@ -8418,6 +8418,9 @@ static void ppc_cpu_reset(CPUState *s)
     msr |= (target_ulong)1 << MSR_VR; /* Allow altivec usage */
     msr |= (target_ulong)1 << MSR_SPE; /* Allow SPE usage */
     msr |= (target_ulong)1 << MSR_PR;
+#if !defined(TARGET_WORDS_BIGENDIAN)
+    msr |= (target_ulong)1 << MSR_LE; /* Little-endian user mode */
+#endif
 #endif
 
 #if defined(TARGET_PPC64)
-- 
1.7.1