From: Aurelien Jarno
Subject: Re: [Qemu-devel] [PATCH v2 17/18] tcg-i386: Adjust tcg_out_tlb_load for x32
Date: Mon, 2 Sep 2013 13:10:41 +0200
User-agent: Mutt/1.5.21 (2010-09-15)

On Thu, Aug 29, 2013 at 02:09:45PM -0700, Richard Henderson wrote:
> Signed-off-by: Richard Henderson <address@hidden>
> ---
>  tcg/i386/tcg-target.c | 41 +++++++++++++++++++++++++++--------------
>  1 file changed, 27 insertions(+), 14 deletions(-)
> 
> diff --git a/tcg/i386/tcg-target.c b/tcg/i386/tcg-target.c
> index 247c9d2..cde134f 100644
> --- a/tcg/i386/tcg-target.c
> +++ b/tcg/i386/tcg-target.c
> @@ -1085,33 +1085,46 @@ static inline void tcg_out_tlb_load(TCGContext *s, int addrlo_idx,
>      const int addrlo = args[addrlo_idx];
>      const int r0 = TCG_REG_L0;
>      const int r1 = TCG_REG_L1;
> -    TCGType type = TCG_TYPE_I32;
> -    int rexw = 0;
> +    TCGType ttype = TCG_TYPE_I32;
> +    TCGType htype = TCG_TYPE_I32;
> +    int trexw = 0, hrexw = 0;
>  
> -    if (TCG_TARGET_REG_BITS == 64 && TARGET_LONG_BITS == 64) {
> -        type = TCG_TYPE_I64;
> -        rexw = P_REXW;
> +    if (TCG_TARGET_REG_BITS == 64) {
> +        if (TARGET_LONG_BITS == 64) {
> +            ttype = TCG_TYPE_I64;
> +            trexw = P_REXW;
> +        }
> +        if (TCG_TYPE_PTR == TCG_TYPE_I64) {
> +            htype = TCG_TYPE_I64;
> +            hrexw = P_REXW;
> +        }
>      }
>  
> -    tcg_out_mov(s, type, r0, addrlo);
> -    tcg_out_mov(s, type, r1, addrlo);
> +    tcg_out_mov(s, htype, r0, addrlo);
> +    tcg_out_mov(s, ttype, r1, addrlo);
>  
> -    tcg_out_shifti(s, SHIFT_SHR + rexw, r0,
> +    tcg_out_shifti(s, SHIFT_SHR + hrexw, r0,
>                     TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
>  
> -    tgen_arithi(s, ARITH_AND + rexw, r1,
> +    tgen_arithi(s, ARITH_AND + trexw, r1,
>                  TARGET_PAGE_MASK | ((1 << s_bits) - 1), 0);
> -    tgen_arithi(s, ARITH_AND + rexw, r0,
> +    tgen_arithi(s, ARITH_AND + hrexw, r0,
>                  (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS, 0);
>  
> -    tcg_out_modrm_sib_offset(s, OPC_LEA + P_REXW, r0, TCG_AREG0, r0, 0,
> +    tcg_out_modrm_sib_offset(s, OPC_LEA + hrexw, r0, TCG_AREG0, r0, 0,
>                               offsetof(CPUArchState, tlb_table[mem_index][0])
>                               + which);
>  
>      /* cmp 0(r0), r1 */
> -    tcg_out_modrm_offset(s, OPC_CMP_GvEv + rexw, r1, r0, 0);
> +    tcg_out_modrm_offset(s, OPC_CMP_GvEv + trexw, r1, r0, 0);
>  
> -    tcg_out_mov(s, type, r1, addrlo);
> +    /* Prepare for both the fast path add of the tlb addend, and the slow
> +       path function argument setup.  There are two cases worth noting:
> +       For 32-bit guest and x86_64 host, MOVL zero-extends the guest address
> +       before the fastpath ADDQ below.  For 64-bit guest and x32 host, MOVQ
> +       copies the entire guest address for the slow path, while truncation
> +       for the 32-bit host happens with the fastpath ADDL below.  */
> +    tcg_out_mov(s, ttype, r1, addrlo);
>  
>      /* jne slow_path */
>      tcg_out_opc(s, OPC_JCC_long + JCC_JNE, 0, 0, 0);
> @@ -1131,7 +1144,7 @@ static inline void tcg_out_tlb_load(TCGContext *s, int addrlo_idx,
>      /* TLB Hit.  */
>  
>      /* add addend(r0), r1 */
> -    tcg_out_modrm_offset(s, OPC_ADD_GvEv + P_REXW, r1, r0,
> +    tcg_out_modrm_offset(s, OPC_ADD_GvEv + hrexw, r1, r0,
>                           offsetof(CPUTLBEntry, addend) - which);
>  }
>  #elif defined(__x86_64__) && defined(__linux__)
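
For readers less familiar with the softmmu fast path, the shr/and/lea/cmp/add
sequence above computes, in C terms, something like the sketch below.  The
constants, struct layout, and the tlb_lookup function itself are illustrative
stand-ins, not QEMU's real definitions; only the field names addend and the
TARGET_PAGE_*/CPU_TLB_* macros echo identifiers from the patch.

#include <stdint.h>

/* Illustrative stand-ins -- not QEMU's real definitions. */
#define TARGET_PAGE_BITS    12
#define TARGET_PAGE_MASK    (~(target_ulong)((1 << TARGET_PAGE_BITS) - 1))
#define CPU_TLB_SIZE        256
#define CPU_TLB_ENTRY_BITS  4    /* log2 of the assumed entry size */

typedef uint64_t target_ulong;   /* 64-bit guest, i.e. ttype = TCG_TYPE_I64 */

typedef struct {
    target_ulong addr_read;      /* the tag that "which" selects */
    uintptr_t addend;            /* host address - guest address */
} CPUTLBEntry;

static void *tlb_lookup(char *tlb_table, target_ulong addr, int s_bits)
{
    /* shr + and: byte offset of the TLB entry.  This is host pointer
       arithmetic, hence htype/hrexw on those opcodes.  */
    uintptr_t ofs = (addr >> (TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS))
                    & ((CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS);
    CPUTLBEntry *e = (CPUTLBEntry *)(tlb_table + ofs);

    /* and + cmp: the tag compare is on guest addresses, so it is done
       at guest width, ttype/trexw.  */
    if ((addr & (TARGET_PAGE_MASK | ((1 << s_bits) - 1))) != e->addr_read) {
        return NULL;             /* jne slow_path */
    }

    /* TLB hit: "add addend(r0), r1".  The result is a host pointer, so
       the add uses hrexw; on x32 an ADDL truncates it to 32 bits.  */
    return (void *)(uintptr_t)(addr + e->addend);
}

That split is exactly why the patch needs two type/REX pairs: the entry
offset and the final addend add are host-pointer operations (htype), while
the tag compare must happen at guest-address width (ttype), and on x32 the
two widths differ.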

Reviewed-by: Aurelien Jarno <address@hidden>

-- 
Aurelien Jarno                          GPG: 1024D/F1BCDB73
address@hidden                 http://www.aurel32.net


