

From: Max Filippov
Subject: Re: [Qemu-devel] [PATCH v2] cpu: implementing victim TLB for QEMU system emulated TLB
Date: Fri, 24 Jan 2014 00:44:19 +0400

Hi Xin,

On Thu, Jan 23, 2014 at 11:49 PM, Xin Tong <address@hidden> wrote:

[...]

> diff --git a/cputlb.c b/cputlb.c
> index b533f3f..03a048a 100644
> --- a/cputlb.c
> +++ b/cputlb.c
> @@ -34,6 +34,22 @@
>  /* statistics */
>  int tlb_flush_count;
>
> +/* swap the 2 given TLB entries as well as their corresponding IOTLB */
> +inline void swap_tlb(CPUTLBEntry *te, CPUTLBEntry *se, hwaddr *iote,
> +                     hwaddr *iose)
> +{
> +   hwaddr iotmp;
> +   CPUTLBEntry t;
> +   /* swap iotlb */
> +   iotmp = *iote;
> +   *iote = *iose;
> +   *iose = iotmp;
> +   /* swap tlb */
> +   memcpy(&t, te, sizeof(CPUTLBEntry));
> +   memcpy(te, se, sizeof(CPUTLBEntry));
> +   memcpy(se, &t, sizeof(CPUTLBEntry));

You could use the assignment operator; it works:
    t = *te;
    *te = *se;
    *se = t;

Also, all users of this function are inline functions from softmmu_template.h.
Could this function be a static inline function in that file too?

> +}
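
Putting both points together, the helper might then end up looking something
like this (untested, just to illustrate the shape):

    /* swap the 2 given TLB entries as well as their corresponding IOTLB
     * entries; structure assignment copies the whole CPUTLBEntry */
    static inline void swap_tlb(CPUTLBEntry *te, CPUTLBEntry *se,
                                hwaddr *iote, hwaddr *iose)
    {
        CPUTLBEntry t = *te;
        hwaddr iotmp = *iote;

        *te = *se;
        *se = t;
        *iote = *iose;
        *iose = iotmp;
    }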

[...]

> --- a/include/exec/softmmu_template.h
> +++ b/include/exec/softmmu_template.h
> @@ -141,6 +141,7 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
>      target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
>      uintptr_t haddr;
>      DATA_TYPE res;
> +    int vtlb_idx;
>
>      /* Adjust the given return address.  */
>      retaddr -= GETPC_ADJ;
> @@ -153,7 +154,24 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
>              do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
>          }
>  #endif
> -        tlb_fill(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
> +        /* we are about to do a page table walk. our last hope is the
> +         * victim tlb. try to refill from the victim tlb before walking the
> +         * page table. */
> +        for (vtlb_idx = CPU_VTLB_SIZE; vtlb_idx >= 0; --vtlb_idx) {
> +            if (env->tlb_v_table[mmu_idx][vtlb_idx].ADDR_READ

Is it ADDR_READ or addr_read, as it is called in other places?

> +                == (addr & TARGET_PAGE_MASK)) {
> +                /* found entry in victim tlb */
> +                swap_tlb(&env->tlb_table[mmu_idx][index],
> +                         &env->tlb_v_table[mmu_idx][vtlb_idx],
> +                         &env->iotlb[mmu_idx][index],
> +                         &env->iotlb_v[mmu_idx][vtlb_idx]);
> +                break;
> +            }
> +        }
> +        /* miss victim tlb */
> +        if (vtlb_idx < 0) {
> +            tlb_fill(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
> +        }
>          tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
>      }
>
> @@ -223,6 +241,7 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
>      target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
>      uintptr_t haddr;
>      DATA_TYPE res;
> +    int vtlb_idx;
>
>      /* Adjust the given return address.  */
>      retaddr -= GETPC_ADJ;
> @@ -235,7 +254,24 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
>              do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
>          }
>  #endif
> -        tlb_fill(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
> +        /* we are about to do a page table walk. our last hope is the
> +         * victim tlb. try to refill from the victim tlb before walking the
> +         * page table. */
> +        for (vtlb_idx = CPU_VTLB_SIZE; vtlb_idx >= 0; --vtlb_idx) {
> +            if (env->tlb_v_table[mmu_idx][vtlb_idx].ADDR_READ
> +                == (addr & TARGET_PAGE_MASK)) {
> +                /* found entry in victim tlb */
> +                swap_tlb(&env->tlb_table[mmu_idx][index],
> +                         &env->tlb_v_table[mmu_idx][vtlb_idx],
> +                         &env->iotlb[mmu_idx][index],
> +                         &env->iotlb_v[mmu_idx][vtlb_idx]);
> +                break;
> +            }
> +        }

It could be a good inline function.
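
For example, something along these lines could serve all four helpers if the
comparator field (addr_read, addr_write or addr_code) is selected by its
offset inside CPUTLBEntry. Completely untested, and the helper and parameter
names below are just made up; note that it starts the search at
CPU_VTLB_SIZE - 1, which is probably what the loops above want as well,
assuming tlb_v_table has CPU_VTLB_SIZE entries:

    /* Probe the victim TLB for a match on "page"; on a hit, swap the
     * matching victim entry (and its IOTLB entry) with the direct-mapped
     * one and return true, otherwise return false. */
    static inline bool victim_tlb_hit(CPUArchState *env, int mmu_idx,
                                      int index, size_t elt_ofs,
                                      target_ulong page)
    {
        int vidx;

        for (vidx = CPU_VTLB_SIZE - 1; vidx >= 0; --vidx) {
            CPUTLBEntry *vtlb = &env->tlb_v_table[mmu_idx][vidx];
            target_ulong cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs);

            if (cmp == page) {
                /* found entry in victim tlb, swap tlb and iotlb */
                swap_tlb(&env->tlb_table[mmu_idx][index], vtlb,
                         &env->iotlb[mmu_idx][index],
                         &env->iotlb_v[mmu_idx][vidx]);
                return true;
            }
        }
        return false;
    }

The read helpers would then shrink to something like:

    if (!victim_tlb_hit(env, mmu_idx, index,
                        offsetof(CPUTLBEntry, ADDR_READ),
                        addr & TARGET_PAGE_MASK)) {
        tlb_fill(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
    }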

> +        /* miss victim tlb */
> +        if (vtlb_idx < 0) {
> +            tlb_fill(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
> +        }
>          tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
>      }
>
> @@ -342,6 +378,7 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
>      int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
>      target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
>      uintptr_t haddr;
> +    int vtlb_idx;
>
>      /* Adjust the given return address.  */
>      retaddr -= GETPC_ADJ;
> @@ -354,7 +391,24 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
>              do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
>          }
>  #endif
> -        tlb_fill(env, addr, 1, mmu_idx, retaddr);
> +        /* we are about to do a page table walk. our last hope is the
> +         * victim tlb. try to refill from the victim tlb before walking the
> +         * page table. */
> +        for (vtlb_idx = CPU_VTLB_SIZE; vtlb_idx >= 0; --vtlb_idx) {
> +            if (env->tlb_v_table[mmu_idx][vtlb_idx].addr_write
> +                == (addr & TARGET_PAGE_MASK)) {
> +                /* found entry in victim tlb */
> +                swap_tlb(&env->tlb_table[mmu_idx][index],
> +                         &env->tlb_v_table[mmu_idx][vtlb_idx],
> +                         &env->iotlb[mmu_idx][index],
> +                         &env->iotlb_v[mmu_idx][vtlb_idx]);
> +                break;
> +            }
> +        }
> +        /* miss victim tlb */
> +        if (vtlb_idx < 0) {
> +            tlb_fill(env, addr, 1, mmu_idx, retaddr);
> +        }
>          tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
>      }
>
> @@ -418,6 +472,7 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
>      int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
>      target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
>      uintptr_t haddr;
> +    int vtlb_idx;
>
>      /* Adjust the given return address.  */
>      retaddr -= GETPC_ADJ;
> @@ -430,7 +485,24 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
>              do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
>          }
>  #endif
> -        tlb_fill(env, addr, 1, mmu_idx, retaddr);
> +        /* we are about to do a page table walk. our last hope is the
> +         * victim tlb. try to refill from the victim tlb before walking the
> +         * page table. */
> +        for (vtlb_idx = CPU_VTLB_SIZE; vtlb_idx >= 0; --vtlb_idx) {
> +            if (env->tlb_v_table[mmu_idx][vtlb_idx].addr_write
> +                == (addr & TARGET_PAGE_MASK)) {
> +                /* found entry in victim tlb */
> +                swap_tlb(&env->tlb_table[mmu_idx][index],
> +                         &env->tlb_v_table[mmu_idx][vtlb_idx],
> +                         &env->iotlb[mmu_idx][index],
> +                         &env->iotlb_v[mmu_idx][vtlb_idx]);
> +                break;
> +            }
> +        }

It could be a good inline function too.
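
With a helper like the one sketched above, the store helpers would just use
the addr_write comparator and the write access type, e.g.:

    if (!victim_tlb_hit(env, mmu_idx, index,
                        offsetof(CPUTLBEntry, addr_write),
                        addr & TARGET_PAGE_MASK)) {
        tlb_fill(env, addr, 1, mmu_idx, retaddr);
    }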

> +        /* miss victim tlb */
> +        if (vtlb_idx < 0) {
> +            tlb_fill(env, addr, 1, mmu_idx, retaddr);
> +        }
>          tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
>      }

-- 
Thanks.
-- Max


