Re: [Qemu-devel] [RFC v7 09/16] softmmu: Include MMIO/invalid exclusive accesses


From: alvise rigo
Subject: Re: [Qemu-devel] [RFC v7 09/16] softmmu: Include MMIO/invalid exclusive accesses
Date: Thu, 18 Feb 2016 15:18:04 +0100

On Tue, Feb 16, 2016 at 6:49 PM, Alex Bennée <address@hidden> wrote:
>
> Alvise Rigo <address@hidden> writes:
>
>> Enable exclusive accesses when the MMIO/invalid flag is set in the TLB
>> entry.
>>
>> In case a LL access is done to MMIO memory, we treat it differently from
>> a RAM access in that we do not rely on the EXCL bitmap to flag the page
>> as exclusive. In fact, we don't even need the TLB_EXCL flag to force the
>> slow path, since it is always forced anyway.
>>
>> This commit does not take care of invalidating an MMIO exclusive range from
>> other non-exclusive accesses i.e. CPU1 LoadLink to MMIO address X and
>> CPU2 writes to X. This will be addressed in the following commit.
>>
>> Suggested-by: Jani Kokkonen <address@hidden>
>> Suggested-by: Claudio Fontana <address@hidden>
>> Signed-off-by: Alvise Rigo <address@hidden>
>> ---
>>  cputlb.c           |  7 +++----
>>  softmmu_template.h | 26 ++++++++++++++++++++------
>>  2 files changed, 23 insertions(+), 10 deletions(-)
>>
>> diff --git a/cputlb.c b/cputlb.c
>> index aa9cc17..87d09c8 100644
>> --- a/cputlb.c
>> +++ b/cputlb.c
>> @@ -424,7 +424,7 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
>>          if ((memory_region_is_ram(section->mr) && section->readonly)
>>              || memory_region_is_romd(section->mr)) {
>>              /* Write access calls the I/O callback.  */
>> -            te->addr_write = address | TLB_MMIO;
>> +            address |= TLB_MMIO;
>>          } else if (memory_region_is_ram(section->mr)
>>                     && cpu_physical_memory_is_clean(section->mr->ram_addr
>>                                                     + xlat)) {
>> @@ -437,11 +437,10 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
>>              if (cpu_physical_memory_is_excl(section->mr->ram_addr + xlat)) {
>>                  /* There is at least one vCPU that has flagged the address as
>>                   * exclusive. */
>> -                te->addr_write = address | TLB_EXCL;
>> -            } else {
>> -                te->addr_write = address;
>> +                address |= TLB_EXCL;
>>              }
>>          }
>> +        te->addr_write = address;
>
> As mentioned before I think this bit belongs in the earlier patch.
>
>>      } else {
>>          te->addr_write = -1;
>>      }
>> diff --git a/softmmu_template.h b/softmmu_template.h
>> index 267c52a..c54bdc9 100644
>> --- a/softmmu_template.h
>> +++ b/softmmu_template.h
>> @@ -476,7 +476,7 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
>>
>>      /* Handle an IO access or exclusive access.  */
>>      if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
>> -        if ((tlb_addr & ~TARGET_PAGE_MASK) == TLB_EXCL) {
>> +        if (tlb_addr & TLB_EXCL) {
>>              CPUIOTLBEntry *iotlbentry = &env->iotlb[mmu_idx][index];
>>              CPUState *cpu = ENV_GET_CPU(env);
>>              CPUClass *cc = CPU_GET_CLASS(cpu);
>> @@ -500,8 +500,15 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
>>                  }
>>              }
>>
>> -            glue(helper_le_st_name, _do_ram_access)(env, val, addr, oi,
>> -                                                    mmu_idx, index, retaddr);
>> +            if (tlb_addr & ~(TARGET_PAGE_MASK | TLB_EXCL)) { /* MMIO access */
>
> What about the other flags? Shouldn't this be tlb_addr & TLB_MMIO?

The condition upstream QEMU uses to take the IO access path is:
if (unlikely(tlb_addr & ~TARGET_PAGE_MASK))
We now split this into:
if (tlb_addr & TLB_EXCL)
for RAM exclusive accesses, and
if (tlb_addr & ~(TARGET_PAGE_MASK | TLB_EXCL))
for IO accesses. This last case also covers the IO exclusive accesses.
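
To make the dispatch concrete, here is a minimal standalone C sketch of the
resulting decision tree. The flag values and the classify() helper are
assumptions chosen for illustration only, not QEMU's real definitions; only
the masking logic mirrors the patch.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t target_ulong;

#define TARGET_PAGE_BITS  12
#define TARGET_PAGE_MASK  (~(target_ulong)((1 << TARGET_PAGE_BITS) - 1))

/* Low-bit TLB flags; the values are assumed for this sketch. */
#define TLB_MMIO          ((target_ulong)1 << 0)
#define TLB_NOTDIRTY      ((target_ulong)1 << 1)
#define TLB_EXCL          ((target_ulong)1 << 2)

static void classify(target_ulong tlb_addr)
{
    if (tlb_addr & ~TARGET_PAGE_MASK) {        /* upstream slow-path test */
        if (tlb_addr & TLB_EXCL) {
            /* (the real helper does its exclusive bookkeeping around here) */
            if (tlb_addr & ~(TARGET_PAGE_MASK | TLB_EXCL)) {
                printf("exclusive MMIO -> _do_mmio_access\n");
            } else {
                printf("exclusive RAM  -> _do_ram_access\n");
            }
        } else {
            printf("plain IO access -> existing slow path\n");
        }
    } else {
        printf("fast-path RAM store\n");
    }
}

int main(void)
{
    classify(0x1000);                        /* clean RAM page  */
    classify(0x1000 | TLB_EXCL);             /* RAM exclusive   */
    classify(0x1000 | TLB_EXCL | TLB_MMIO);  /* MMIO exclusive  */
    classify(0x1000 | TLB_MMIO);             /* plain MMIO      */
    return 0;
}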

>
>> +                glue(helper_le_st_name, _do_mmio_access)(env, val, addr, oi,
>> +                                                         mmu_idx, index,
>> +                                                         retaddr);
>> +            } else {
>> +                glue(helper_le_st_name, _do_ram_access)(env, val, addr, oi,
>> +                                                        mmu_idx, index,
>> +                                                        retaddr);
>> +            }
>>
>>              lookup_and_reset_cpus_ll_addr(hw_addr, DATA_SIZE);
>>
>> @@ -620,7 +627,7 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
>>
>>      /* Handle an IO access or exclusive access.  */
>>      if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
>> -        if ((tlb_addr & ~TARGET_PAGE_MASK) == TLB_EXCL) {
>> +        if (tlb_addr & TLB_EXCL) {
>>              CPUIOTLBEntry *iotlbentry = &env->iotlb[mmu_idx][index];
>>              CPUState *cpu = ENV_GET_CPU(env);
>>              CPUClass *cc = CPU_GET_CLASS(cpu);
>> @@ -644,8 +651,15 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
>>                  }
>>              }
>>
>> -            glue(helper_be_st_name, _do_ram_access)(env, val, addr, oi,
>> -                                                    mmu_idx, index, retaddr);
>> +            if (tlb_addr & ~(TARGET_PAGE_MASK | TLB_EXCL)) { /* MMIO access */
>> +                glue(helper_be_st_name, _do_mmio_access)(env, val, addr, oi,
>> +                                                         mmu_idx, index,
>> +                                                         retaddr);
>> +            } else {
>> +                glue(helper_be_st_name, _do_ram_access)(env, val, addr, oi,
>> +                                                        mmu_idx, index,
>> +                                                        retaddr);
>> +            }
>>
>>              lookup_and_reset_cpus_ll_addr(hw_addr, DATA_SIZE);
>
>
> --
> Alex Bennée


