qemu-devel
[Top][All Lists]
Advanced

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Qemu-devel] [patch v4 09/16] memory: introduce mmio request pending


From: Avi Kivity
Subject: Re: [Qemu-devel] [patch v4 09/16] memory: introduce mmio request pending to prevent nested DMA
Date: Mon, 22 Oct 2012 12:28:57 +0200
User-agent: Mozilla/5.0 (X11; Linux x86_64; rv:15.0) Gecko/20120911 Thunderbird/15.0.1

On 10/22/2012 11:23 AM, Liu Ping Fan wrote:
> Reject a nested mmio request that does not target RAM, so that we
> can avoid the potential deadlock caused by the inconsistent lock
> ordering of two devices' local locks.

I can't say I like this but it's better than anything else we have.

>  }
>  
> +int get_context_type(void)
> +{
> +    QemuThread *t = pthread_getspecific(qemu_thread_key);
> +    return t->context_type;
> +}
> +
> +void set_context_type(int type)
> +{
> +    QemuThread *t = pthread_getspecific(qemu_thread_key);
> +    t->context_type = type;
> +}

Please define an enum so we know what it means.

> +
>  static void *qemu_kvm_cpu_thread_fn(void *arg)
>  {
>      CPUArchState *env = arg;
> @@ -736,6 +748,8 @@ static void *qemu_kvm_cpu_thread_fn(void *arg)
>      int r;
>  
>      pthread_setspecific(qemu_thread_key, cpu->thread);
> +    set_context_type(0);
> +

Setting this for every thread means we're going to miss some.

> @@ -3500,7 +3502,8 @@ void cpu_physical_memory_rw(target_phys_addr_t addr, 
> uint8_t *buf,
>          qemu_mutex_lock(&mem_map_lock);
>          safe_ref = phys_page_lookup(page, &obj_mrs);
>          qemu_mutex_unlock(&mem_map_lock);
> -        if (safe_ref == 0) {
> +
> +        if (safe_ref == 0 && context == 1) {
>              qemu_mutex_lock_iothread();
>              qemu_mutex_lock(&mem_map_lock);
>              /* At the 2nd try, mem map can change, so need to judge it again 
> */
> @@ -3511,7 +3514,9 @@ void cpu_physical_memory_rw(target_phys_addr_t addr, 
> uint8_t *buf,
>              }
>          }
>          section = &obj_mrs;
> -
> +        if (context == 1) {
> +            nested_dma = thread->mmio_request_pending++ > 1 ? 1 : 0;
> +        }
>          if (is_write) {
>              if (!memory_region_is_ram(section->mr)) {
>                  target_phys_addr_t addr1;
> @@ -3521,17 +3526,23 @@ void cpu_physical_memory_rw(target_phys_addr_t addr, 
> uint8_t *buf,
>                  if (l >= 4 && ((addr1 & 3) == 0)) {
>                      /* 32 bit write access */
>                      val = ldl_p(buf);
> -                    io_mem_write(section->mr, addr1, val, 4);
> +                    if (!nested_dma) {
> +                        io_mem_write(section->mr, addr1, val, 4);
> +                    }
>                      l = 4;
>                  } else if (l >= 2 && ((addr1 & 1) == 0)) {
>                      /* 16 bit write access */
>                      val = lduw_p(buf);
> -                    io_mem_write(section->mr, addr1, val, 2);
> +                    if (!nested_dma) {
> +                        io_mem_write(section->mr, addr1, val, 2);
> +                    }
>                      l = 2;
>                  } else {
>                      /* 8 bit write access */
>                      val = ldub_p(buf);
> -                    io_mem_write(section->mr, addr1, val, 1);
> +                    if (!nested_dma) {
> +                        io_mem_write(section->mr, addr1, val, 1);
> +                    }
>                      l = 1;
>                  }


We need to abort on nested_dma so we know something bad happened and we
have to fix it.

> @@ -12,6 +12,9 @@ struct QemuCond {
>  
>  struct QemuThread {
>      pthread_t thread;
> +    /* 0 clean; 1 mmio; 2 io */
> +    int context_type;
> +    int mmio_request_pending;
>  };

QemuThread is at a too low level of abstraction.  It's just a wrapper
around the host threading facilities, it shouldn't add anything else.



-- 
error compiling committee.c: too many arguments to function



reply via email to

[Prev in Thread] Current Thread [Next in Thread]