qemu-devel
[Top][All Lists]
Advanced

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Qemu-devel] [PATCH v1 04/23] xen_pt: use separate MemoryListeners for memory and I/O


From: Stefano Stabellini
Subject: Re: [Qemu-devel] [PATCH v1 04/23] xen_pt: use separate MemoryListeners for memory and I/O
Date: Mon, 8 Oct 2012 13:18:08 +0100
User-agent: Alpine 2.02 (DEB 1266 2009-07-14)

On Sun, 7 Oct 2012, Avi Kivity wrote:
> Using an unfiltered memory listener will cause regions to be reported
> multiple times if we have more than two address spaces.  Use a separate
> listener for memory and I/O, and utilize MemoryListener's address space
> filtering to fix this.
> 
> Signed-off-by: Avi Kivity <address@hidden>

It looks OK to me


>  hw/xen_pt.c | 38 +++++++++++++++++++++++++++++++++++++-
>  hw/xen_pt.h |  1 +
>  2 files changed, 38 insertions(+), 1 deletion(-)
> 
> diff --git a/hw/xen_pt.c b/hw/xen_pt.c
> index 307119a..438ad54 100644
> --- a/hw/xen_pt.c
> +++ b/hw/xen_pt.c
> @@ -59,6 +59,7 @@
>  #include "xen_backend.h"
>  #include "xen_pt.h"
>  #include "range.h"
> +#include "exec-memory.h"
>  
>  #define XEN_PT_NR_IRQS (256)
>  static uint8_t xen_pt_mapped_machine_irq[XEN_PT_NR_IRQS] = {0};
> @@ -621,6 +622,22 @@ static void xen_pt_region_del(MemoryListener *l, 
> MemoryRegionSection *sec)
>      xen_pt_region_update(s, sec, false);
>  }
>  
> +static void xen_pt_io_region_add(MemoryListener *l, MemoryRegionSection *sec)
> +{
> +    XenPCIPassthroughState *s = container_of(l, XenPCIPassthroughState,
> +                                             io_listener);
> +
> +    xen_pt_region_update(s, sec, true);
> +}
> +
> +static void xen_pt_io_region_del(MemoryListener *l, MemoryRegionSection *sec)
> +{
> +    XenPCIPassthroughState *s = container_of(l, XenPCIPassthroughState,
> +                                             io_listener);
> +
> +    xen_pt_region_update(s, sec, false);
> +}
> +
>  static void xen_pt_region_nop(MemoryListener *l, MemoryRegionSection *s)
>  {
>  }
> @@ -654,6 +671,22 @@ static void xen_pt_eventfd_fns(MemoryListener *l, 
> MemoryRegionSection *s,
>      .priority = 10,
>  };
>  
> +static const MemoryListener xen_pt_io_listener = {
> +    .begin = xen_pt_begin,
> +    .commit = xen_pt_commit,
> +    .region_add = xen_pt_io_region_add,
> +    .region_nop = xen_pt_region_nop,
> +    .region_del = xen_pt_io_region_del,
> +    .log_start = xen_pt_log_fns,
> +    .log_stop = xen_pt_log_fns,
> +    .log_sync = xen_pt_log_fns,
> +    .log_global_start = xen_pt_log_global_fns,
> +    .log_global_stop = xen_pt_log_global_fns,
> +    .eventfd_add = xen_pt_eventfd_fns,
> +    .eventfd_del = xen_pt_eventfd_fns,
> +    .priority = 10,
> +};
> +
>  /* init */
>  
>  static int xen_pt_initfn(PCIDevice *d)
> @@ -691,6 +724,7 @@ static int xen_pt_initfn(PCIDevice *d)
>      }
>  
>      s->memory_listener = xen_pt_memory_listener;
> +    s->io_listener = xen_pt_io_listener;
>  
>      /* Handle real device's MMIO/PIO BARs */
>      xen_pt_register_regions(s);
> @@ -757,7 +791,8 @@ static int xen_pt_initfn(PCIDevice *d)
>      }
>  
>  out:
> -    memory_listener_register(&s->memory_listener, NULL);
> +    memory_listener_register(&s->memory_listener, get_system_memory());
> +    memory_listener_register(&s->io_listener, get_system_io());
>      XEN_PT_LOG(d, "Real physical device %02x:%02x.%d registered 
> successfuly!\n",
>                 bus, slot, func);
>  
> @@ -812,6 +847,7 @@ static void xen_pt_unregister_device(PCIDevice *d)
>  
>      xen_pt_unregister_regions(s);
>      memory_listener_unregister(&s->memory_listener);
> +    memory_listener_unregister(&s->io_listener);
>  
>      xen_host_pci_device_put(&s->real_device);
>  }
> diff --git a/hw/xen_pt.h b/hw/xen_pt.h
> index 112477a..f15e69a 100644
> --- a/hw/xen_pt.h
> +++ b/hw/xen_pt.h
> @@ -209,6 +209,7 @@ struct XenPCIPassthroughState {
>      MemoryRegion rom;
>  
>      MemoryListener memory_listener;
> +    MemoryListener io_listener;
>  };
>  
>  int xen_pt_config_init(XenPCIPassthroughState *s);
> -- 
> 1.7.12
> 
> 



reply via email to

[Prev in Thread] Current Thread [Next in Thread]