From: Marc-André Lureau
Subject: Re: [Qemu-devel] [PATCH v4 16/29] vhost+postcopy: Stash RAMBlock and offset
Date: Mon, 12 Mar 2018 15:51:59 +0100

On Thu, Mar 8, 2018 at 8:57 PM, Dr. David Alan Gilbert (git)
<address@hidden> wrote:
> From: "Dr. David Alan Gilbert" <address@hidden>
>
> Stash the RAMBlock and offset for later use when looking up
> addresses.
>
> Signed-off-by: Dr. David Alan Gilbert <address@hidden>

Reviewed-by: Marc-André Lureau <address@hidden>
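
For readers following the series: a minimal sketch, assuming a later patch walks these stashed arrays, of how region_rb[] and region_rb_offset[] could be used to translate an address in QEMU's own mapping of guest memory (the userspace_addr space a vhost-user backend reports faults in) back into a RAMBlock plus offset. The function name vhost_user_find_ramblock and the error handling are invented for illustration; only the struct fields come from the diff below.

static int vhost_user_find_ramblock(struct vhost_dev *dev,
                                    uint64_t client_addr,
                                    RAMBlock **rb, ram_addr_t *rb_offset)
{
    struct vhost_user *u = dev->opaque;
    size_t i;

    for (i = 0; i < u->region_rb_len && i < dev->mem->nregions; i++) {
        struct vhost_memory_region *reg = dev->mem->regions + i;

        if (!u->region_rb[i]) {
            continue;   /* region had no fd, so nothing was stashed */
        }
        if (client_addr >= reg->userspace_addr &&
            client_addr < reg->userspace_addr + reg->memory_size) {
            *rb = u->region_rb[i];
            /* offset of the region within its RAMBlock, plus the
             * offset of the address within the region
             */
            *rb_offset = u->region_rb_offset[i] +
                         (client_addr - reg->userspace_addr);
            return 0;
        }
    }
    return -1;   /* address not covered by any stashed region */
}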


> ---
>  hw/virtio/trace-events |  1 +
>  hw/virtio/vhost-user.c | 34 ++++++++++++++++++++++++++++++++++
>  2 files changed, 35 insertions(+)
>
> diff --git a/hw/virtio/trace-events b/hw/virtio/trace-events
> index 05d18ada77..d7e9e1084b 100644
> --- a/hw/virtio/trace-events
> +++ b/hw/virtio/trace-events
> @@ -9,6 +9,7 @@ vhost_section(const char *name, int r) "%s:%d"
>  # hw/virtio/vhost-user.c
>  vhost_user_postcopy_listen(void) ""
>  vhost_user_set_mem_table_postcopy(uint64_t client_addr, uint64_t qhva, int reply_i, int region_i) "client:0x%"PRIx64" for hva: 0x%"PRIx64" reply %d region %d"
> +vhost_user_set_mem_table_withfd(int index, const char *name, uint64_t memory_size, uint64_t guest_phys_addr, uint64_t userspace_addr, uint64_t offset) "%d:%s: size:0x%"PRIx64" GPA:0x%"PRIx64" QVA/userspace:0x%"PRIx64" RB offset:0x%"PRIx64
>
>  # hw/virtio/virtio.c
>  virtqueue_alloc_element(void *elem, size_t sz, unsigned in_num, unsigned out_num) "elem %p size %zd in_num %u out_num %u"
> diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
> index 6875f729e8..fd65616961 100644
> --- a/hw/virtio/vhost-user.c
> +++ b/hw/virtio/vhost-user.c
> @@ -175,6 +175,15 @@ struct vhost_user {
>      NotifierWithReturn postcopy_notifier;
>      struct PostCopyFD  postcopy_fd;
>      uint64_t           postcopy_client_bases[VHOST_MEMORY_MAX_NREGIONS];
> +    /* Length of the region_rb and region_rb_offset arrays */
> +    size_t             region_rb_len;
> +    /* RAMBlock associated with a given region */
> +    RAMBlock         **region_rb;
> +    /* The offset from the start of the RAMBlock to the start of the
> +     * vhost region.
> +     */
> +    ram_addr_t        *region_rb_offset;
> +
>      /* True once we've entered postcopy_listen */
>      bool               postcopy_listen;
>  };
> @@ -362,6 +371,17 @@ static int vhost_user_set_mem_table_postcopy(struct vhost_dev *dev,
>          msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
>      }
>
> +    if (u->region_rb_len < dev->mem->nregions) {
> +        u->region_rb = g_renew(RAMBlock*, u->region_rb, dev->mem->nregions);
> +        u->region_rb_offset = g_renew(ram_addr_t, u->region_rb_offset,
> +                                      dev->mem->nregions);
> +        memset(&(u->region_rb[u->region_rb_len]), '\0',
> +               sizeof(RAMBlock *) * (dev->mem->nregions - u->region_rb_len));
> +        memset(&(u->region_rb_offset[u->region_rb_len]), '\0',
> +               sizeof(ram_addr_t) * (dev->mem->nregions - u->region_rb_len));
> +        u->region_rb_len = dev->mem->nregions;
> +    }
> +
>      for (i = 0; i < dev->mem->nregions; ++i) {
>          struct vhost_memory_region *reg = dev->mem->regions + i;
>          ram_addr_t offset;
> @@ -372,6 +392,12 @@ static int vhost_user_set_mem_table_postcopy(struct vhost_dev *dev,
>                                       &offset);
>          fd = memory_region_get_fd(mr);
>          if (fd > 0) {
> +            trace_vhost_user_set_mem_table_withfd(fd_num, mr->name,
> +                                                  reg->memory_size,
> +                                                  reg->guest_phys_addr,
> +                                                  reg->userspace_addr, offset);
> +            u->region_rb_offset[i] = offset;
> +            u->region_rb[i] = mr->ram_block;
>              msg.payload.memory.regions[fd_num].userspace_addr =
>                  reg->userspace_addr;
>              msg.payload.memory.regions[fd_num].memory_size  = reg->memory_size;
> @@ -380,6 +406,9 @@ static int vhost_user_set_mem_table_postcopy(struct vhost_dev *dev,
>              msg.payload.memory.regions[fd_num].mmap_offset = offset;
>              assert(fd_num < VHOST_MEMORY_MAX_NREGIONS);
>              fds[fd_num++] = fd;
> +        } else {
> +            u->region_rb_offset[i] = 0;
> +            u->region_rb[i] = NULL;
>          }
>      }
>
> @@ -1148,6 +1177,11 @@ static int vhost_user_cleanup(struct vhost_dev *dev)
>          close(u->slave_fd);
>          u->slave_fd = -1;
>      }
> +    g_free(u->region_rb);
> +    u->region_rb = NULL;
> +    g_free(u->region_rb_offset);
> +    u->region_rb_offset = NULL;
> +    u->region_rb_len = 0;
>      g_free(u);
>      dev->opaque = 0;
>
> --
> 2.14.3
>
>
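
A side note on the allocation pattern in the second hunk: g_renew() grows the array but leaves the newly added tail uninitialised, which is why the patch memset()s only the elements past the old region_rb_len. Below is a standalone illustration of that grow-and-zero idiom, using a plain int array rather than the RAMBlock*/ram_addr_t pairs (build against glib-2.0); it is not QEMU code, just the same pattern in isolation.

#include <glib.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
    size_t len = 0;        /* current allocated length, like region_rb_len */
    size_t needed = 4;     /* new required length, like dev->mem->nregions */
    int *vals = NULL;      /* stands in for region_rb / region_rb_offset */

    if (len < needed) {
        vals = g_renew(int, vals, needed);
        /* zero only the newly added elements; existing ones keep their values */
        memset(&vals[len], 0, sizeof(int) * (needed - len));
        len = needed;
    }

    for (size_t i = 0; i < len; i++) {
        printf("vals[%zu] = %d\n", i, vals[i]);
    }
    g_free(vals);
    return 0;
}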



-- 
Marc-André Lureau


