qemu-devel
[Top][All Lists]
Advanced

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [RFC PATCH v2 3/5] vhost-user-dev: Add cache BAR


From: Albert Esteve
Subject: Re: [RFC PATCH v2 3/5] vhost-user-dev: Add cache BAR
Date: Wed, 4 Sep 2024 13:20:09 +0200



On Thu, Jul 11, 2024 at 10:25 AM Stefan Hajnoczi <stefanha@redhat.com> wrote:
On Fri, Jun 28, 2024 at 04:57:08PM +0200, Albert Esteve wrote:
> Add a cache BAR in the vhost-user-device
> into which files can be directly mapped.
>
> The number, shmid, and size of the VIRTIO Shared
> Memory subregions is retrieved through a get_shmem_config
> message sent by the vhost-user-base module
> on the realize step, after virtio_init().
>
> By default, if VHOST_USER_PROTOCOL_F_SHMEM
> feature is not supported by the backend,
> there is no cache.
>
> Signed-off-by: Albert Esteve <aesteve@redhat.com>

Michael: Please review vhost_user_device_pci_realize() below regarding
virtio-pci BAR layout. Thanks!

> ---
>  hw/virtio/vhost-user-base.c       | 39 +++++++++++++++++++++++++++++--
>  hw/virtio/vhost-user-device-pci.c | 37 ++++++++++++++++++++++++++---
>  2 files changed, 71 insertions(+), 5 deletions(-)
>
> diff --git a/hw/virtio/vhost-user-base.c b/hw/virtio/vhost-user-base.c
> index a83167191e..e47c568a55 100644
> --- a/hw/virtio/vhost-user-base.c
> +++ b/hw/virtio/vhost-user-base.c
> @@ -268,7 +268,9 @@ static void vub_device_realize(DeviceState *dev, Error **errp)
>  {
>      VirtIODevice *vdev = VIRTIO_DEVICE(dev);
>      VHostUserBase *vub = VHOST_USER_BASE(dev);
> -    int ret;
> +    uint64_t memory_sizes[8];
> +    void *cache_ptr;
> +    int i, ret, nregions;

>      if (!vub->chardev.chr) {
>          error_setg(errp, "vhost-user-base: missing chardev");
> @@ -311,7 +313,7 @@ static void vub_device_realize(DeviceState *dev, Error **errp)

>      /* Allocate queues */
>      vub->vqs = g_ptr_array_sized_new(vub->num_vqs);
> -    for (int i = 0; i < vub->num_vqs; i++) {
> +    for (i = 0; i < vub->num_vqs; i++) {
>          g_ptr_array_add(vub->vqs,
>                          virtio_add_queue(vdev, vub->vq_size,
>                                           vub_handle_output));
> @@ -328,6 +330,39 @@ static void vub_device_realize(DeviceState *dev, Error **errp)
>          do_vhost_user_cleanup(vdev, vub);
>      }

> +    ret = vub->vhost_dev.vhost_ops->vhost_get_shmem_config(&vub->vhost_dev,
> +                                                           &nregions,
> +                                                           memory_sizes,
> +                                                           errp);
> +
> +    if (ret < 0) {
> +        do_vhost_user_cleanup(vdev, vub);
> +    }
> +
> +    for (i = 0; i < nregions; i++) {
> +        if (memory_sizes[i]) {
> +            if (!is_power_of_2(memory_sizes[i]) ||
> +                memory_sizes[i] < qemu_real_host_page_size()) {

Or just if (memory_sizes[i] % qemu_real_host_page_size() != 0)?

I like both options. The original is more explicit; your proposal is more concise.
I will change it.
 

> +                error_setg(errp, "Shared memory %d size must be a power of 2 "
> +                                 "no smaller than the page size", i);
> +                return;
> +            }
> +
> +            cache_ptr = mmap(NULL, memory_sizes[i], PROT_READ,

Should this be PROT_NONE like in
vhost_user_backend_handle_shmem_unmap()?

Since this is supposed to be blank memory, I think you may be
right. But I am not completely certain. I'll change it and check if
everything works as expected on my side.
 

> +                            MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
> +            if (cache_ptr == MAP_FAILED) {
> +                error_setg(errp, "Unable to mmap blank cache: %s",
> +                           strerror(errno));

error_setg_errno() can be used here.

> +                return;
> +            }
> +
> +            virtio_new_shmem_region(vdev);
> +            memory_region_init_ram_ptr(&vdev->shmem_list[i],
> +                                    OBJECT(vdev), "vub-shm-" + i,
> +                                    memory_sizes[i], cache_ptr);
> +        }
> +    }
> +
>      qemu_chr_fe_set_handlers(&vub->chardev, NULL, NULL, vub_event, NULL,
>                               dev, NULL, true);
>  }
> diff --git a/hw/virtio/vhost-user-device-pci.c b/hw/virtio/vhost-user-device-pci.c
> index efaf55d3dd..314bacfb7a 100644
> --- a/hw/virtio/vhost-user-device-pci.c
> +++ b/hw/virtio/vhost-user-device-pci.c
> @@ -8,14 +8,18 @@
>   */

>  #include "qemu/osdep.h"
> +#include "qapi/error.h"
>  #include "hw/qdev-properties.h"
>  #include "hw/virtio/vhost-user-base.h"
>  #include "hw/virtio/virtio-pci.h"

> +#define VIRTIO_DEVICE_PCI_CACHE_BAR 2
> +
>  struct VHostUserDevicePCI {
>      VirtIOPCIProxy parent_obj;

>      VHostUserBase vub;
> +    MemoryRegion cachebar;
>  };

>  #define TYPE_VHOST_USER_DEVICE_PCI "vhost-user-device-pci-base"
> @@ -25,10 +29,37 @@ OBJECT_DECLARE_SIMPLE_TYPE(VHostUserDevicePCI, VHOST_USER_DEVICE_PCI)
>  static void vhost_user_device_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
>  {
>      VHostUserDevicePCI *dev = VHOST_USER_DEVICE_PCI(vpci_dev);
> -    DeviceState *vdev = DEVICE(&dev->vub);
> -
> +    DeviceState *dev_state = DEVICE(&dev->vub);
> +    VirtIODevice *vdev = VIRTIO_DEVICE(dev_state);
> +    uint64_t offset = 0, cache_size = 0;
> +    int i;
> +   
>      vpci_dev->nvectors = 1;
> -    qdev_realize(vdev, BUS(&vpci_dev->bus), errp);
> +    qdev_realize(dev_state, BUS(&vpci_dev->bus), errp);
> +
> +    for (i = 0; i < vdev->n_shmem_regions; i++) {
> +        if (vdev->shmem_list[i].size > UINT64_MAX - cache_size) {
> +            error_setg(errp, "Total shared memory required overflow");
> +            return;
> +        }
> +        cache_size = cache_size + vdev->shmem_list[i].size;
> +    }
> +    if (cache_size) {
> +        memory_region_init(&dev->cachebar, OBJECT(vpci_dev),
> +                           "vhost-device-pci-cachebar", cache_size);
> +        for (i = 0; i < vdev->n_shmem_regions; i++) {
> +            memory_region_add_subregion(&dev->cachebar, offset,
> +                                        &vdev->shmem_list[i]);
> +            virtio_pci_add_shm_cap(vpci_dev, VIRTIO_DEVICE_PCI_CACHE_BAR,
> +                                   offset, vdev->shmem_list[i].size, i);
> +            offset = offset + vdev->shmem_list[i].size;
> +        }
> +        pci_register_bar(&vpci_dev->pci_dev, VIRTIO_DEVICE_PCI_CACHE_BAR,
> +                        PCI_BASE_ADDRESS_SPACE_MEMORY |
> +                        PCI_BASE_ADDRESS_MEM_PREFETCH |
> +                        PCI_BASE_ADDRESS_MEM_TYPE_64,
> +                        &dev->cachebar);
> +    }
>  }

>  static void vhost_user_device_pci_class_init(ObjectClass *klass, void *data)
> --
> 2.45.2
>

reply via email to

[Prev in Thread] Current Thread [Next in Thread]