[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
Re: [PATCH 01/31] vdpa: Reorder virtio/vhost-vdpa.c functions
|
From: |
Eugenio Perez Martin |
|
Subject: |
Re: [PATCH 01/31] vdpa: Reorder virtio/vhost-vdpa.c functions |
|
Date: |
Fri, 28 Jan 2022 08:57:51 +0100 |
On Fri, Jan 28, 2022 at 6:59 AM Jason Wang <jasowang@redhat.com> wrote:
>
>
> 在 2022/1/22 上午4:27, Eugenio Pérez 写道:
> > vhost_vdpa_set_features and vhost_vdpa_init need to use
> > vhost_vdpa_get_features in svq mode.
> >
> > vhost_vdpa_dev_start needs to use almost all _set_ functions:
> > vhost_vdpa_set_vring_dev_kick, vhost_vdpa_set_vring_dev_call,
> > vhost_vdpa_set_dev_vring_base and vhost_vdpa_set_dev_vring_num.
> >
> > No functional change intended.
>
>
> Is it related (a must) to the SVQ code?
>
Yes, SVQ needs to access the device variants of these functions to
configure the device, while exposing the SVQ ones.
For example, for set_features, SVQ needs to set the device features in
the start code, but expose the SVQ features to the guest.
Another possibility is to forward-declare them, but I feel that
pollutes the code more, doesn't it? Is there any reason to avoid the
reordering beyond reducing the number of changes/patches?
Thanks!
> Thanks
>
>
> >
> > Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
> > ---
> > hw/virtio/vhost-vdpa.c | 164 ++++++++++++++++++++---------------------
> > 1 file changed, 82 insertions(+), 82 deletions(-)
> >
> > diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
> > index 04ea43704f..6c10a7f05f 100644
> > --- a/hw/virtio/vhost-vdpa.c
> > +++ b/hw/virtio/vhost-vdpa.c
> > @@ -342,41 +342,6 @@ static bool vhost_vdpa_one_time_request(struct
> > vhost_dev *dev)
> > return v->index != 0;
> > }
> >
> > -static int vhost_vdpa_init(struct vhost_dev *dev, void *opaque, Error
> > **errp)
> > -{
> > - struct vhost_vdpa *v;
> > - assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
> > - trace_vhost_vdpa_init(dev, opaque);
> > - int ret;
> > -
> > - /*
> > - * Similar to VFIO, we end up pinning all guest memory and have to
> > - * disable discarding of RAM.
> > - */
> > - ret = ram_block_discard_disable(true);
> > - if (ret) {
> > - error_report("Cannot set discarding of RAM broken");
> > - return ret;
> > - }
> > -
> > - v = opaque;
> > - v->dev = dev;
> > - dev->opaque = opaque ;
> > - v->listener = vhost_vdpa_memory_listener;
> > - v->msg_type = VHOST_IOTLB_MSG_V2;
> > -
> > - vhost_vdpa_get_iova_range(v);
> > -
> > - if (vhost_vdpa_one_time_request(dev)) {
> > - return 0;
> > - }
> > -
> > - vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE |
> > - VIRTIO_CONFIG_S_DRIVER);
> > -
> > - return 0;
> > -}
> > -
> > static void vhost_vdpa_host_notifier_uninit(struct vhost_dev *dev,
> > int queue_index)
> > {
> > @@ -506,24 +471,6 @@ static int vhost_vdpa_set_mem_table(struct vhost_dev
> > *dev,
> > return 0;
> > }
> >
> > -static int vhost_vdpa_set_features(struct vhost_dev *dev,
> > - uint64_t features)
> > -{
> > - int ret;
> > -
> > - if (vhost_vdpa_one_time_request(dev)) {
> > - return 0;
> > - }
> > -
> > - trace_vhost_vdpa_set_features(dev, features);
> > - ret = vhost_vdpa_call(dev, VHOST_SET_FEATURES, &features);
> > - if (ret) {
> > - return ret;
> > - }
> > -
> > - return vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_FEATURES_OK);
> > -}
> > -
> > static int vhost_vdpa_set_backend_cap(struct vhost_dev *dev)
> > {
> > uint64_t features;
> > @@ -646,35 +593,6 @@ static int vhost_vdpa_get_config(struct vhost_dev
> > *dev, uint8_t *config,
> > return ret;
> > }
> >
> > -static int vhost_vdpa_dev_start(struct vhost_dev *dev, bool started)
> > -{
> > - struct vhost_vdpa *v = dev->opaque;
> > - trace_vhost_vdpa_dev_start(dev, started);
> > -
> > - if (started) {
> > - vhost_vdpa_host_notifiers_init(dev);
> > - vhost_vdpa_set_vring_ready(dev);
> > - } else {
> > - vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs);
> > - }
> > -
> > - if (dev->vq_index + dev->nvqs != dev->vq_index_end) {
> > - return 0;
> > - }
> > -
> > - if (started) {
> > - memory_listener_register(&v->listener, &address_space_memory);
> > - return vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK);
> > - } else {
> > - vhost_vdpa_reset_device(dev);
> > - vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE |
> > - VIRTIO_CONFIG_S_DRIVER);
> > - memory_listener_unregister(&v->listener);
> > -
> > - return 0;
> > - }
> > -}
> > -
> > static int vhost_vdpa_set_log_base(struct vhost_dev *dev, uint64_t base,
> > struct vhost_log *log)
> > {
> > @@ -735,6 +653,35 @@ static int vhost_vdpa_set_vring_call(struct vhost_dev
> > *dev,
> > return vhost_vdpa_call(dev, VHOST_SET_VRING_CALL, file);
> > }
> >
> > +static int vhost_vdpa_dev_start(struct vhost_dev *dev, bool started)
> > +{
> > + struct vhost_vdpa *v = dev->opaque;
> > + trace_vhost_vdpa_dev_start(dev, started);
> > +
> > + if (started) {
> > + vhost_vdpa_host_notifiers_init(dev);
> > + vhost_vdpa_set_vring_ready(dev);
> > + } else {
> > + vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs);
> > + }
> > +
> > + if (dev->vq_index + dev->nvqs != dev->vq_index_end) {
> > + return 0;
> > + }
> > +
> > + if (started) {
> > + memory_listener_register(&v->listener, &address_space_memory);
> > + return vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK);
> > + } else {
> > + vhost_vdpa_reset_device(dev);
> > + vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE |
> > + VIRTIO_CONFIG_S_DRIVER);
> > + memory_listener_unregister(&v->listener);
> > +
> > + return 0;
> > + }
> > +}
> > +
> > static int vhost_vdpa_get_features(struct vhost_dev *dev,
> > uint64_t *features)
> > {
> > @@ -745,6 +692,24 @@ static int vhost_vdpa_get_features(struct vhost_dev
> > *dev,
> > return ret;
> > }
> >
> > +static int vhost_vdpa_set_features(struct vhost_dev *dev,
> > + uint64_t features)
> > +{
> > + int ret;
> > +
> > + if (vhost_vdpa_one_time_request(dev)) {
> > + return 0;
> > + }
> > +
> > + trace_vhost_vdpa_set_features(dev, features);
> > + ret = vhost_vdpa_call(dev, VHOST_SET_FEATURES, &features);
> > + if (ret) {
> > + return ret;
> > + }
> > +
> > + return vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_FEATURES_OK);
> > +}
> > +
> > static int vhost_vdpa_set_owner(struct vhost_dev *dev)
> > {
> > if (vhost_vdpa_one_time_request(dev)) {
> > @@ -772,6 +737,41 @@ static bool vhost_vdpa_force_iommu(struct vhost_dev
> > *dev)
> > return true;
> > }
> >
> > +static int vhost_vdpa_init(struct vhost_dev *dev, void *opaque, Error
> > **errp)
> > +{
> > + struct vhost_vdpa *v;
> > + assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
> > + trace_vhost_vdpa_init(dev, opaque);
> > + int ret;
> > +
> > + /*
> > + * Similar to VFIO, we end up pinning all guest memory and have to
> > + * disable discarding of RAM.
> > + */
> > + ret = ram_block_discard_disable(true);
> > + if (ret) {
> > + error_report("Cannot set discarding of RAM broken");
> > + return ret;
> > + }
> > +
> > + v = opaque;
> > + v->dev = dev;
> > + dev->opaque = opaque ;
> > + v->listener = vhost_vdpa_memory_listener;
> > + v->msg_type = VHOST_IOTLB_MSG_V2;
> > +
> > + vhost_vdpa_get_iova_range(v);
> > +
> > + if (vhost_vdpa_one_time_request(dev)) {
> > + return 0;
> > + }
> > +
> > + vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE |
> > + VIRTIO_CONFIG_S_DRIVER);
> > +
> > + return 0;
> > +}
> > +
> > const VhostOps vdpa_ops = {
> > .backend_type = VHOST_BACKEND_TYPE_VDPA,
> > .vhost_backend_init = vhost_vdpa_init,
>
- [PATCH 00/31] vDPA shadow virtqueue, Eugenio Pérez, 2022/01/21
- [PATCH 02/31] vhost: Add VhostShadowVirtqueue, Eugenio Pérez, 2022/01/21
- [PATCH 01/31] vdpa: Reorder virtio/vhost-vdpa.c functions, Eugenio Pérez, 2022/01/21
- [PATCH 03/31] vdpa: Add vhost_svq_get_dev_kick_notifier, Eugenio Pérez, 2022/01/21
- [PATCH 04/31] vdpa: Add vhost_svq_set_svq_kick_fd, Eugenio Pérez, 2022/01/21
- [PATCH 05/31] vhost: Add Shadow VirtQueue kick forwarding capabilities, Eugenio Pérez, 2022/01/21
- [PATCH 06/31] vhost: Route guest->host notification through shadow virtqueue, Eugenio Pérez, 2022/01/21