Re: [PATCH v9 06/12] vdpa: request iova_range only once


From: Jason Wang
Subject: Re: [PATCH v9 06/12] vdpa: request iova_range only once
Date: Fri, 16 Dec 2022 15:29:04 +0800

On Thu, Dec 15, 2022 at 7:32 PM Eugenio Pérez <eperezma@redhat.com> wrote:
>
> Currently iova range is requested once per queue pair in the case of
> net. Reduce the number of ioctls asking it once at initialization and
> reusing that value for each vhost_vdpa.
>
> Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
> ---
>  hw/virtio/vhost-vdpa.c | 15 ---------------
>  net/vhost-vdpa.c       | 27 ++++++++++++++-------------
>  2 files changed, 14 insertions(+), 28 deletions(-)
>
> diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
> index 691bcc811a..9b7f4ef083 100644
> --- a/hw/virtio/vhost-vdpa.c
> +++ b/hw/virtio/vhost-vdpa.c
> @@ -365,19 +365,6 @@ static int vhost_vdpa_add_status(struct vhost_dev *dev, uint8_t status)
>      return 0;
>  }
>
> -static void vhost_vdpa_get_iova_range(struct vhost_vdpa *v)
> -{
> -    int ret = vhost_vdpa_call(v->dev, VHOST_VDPA_GET_IOVA_RANGE,
> -                              &v->iova_range);
> -    if (ret != 0) {
> -        v->iova_range.first = 0;
> -        v->iova_range.last = UINT64_MAX;
> -    }
> -
> -    trace_vhost_vdpa_get_iova_range(v->dev, v->iova_range.first,
> -                                    v->iova_range.last);
> -}
> -
>  /*
>   * The use of this function is for requests that only need to be
>   * applied once. Typically such request occurs at the beginning
> @@ -465,8 +452,6 @@ static int vhost_vdpa_init(struct vhost_dev *dev, void *opaque, Error **errp)
>          goto err;
>      }
>
> -    vhost_vdpa_get_iova_range(v);
> -
>      if (!vhost_vdpa_first_dev(dev)) {
>          return 0;
>      }
> diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
> index 2c0ff6d7b0..b6462f0192 100644
> --- a/net/vhost-vdpa.c
> +++ b/net/vhost-vdpa.c
> @@ -541,14 +541,15 @@ static const VhostShadowVirtqueueOps vhost_vdpa_net_svq_ops = {
>  };
>
>  static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
> -                                           const char *device,
> -                                           const char *name,
> -                                           int vdpa_device_fd,
> -                                           int queue_pair_index,
> -                                           int nvqs,
> -                                           bool is_datapath,
> -                                           bool svq,
> -                                           VhostIOVATree *iova_tree)
> +                                       const char *device,
> +                                       const char *name,
> +                                       int vdpa_device_fd,
> +                                       int queue_pair_index,
> +                                       int nvqs,
> +                                       bool is_datapath,
> +                                       bool svq,
> +                                       struct vhost_vdpa_iova_range iova_range,
> +                                       VhostIOVATree *iova_tree)

Nit: it's better not to mix style changes.

Other than this:

Acked-by: Jason Wang <jasonwang@redhat.com>

Thanks

>  {
>      NetClientState *nc = NULL;
>      VhostVDPAState *s;
> @@ -567,6 +568,7 @@ static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
>      s->vhost_vdpa.device_fd = vdpa_device_fd;
>      s->vhost_vdpa.index = queue_pair_index;
>      s->vhost_vdpa.shadow_vqs_enabled = svq;
> +    s->vhost_vdpa.iova_range = iova_range;
>      s->vhost_vdpa.iova_tree = iova_tree;
>      if (!is_datapath) {
>          s->cvq_cmd_out_buffer = qemu_memalign(qemu_real_host_page_size(),
> @@ -646,6 +648,7 @@ int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
>      int vdpa_device_fd;
>      g_autofree NetClientState **ncs = NULL;
>      g_autoptr(VhostIOVATree) iova_tree = NULL;
> +    struct vhost_vdpa_iova_range iova_range;
>      NetClientState *nc;
>      int queue_pairs, r, i = 0, has_cvq = 0;
>
> @@ -689,14 +692,12 @@ int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
>          return queue_pairs;
>      }
>
> +    vhost_vdpa_get_iova_range(vdpa_device_fd, &iova_range);
>      if (opts->x_svq) {
> -        struct vhost_vdpa_iova_range iova_range;
> -
>          if (!vhost_vdpa_net_valid_svq_features(features, errp)) {
>              goto err_svq;
>          }
>
> -        vhost_vdpa_get_iova_range(vdpa_device_fd, &iova_range);
>          iova_tree = vhost_iova_tree_new(iova_range.first, iova_range.last);
>      }
>
> @@ -705,7 +706,7 @@ int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
>      for (i = 0; i < queue_pairs; i++) {
>          ncs[i] = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
>                                       vdpa_device_fd, i, 2, true, opts->x_svq,
> -                                     iova_tree);
> +                                     iova_range, iova_tree);
>          if (!ncs[i])
>              goto err;
>      }
> @@ -713,7 +714,7 @@ int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
>      if (has_cvq) {
>          nc = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
>                                   vdpa_device_fd, i, 1, false,
> -                                 opts->x_svq, iova_tree);
> +                                 opts->x_svq, iova_range, iova_tree);
>          if (!nc)
>              goto err;
>      }
> --
> 2.31.1
>
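For context: the net_init_vhost_vdpa() hunk above calls a net/vhost-vdpa.c helper named vhost_vdpa_get_iova_range(vdpa_device_fd, &iova_range), i.e. a variant that takes the device fd rather than a struct vhost_vdpa. That helper is not shown in this patch (it is assumed to come from an earlier patch in the series); a minimal sketch of what it presumably looks like, mirroring the hw/virtio/vhost-vdpa.c version removed above:

/*
 * Sketch only, not part of the patch above.  Assumes <sys/ioctl.h>,
 * <stdint.h> and the vhost UAPI header (linux/vhost.h) are included,
 * which provide VHOST_VDPA_GET_IOVA_RANGE and struct vhost_vdpa_iova_range.
 */
static void vhost_vdpa_get_iova_range(int fd,
                                      struct vhost_vdpa_iova_range *iova_range)
{
    /* Ask the vDPA device once, on the device fd, for its usable IOVA range */
    int ret = ioctl(fd, VHOST_VDPA_GET_IOVA_RANGE, iova_range);

    if (ret < 0) {
        /* Fall back to the full 64-bit space if the ioctl is unsupported */
        iova_range->first = 0;
        iova_range->last = UINT64_MAX;
    }
}

With the range fetched once here, each vhost_vdpa instance simply copies it via s->vhost_vdpa.iova_range = iova_range instead of issuing one ioctl per queue pair.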



