qemu-devel

Re: [Qemu-devel] [PATCH v3 3/4] virtio-9p: break device if buffers are misconfigured


From: Michael S. Tsirkin
Subject: Re: [Qemu-devel] [PATCH v3 3/4] virtio-9p: break device if buffers are misconfigured
Date: Thu, 22 Jun 2017 01:09:24 +0300

On Wed, Jun 21, 2017 at 07:42:15PM +0200, Greg Kurz wrote:
> The 9P protocol is transport agnostic: if the guest misconfigured the
> buffers, the best we can do is to set the broken flag on the device.
> 
> We check if the transport isn't broken already to avoid printing extra
> error messages.
> 
> Signed-off-by: Greg Kurz <address@hidden>
> ---
>  hw/9pfs/9p.c               |    2 +-
>  hw/9pfs/9p.h               |    2 +-
>  hw/9pfs/virtio-9p-device.c |   45 ++++++++++++++++++++++++++++++++++++++++----
>  hw/9pfs/xen-9p-backend.c   |    3 ++-
>  4 files changed, 45 insertions(+), 7 deletions(-)
> 
> diff --git a/hw/9pfs/9p.c b/hw/9pfs/9p.c
> index 96d268334865..da0d6da65b45 100644
> --- a/hw/9pfs/9p.c
> +++ b/hw/9pfs/9p.c
> @@ -1664,7 +1664,7 @@ static void v9fs_init_qiov_from_pdu(QEMUIOVector *qiov, V9fsPDU *pdu,
>      unsigned int niov;
>  
>      if (is_write) {
> -        pdu->s->transport->init_out_iov_from_pdu(pdu, &iov, &niov);
> +        pdu->s->transport->init_out_iov_from_pdu(pdu, &iov, &niov, size + skip);
>      } else {
>          pdu->s->transport->init_in_iov_from_pdu(pdu, &iov, &niov, size + skip);
>      }
> diff --git a/hw/9pfs/9p.h b/hw/9pfs/9p.h
> index aac1b0b2ce3d..d1cfeaf10e4f 100644
> --- a/hw/9pfs/9p.h
> +++ b/hw/9pfs/9p.h
> @@ -363,7 +363,7 @@ struct V9fsTransport {
>      void        (*init_in_iov_from_pdu)(V9fsPDU *pdu, struct iovec **piov,
>                                          unsigned int *pniov, size_t size);
>      void        (*init_out_iov_from_pdu)(V9fsPDU *pdu, struct iovec **piov,
> -                                         unsigned int *pniov);
> +                                         unsigned int *pniov, size_t size);
>      void        (*push_and_notify)(V9fsPDU *pdu);
>  };
>  
> diff --git a/hw/9pfs/virtio-9p-device.c b/hw/9pfs/virtio-9p-device.c
> index 1a68c1622d3a..873f42e24818 100644
> --- a/hw/9pfs/virtio-9p-device.c
> +++ b/hw/9pfs/virtio-9p-device.c
> @@ -146,8 +146,18 @@ static ssize_t virtio_pdu_vmarshal(V9fsPDU *pdu, size_t offset,
>      V9fsState *s = pdu->s;
>      V9fsVirtioState *v = container_of(s, V9fsVirtioState, state);
>      VirtQueueElement *elem = v->elems[pdu->idx];
> +    int ret;
>  
> -    return v9fs_iov_vmarshal(elem->in_sg, elem->in_num, offset, 1, fmt, ap);
> +    ret = v9fs_iov_vmarshal(elem->in_sg, elem->in_num, offset, 1, fmt, ap);
> +    if (ret < 0) {
> +        VirtIODevice *vdev = VIRTIO_DEVICE(v);
> +
> +        if (!vdev->broken) {
> +            virtio_error(vdev, "Failed to encode VirtFS reply type %d",
> +                         pdu->id + 1);
> +        }
> +    }
> +    return ret;
>  }
>  
>  static ssize_t virtio_pdu_vunmarshal(V9fsPDU *pdu, size_t offset,
> @@ -156,28 +166,55 @@ static ssize_t virtio_pdu_vunmarshal(V9fsPDU *pdu, size_t offset,
>      V9fsState *s = pdu->s;
>      V9fsVirtioState *v = container_of(s, V9fsVirtioState, state);
>      VirtQueueElement *elem = v->elems[pdu->idx];
> +    int ret;
>  
> -    return v9fs_iov_vunmarshal(elem->out_sg, elem->out_num, offset, 1, fmt, ap);
> +    ret = v9fs_iov_vunmarshal(elem->out_sg, elem->out_num, offset, 1, fmt, ap);
> +    if (ret < 0) {
> +        VirtIODevice *vdev = VIRTIO_DEVICE(v);
> +
> +        if (!vdev->broken) {

Do we really need this test? And why?

> +            virtio_error(vdev, "Failed to decode VirtFS request type %d",
> +                         pdu->id);
> +        }
> +    }
> +    return ret;
>  }
>  
> -/* The size parameter is used by other transports. Do not drop it. */
>  static void virtio_init_in_iov_from_pdu(V9fsPDU *pdu, struct iovec **piov,
>                                          unsigned int *pniov, size_t size)
>  {
>      V9fsState *s = pdu->s;
>      V9fsVirtioState *v = container_of(s, V9fsVirtioState, state);
>      VirtQueueElement *elem = v->elems[pdu->idx];
> +    size_t buf_size = iov_size(elem->in_sg, elem->in_num);
> +
> +    if (buf_size < size) {
> +        VirtIODevice *vdev = VIRTIO_DEVICE(v);
> +
> +        virtio_error(vdev,
> +                     "VirtFS reply type %d needs %zu bytes, buffer has %zu",
> +                     pdu->id + 1, size, buf_size);
> +    }
>  
>      *piov = elem->in_sg;
>      *pniov = elem->in_num;
>  }
>  
>  static void virtio_init_out_iov_from_pdu(V9fsPDU *pdu, struct iovec **piov,
> -                                         unsigned int *pniov)
> +                                         unsigned int *pniov, size_t size)
>  {
>      V9fsState *s = pdu->s;
>      V9fsVirtioState *v = container_of(s, V9fsVirtioState, state);
>      VirtQueueElement *elem = v->elems[pdu->idx];
> +    size_t buf_size = iov_size(elem->out_sg, elem->out_num);
> +
> +    if (buf_size < size) {
> +        VirtIODevice *vdev = VIRTIO_DEVICE(v);
> +
> +        virtio_error(vdev,
> +                     "VirtFS request type %d needs %zu bytes, buffer has 
> %zu",
> +                     pdu->id, size, buf_size);
> +    }
>  
>      *piov = elem->out_sg;
>      *pniov = elem->out_num;
> diff --git a/hw/9pfs/xen-9p-backend.c b/hw/9pfs/xen-9p-backend.c
> index 922cc967be63..a82cf817fe45 100644
> --- a/hw/9pfs/xen-9p-backend.c
> +++ b/hw/9pfs/xen-9p-backend.c
> @@ -147,7 +147,8 @@ static ssize_t xen_9pfs_pdu_vunmarshal(V9fsPDU *pdu,
>  
>  static void xen_9pfs_init_out_iov_from_pdu(V9fsPDU *pdu,
>                                             struct iovec **piov,
> -                                           unsigned int *pniov)
> +                                           unsigned int *pniov,
> +                                           size_t size)
>  {
>      Xen9pfsDev *xen_9pfs = container_of(pdu->s, Xen9pfsDev, state);
>      Xen9pfsRing *ring = &xen_9pfs->rings[pdu->tag % xen_9pfs->num_rings];


