
[Qemu-devel] Re: [PATCH 40/41] virtio-blk: port to vmstate


From: Michael S. Tsirkin
Subject: [Qemu-devel] Re: [PATCH 40/41] virtio-blk: port to vmstate
Date: Wed, 2 Dec 2009 19:54:18 +0200
User-agent: Mutt/1.5.19 (2009-01-05)

On Wed, Dec 02, 2009 at 01:04:38PM +0100, Juan Quintela wrote:
> This driver sends a struct directly on the wire, where the struct
> contains:
> - target_phys_addr_t (can be 32 or 64 bits depending on the host)
> - void * (a host pointer)
> - size_t.
> 
> It has no hope of working across 32/64-bit or big/little-endian hosts.
> The same problem exists in the previous patch.

I don't understand how this works at all.
Passing pointers in the migration buffer?
Does the guest just happen to get mapped at the same address
in qemu after migration?
Even with address randomization?

Does anyone know?

Also, no security, right?
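
To make the portability point concrete, here is a minimal, self-contained
sketch (stubbed types and invented field names, not the real VirtQueueElement)
of the kind of layout being dumped raw into the stream:

/* Illustrative only -- fields are invented, but the type categories match
 * what the commit message describes. */
#include <stdio.h>
#include <stdint.h>
#include <sys/uio.h>                 /* struct iovec: void *iov_base, size_t iov_len */

typedef uint64_t target_phys_addr_t; /* stub: 32 or 64 bits depending on the build */

struct fake_elem {
    unsigned int index;
    target_phys_addr_t addr[4];      /* width depends on the build */
    struct iovec sg[4];              /* host pointer + size_t: width depends on the
                                      * host, and the pointer values only mean
                                      * anything inside the source qemu process */
};

int main(void)
{
    /* sizeof() differs between 32- and 64-bit builds, so the raw blob written
     * by a qemu_put_buffer() of the whole struct is not even the same length
     * on both ends, and nothing byte-swaps it for endianness. */
    printf("sizeof(struct fake_elem) = %zu\n", sizeof(struct fake_elem));
    return 0;
}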

> Signed-off-by: Juan Quintela <address@hidden>
> ---
>  hw/virtio-blk.c |   50 +++++++++++++++++++++++++++++++++++++-------------
>  1 files changed, 37 insertions(+), 13 deletions(-)
> 
> diff --git a/hw/virtio-blk.c b/hw/virtio-blk.c
> index 0b04d0d..c618dc6 100644
> --- a/hw/virtio-blk.c
> +++ b/hw/virtio-blk.c
> @@ -450,28 +450,34 @@ static uint32_t virtio_blk_get_features(VirtIODevice *vdev)
>      return features;
>  }
> 
> -static void virtio_blk_save(QEMUFile *f, void *opaque)
> +static const VMStateDescription vmstate_virtio_blk_req = {
> +    .name = "virtio-blk-req",
> +    .version_id = 1,
> +    .minimum_version_id = 1,
> +    .minimum_version_id_old = 1,
> +    .fields      = (VMStateField []) {
> +        VMSTATE_BUFFER_UNSAFE(elem, VirtIOBlockReq, 0, sizeof(VirtQueueElement)),

line too long

> +        VMSTATE_END_OF_LIST()
> +    }
> +};
> +
> +static void put_virtio_req(QEMUFile *f, void *pv, size_t size)
>  {
> -    VirtIOBlock *s = opaque;
> +    VirtIOBlockReqHead *rq = pv;
>      VirtIOBlockReq *req;;
> 
> -    virtio_save(&s->vdev, f);
> -
> -    QLIST_FOREACH(req, &s->rq, next) {
> +    QLIST_FOREACH(req, rq, next) {
>          qemu_put_sbyte(f, 1);
>          qemu_put_buffer(f, (unsigned char*)&req->elem, sizeof(req->elem));
>      }
>      qemu_put_sbyte(f, 0);
>  }
> 
> -static int virtio_blk_load(QEMUFile *f, void *opaque, int version_id)
> +static int get_virtio_req(QEMUFile *f, void *pv, size_t size)
>  {
> -    VirtIOBlock *s = opaque;
> +    VirtIOBlockReqHead *rq = pv;
> +    VirtIOBlock *s = container_of(rq, struct VirtIOBlock, rq);
> 
> -    if (version_id != 2)
> -        return -EINVAL;
> -
> -    virtio_load(&s->vdev, f);
>      while (qemu_get_sbyte(f)) {
>          VirtIOBlockReq *req = virtio_blk_alloc_request(s);
>          qemu_get_buffer(f, (unsigned char*)&req->elem, sizeof(req->elem));
> @@ -481,6 +487,25 @@ static int virtio_blk_load(QEMUFile *f, void *opaque, int version_id)
>      return 0;
>  }
> 
> +const VMStateInfo vmstate_info_virtio_blk_req = {
> +    .name = "virtio_blk_req",
> +    .get  = get_virtio_req,
> +    .put  = put_virtio_req,
> +};
> +
> +static const VMStateDescription vmstate_virtio_blk = {
> +    .name = "virtio-blk",
> +    .version_id = 2,
> +    .minimum_version_id = 2,
> +    .minimum_version_id_old = 2,
> +    .fields      = (VMStateField []) {
> +        VMSTATE_VIRTIO(vdev, VirtIOBlock),
> +        VMSTATE_SINGLE(rq, VirtIOBlock, 0,
> +                       vmstate_info_virtio_blk_req, VirtIOBlockReqHead),
> +        VMSTATE_END_OF_LIST()
> +    }
> +};
> +
>  VirtIODevice *virtio_blk_init(DeviceState *dev, DriveInfo *dinfo)
>  {
>      VirtIOBlock *s;
> @@ -510,8 +535,7 @@ VirtIODevice *virtio_blk_init(DeviceState *dev, DriveInfo *dinfo)
>      s->vq = virtio_add_queue(&s->vdev, 128, virtio_blk_handle_output);
> 
>      qemu_add_vm_change_state_handler(virtio_blk_dma_restart_cb, s);
> -    register_savevm("virtio-blk", virtio_blk_id++, 2,
> -                    virtio_blk_save, virtio_blk_load, s);
> +    vmstate_register(virtio_blk_id++, &vmstate_virtio_blk, s);
> 
>      return &s->vdev;
>  }
> -- 
> 1.6.5.2
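
As an aside, the general shape this series converts devices to looks roughly
like the following sketch (MyDevice and its fields are invented for
illustration; the VMSTATE_* macros are the standard ones used in the patch
above, and the vmstate_register() call follows the signature used in this
series):

/* Sketch only -- needs QEMU's vmstate declarations (hw/hw.h) to build. */
typedef struct MyDevice {
    uint32_t reg;
    uint8_t buf[16];
} MyDevice;

static const VMStateDescription vmstate_my_device = {
    .name = "my-device",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(reg, MyDevice),   /* scalar, saved in a fixed, endian-safe encoding */
        VMSTATE_BUFFER(buf, MyDevice),   /* fixed-size byte buffer */
        VMSTATE_END_OF_LIST()
    }
};

/* registered at init time instead of register_savevm():
 *     vmstate_register(instance_id, &vmstate_my_device, s);
 */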



