From: jvrao
Subject: Re: [Qemu-devel] [PATCH -V3 09/32] virtio-9p: Implement P9_TWRITE/ Thread model in QEMU
Date: Sun, 28 Mar 2010 23:36:09 -0700
User-agent: Thunderbird 2.0.0.24 (Windows/20100228)

Aneesh Kumar K.V wrote:
> From: Anthony Liguori <address@hidden>

We have implemented all the vfs calls in a state-machine model so that we are
prepared for the model where the VCPU thread(s) do the initial work until they
need to block, then submit that work (via a function pointer) to a thread pool.
A thread in that pool picks up the work and completes the blocking call; when
the blocking call returns, a callback is invoked in the IO thread. The IO
thread then runs until the next blocking function, and the cycle starts over.

Basically, the VCPU/IO threads do all the non-blocking work and let the threads
in the thread pool handle the blocking calls like mkdir(), stat(), etc.
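
A rough sketch of how I read that flow (all the types and helpers below are
made up purely for illustration; they are not the actual QEMU APIs):

    /* Illustrative only: hypothetical types and helpers, not the real code. */
    typedef void (*BlockingFn)(void *arg);    /* the blocking call, e.g. a stat()/mkdir() wrapper */
    typedef void (*CompletionFn)(void *arg);  /* resumes the state machine in the IO thread */

    struct WorkItem {
        BlockingFn   fn;    /* runs in a thread-pool thread */
        CompletionFn done;  /* runs back in the IO thread when fn returns */
        void *arg;          /* per-request state, e.g. a V9fsWriteState */
    };

    /* Hypothetical helpers, assumed only for the sake of the sketch. */
    extern void submit_to_thread_pool(struct WorkItem *w);
    extern void schedule_in_io_thread(CompletionFn done, void *arg);

    /* VCPU/IO thread: do the non-blocking part, then hand off the blocking call. */
    static void run_until_blocking(struct WorkItem *w)
    {
        /* ... unmarshal the PDU, look up the fid, other non-blocking setup ... */
        submit_to_thread_pool(w);
    }

    /* Thread-pool thread: perform the blocking call, then bounce the completion
     * callback back to the IO thread, which runs until the next blocking point. */
    static void pool_worker(struct WorkItem *w)
    {
        w->fn(w->arg);                          /* the blocking syscall */
        schedule_in_io_thread(w->done, w->arg);
    }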

My question is: why not let the whole job be done by the thread in the thread
pool? The VCPU thread receives the PDU and hands the entire job over to a
worker thread. When all the work is completed, either the worker thread or the
IO thread (we can switch back at this point if needed) marks the request as
completed in the virtqueue and injects an interrupt to notify the guest.

We can still keep the same number of threads in the thread pool.
This way we are not increasing the number of threads employed by QEMU, and it
makes the code a lot easier to read and maintain. See the sketch below.
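
Something along these lines, again with made-up helper names just to show the
shape of it:

    /* Illustrative only: the whole request is pushed to a worker thread. */
    extern void queue_for_worker(V9fsState *s, V9fsPDU *pdu);      /* hypothetical */
    extern void complete_and_notify(V9fsState *s, V9fsPDU *pdu);   /* hypothetical */

    /* VCPU thread: just receive the PDU and queue it; no 9P work here. */
    static void vcpu_receive_pdu(V9fsState *s, V9fsPDU *pdu)
    {
        queue_for_worker(s, pdu);
    }

    /* Worker thread: free to block, so the handler is plain sequential
     * code -- lseek(), writev(), etc. -- with no callback chain. */
    static void worker_handle_pdu(V9fsState *s, V9fsPDU *pdu)
    {
        /* ... unmarshal, lookup_fid(), lseek(), writev() ... */

        /* Either here or back in the IO thread: mark the request as used
         * in the virtqueue and inject the interrupt to notify the guest. */
        complete_and_notify(s, pdu);
    }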

I may be missing something, but I would like to know more about the advantages
of this model.

Thanks,
JV

> 
> This gets write to file to work
> 
> Signed-off-by: Anthony Liguori <address@hidden>
> Signed-off-by: Venkateswararao Jujjuri <address@hidden>
> Signed-off-by: Aneesh Kumar K.V <address@hidden>
> ---
>  hw/virtio-9p-local.c |    7 ++++
>  hw/virtio-9p.c       |   97 ++++++++++++++++++++++++++++++++++++++++++++++++-
>  2 files changed, 102 insertions(+), 2 deletions(-)
> 
> diff --git a/hw/virtio-9p-local.c b/hw/virtio-9p-local.c
> index d77ecc2..c5d1db3 100644
> --- a/hw/virtio-9p-local.c
> +++ b/hw/virtio-9p-local.c
> @@ -129,6 +129,12 @@ static off_t local_lseek(void *opaque, int fd, off_t offset, int whence)
>      return lseek(fd, offset, whence);
>  }
> 
> +static ssize_t local_writev(void *opaque, int fd, const struct iovec *iov,
> +                         int iovcnt)
> +{
> +    return writev(fd, iov, iovcnt);
> +}
> +
>  static V9fsPosixFileOperations ops = {
>      .lstat = local_lstat,
>      .setuid = local_setuid,
> @@ -143,6 +149,7 @@ static V9fsPosixFileOperations ops = {
>      .seekdir = local_seekdir,
>      .readv = local_readv,
>      .lseek = local_lseek,
> +    .writev = local_writev,
>  };
> 
>  V9fsPosixFileOperations *virtio_9p_init_local(const char *path)
> diff --git a/hw/virtio-9p.c b/hw/virtio-9p.c
> index 3ac6255..bc26d66 100644
> --- a/hw/virtio-9p.c
> +++ b/hw/virtio-9p.c
> @@ -168,6 +168,12 @@ static off_t posix_lseek(V9fsState *s, int fd, off_t offset, int whence)
>      return s->ops->lseek(s->ops->opaque, fd, offset, whence);
>  }
> 
> +static int posix_writev(V9fsState *s, int fd, const struct iovec *iov,
> +                       int iovcnt)
> +{
> +    return s->ops->writev(s->ops->opaque, fd, iov, iovcnt);
> +}
> +
>  static void v9fs_string_init(V9fsString *str)
>  {
>      str->data = NULL;
> @@ -1319,10 +1325,97 @@ out:
>      complete_pdu(s, pdu, err);
>  }
> 
> +typedef struct V9fsWriteState {
> +    V9fsPDU *pdu;
> +    size_t offset;
> +    int32_t fid;
> +    int32_t len;
> +    int32_t count;
> +    int32_t total;
> +    int64_t off;
> +    V9fsFidState *fidp;
> +    struct iovec iov[128]; /* FIXME: bad, bad, bad */
> +    struct iovec *sg;
> +    int cnt;
> +} V9fsWriteState;
> +
> +static void v9fs_write_post_writev(V9fsState *s, V9fsWriteState *vs,
> +                                   ssize_t err)
> +{
> +    BUG_ON(vs->len < 0);
> +    vs->total += vs->len;
> +    vs->sg = adjust_sg(vs->sg, vs->len, &vs->cnt);
> +    if (vs->total < vs->count && vs->len > 0) {
> +        do {
> +            if (0)
> +                print_sg(vs->sg, vs->cnt);
> +            vs->len =  posix_writev(s, vs->fidp->fd, vs->sg, vs->cnt);
> +        } while (vs->len == -1 && errno == EINTR);
> +        v9fs_write_post_writev(s, vs, err);
> +    }
> +    vs->offset += pdu_marshal(vs->pdu, vs->offset, "d", vs->total);
> +
> +    err = vs->offset;
> +    complete_pdu(s, vs->pdu, err);
> +    qemu_free(vs);
> +}
> +
> +static void v9fs_write_post_lseek(V9fsState *s, V9fsWriteState *vs, ssize_t err)
> +{
> +    BUG_ON(err == -1);
> +
> +    vs->sg = cap_sg(vs->sg, vs->count, &vs->cnt);
> +
> +    if (vs->total < vs->count) {
> +        do {
> +            if (0)
> +                print_sg(vs->sg, vs->cnt);
> +            vs->len = posix_writev(s, vs->fidp->fd, vs->sg, vs->cnt);
> +        } while (vs->len == -1 && errno == EINTR);
> +
> +        v9fs_write_post_writev(s, vs, err);
> +        return;
> +    }
> +
> +    complete_pdu(s, vs->pdu, err);
> +    qemu_free(vs);
> +}
> +
>  static void v9fs_write(V9fsState *s, V9fsPDU *pdu)
>  {
> -    if (debug_9p_pdu)
> -        pprint_pdu(pdu);
> +    V9fsWriteState *vs;
> +    ssize_t err;
> +
> +    vs = qemu_malloc(sizeof(*vs));
> +
> +    vs->pdu = pdu;
> +    vs->offset = 7;
> +    vs->sg = vs->iov;
> +    vs->total = 0;
> +    vs->len = 0;
> +
> +    pdu_unmarshal(vs->pdu, vs->offset, "dqdv", &vs->fid, &vs->off, &vs->count,
> +                    vs->sg, &vs->cnt);
> +
> +    vs->fidp = lookup_fid(s, vs->fid);
> +    if (vs->fidp == NULL) {
> +        err = -EINVAL;
> +        goto out;
> +    }
> +
> +    if (vs->fidp->fd == -1) {
> +        err = -EINVAL;
> +        goto out;
> +    }
> +
> +    err = posix_lseek(s, vs->fidp->fd, vs->off, SEEK_SET);
> +
> +    v9fs_write_post_lseek(s, vs, err);
> +    return;
> +
> +out:
> +    complete_pdu(s, vs->pdu, err);
> +    qemu_free(vs);
>  }
> 
>  static void v9fs_create(V9fsState *s, V9fsPDU *pdu)





