From: Arun R Bharadwaj
Subject: [Qemu-devel] [PATCH 4/9] Convert stat into 2nd threading model
Date: Thu, 14 Oct 2010 17:54:25 +0530
User-agent: StGit/0.15

From: Sripathi Kodi <address@hidden>

In this model the vcpu thread executes only until the first
blocking operation. It then hands the call over to the worker
thread, which does everything else needed to complete the
call and may make multiple blocking calls along the way.
Finally, it signals the IO thread to do complete_pdu().
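
Roughly, a converted call splits into three pieces, one per
thread. A minimal sketch of the pattern follows; ReqState,
handle_call and do_blocking_work are made-up names for
illustration only (the real instance for stat is in the diff
below):

    typedef struct ReqState {
        V9fsState *s;
        V9fsPDU *pdu;
        int32_t err;
        ThreadletWork work;
    } ReqState;

    static int32_t do_blocking_work(ReqState *rs); /* stands in
                                                      for the real
                                                      work */

    /* IO thread: the only place complete_pdu() runs. */
    static void call_complete(void *opaque)
    {
        ReqState *rs = opaque;
        complete_pdu(rs->s, rs->pdu, rs->err);
        qemu_free(rs);
    }

    /* Worker thread: free to make any number of blocking calls,
     * then signals the IO thread to finish the PDU. */
    static void call_worker(ThreadletWork *work)
    {
        ReqState *rs = container_of(work, ReqState, work);
        rs->err = do_blocking_work(rs);
        v9fs_async_helper_done(call_complete, rs);
    }

    /* vcpu thread: runs only until the first blocking point,
     * then queues the remainder of the call as a threadlet. */
    static void handle_call(V9fsState *s, V9fsPDU *pdu)
    {
        ReqState *rs = qemu_malloc(sizeof(*rs));
        rs->s = s;
        rs->pdu = pdu;
        rs->work.func = call_worker;
        submit_threadlet(&rs->work);
    }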

Gautham suggested that I use a generic function to call
complete_pdu, which could further simplify the post_op
structure. However, some of the calls (stat included) need
to free up some memory after calling complete_pdu. Handling
this becomes messy if I use a common complete function.
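
To illustrate the problem: complete_pdu() itself is uniform,
but what must be freed afterwards differs per call (stat also
needs v9fs_stat_free()), so a hypothetical common helper,
sketched here with made-up names, would end up carrying a
per-call cleanup hook anyway:

    typedef struct AsyncCommon {
        V9fsState *s;
        V9fsPDU *pdu;
        int32_t err;
        void (*cleanup)(struct AsyncCommon *ac); /* per-call free
                                                    hook */
    } AsyncCommon;

    /* IO thread: one completion function for every call type. */
    static void generic_do_complete(void *opaque)
    {
        AsyncCommon *ac = opaque;
        complete_pdu(ac->s, ac->pdu, ac->err);
        if (ac->cleanup) {
            ac->cleanup(ac);  /* stat would v9fs_stat_free() here */
        }
        qemu_free(ac);
    }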

Review comments welcome. A review from Gautham in particular
is a necessity.

Signed-off-by: Sripathi Kodi <address@hidden>
---
 hw/virtio-9p.c |   58 ++++++++++++++++++++++++++++++++------------------------
 hw/virtio-9p.h |    4 ++++
 2 files changed, 37 insertions(+), 25 deletions(-)

diff --git a/hw/virtio-9p.c b/hw/virtio-9p.c
index ef6175a..d994293 100644
--- a/hw/virtio-9p.c
+++ b/hw/virtio-9p.c
@@ -1381,54 +1381,62 @@ out:
     v9fs_string_free(&aname);
 }
 
-static void v9fs_stat_post_lstat(V9fsState *s, V9fsStatState *vs, int err)
+/* This is called by the IO thread */
+static void v9fs_stat_do_complete(void *opaque)
 {
-    if (err == -1) {
-        err = -errno;
+    V9fsStatState *vs = (V9fsStatState *)opaque;
+    complete_pdu(vs->s, vs->pdu, vs->err);
+    v9fs_stat_free(&vs->v9stat);
+    qemu_free(vs);
+}
+
+/* This is called by the async thread. It does everything
+ * other than calling complete_pdu
+ */
+static void v9fs_stat_worker(ThreadletWork *work)
+{
+    V9fsStatState *vs = container_of(work, V9fsStatState, work);
+
+    vs->fidp = lookup_fid(vs->s, vs->fid);
+    if (vs->fidp == NULL) {
+        vs->err = -ENOENT;
         goto out;
     }
 
-    err = stat_to_v9stat(s, &vs->fidp->path, &vs->stbuf, &vs->v9stat);
-    if (err) {
+    qemu_rwmutex_rdlock(&global_rename_lock);
+    vs->err = v9fs_do_lstat(vs->s, &vs->fidp->path, &vs->stbuf);
+    if (vs->err == -1) {
+        vs->err = -errno;
+        goto out;
+    }
+    vs->err = stat_to_v9stat(vs->s, &vs->fidp->path, &vs->stbuf, &vs->v9stat);
+    if (vs->err) {
         goto out;
     }
     vs->offset += pdu_marshal(vs->pdu, vs->offset, "wS", 0, &vs->v9stat);
-    err = vs->offset;
+    vs->err = vs->offset;
 
 out:
-    complete_pdu(s, vs->pdu, err);
-    v9fs_stat_free(&vs->v9stat);
-    qemu_free(vs);
+    qemu_rwmutex_unlock(&global_rename_lock);
+    v9fs_async_helper_done(v9fs_stat_do_complete, vs);
 }
 
 static void v9fs_stat(V9fsState *s, V9fsPDU *pdu)
 {
-    int32_t fid;
     V9fsStatState *vs;
-    ssize_t err = 0;
 
     vs = qemu_malloc(sizeof(*vs));
     vs->pdu = pdu;
     vs->offset = 7;
+    vs->s = s;
 
     memset(&vs->v9stat, 0, sizeof(vs->v9stat));
 
-    pdu_unmarshal(vs->pdu, vs->offset, "d", &fid);
-
-    vs->fidp = lookup_fid(s, fid);
-    if (vs->fidp == NULL) {
-        err = -ENOENT;
-        goto out;
-    }
+    pdu_unmarshal(vs->pdu, vs->offset, "d", &vs->fid);
 
-    err = v9fs_do_lstat(s, &vs->fidp->path, &vs->stbuf);
-    v9fs_stat_post_lstat(s, vs, err);
+    vs->work.func = v9fs_stat_worker;
+    submit_threadlet(&vs->work);
     return;
-
-out:
-    complete_pdu(s, vs->pdu, err);
-    v9fs_stat_free(&vs->v9stat);
-    qemu_free(vs);
 }
 
 static void v9fs_getattr_post_lstat(V9fsState *s, V9fsStatStateDotl *vs,
diff --git a/hw/virtio-9p.h b/hw/virtio-9p.h
index f58d3d8..9ced508 100644
--- a/hw/virtio-9p.h
+++ b/hw/virtio-9p.h
@@ -248,6 +248,10 @@ typedef struct V9fsStatState {
     V9fsStat v9stat;
     V9fsFidState *fidp;
     struct stat stbuf;
+    V9fsState *s;
+    int32_t fid;
+    int32_t err;
+    ThreadletWork work;
 } V9fsStatState;
 
 typedef struct V9fsStatDotl {



