qemu-devel
[Top][All Lists]
Advanced

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Qemu-devel] [PATCH 04/12] Add ThreadletQueue.


From: Arun R Bharadwaj
Subject: [Qemu-devel] [PATCH 04/12] Add ThreadletQueue.
Date: Thu, 13 Jan 2011 17:44:37 +0530
User-agent: StGit/0.15

This patch adds a global queue of type ThreadletQueue and
removes the earlier use of the request_list queue.

Since worker threads should be created only on the first
submission, we also track whether the global queue has been
initialized.

Signed-off-by: Arun R Bharadwaj <address@hidden>
---
 posix-aio-compat.c |  149 +++++++++++++++++++++++++++-------------------------
 1 files changed, 76 insertions(+), 73 deletions(-)

diff --git a/posix-aio-compat.c b/posix-aio-compat.c
index 4fa2c47..b5d70c9 100644
--- a/posix-aio-compat.c
+++ b/posix-aio-compat.c
@@ -31,8 +31,23 @@
 
 #include "block/raw-posix-aio.h"
 
+#define MAX_GLOBAL_THREADS  64
+#define MIN_GLOBAL_THREADS   8
+
 static QemuMutex aiocb_mutex;
 static QemuCond aiocb_completion;
+
+typedef struct ThreadletQueue
+{
+    QemuMutex lock;
+    QemuCond cond;
+    int max_threads;
+    int min_threads;
+    int cur_threads;
+    int idle_threads;
+    QTAILQ_HEAD(, ThreadletWork) request_list;
+} ThreadletQueue;
+
 typedef struct ThreadletWork
 {
     QTAILQ_ENTRY(ThreadletWork) node;
@@ -66,15 +81,10 @@ typedef struct PosixAioState {
     struct qemu_paiocb *first_aio;
 } PosixAioState;
 
-
-static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
-static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
-static pthread_t thread_id;
+/* Default ThreadletQueue */
+static ThreadletQueue globalqueue;
+static int globalqueue_init;
 static pthread_attr_t attr;
-static int max_threads = 64;
-static int cur_threads = 0;
-static int idle_threads = 0;
-static QTAILQ_HEAD(, ThreadletWork) request_list;
 
 #ifdef CONFIG_PREADV
 static int preadv_present = 1;
@@ -93,32 +103,6 @@ static void die(const char *what)
     die2(errno, what);
 }
 
-static void mutex_lock(pthread_mutex_t *mutex)
-{
-    int ret = pthread_mutex_lock(mutex);
-    if (ret) die2(ret, "pthread_mutex_lock");
-}
-
-static void mutex_unlock(pthread_mutex_t *mutex)
-{
-    int ret = pthread_mutex_unlock(mutex);
-    if (ret) die2(ret, "pthread_mutex_unlock");
-}
-
-static int cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
-                           struct timespec *ts)
-{
-    int ret = pthread_cond_timedwait(cond, mutex, ts);
-    if (ret && ret != ETIMEDOUT) die2(ret, "pthread_cond_timedwait");
-    return ret;
-}
-
-static void cond_signal(pthread_cond_t *cond)
-{
-    int ret = pthread_cond_signal(cond);
-    if (ret) die2(ret, "pthread_cond_signal");
-}
-
 static void thread_create(pthread_t *thread, pthread_attr_t *attr,
                           void *(*start_routine)(void*), void *arg)
 {
@@ -311,42 +295,45 @@ static ssize_t handle_aiocb_rw(struct qemu_paiocb *aiocb)
 
 static void *threadlet_worker(void *data)
 {
+    ThreadletQueue *queue = data;
 
+    qemu_mutex_lock(&queue->lock);
     while (1) {
-        ssize_t ret = 0;
-        qemu_timeval tv;
-        struct timespec ts;
         ThreadletWork *work;
+        int ret = 0;
 
-        qemu_gettimeofday(&tv);
-        ts.tv_sec = tv.tv_sec + 10;
-        ts.tv_nsec = 0;
-
-        mutex_lock(&lock);
-
-        while (QTAILQ_EMPTY(&request_list) &&
-               !(ret == ETIMEDOUT)) {
-            ret = cond_timedwait(&cond, &lock, &ts);
+        while (QTAILQ_EMPTY(&queue->request_list) &&
+               (ret != ETIMEDOUT)) {
+            /* wait for cond to be signalled or broadcast for 1000s */
+            ret = qemu_cond_timedwait((&queue->cond),
+                                      &(queue->lock), 10*100000);
         }
 
-        if (QTAILQ_EMPTY(&request_list)) {
-            idle_threads--;
-            cur_threads--;
-            mutex_unlock(&lock);
-            break;
-        }
-        work = QTAILQ_FIRST(&request_list);
-        QTAILQ_REMOVE(&request_list, work, node);
-        idle_threads--;
-        mutex_unlock(&lock);
+        assert(queue->idle_threads != 0);
+        if (QTAILQ_EMPTY(&queue->request_list)) {
+            if (queue->cur_threads > queue->min_threads) {
+                /* We retain the minimum number of threads */
+                break;
+            }
+        } else {
+            work = QTAILQ_FIRST(&queue->request_list);
+            QTAILQ_REMOVE(&queue->request_list, work, node);
+
+            queue->idle_threads--;
+            qemu_mutex_unlock(&queue->lock);
 
-        work->func(work);
-        mutex_lock(&lock);
-        idle_threads++;
-        mutex_unlock(&lock);
+            /* execute the work function */
+            work->func(work);
 
+            qemu_mutex_lock(&queue->lock);
+            queue->idle_threads++;
+        }
     }
 
+    queue->idle_threads--;
+    queue->cur_threads--;
+    qemu_mutex_unlock(&queue->lock);
+
     return NULL;
 }
 
@@ -389,18 +376,19 @@ static void handle_work(ThreadletWork *work)
     }
 }
 
-static void spawn_thread(void)
+static void spawn_threadlet(ThreadletQueue *queue)
 {
+    pthread_t thread_id;
     sigset_t set, oldset;
 
-    cur_threads++;
-    idle_threads++;
+    queue->cur_threads++;
+    queue->idle_threads++;
 
     /* block all signals */
     if (sigfillset(&set)) die("sigfillset");
     if (sigprocmask(SIG_SETMASK, &set, &oldset)) die("sigprocmask");
 
-    thread_create(&thread_id, &attr, threadlet_worker, NULL);
+    thread_create(&thread_id, &attr, threadlet_worker, queue);
 
     if (sigprocmask(SIG_SETMASK, &oldset, NULL)) die("sigprocmask restore");
 }
@@ -412,14 +400,29 @@ static void qemu_paio_submit(struct qemu_paiocb *aiocb)
     aiocb->active = 0;
     qemu_mutex_unlock(&aiocb_mutex);
 
-    mutex_lock(&lock);
-    if (idle_threads == 0 && cur_threads < max_threads)
-        spawn_thread();
+    qemu_mutex_lock(&globalqueue.lock);
+
+    if (!globalqueue_init) {
+        globalqueue.cur_threads  = 0;
+        globalqueue.idle_threads = 0;
+        globalqueue.max_threads = MAX_GLOBAL_THREADS;
+        globalqueue.min_threads = MIN_GLOBAL_THREADS;
+        QTAILQ_INIT(&globalqueue.request_list);
+        qemu_mutex_init(&globalqueue.lock);
+        qemu_cond_init(&globalqueue.cond);
+
+        globalqueue_init = 1;
+    }
+
+    if (globalqueue.idle_threads == 0 &&
+        globalqueue.cur_threads < globalqueue.max_threads)
+        spawn_threadlet(&globalqueue);
 
     aiocb->work.func = handle_work;
-    QTAILQ_INSERT_TAIL(&request_list, &aiocb->work, node);
-    mutex_unlock(&lock);
-    cond_signal(&cond);
+
+    QTAILQ_INSERT_TAIL(&globalqueue.request_list, &aiocb->work, node);
+    qemu_cond_signal(&globalqueue.cond);
+    qemu_mutex_unlock(&globalqueue.lock);
 }
 
 static ssize_t qemu_paio_return(struct qemu_paiocb *aiocb)
@@ -566,12 +569,14 @@ static void paio_cancel(BlockDriverAIOCB *blockacb)
     int active = 0;
 
     qemu_mutex_lock(&aiocb_mutex);
+    qemu_mutex_lock(&globalqueue.lock);
     if (!acb->active) {
-        QTAILQ_REMOVE(&request_list, &acb->work, node);
+        QTAILQ_REMOVE(&globalqueue.request_list, &acb->work, node);
         acb->ret = -ECANCELED;
     } else if (acb->ret == -EINPROGRESS) {
         active = 1;
     }
+    qemu_mutex_unlock(&globalqueue.lock);
 
     if (!active) {
         acb->ret = -ECANCELED;
@@ -689,8 +694,6 @@ int paio_init(void)
     if (ret)
         die2(ret, "pthread_attr_setdetachstate");
 
-    QTAILQ_INIT(&request_list);
-
     posix_aio_state = s;
     return 0;
 }




reply via email to

[Prev in Thread] Current Thread [Next in Thread]