qemu-devel
[Top][All Lists]
Advanced

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Qemu-devel] [PATCH 1/3] Make paio subsystem use threadlets infrastr


From: Arun R Bharadwaj
Subject: Re: [Qemu-devel] [PATCH 1/3] Make paio subsystem use threadlets infrastructure
Date: Wed, 10 Nov 2010 23:24:42 +0530
User-agent: Mutt/1.5.20 (2009-06-14)

* Stefan Hajnoczi <address@hidden> [2010-11-10 13:45:29]:

> On Wed, Nov 10, 2010 at 1:19 PM, Arun R Bharadwaj
> <address@hidden> wrote:
> > @@ -301,106 +431,58 @@ static ssize_t handle_aiocb_rw(struct qemu_paiocb 
> > *aiocb)
> >     return nbytes;
> >  }
> >
> > -static void *aio_thread(void *unused)
> > +static void aio_thread(ThreadletWork *work)
> >  {
> >     pid_t pid;
> > +    struct qemu_paiocb *aiocb = container_of(work, struct qemu_paiocb, 
> > work);
> > +    ssize_t ret = 0;
> >
> >     pid = getpid();
> >
> > -    while (1) {
> > -        struct qemu_paiocb *aiocb;
> > -        ssize_t ret = 0;
> > -        qemu_timeval tv;
> > -        struct timespec ts;
> > -
> > -        qemu_gettimeofday(&tv);
> > -        ts.tv_sec = tv.tv_sec + 10;
> > -        ts.tv_nsec = 0;
> > -
> > -        mutex_lock(&lock);
> > -
> > -        while (QTAILQ_EMPTY(&request_list) &&
> > -               !(ret == ETIMEDOUT)) {
> > -            ret = cond_timedwait(&cond, &lock, &ts);
> > -        }
> > -
> > -        if (QTAILQ_EMPTY(&request_list))
> > -            break;
> > -
> > -        aiocb = QTAILQ_FIRST(&request_list);
> > -        QTAILQ_REMOVE(&request_list, aiocb, node);
> > -        aiocb->active = 1;
> > -        idle_threads--;
> > -        mutex_unlock(&lock);
> > -
> > -        switch (aiocb->aio_type & QEMU_AIO_TYPE_MASK) {
> > -        case QEMU_AIO_READ:
> > -        case QEMU_AIO_WRITE:
> > -            ret = handle_aiocb_rw(aiocb);
> > -            break;
> > -        case QEMU_AIO_FLUSH:
> > -            ret = handle_aiocb_flush(aiocb);
> > -            break;
> > -        case QEMU_AIO_IOCTL:
> > -            ret = handle_aiocb_ioctl(aiocb);
> > -            break;
> > -        default:
> > -            fprintf(stderr, "invalid aio request (0x%x)\n", 
> > aiocb->aio_type);
> > -            ret = -EINVAL;
> > -            break;
> > -        }
> > -
> > -        mutex_lock(&lock);
> > -        aiocb->ret = ret;
> > -        idle_threads++;
> > -        mutex_unlock(&lock);
> > -
> > -        if (kill(pid, aiocb->ev_signo)) die("kill failed");
> > +    switch (aiocb->aio_type & QEMU_AIO_TYPE_MASK) {
> > +    case QEMU_AIO_READ:
> > +    case QEMU_AIO_WRITE:
> > +        ret = handle_aiocb_rw(aiocb);
> > +        break;
> > +    case QEMU_AIO_FLUSH:
> > +        ret = handle_aiocb_flush(aiocb);
> > +        break;
> > +    case QEMU_AIO_IOCTL:
> > +        ret = handle_aiocb_ioctl(aiocb);
> > +        break;
> > +    default:
> > +        fprintf(stderr, "invalid aio request (0x%x)\n", aiocb->aio_type);
> > +        ret = -EINVAL;
> > +        break;
> >     }
> >
> > -    idle_threads--;
> > -    cur_threads--;
> > -    mutex_unlock(&lock);
> > -
> > -    return NULL;
> > -}
> > -
> > -static void spawn_thread(void)
> > -{
> > -    sigset_t set, oldset;
> > -
> > -    cur_threads++;
> > -    idle_threads++;
> > +    qemu_mutex_lock(&aiocb_mutex);
> > +    aiocb->ret = ret;
> 
> This is where qemu_cond_broadcast() is needed.
> 
> > +    qemu_mutex_unlock(&aiocb_mutex);
> >
> > -    /* block all signals */
> > -    if (sigfillset(&set)) die("sigfillset");
> > -    if (sigprocmask(SIG_SETMASK, &set, &oldset)) die("sigprocmask");
> > -
> > -    thread_create(&thread_id, &attr, aio_thread, NULL);
> > -
> > -    if (sigprocmask(SIG_SETMASK, &oldset, NULL)) die("sigprocmask 
> > restore");
> > +    if (kill(pid, aiocb->ev_signo)) {
> > +        die("kill failed");
> > +    }
> >  }
> >
> 
> I wonder if the condition variable has a measurable performance
> overhead.  We unconditionally broadcast on paiocb completion.  One
> idea would be to keep a counter of waiters (should only ever be 0 or
> 1) protected by aiocb_mutex and broadcast only when there is a waiter.
>  I just want to share this idea, I don't know if it's necessary to
> implement it or if it could even work without a race condition.
> 

I did not understand exactly why we would see a performance hit.
We will be doing a broadcast only after the aio_thread has finished
the work, right? So how would a redundant broadcast affect
performance?

-arun
> >     }
> >
> >     paio_remove(acb);
> > @@ -618,11 +692,12 @@ int paio_init(void)
> >     struct sigaction act;
> >     PosixAioState *s;
> >     int fds[2];
> > -    int ret;
> >
> >     if (posix_aio_state)
> >         return 0;
> >
> > +    qemu_mutex_init(&aiocb_mutex);
> 
> Also needs qemu_cond_init(&aiocb_completion).
> 
> Stefan
> 



reply via email to

[Prev in Thread] Current Thread [Next in Thread]