
From: Paolo Bonzini
Subject: Re: [Qemu-devel] [PATCH v2 0/3] AioContext: ctx->dispatching is dead, all hail ctx->notify_me
Date: Fri, 17 Jul 2015 15:02:05 +0200
User-agent: Mozilla/5.0 (X11; Linux x86_64; rv:38.0) Gecko/20100101 Thunderbird/38.0.1


On 17/07/2015 14:58, Richard W.M. Jones wrote:
> On Fri, Jul 17, 2015 at 11:30:38AM +0200, Paolo Bonzini wrote:
>> error: kvm run failed Function not implemented
>> PC=00000000bf671210  SP=00000000c00001f0
>> X00=000000000a003e70 X01=0000000000000000 X02=00000000bf680548 X03=0000000000000030
>> X04=00000000bbb5fc18 X05=00000000004b7770 X06=00000000bf721930 X07=000000000000009a
>> X08=00000000bf716858 X09=0000000000000090 X10=0000000000000000 X11=0000000000000046
>> X12=00000000a007e03a X13=0000000000000000 X14=0000000000000000 X15=0000000000000000
>> X16=00000000bf716df0 X17=0000000000000000 X18=0000000000000000 X19=00000000bf6f5f18
>> X20=0000000000000000 X21=0000000000000000 X22=0000000000000000 X23=0000000000000000
>> X24=0000000000000000 X25=0000000000000000 X26=0000000000000000 X27=0000000000000000
>> X28=0000000000000000 X29=0000000000000000 X30=0000000000000000
>> PSTATE=60000305 (flags -ZC-)
> 
> Vaguely reminiscent of this bug:
> 
> https://bugzilla.redhat.com/show_bug.cgi?id=1194366
> 
> (See comment 7 in particular)

That must be it; I was using an old kernel.

Thanks!

Paolo

>> diff --git a/aio-posix.c b/aio-posix.c
>> index 268d14d..d2011d0 100644
>> --- a/aio-posix.c
>> +++ b/aio-posix.c
>> @@ -273,6 +273,13 @@ bool aio_poll(AioContext *ctx, bool blocking)
>>          aio_context_acquire(ctx);
>>      }
>>  
>> +    /* This should be optimized... */
>> +    event_notifier_test_and_clear(&ctx->notifier);
>> +
>> +    if (blocking) {
>> +        atomic_sub(&ctx->notify_me, 2);
>> +    }
>> +
>>      /* if we have any readable fds, dispatch event */
>>      if (ret > 0) {
>>          for (i = 0; i < npfd; i++) {
>> @@ -283,10 +290,6 @@ bool aio_poll(AioContext *ctx, bool blocking)
>>      npfd = 0;
>>      ctx->walking_handlers--;
>>  
>> -    if (blocking) {
>> -        atomic_sub(&ctx->notify_me, 2);
>> -    }
>> -
>>      /* Run dispatch even if there were no readable fds to run timers */
>>      if (aio_dispatch(ctx)) {
>>          progress = true;
>> diff --git a/aio-win32.c b/aio-win32.c
>> index 2bfd5f8..33809fd 100644
>> --- a/aio-win32.c
>> +++ b/aio-win32.c
>> @@ -326,6 +326,10 @@ bool aio_poll(AioContext *ctx, bool blocking)
>>          if (timeout) {
>>              aio_context_acquire(ctx);
>>          }
>> +
>> +        /* This should be optimized... */
>> +        event_notifier_test_and_clear(&ctx->notifier);
>> +
>>          if (blocking) {
>>              assert(first);
>>              atomic_sub(&ctx->notify_me, 2);
>> diff --git a/async.c b/async.c
>> index 9204907..120e183 100644
>> --- a/async.c
>> +++ b/async.c
>> @@ -202,6 +202,9 @@ aio_ctx_check(GSource *source)
>>      AioContext *ctx = (AioContext *) source;
>>      QEMUBH *bh;
>>  
>> +    /* This should be optimized... */
>> +    event_notifier_test_and_clear(&ctx->notifier);
>> +
>>      atomic_and(&ctx->notify_me, ~1);
>>      for (bh = ctx->first_bh; bh; bh = bh->next) {
>>          if (!bh->deleted && bh->scheduled) {
>> @@ -280,6 +280,10 @@ static void aio_rfifolock_cb(void *opaque)
>>      aio_notify(opaque);
>>  }
>>  
>> +static void event_notifier_dummy_cb(EventNotifier *e)
>> +{
>> +}
>> +
>>  AioContext *aio_context_new(Error **errp)
>>  {
>>      int ret;
>> @@ -292,7 +296,7 @@ AioContext *aio_context_new(Error **errp)
>>          return NULL;
>>      }
>>      g_source_set_can_recurse(&ctx->source, true);
>> -    aio_set_event_notifier(ctx, &ctx->notifier, NULL);
>> +    aio_set_event_notifier(ctx, &ctx->notifier, event_notifier_dummy_cb);
>>      ctx->thread_pool = NULL;
>>      qemu_mutex_init(&ctx->bh_lock);
>>      rfifolock_init(&ctx->lock, aio_rfifolock_cb, ctx);
> 
> With this patch, I've got to 500 iterations without seeing the error.
> 
> Still testing it however ...
> 
> Rich.
> 
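
For anyone following along, here is a minimal sketch of the general notify_me/event-notifier pattern the hunks above are adjusting, written in plain C11 atomics rather than the actual QEMU code (ctx, notify and poll_once are made-up names): a notifier only has to kick the event while some poller has advertised itself through notify_me, and the poller clears the event and drops notify_me once it returns from the wait, before dispatching handlers.

/* Minimal sketch of the notify_me handshake, loosely modeled on the
 * patch quoted above; plain C11 atomics, not the actual QEMU code. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct ctx {
    atomic_int  notify_me;  /* nonzero while a poller may be blocking */
    atomic_bool event;      /* stand-in for the ctx->notifier eventfd */
};

/* Roughly what a notifier has to do: only kick the event if a poller
 * has advertised that it cares. */
static void notify(struct ctx *c)
{
    if (atomic_load(&c->notify_me)) {
        atomic_store(&c->event, true);      /* "event_notifier_set" */
    }
}

static void poll_once(struct ctx *c, bool blocking)
{
    if (blocking) {
        atomic_fetch_add(&c->notify_me, 2); /* advertise before waiting */
    }

    /* ... the real code waits in ppoll()/WaitForMultipleObjects() ... */

    /* Clear the pending event and drop notify_me after the wait,
     * before dispatching handlers, as the hunks above do. */
    atomic_exchange(&c->event, false);      /* "event_notifier_test_and_clear" */
    if (blocking) {
        atomic_fetch_sub(&c->notify_me, 2);
    }

    /* ... dispatch handlers / bottom halves here ... */
}

int main(void)
{
    struct ctx c;
    atomic_init(&c.notify_me, 0);
    atomic_init(&c.event, false);

    notify(&c);             /* nobody is polling: the kick is skipped */
    poll_once(&c, true);    /* advertise, wait, clear, dispatch */
    printf("event pending: %d\n", atomic_load(&c.event));
    return 0;
}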


