From: Chen Fan
Subject: Re: [Qemu-devel] [RFC qom-cpu v4 10/10] cpus: reclaim allocated vCPU objects
Date: Wed, 04 Dec 2013 10:28:17 +0800

On Thu, 2013-11-28 at 15:48 +0100, Igor Mammedov wrote:
> On Wed, 9 Oct 2013 17:43:18 +0800
> Chen Fan <address@hidden> wrote:
> 
> > After ACPI gets the signal to eject a vCPU, it notifies the vCPU
> > thread to exit in KVM, and the vCPU must be removed from the CPU
> > list. Before the vCPU is finally removed, all of the related vCPU
> > objects are released.
> > 
> > Signed-off-by: Chen Fan <address@hidden>
> > ---
> >  cpus.c               | 46 ++++++++++++++++++++++++++++++++++++++++++++++
> >  hw/acpi/piix4.c      | 23 +++++++++++++++++------
> >  include/qom/cpu.h    | 10 ++++++++++
> >  include/sysemu/kvm.h |  1 +
> >  kvm-all.c            | 25 +++++++++++++++++++++++++
> >  5 files changed, 99 insertions(+), 6 deletions(-)
> > 
> > diff --git a/cpus.c b/cpus.c
> > index 4ace860..942af0a 100644
> > --- a/cpus.c
> > +++ b/cpus.c
> > @@ -714,6 +714,26 @@ void async_run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
> >      qemu_cpu_kick(cpu);
> >  }
> >  
> > +static void qemu_kvm_destroy_vcpu(CPUState *cpu)
> > +{
> > +    CPU_REMOVE(cpu);
> > +
> > +    if (kvm_destroy_vcpu(cpu) < 0) {
> Does it actually do what it's supposed to do?
> If I recall correctly, the KVM patches were deemed an incorrect approach.
> 
> Perhaps, in the case of KVM, it should return failure for now (i.e. no
> unplug for KVM).
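> 
> Something along these lines (just an untested sketch of what I mean;
> error_report() is only a stand-in for whatever reporting fits here):
> 
>     static void qemu_kvm_destroy_vcpu(CPUState *cpu)
>     {
>         if (kvm_destroy_vcpu(cpu) < 0) {
>             /* KVM cannot really free a vCPU yet, so fail the
>              * unplug request instead of exiting the guest. */
>             error_report("kvm: vCPU unplug is not supported");
>             return;
>         }
>         CPU_REMOVE(cpu);
>         object_property_set_bool(OBJECT(cpu), false, "realized", NULL);
>         qdev_free(DEVICE(cpu));
>     }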
Yes, for now some patches still have to be written to release the vCPU
in KVM; I hope the KVM developers can give some advice.
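
To sketch one possible direction (untested, and the names below, e.g.
KVMParkedVcpu and kvm_park_vcpu, are invented here for illustration, not
an existing API): since KVM has no KVM_DESTROY_VCPU ioctl and a vcpu fd
only goes away when the VM does, QEMU could "park" the fd on unplug and
reuse it when a vCPU with the same arch id is plugged back:

    /* in kvm-all.c: keep the fds of unplugged vCPUs around for reuse */
    struct KVMParkedVcpu {
        unsigned long vcpu_id;            /* arch id of the parked vCPU */
        int kvm_fd;                       /* fd from KVM_CREATE_VCPU */
        QLIST_ENTRY(KVMParkedVcpu) node;
    };

    static QLIST_HEAD(, KVMParkedVcpu) parked_vcpus =
        QLIST_HEAD_INITIALIZER(parked_vcpus);

    /* on unplug: remember the fd instead of close(cpu->kvm_fd) */
    static void kvm_park_vcpu(CPUState *cpu)
    {
        struct KVMParkedVcpu *vcpu = g_malloc(sizeof(*vcpu));

        vcpu->vcpu_id = kvm_arch_vcpu_id(cpu);
        vcpu->kvm_fd = cpu->kvm_fd;
        QLIST_INSERT_HEAD(&parked_vcpus, vcpu, node);
    }

    /* in kvm_init_vcpu(): reuse a parked fd if the id matches,
     * otherwise fall back to KVM_CREATE_VCPU as before */
    static int kvm_get_vcpu(KVMState *s, unsigned long vcpu_id)
    {
        struct KVMParkedVcpu *cpu;

        QLIST_FOREACH(cpu, &parked_vcpus, node) {
            if (cpu->vcpu_id == vcpu_id) {
                int kvm_fd = cpu->kvm_fd;

                QLIST_REMOVE(cpu, node);
                g_free(cpu);
                return kvm_fd;
            }
        }
        return kvm_vm_ioctl(s, KVM_CREATE_VCPU, (void *)vcpu_id);
    }

That way nothing in the guest-visible behavior would have to change, and
re-plugging a vCPU with the same id becomes cheap.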

Thanks,
Chen

> 
> > +        fprintf(stderr, "kvm_destroy_vcpu failed.\n");
> > +        exit(1);
> > +    }
> > +
> > +    object_property_set_bool(OBJECT(cpu), false, "realized", NULL);
> > +    qdev_free(DEVICE(cpu));
> > +}
> > +
> > +static void qemu_tcg_destroy_vcpu(CPUState *cpu)
> > +{
> > +    CPU_REMOVE(cpu);
> > +    object_property_set_bool(OBJECT(cpu), false, "realized", NULL);
> > +    qdev_free(DEVICE(cpu));
> > +}
> > +
> >  static void flush_queued_work(CPUState *cpu)
> >  {
> >      struct qemu_work_item *wi;
> > @@ -805,6 +825,11 @@ static void *qemu_kvm_cpu_thread_fn(void *arg)
> >              }
> >          }
> >          qemu_kvm_wait_io_event(cpu);
> > +        if (cpu->exit && !cpu_can_run(cpu)) {
> > +            qemu_kvm_destroy_vcpu(cpu);
> > +            qemu_mutex_unlock(&qemu_global_mutex);
> > +            return NULL;
> > +        }
> >      }
> >  
> >      return NULL;
> > @@ -857,6 +882,7 @@ static void tcg_exec_all(void);
> >  static void *qemu_tcg_cpu_thread_fn(void *arg)
> >  {
> >      CPUState *cpu = arg;
> > +    CPUState *remove_cpu = NULL;
> >  
> >      qemu_tcg_init_cpu_signals();
> >      qemu_thread_get_self(cpu->thread);
> > @@ -889,6 +915,16 @@ static void *qemu_tcg_cpu_thread_fn(void *arg)
> >              }
> >          }
> >          qemu_tcg_wait_io_event();
> > +        CPU_FOREACH(cpu) {
> > +            if (cpu->exit && !cpu_can_run(cpu)) {
> > +                remove_cpu = cpu;
> > +                break;
> > +            }
> > +        }
> > +        if (remove_cpu) {
> > +            qemu_tcg_destroy_vcpu(remove_cpu);
> > +            remove_cpu = NULL;
> > +        }
> >      }
> >  
> >      return NULL;
> > @@ -1045,6 +1081,13 @@ void resume_all_vcpus(void)
> >      }
> >  }
> >  
> > +void cpu_remove(CPUState *cpu)
> > +{
> > +    cpu->stop = true;
> > +    cpu->exit = true;
> > +    qemu_cpu_kick(cpu);
> > +}
> > +
> >  static void qemu_tcg_init_vcpu(CPUState *cpu)
> >  {
> >      /* share a single thread for all cpus with TCG */
> > @@ -1219,6 +1262,9 @@ static void tcg_exec_all(void)
> >                  break;
> >              }
> >          } else if (cpu->stop || cpu->stopped) {
> > +            if (cpu->exit) {
> > +                next_cpu = CPU_NEXT(cpu);
> > +            }
> >              break;
> >          }
> >      }
> > diff --git a/hw/acpi/piix4.c b/hw/acpi/piix4.c
> > index fd27001..bde8123 100644
> > --- a/hw/acpi/piix4.c
> > +++ b/hw/acpi/piix4.c
> > @@ -612,10 +612,21 @@ static const MemoryRegionOps piix4_pci_ops = {
> >      },
> >  };
> >  
> > -static void acpi_piix_eject_vcpu(int64_t cpuid)
> > +static void acpi_piix_eject_vcpu(PIIX4PMState *s, int64_t cpuid)
> >  {
> > -    /* TODO: eject a vcpu, release allocated vcpu and exit the vcpu pthread.  */
> > -    PIIX4_DPRINTF("vcpu: %" PRIu64 " need to be ejected.\n", cpuid);
> > +    CPUStatus *g = &s->gpe_cpu;
> > +    CPUState *cpu;
> > +
> > +    CPU_FOREACH(cpu) {
> > +        CPUClass *cc = CPU_GET_CLASS(cpu);
> > +        int64_t id = cc->get_arch_id(cpu);
> > +
> > +        if (cpuid == id) {
> > +            g->old_sts[cpuid / 8] &= ~(1 << (cpuid % 8));
> > +            cpu_remove(cpu);
> > +            break;
> > +        }
> > +    }
> >  }
> >  
> >  static uint64_t cpu_status_read(void *opaque, hwaddr addr, unsigned int size)
> > @@ -634,7 +645,7 @@ static void cpu_status_write(void *opaque, hwaddr addr, uint64_t data,
> >      CPUStatus *cpus = &s->gpe_cpu;
> >      uint8_t val;
> >      int i;
> > -    int64_t cpuid = 0;
> > +    int64_t cpuid = -1;
> >  
> >      val = cpus->old_sts[addr] ^ data;
> >  
> > @@ -648,8 +659,8 @@ static void cpu_status_write(void *opaque, hwaddr addr, uint64_t data,
> >          }
> >      }
> >  
> > -    if (cpuid != 0) {
> > -        acpi_piix_eject_vcpu(cpuid);
> > +    if (cpuid != -1) {
> > +        acpi_piix_eject_vcpu(s, cpuid);
> >      }
> >  }
> >  
> > diff --git a/include/qom/cpu.h b/include/qom/cpu.h
> > index 0238532..eb8d32b 100644
> > --- a/include/qom/cpu.h
> > +++ b/include/qom/cpu.h
> > @@ -181,6 +181,7 @@ struct CPUState {
> >      bool created;
> >      bool stop;
> >      bool stopped;
> > +    bool exit;
> >      volatile sig_atomic_t exit_request;
> >      volatile sig_atomic_t tcg_exit_req;
> >      uint32_t interrupt_request;
> > @@ -206,6 +207,7 @@ struct CPUState {
> >  QTAILQ_HEAD(CPUTailQ, CPUState);
> >  extern struct CPUTailQ cpus;
> >  #define CPU_NEXT(cpu) QTAILQ_NEXT(cpu, node)
> > +#define CPU_REMOVE(cpu) QTAILQ_REMOVE(&cpus, cpu, node)
> >  #define CPU_FOREACH(cpu) QTAILQ_FOREACH(cpu, &cpus, node)
> >  #define CPU_FOREACH_SAFE(cpu, next_cpu) \
> >      QTAILQ_FOREACH_SAFE(cpu, &cpus, node, next_cpu)
> > @@ -487,6 +489,14 @@ void cpu_exit(CPUState *cpu);
> >  void cpu_resume(CPUState *cpu);
> >  
> >  /**
> > + * cpu_remove:
> > + * @cpu: The vCPU to remove.
> > + *
> > + * Requests the CPU @cpu to be removed.
> > + */
> > +void cpu_remove(CPUState *cpu);
> > +
> > +/**
> >   * qemu_init_vcpu:
> >   * @cpu: The vCPU to initialize.
> >   *
> > diff --git a/include/sysemu/kvm.h b/include/sysemu/kvm.h
> > index 3b25f27..f3f1279 100644
> > --- a/include/sysemu/kvm.h
> > +++ b/include/sysemu/kvm.h
> > @@ -167,6 +167,7 @@ int kvm_has_intx_set_mask(void);
> >  
> >  int kvm_init_vcpu(CPUState *cpu);
> >  int kvm_cpu_exec(CPUState *cpu);
> > +int kvm_destroy_vcpu(CPUState *cpu);
> >  
> >  #ifdef NEED_CPU_H
> >  
> > diff --git a/kvm-all.c b/kvm-all.c
> > index 4478969..605e00e 100644
> > --- a/kvm-all.c
> > +++ b/kvm-all.c
> > @@ -226,6 +226,31 @@ static void kvm_reset_vcpu(void *opaque)
> >      kvm_arch_reset_vcpu(cpu);
> >  }
> >  
> > +int kvm_destroy_vcpu(CPUState *cpu)
> > +{
> > +    KVMState *s = kvm_state;
> > +    long mmap_size;
> > +    int ret = 0;
> > +
> > +    DPRINTF("kvm_destroy_vcpu\n");
> > +
> > +    mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
> > +    if (mmap_size < 0) {
> > +        ret = mmap_size;
> > +        DPRINTF("KVM_GET_VCPU_MMAP_SIZE failed\n");
> > +        goto err;
> > +    }
> > +
> > +    ret = munmap(cpu->kvm_run, mmap_size);
> > +    if (ret < 0) {
> > +        goto err;
> > +    }
> > +
> > +    close(cpu->kvm_fd);
> > +err:
> > +    return ret;
> > +}
> > +
> >  int kvm_init_vcpu(CPUState *cpu)
> >  {
> >      KVMState *s = kvm_state;
> 
> 




