
Re: [Qemu-devel] [PATCH for-2.4 v2 1/2] vhost: add vhost_has_free_slot() interface


From: Igor Mammedov
Subject: Re: [Qemu-devel] [PATCH for-2.4 v2 1/2] vhost: add vhost_has_free_slot() interface
Date: Thu, 30 Jul 2015 09:04:54 +0200

On Thu, 30 Jul 2015 09:22:25 +0300
"Michael S. Tsirkin" <address@hidden> wrote:

> On Thu, Jul 30, 2015 at 08:16:54AM +0200, Igor Mammedov wrote:
> > On Wed, 29 Jul 2015 18:11:14 +0300
> > "Michael S. Tsirkin" <address@hidden> wrote:
> > 
> > > On Wed, Jul 29, 2015 at 01:49:48PM +0200, Igor Mammedov wrote:
> > > > it will allow other parts of QEMU to check whether it's safe
> > > > to map a memory region during hotplug/runtime.
> > > > That way the hotplug path will have a chance to cancel the
> > > > hotplug operation instead of crashing in vhost_commit().
> > > > 
> > > > Signed-off-by: Igor Mammedov <address@hidden>
> > > > ---
> > > > v2:
> > > >   * replace probing with checking for
> > > >     /sys/module/vhost/parameters/max_mem_regions and,
> > > >     if it's missing or has a wrong value, return the
> > > >     hardcoded legacy limit (64 slots).
> > > > ---
> > > >  hw/virtio/vhost-backend.c         | 21 ++++++++++++++++++++-
> > > >  hw/virtio/vhost-user.c            |  8 +++++++-
> > > >  hw/virtio/vhost.c                 | 21 +++++++++++++++++++++
> > > >  include/hw/virtio/vhost-backend.h |  2 ++
> > > >  include/hw/virtio/vhost.h         |  1 +
> > > >  stubs/Makefile.objs               |  1 +
> > > >  stubs/vhost.c                     |  6 ++++++
> > > >  7 files changed, 58 insertions(+), 2 deletions(-)
> > > >  create mode 100644 stubs/vhost.c
> > > > 
> > > > diff --git a/hw/virtio/vhost-backend.c b/hw/virtio/vhost-backend.c
> > > > index 4d68a27..11ec669 100644
> > > > --- a/hw/virtio/vhost-backend.c
> > > > +++ b/hw/virtio/vhost-backend.c
> > > > @@ -11,6 +11,7 @@
> > > >  #include "hw/virtio/vhost.h"
> > > >  #include "hw/virtio/vhost-backend.h"
> > > >  #include "qemu/error-report.h"
> > > > +#include "linux/vhost.h"
> > > >  
> > > >  #include <sys/ioctl.h>
> > > >  
> > > > @@ -42,11 +43,29 @@ static int vhost_kernel_cleanup(struct vhost_dev *dev)
> > > >      return close(fd);
> > > >  }
> > > >  
> > > > +static int vhost_kernel_memslots_limit(struct vhost_dev *dev)
> > > > +{
> > > > +    int limit = 64;
> > > > +    char *s;
> > > > +
> > > > +    if (g_file_get_contents("/sys/module/vhost/parameters/max_mem_regions",
> > > > +                            &s, NULL, NULL)) {
> > > > +        uint64_t val = g_ascii_strtoull(s, NULL, 10);
> > > > +        if (!((val == G_MAXUINT64 || !val) && errno)) {
> > > > +            return val;
> > > > +        }
> > > > +        error_report("ignoring invalid max_mem_regions value in vhost 
> > > > module:"
> > > > +                     " %s", s);
> > > > +    }
> > > > +    return limit;
> > > > +}
> > > > +
> > > >  static const VhostOps kernel_ops = {
> > > >          .backend_type = VHOST_BACKEND_TYPE_KERNEL,
> > > >          .vhost_call = vhost_kernel_call,
> > > >          .vhost_backend_init = vhost_kernel_init,
> > > > -        .vhost_backend_cleanup = vhost_kernel_cleanup
> > > > +        .vhost_backend_cleanup = vhost_kernel_cleanup,
> > > > +        .vhost_backend_memslots_limit = vhost_kernel_memslots_limit
> > > >  };
> > > >  
> > > >  int vhost_set_backend_type(struct vhost_dev *dev, VhostBackendType backend_type)
> > > > diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
> > > > index e7ab829..acdfd04 100644
> > > > --- a/hw/virtio/vhost-user.c
> > > > +++ b/hw/virtio/vhost-user.c
> > > > @@ -343,9 +343,15 @@ static int vhost_user_cleanup(struct vhost_dev *dev)
> > > >      return 0;
> > > >  }
> > > >  
> > > > +static int vhost_user_memslots_limit(struct vhost_dev *dev)
> > > > +{
> > > > +    return VHOST_MEMORY_MAX_NREGIONS;
> > > > +}
> > > > +
> > > >  const VhostOps user_ops = {
> > > >          .backend_type = VHOST_BACKEND_TYPE_USER,
> > > >          .vhost_call = vhost_user_call,
> > > >          .vhost_backend_init = vhost_user_init,
> > > > -        .vhost_backend_cleanup = vhost_user_cleanup
> > > > +        .vhost_backend_cleanup = vhost_user_cleanup,
> > > > +        .vhost_backend_memslots_limit = vhost_user_memslots_limit
> > > >          };
> > > > diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
> > > > index 2712c6f..e964004 100644
> > > > --- a/hw/virtio/vhost.c
> > > > +++ b/hw/virtio/vhost.c
> > > > @@ -26,6 +26,18 @@
> > > >  
> > > >  static struct vhost_log *vhost_log;
> > > >  
> > > > +static int used_memslots;
> > > > +static int memslots_limit = -1;
> > > 
> > > I suspect this needs to be in some structure, refcounted by the vhost
> > > devices using it. vhost_kernel_memslots_limit would then be called when
> > > the 1st vhost device is added.
> > 
> > vhost_kernel_memslots_limit() should be called for every added vhost device,
> > since mixed vhost backends may have different limits.
> > 
> > Refcounting might help with disabling the limit check once all vhost devices
> > are hot-unplugged: the user would then be able to plug in more memory, and
> > simply wouldn't be able to add a vhost device with a lower limit afterwards.
> > But I see such a use case as highly improbable, so adding refcounting
> > seemed like over-engineering.
> 
> I don't see what's not probable here. It's trivial to reproduce.
> One reason to do this is if you want to tweak the limit.
> vhost needs to be unloaded for this.
would something like this be suitable:

static struct {
    int used_memslots;
    int memslots_limit;
    int refcount;
} slots_limit;
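
For illustration, a sketch of how the refcount on such a structure could be
driven; the helper names vhost_slots_ref()/vhost_slots_unref() are hypothetical
and not part of the posted patch. The first device to start records its
backend's limit, later devices lower it with QEMU's MIN() macro, and dropping
the last reference forgets the limit so it can be re-evaluated later (e.g.
after the vhost module is reloaded with a different max_mem_regions):

/* hypothetical sketch, not part of the posted patch */

/* could be called from vhost_dev_start(); backend_limit comes from
 * the device's vhost_backend_memslots_limit() callback */
static void vhost_slots_ref(int backend_limit)
{
    if (slots_limit.refcount++ == 0) {
        slots_limit.memslots_limit = backend_limit;
    } else {
        slots_limit.memslots_limit = MIN(slots_limit.memslots_limit,
                                         backend_limit);
    }
}

/* could be called from vhost_dev_stop(); once the last vhost device
 * is gone the limit no longer constrains memory hotplug */
static void vhost_slots_unref(void)
{
    if (--slots_limit.refcount == 0) {
        slots_limit.memslots_limit = -1;   /* -1: no limit known */
    }
}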

> 
> 
> > > 
> > > > +
> > > > +bool vhost_has_free_slot(void)
> > > > +{
> > > > +    if (memslots_limit >= 0) {
> > > > +        return memslots_limit > used_memslots;
> > > > +    }
> > > > +
> > > > +    return true;
> > > 
> > > Pls rewrite this using logical operators.
> > > Something like
> > >   return memslots_limit < 0 || memslots_limit > used_memslots; ?
> > sure
> > 
> > > 
> > > > +}
> > > > +
> > > >  static void vhost_dev_sync_region(struct vhost_dev *dev,
> > > >                                    MemoryRegionSection *section,
> > > >                                    uint64_t mfirst, uint64_t mlast,
> > > > @@ -457,6 +469,7 @@ static void vhost_set_memory(MemoryListener *listener,
> > > >      dev->mem_changed_start_addr = MIN(dev->mem_changed_start_addr, start_addr);
> > > >      dev->mem_changed_end_addr = MAX(dev->mem_changed_end_addr, start_addr + size - 1);
> > > >      dev->memory_changed = true;
> > > > +    used_memslots = dev->mem->nregions;
> > > >  }
> > > >  
> > > >  static bool vhost_section(MemoryRegionSection *section)
> > > > @@ -1119,6 +1132,14 @@ int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
> > > >      if (r < 0) {
> > > >          goto fail_features;
> > > >      }
> > > > +
> > > > +    r = hdev->vhost_ops->vhost_backend_memslots_limit(hdev);
> > > > +    if (memslots_limit > 0) {
> > > > +        memslots_limit = MIN(memslots_limit, r);
> > > > +    } else {
> > > > +        memslots_limit = r;
> > > > +    }
> > > > +
> > > >      r = hdev->vhost_ops->vhost_call(hdev, VHOST_SET_MEM_TABLE, hdev->mem);
> > > >      if (r < 0) {
> > > >          r = -errno;
> > > 
> > > Doing stuff on start but not stop is almost always a mistake.
> > > In this case, when all vhost devices go away, one can
> > > add more memory again, right?
> > yep, it's possible provided vhost_call(VHOST_SET_MEM_TABLE) succeeds;
> > if it doesn't, then device addition fails gracefully.
> 
> VHOST_SET_MEM_TABLE on which device? All fds are gone.
I meant the above in the context of adding vhost devices after
memory hotplug.

As for the original question: with this implementation, the lowest
limit will stay even if all vhost devices are gone.
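
For context, the consumer of vhost_has_free_slot() is the memory hotplug
path mentioned in the commit message; patch 2/2 of this series adds the
actual check. A sketch of such a caller, with a hypothetical function name:

#include "hw/virtio/vhost.h"
#include "qapi/error.h"

/* hypothetical caller sketch; the real check lands in the pc-dimm
 * hotplug path in patch 2/2 */
static void dimm_plug_precheck(Error **errp)
{
    if (!vhost_has_free_slot()) {
        /* cancel the hotplug instead of crashing later in
         * vhost_commit() when the backend runs out of slots */
        error_setg(errp, "a used vhost backend has no free memory slots left");
        return;
    }
    /* otherwise it is safe to map the new memory region */
}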


> 
> > > 
> > > > diff --git a/include/hw/virtio/vhost-backend.h b/include/hw/virtio/vhost-backend.h
> > > > index e472f29..28b6714 100644
> > > > --- a/include/hw/virtio/vhost-backend.h
> > > > +++ b/include/hw/virtio/vhost-backend.h
> > > > @@ -24,12 +24,14 @@ typedef int (*vhost_call)(struct vhost_dev *dev, unsigned long int request,
> > > >               void *arg);
> > > >  typedef int (*vhost_backend_init)(struct vhost_dev *dev, void *opaque);
> > > >  typedef int (*vhost_backend_cleanup)(struct vhost_dev *dev);
> > > > +typedef int (*vhost_backend_memslots_limit)(struct vhost_dev *dev);
> > > >  
> > > >  typedef struct VhostOps {
> > > >      VhostBackendType backend_type;
> > > >      vhost_call vhost_call;
> > > >      vhost_backend_init vhost_backend_init;
> > > >      vhost_backend_cleanup vhost_backend_cleanup;
> > > > +    vhost_backend_memslots_limit vhost_backend_memslots_limit;
> > > >  } VhostOps;
> > > >  
> > > >  extern const VhostOps user_ops;
> > > > diff --git a/include/hw/virtio/vhost.h b/include/hw/virtio/vhost.h
> > > > index dd51050..17ff7b6 100644
> > > > --- a/include/hw/virtio/vhost.h
> > > > +++ b/include/hw/virtio/vhost.h
> > > > @@ -81,4 +81,5 @@ uint64_t vhost_get_features(struct vhost_dev *hdev, const int *feature_bits,
> > > >                              uint64_t features);
> > > >  void vhost_ack_features(struct vhost_dev *hdev, const int *feature_bits,
> > > >                          uint64_t features);
> > > > +bool vhost_has_free_slot(void);
> > > >  #endif
> > > > diff --git a/stubs/Makefile.objs b/stubs/Makefile.objs
> > > > index 9937a12..d2f1b21 100644
> > > > --- a/stubs/Makefile.objs
> > > > +++ b/stubs/Makefile.objs
> > > > @@ -38,3 +38,4 @@ stub-obj-$(CONFIG_WIN32) += fd-register.o
> > > >  stub-obj-y += cpus.o
> > > >  stub-obj-y += kvm.o
> > > >  stub-obj-y += qmp_pc_dimm_device_list.o
> > > > +stub-obj-y += vhost.o
> > > > diff --git a/stubs/vhost.c b/stubs/vhost.c
> > > > new file mode 100644
> > > > index 0000000..d346b85
> > > > --- /dev/null
> > > > +++ b/stubs/vhost.c
> > > > @@ -0,0 +1,6 @@
> > > > +#include "hw/virtio/vhost.h"
> > > > +
> > > > +bool vhost_has_free_slot(void)
> > > > +{
> > > > +    return true;
> > > > +}
> > > > -- 
> > > > 1.8.3.1



