From: Jeff Cody
Subject: Re: [Qemu-devel] [PATCH v2 1/1] block/gluster: memory usage: use one glfs instance per volume
Date: Fri, 28 Oct 2016 13:04:37 -0400
User-agent: Mutt/1.5.24 (2015-08-30)

On Fri, Oct 28, 2016 at 12:44:24PM +0200, Niels de Vos wrote:
> On Thu, Oct 27, 2016 at 08:54:50PM +0530, Prasanna Kumar Kalever wrote:
> > Currently, for every drive accessed via gfapi we create a new glfs
> > instance (a glfs_new() call followed by glfs_init()), which can consume
> > a few hundred MB of memory; the table below shows that each instance
> > consumed roughly 300 MB of VSZ.
> > 
> > Before:
> > -------
> > Disks   VSZ     RSS
> > 1       1098728 187756
> > 2       1430808 198656
> > 3       1764932 199704
> > 4       2084728 202684
> > 
> > This patch maintains a list of pre-opened glfs objects. On adding
> > a new drive that belongs to an already-opened gluster volume, we
> > reuse the existing glfs object by bumping its refcount.
> > 
> > With this approach we avoid the unwanted memory consumption and the
> > extra glfs_new()/glfs_init() calls when the disk (file) being accessed
> > belongs to an already-opened volume.
> > 
> > The table below shows that the memory usage after adding a disk
> > (which reuses the existing glfs object) is negligible compared
> > to before.
> > 
> > After:
> > ------
> > Disks   VSZ     RSS
> > 1       1101964 185768
> > 2       1109604 194920
> > 3       1114012 196036
> > 4       1114496 199868
> > 
> > Disks: number of -drive options
> > VSZ: virtual memory size of the process, in KiB
> > RSS: resident set size, the non-swapped physical memory, in KiB
> > 
> > VSZ and RSS were measured with the 'ps aux' utility.
> > 
> > Signed-off-by: Prasanna Kumar Kalever <address@hidden>
> > ---
> > v2: Address comments from Jeff Cody on v1
> > v1: Initial patch
> > ---
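For readers unfamiliar with gfapi, the per-drive lifecycle this patch
deduplicates looks roughly like the sketch below. Host, port and volume
names are hypothetical and error handling is omitted; build with
"gcc sketch.c -lgfapi".

    #include <glusterfs/api/glfs.h>

    int main(void)
    {
        /* Before the patch, QEMU ran this once per -drive, paying the
         * full per-instance cost even for drives on the same volume. */
        glfs_t *fs = glfs_new("vol1");
        glfs_set_volfile_server(fs, "tcp", "gluster.example.com", 24007);
        glfs_init(fs);   /* the expensive step: connects to the server,
                          * fetches the volfile, brings up the client stack */

        /* ... per-disk I/O via glfs_open()/glfs_pread()/glfs_pwrite() ... */

        glfs_fini(fs);   /* full teardown of the instance */
        return 0;
    }

With the patch below, the glfs_new()/glfs_init() pair runs once per
volume, and later drives on the same volume only bump a refcount.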
> >  block/gluster.c | 94 ++++++++++++++++++++++++++++++++++++++++++++++++---------
> >  1 file changed, 80 insertions(+), 14 deletions(-)
> > 
> > diff --git a/block/gluster.c b/block/gluster.c
> > index 01b479f..7e39201 100644
> > --- a/block/gluster.c
> > +++ b/block/gluster.c
> > @@ -54,6 +54,19 @@ typedef struct BDRVGlusterReopenState {
> >  } BDRVGlusterReopenState;
> >  
> >  
> > +typedef struct GlfsPreopened {
> > +    char *volume;
> > +    glfs_t *fs;
> > +    int ref;
> > +} GlfsPreopened;
> > +
> > +typedef struct ListElement {
> > +    QLIST_ENTRY(ListElement) list;
> > +    GlfsPreopened saved;
> > +} ListElement;
> > +
> > +static QLIST_HEAD(glfs_list, ListElement) glfs_list;
> > +
> >  static QemuOptsList qemu_gluster_create_opts = {
> >      .name = "qemu-gluster-create-opts",
> >      .head = QTAILQ_HEAD_INITIALIZER(qemu_gluster_create_opts.head),
> > @@ -182,6 +195,57 @@ static QemuOptsList runtime_tcp_opts = {
> >      },
> >  };
> >  
> > +static void glfs_set_preopened(const char *volume, glfs_t *fs)
> > +{
> > +    ListElement *entry = NULL;
> > +
> > +    entry = g_new(ListElement, 1);
> > +
> > +    entry->saved.volume = g_strdup(volume);
> > +
> > +    entry->saved.fs = fs;
> > +    entry->saved.ref = 1;
> > +
> > +    QLIST_INSERT_HEAD(&glfs_list, entry, list);
> > +}
> > +
> > +static glfs_t *glfs_find_preopened(const char *volume)
> > +{
> > +    ListElement *entry = NULL;
> > +
> > +     QLIST_FOREACH(entry, &glfs_list, list) {
> 
> Indentation seems off by a space.
> 
> Does QLIST_FOREACH do locking? If not, it looks possible that
> glfs_find_preopened() and glfs_clear_preopened() could race on
> 'entry', which may then get g_free()'d before this dereference. Maybe the
> block layer prevents this from happening?
> 
> If others confirm that there is no explicit locking needed here, you can
> add my Reviewed-by line.
>

There should be no issue with locking - it will all be running in the same
thread.

I'll add your R-b when I pull it in, and add my own:

Reviewed-by: Jeff Cody <address@hidden>

> 
> 
> > +        if (strcmp(entry->saved.volume, volume) == 0) {
> > +            entry->saved.ref++;
> > +            return entry->saved.fs;
> > +        }
> > +     }
> > +
> > +    return NULL;
> > +}
> > +
> > +static void glfs_clear_preopened(glfs_t *fs)
> > +{
> > +    ListElement *entry = NULL;
> > +
> > +    if (fs == NULL) {
> > +        return;
> > +    }
> > +
> > +    QLIST_FOREACH(entry, &glfs_list, list) {
> > +        if (entry->saved.fs == fs) {
> > +            if (--entry->saved.ref) {
> > +                return;
> > +            }
> > +
> > +            QLIST_REMOVE(entry, list);
> > +
> > +            glfs_fini(entry->saved.fs);
> > +            g_free(entry->saved.volume);
> > +            g_free(entry);
> > +        }
> > +    }
> > +}
> > +
> >  static int parse_volume_options(BlockdevOptionsGluster *gconf, char *path)
> >  {
> >      char *p, *q;
> > @@ -319,11 +383,18 @@ static struct glfs *qemu_gluster_glfs_init(BlockdevOptionsGluster *gconf,
> >      int old_errno;
> >      GlusterServerList *server;
> >  
> > +    glfs = glfs_find_preopened(gconf->volume);
> > +    if (glfs) {
> > +        return glfs;
> > +    }
> > +
> >      glfs = glfs_new(gconf->volume);
> >      if (!glfs) {
> >          goto out;
> >      }
> >  
> > +    glfs_set_preopened(gconf->volume, glfs);
> > +
> >      for (server = gconf->server; server; server = server->next) {
> >          if (server->value->type  == GLUSTER_TRANSPORT_UNIX) {
> >              ret = glfs_set_volfile_server(glfs,
> > @@ -375,7 +446,7 @@ static struct glfs *qemu_gluster_glfs_init(BlockdevOptionsGluster *gconf,
> >  out:
> >      if (glfs) {
> >          old_errno = errno;
> > -        glfs_fini(glfs);
> > +        glfs_clear_preopened(glfs);
> >          errno = old_errno;
> >      }
> >      return NULL;
> > @@ -741,9 +812,9 @@ out:
> >      if (s->fd) {
> >          glfs_close(s->fd);
> >      }
> > -    if (s->glfs) {
> > -        glfs_fini(s->glfs);
> > -    }
> > +
> > +    glfs_clear_preopened(s->glfs);
> > +
> >      return ret;
> >  }
> >  
> > @@ -808,9 +879,8 @@ static void qemu_gluster_reopen_commit(BDRVReopenState *state)
> >      if (s->fd) {
> >          glfs_close(s->fd);
> >      }
> > -    if (s->glfs) {
> > -        glfs_fini(s->glfs);
> > -    }
> > +
> > +    glfs_clear_preopened(s->glfs);
> >  
> >      /* use the newly opened image / connection */
> >      s->fd         = reop_s->fd;
> > @@ -835,9 +905,7 @@ static void qemu_gluster_reopen_abort(BDRVReopenState *state)
> >          glfs_close(reop_s->fd);
> >      }
> >  
> > -    if (reop_s->glfs) {
> > -        glfs_fini(reop_s->glfs);
> > -    }
> > +    glfs_clear_preopened(reop_s->glfs);
> >  
> >      g_free(state->opaque);
> >      state->opaque = NULL;
> > @@ -955,9 +1023,7 @@ static int qemu_gluster_create(const char *filename,
> >  out:
> >      g_free(tmp);
> >      qapi_free_BlockdevOptionsGluster(gconf);
> > -    if (glfs) {
> > -        glfs_fini(glfs);
> > -    }
> > +    glfs_clear_preopened(glfs);
> >      return ret;
> >  }
> >  
> > @@ -1029,7 +1095,7 @@ static void qemu_gluster_close(BlockDriverState *bs)
> >          glfs_close(s->fd);
> >          s->fd = NULL;
> >      }
> > -    glfs_fini(s->glfs);
> > +    glfs_clear_preopened(s->glfs);
> >  }
> >  
> >  static coroutine_fn int qemu_gluster_co_flush_to_disk(BlockDriverState *bs)
> > -- 
> > 2.7.4
> > 
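To make the acquire/release pairing concrete, here is a self-contained
toy model of the scheme. conn_t and the helper names below are
illustrative stand-ins for glfs_t and the patch's functions, and BSD
<sys/queue.h> lists stand in for QEMU's QLIST macros; this is not the
patch code itself. Build with "gcc demo.c".

    #include <assert.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <sys/queue.h>

    typedef struct conn { char *volume; } conn_t;  /* stand-in for glfs_t */

    typedef struct ListElement {
        LIST_ENTRY(ListElement) list;
        conn_t *fs;
        int ref;
    } ListElement;

    static LIST_HEAD(conn_head, ListElement) conn_list =
        LIST_HEAD_INITIALIZER(conn_list);

    /* Mirrors glfs_find_preopened(): a hit bumps the refcount. */
    static conn_t *find_preopened(const char *volume)
    {
        ListElement *e;

        LIST_FOREACH(e, &conn_list, list) {
            if (strcmp(e->fs->volume, volume) == 0) {
                e->ref++;
                return e->fs;
            }
        }
        return NULL;
    }

    /* Mirrors glfs_set_preopened(): register a new connection, ref = 1. */
    static void set_preopened(conn_t *fs)
    {
        ListElement *e = malloc(sizeof(*e));

        e->fs = fs;
        e->ref = 1;
        LIST_INSERT_HEAD(&conn_list, e, list);
    }

    /* Mirrors glfs_clear_preopened(): teardown (glfs_fini() in the
     * patch) happens only when the last user releases the connection. */
    static void clear_preopened(conn_t *fs)
    {
        ListElement *e;

        LIST_FOREACH(e, &conn_list, list) {
            if (e->fs == fs) {
                if (--e->ref) {
                    return;          /* other drives still use it */
                }
                LIST_REMOVE(e, list);
                free(e->fs->volume);
                free(e->fs);         /* stands in for glfs_fini() */
                free(e);
                return;
            }
        }
    }

    int main(void)
    {
        conn_t *c = malloc(sizeof(*c));

        c->volume = strdup("vol1");
        set_preopened(c);                     /* drive 1: new conn, ref = 1 */
        assert(find_preopened("vol1") == c);  /* drive 2: reused, ref = 2 */

        clear_preopened(c);                   /* ref = 1, conn stays alive */
        clear_preopened(c);                   /* ref = 0, torn down */
        printf("ok\n");
        return 0;
    }

The invariant in the patch is the same: every successful
qemu_gluster_glfs_init() (whether it created a new glfs instance or found
a preopened one) is balanced by exactly one glfs_clear_preopened() in the
open, reopen, create, close and error paths touched above, so glfs_fini()
runs only when the last drive on a volume goes away.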


