From: Aneesh Kumar K.V
Subject: Re: [Qemu-devel] [PATCH 3/3] hw/9pfs: use g_strdup_printf() instead of PATH_MAX limitation
Date: Tue, 04 Mar 2014 00:59:45 +0530
User-agent: Notmuch/0.17+7~gc734dd75344e (http://notmuchmail.org) Emacs/24.3.1 (x86_64-pc-linux-gnu)

"Aneesh Kumar K.V" <address@hidden> writes:

> Chen Gang <address@hidden> writes:
> Can we keep this as:
>
>     v9fs_co_run_in_worker(
>         {
>             buf->data = __readlink(&s->ctx, path);
>         });
>
> I can do that change for you if you want. I will also have to go through
> the rest of the code to make sure we do free the memory in all error
> paths. So the rest of the review is going to take time. Hope that is OK.
>

I ended up with the below diff on top. The resulting tree is pushed to

https://github.com/kvaneesh/qemu/commits/for-upstream
and passes all the tuxera tests. Let me know what you think.
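
For reference, the new __readlink() helper below implements the standard
grow-the-buffer readlink pattern. Here is a minimal standalone sketch of
the same idea, using plain POSIX readlink(2) and malloc() instead of the
9pfs ops table and g_malloc(); the names in it are illustrative, not
taken from the patch:

#include <limits.h>
#include <stdlib.h>
#include <unistd.h>

/*
 * Return a malloc()ed, NUL-terminated copy of the symlink target,
 * or NULL on error (errno is set by readlink).  Doubles the buffer
 * until the result fits with room for the terminating NUL.
 */
static char *readlink_dup(const char *path)
{
    size_t maxlen = PATH_MAX;

    for (;;) {
        char *buf = malloc(maxlen);
        ssize_t len;

        if (!buf) {
            return NULL;
        }
        len = readlink(path, buf, maxlen);
        if (len < 0) {
            free(buf);              /* readlink failed */
            return NULL;
        }
        if ((size_t)len < maxlen) {
            buf[len] = '\0';        /* fits, with room for the NUL */
            return buf;
        }
        free(buf);                  /* possibly truncated: grow and retry */
        maxlen *= 2;
    }
}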

diff --git a/hw/9pfs/cofs.c b/hw/9pfs/cofs.c
index 739bad019e3a..42ee614e27f0 100644
--- a/hw/9pfs/cofs.c
+++ b/hw/9pfs/cofs.c
@@ -17,42 +17,55 @@
 #include "block/coroutine.h"
 #include "virtio-9p-coth.h"
 
+static ssize_t __readlink(V9fsState *s, V9fsPath *path, V9fsString *buf)
+{
+    ssize_t len, maxlen = PATH_MAX;
+
+    buf->data = g_malloc(PATH_MAX);
+    for (;;) {
+        len = s->ops->readlink(&s->ctx, path, buf->data, maxlen);
+        if (len < 0) {
+            g_free(buf->data);
+            buf->data = NULL;
+            buf->size = 0;
+            break;
+        } else if (len == maxlen) {
+            /*
+             * We don't have space for the NUL terminator, or there
+             * is more to read.  Increase the size and try again.
+             */
+            maxlen *= 2;
+            g_free(buf->data);
+            buf->data = g_malloc(maxlen);
+            continue;
+        }
+        /*
+         * NUL-terminate the readlink output.
+         */
+        buf->data[len] = '\0';
+        buf->size = len;
+        break;
+    }
+    return len;
+}
+
 int v9fs_co_readlink(V9fsPDU *pdu, V9fsPath *path, V9fsString *buf)
 {
     int err;
-    ssize_t len, maxlen = PATH_MAX;
     V9fsState *s = pdu->s;
 
     if (v9fs_request_cancelled(pdu)) {
         return -EINTR;
     }
-    buf->data = g_malloc(maxlen);
     v9fs_path_read_lock(s);
     v9fs_co_run_in_worker(
-        while (1) {
-            len = s->ops->readlink(&s->ctx, path,
-                                   buf->data, maxlen - 1);
-            if (len == maxlen - 1) {
-                g_free(buf->data);
-                maxlen *= 2;
-                buf->data = g_malloc(maxlen);
-                continue;
-            }
-            if (len > -1) {
-                buf->size = len;
-                buf->data[len] = 0;
-                err = 0;
-            } else {
+        {
+            err = __readlink(s, path, buf);
+            if (err < 0) {
                 err = -errno;
             }
-            break;
         });
     v9fs_path_unlock(s);
-    if (err) {
-        g_free(buf->data);
-        buf->data = NULL;
-        buf->size = 0;
-    }
     return err;
 }
 
diff --git a/hw/9pfs/virtio-9p-local.c b/hw/9pfs/virtio-9p-local.c
index 9e9cc319ec54..56b302c122b6 100644
--- a/hw/9pfs/virtio-9p-local.c
+++ b/hw/9pfs/virtio-9p-local.c
@@ -125,8 +125,7 @@ static int local_lstat(FsContext *fs_ctx, V9fsPath *fs_path, struct stat *stbuf)
     buffer = rpath(fs_ctx, path);
     err =  lstat(buffer, stbuf);
     if (err) {
-        g_free(buffer);
-        return err;
+        goto err_out;
     }
     if (fs_ctx->export_flags & V9FS_SM_MAPPED) {
         /* Actual credentials are part of extended attrs */
@@ -151,6 +150,7 @@ static int local_lstat(FsContext *fs_ctx, V9fsPath *fs_path, struct stat *stbuf)
         local_mapped_file_attr(fs_ctx, path, stbuf);
     }
 
+err_out:
     g_free(buffer);
     return err;
 }
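
The virtio-9p-local.c hunk uses the usual single-exit cleanup idiom:
every return path funnels through one label that frees the rpath()
buffer, so no early error return can leak it. A minimal sketch of the
shape (local_lstat_sketch() and the g_strdup() stand-in for rpath()
are illustrative, not part of the patch):

#include <glib.h>
#include <sys/stat.h>

static int local_lstat_sketch(const char *path, struct stat *stbuf)
{
    int err;
    char *buffer = g_strdup(path);   /* stand-in for rpath() */

    err = lstat(buffer, stbuf);
    if (err) {
        goto err_out;                /* skip the rest, still free buffer */
    }
    /* ... further processing that must not leak buffer ... */

err_out:
    g_free(buffer);
    return err;
}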



