From: Fam Zheng
Subject: [Qemu-devel] [PATCH v3 4/4] dma-helpers: Fix race condition of continue_after_map_failure and dma_aio_cancel
Date: Mon, 16 Mar 2015 13:31:50 +0800

If the DMA's owning thread cancels the I/O while the bounce buffer's owning
thread is notifying the "cpu client list", a use-after-free happens:

     continue_after_map_failure               dma_aio_cancel
     ------------------------------------------------------------------
     aio_bh_new
                                              qemu_bh_delete
     qemu_bh_schedule (use after free)

Also, the old code doesn't run the BH in the right AioContext: qemu_bh_new()
creates it in the main loop context rather than the BlockBackend's AioContext.

Fix both problems by passing a QEMUBH to cpu_register_map_client.

Signed-off-by: Fam Zheng <address@hidden>
---
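For context (not part of the patch): below is a minimal sketch of the caller
pattern the new API implies, mirroring what the dma-helpers.c hunk further down
does.  MapRetryState, my_retry, my_map_failed and my_cancel are hypothetical
names used only for illustration; the dma-helpers.c changes are the
authoritative version.

    #include "qemu-common.h"
    #include "block/aio.h"
    #include "exec/cpu-common.h"

    typedef struct MapRetryState {
        AioContext *ctx;   /* the AioContext the retry must run in */
        QEMUBH *bh;        /* non-NULL while waiting for the bounce buffer */
    } MapRetryState;

    static void my_retry(void *opaque)
    {
        MapRetryState *s = opaque;

        /* cpu_notify_map_clients() already removed and freed the MapClient
         * entry before scheduling us; only the BH itself is still ours. */
        qemu_bh_delete(s->bh);
        s->bh = NULL;
        /* ... retry cpu_physical_memory_map() from here ... */
    }

    /* On map failure: create the BH in our own AioContext and hand it to
     * the map-client list, instead of a bare callback/opaque pair. */
    static void my_map_failed(MapRetryState *s)
    {
        s->bh = aio_bh_new(s->ctx, my_retry, s);
        cpu_register_map_client(s->bh);
    }

    /* On cancel: unregister first (exec.c takes map_client_list_lock),
     * then delete the BH, so the notifier can never schedule a freed BH. */
    static void my_cancel(MapRetryState *s)
    {
        if (s->bh) {
            cpu_unregister_map_client(s->bh);
            qemu_bh_delete(s->bh);
            s->bh = NULL;
        }
    }
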
 dma-helpers.c             | 17 ++++++++---------
 exec.c                    | 32 +++++++++++++++++++++-----------
 include/exec/cpu-common.h |  3 ++-
 3 files changed, 31 insertions(+), 21 deletions(-)

diff --git a/dma-helpers.c b/dma-helpers.c
index 6918572..1fddf6a 100644
--- a/dma-helpers.c
+++ b/dma-helpers.c
@@ -92,14 +92,6 @@ static void reschedule_dma(void *opaque)
     dma_blk_cb(dbs, 0);
 }
 
-static void continue_after_map_failure(void *opaque)
-{
-    DMAAIOCB *dbs = (DMAAIOCB *)opaque;
-
-    dbs->bh = qemu_bh_new(reschedule_dma, dbs);
-    qemu_bh_schedule(dbs->bh);
-}
-
 static void dma_blk_unmap(DMAAIOCB *dbs)
 {
     int i;
@@ -161,7 +153,9 @@ static void dma_blk_cb(void *opaque, int ret)
 
     if (dbs->iov.size == 0) {
         trace_dma_map_wait(dbs);
-        cpu_register_map_client(dbs, continue_after_map_failure);
+        dbs->bh = aio_bh_new(blk_get_aio_context(dbs->blk),
+                             reschedule_dma, dbs);
+        cpu_register_map_client(dbs->bh);
         return;
     }
 
@@ -183,6 +177,11 @@ static void dma_aio_cancel(BlockAIOCB *acb)
     if (dbs->acb) {
         blk_aio_cancel_async(dbs->acb);
     }
+    if (dbs->bh) {
+        cpu_unregister_map_client(dbs->bh);
+        qemu_bh_delete(dbs->bh);
+        dbs->bh = NULL;
+    }
 }
 
 
diff --git a/exec.c b/exec.c
index 20381a0..b15ca5e 100644
--- a/exec.c
+++ b/exec.c
@@ -2480,8 +2480,7 @@ typedef struct {
 static BounceBuffer bounce;
 
 typedef struct MapClient {
-    void *opaque;
-    void (*callback)(void *opaque);
+    QEMUBH *bh;
     QLIST_ENTRY(MapClient) link;
 } MapClient;
 
@@ -2489,30 +2488,29 @@ QemuMutex map_client_list_lock;
 static QLIST_HEAD(map_client_list, MapClient) map_client_list
     = QLIST_HEAD_INITIALIZER(map_client_list);
 
+static void cpu_unregister_map_client_do(MapClient *client);
 static void cpu_notify_map_clients_unlocked(void)
 {
     MapClient *client;
 
     while (!QLIST_EMPTY(&map_client_list)) {
         client = QLIST_FIRST(&map_client_list);
-        client->callback(client->opaque);
-        cpu_unregister_map_client(client);
+        qemu_bh_schedule(client->bh);
+        cpu_unregister_map_client_do(client);
     }
 }
 
-void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
+void cpu_register_map_client(QEMUBH *bh)
 {
     MapClient *client = g_malloc(sizeof(*client));
 
     qemu_mutex_lock(&map_client_list_lock);
-    client->opaque = opaque;
-    client->callback = callback;
+    client->bh = bh;
     QLIST_INSERT_HEAD(&map_client_list, client, link);
     if (!atomic_read(&bounce.in_use)) {
         cpu_notify_map_clients_unlocked();
     }
     qemu_mutex_unlock(&map_client_list_lock);
-    return client;
 }
 
 void cpu_exec_init_all(void)
@@ -2525,14 +2523,26 @@ void cpu_exec_init_all(void)
     qemu_mutex_init(&map_client_list_lock);
 }
 
-static void cpu_unregister_map_client(void *_client)
+static void cpu_unregister_map_client_do(MapClient *client)
 {
-    MapClient *client = (MapClient *)_client;
-
     QLIST_REMOVE(client, link);
     g_free(client);
 }
 
+void cpu_unregister_map_client(QEMUBH *bh)
+{
+    MapClient *client;
+
+    qemu_mutex_lock(&map_client_list_lock);
+    QLIST_FOREACH(client, &map_client_list, link) {
+        if (client->bh == bh) {
+            cpu_unregister_map_client_do(client);
+            break;
+        }
+    }
+    qemu_mutex_unlock(&map_client_list_lock);
+}
+
 static void cpu_notify_map_clients(void)
 {
     qemu_mutex_lock(&map_client_list_lock);
diff --git a/include/exec/cpu-common.h b/include/exec/cpu-common.h
index fcc3162..43428bd 100644
--- a/include/exec/cpu-common.h
+++ b/include/exec/cpu-common.h
@@ -82,7 +82,8 @@ void *cpu_physical_memory_map(hwaddr addr,
                               int is_write);
 void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                                int is_write, hwaddr access_len);
-void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque));
+void cpu_register_map_client(QEMUBH *bh);
+void cpu_unregister_map_client(QEMUBH *bh);
 
 bool cpu_physical_memory_is_io(hwaddr phys_addr);
 
-- 
1.9.3