From: Paolo Bonzini
Subject: [Qemu-devel] [PATCH 4/7] aio: remove process_queue callback and qemu_aio_process_queue
Date: Mon, 12 Mar 2012 19:22:25 +0100

Both unused after the previous patch.

Signed-off-by: Paolo Bonzini <address@hidden>
---
 aio.c              |   29 ++---------------------------
 block/curl.c       |   10 ++++------
 block/iscsi.c      |    4 ++--
 block/nbd.c        |    8 ++++----
 block/rbd.c        |    5 ++---
 block/sheepdog.c   |   11 +++++------
 linux-aio.c        |    2 +-
 posix-aio-compat.c |    3 +--
 qemu-aio.h         |   13 -------------
 9 files changed, 21 insertions(+), 64 deletions(-)
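
For callers the change is mechanical: the io_process_queue argument simply
disappears from qemu_aio_set_fd_handler().  A rough sketch of the new
five-argument form (my_read_cb, my_flush_cb and opaque are illustrative
placeholders, not names from this patch):

    /* old form: read handler, write handler, flush handler,
     * process-queue callback, opaque pointer */
    qemu_aio_set_fd_handler(fd, my_read_cb, NULL, my_flush_cb, NULL, opaque);

    /* new form: the io_process_queue parameter is gone */
    qemu_aio_set_fd_handler(fd, my_read_cb, NULL, my_flush_cb, opaque);

    /* unregistering a descriptor likewise drops one NULL */
    qemu_aio_set_fd_handler(fd, NULL, NULL, NULL, NULL);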

diff --git a/aio.c b/aio.c
index eb3bf42..f19b3c6 100644
--- a/aio.c
+++ b/aio.c
@@ -35,7 +35,6 @@ struct AioHandler
     IOHandler *io_read;
     IOHandler *io_write;
     AioFlushHandler *io_flush;
-    AioProcessQueue *io_process_queue;
     int deleted;
     void *opaque;
     QLIST_ENTRY(AioHandler) node;
@@ -58,7 +57,6 @@ int qemu_aio_set_fd_handler(int fd,
                             IOHandler *io_read,
                             IOHandler *io_write,
                             AioFlushHandler *io_flush,
-                            AioProcessQueue *io_process_queue,
                             void *opaque)
 {
     AioHandler *node;
@@ -91,7 +89,6 @@ int qemu_aio_set_fd_handler(int fd,
         node->io_read = io_read;
         node->io_write = io_write;
         node->io_flush = io_flush;
-        node->io_process_queue = io_process_queue;
         node->opaque = opaque;
     }
 
@@ -122,39 +119,17 @@ void qemu_aio_flush(void)
     } while (qemu_bh_poll() || ret > 0);
 }
 
-int qemu_aio_process_queue(void)
-{
-    AioHandler *node;
-    int ret = 0;
-
-    walking_handlers = 1;
-
-    QLIST_FOREACH(node, &aio_handlers, node) {
-        if (node->io_process_queue) {
-            if (node->io_process_queue(node->opaque)) {
-                ret = 1;
-            }
-        }
-    }
-
-    walking_handlers = 0;
-
-    return ret;
-}
-
 void qemu_aio_wait(void)
 {
     int ret;
 
-    if (qemu_bh_poll())
-        return;
-
     /*
      * If there are callbacks left that have been queued, we need to call then.
      * Return afterwards to avoid waiting needlessly in select().
      */
-    if (qemu_aio_process_queue())
+    if (qemu_bh_poll()) {
         return;
+    }
 
     do {
         AioHandler *node;
diff --git a/block/curl.c b/block/curl.c
index e9102e3..c5ff89f 100644
--- a/block/curl.c
+++ b/block/curl.c
@@ -89,19 +89,17 @@ static int curl_sock_cb(CURL *curl, curl_socket_t fd, int action,
     DPRINTF("CURL (AIO): Sock action %d on fd %d\n", action, fd);
     switch (action) {
         case CURL_POLL_IN:
-            qemu_aio_set_fd_handler(fd, curl_multi_do, NULL, curl_aio_flush,
-                                    NULL, s);
+            qemu_aio_set_fd_handler(fd, curl_multi_do, NULL, curl_aio_flush, s);
             break;
         case CURL_POLL_OUT:
-            qemu_aio_set_fd_handler(fd, NULL, curl_multi_do, curl_aio_flush,
-                                    NULL, s);
+            qemu_aio_set_fd_handler(fd, NULL, curl_multi_do, curl_aio_flush, s);
             break;
         case CURL_POLL_INOUT:
             qemu_aio_set_fd_handler(fd, curl_multi_do, curl_multi_do,
-                                    curl_aio_flush, NULL, s);
+                                    curl_aio_flush, s);
             break;
         case CURL_POLL_REMOVE:
-            qemu_aio_set_fd_handler(fd, NULL, NULL, NULL, NULL, NULL);
+            qemu_aio_set_fd_handler(fd, NULL, NULL, NULL, NULL);
             break;
     }
 
diff --git a/block/iscsi.c b/block/iscsi.c
index bd3ca11..5222726 100644
--- a/block/iscsi.c
+++ b/block/iscsi.c
@@ -108,7 +108,7 @@ iscsi_set_events(IscsiLun *iscsilun)
     qemu_aio_set_fd_handler(iscsi_get_fd(iscsi), iscsi_process_read,
                            (iscsi_which_events(iscsi) & POLLOUT)
                            ? iscsi_process_write : NULL,
-                           iscsi_process_flush, NULL, iscsilun);
+                           iscsi_process_flush, iscsilun);
 }
 
 static void
@@ -682,7 +682,7 @@ static void iscsi_close(BlockDriverState *bs)
     IscsiLun *iscsilun = bs->opaque;
     struct iscsi_context *iscsi = iscsilun->iscsi;
 
-    qemu_aio_set_fd_handler(iscsi_get_fd(iscsi), NULL, NULL, NULL, NULL, NULL);
+    qemu_aio_set_fd_handler(iscsi_get_fd(iscsi), NULL, NULL, NULL, NULL);
     iscsi_destroy_context(iscsi);
     memset(iscsilun, 0, sizeof(IscsiLun));
 }
diff --git a/block/nbd.c b/block/nbd.c
index 161b299..524c9cf 100644
--- a/block/nbd.c
+++ b/block/nbd.c
@@ -191,7 +191,7 @@ static int nbd_co_send_request(BDRVNBDState *s, struct nbd_request *request,
     qemu_co_mutex_lock(&s->send_mutex);
     s->send_coroutine = qemu_coroutine_self();
     qemu_aio_set_fd_handler(s->sock, nbd_reply_ready, nbd_restart_write,
-                            nbd_have_request, NULL, s);
+                            nbd_have_request, s);
     rc = nbd_send_request(s->sock, request);
     if (rc != -1 && iov) {
         ret = qemu_co_sendv(s->sock, iov, request->len, offset);
@@ -201,7 +201,7 @@ static int nbd_co_send_request(BDRVNBDState *s, struct nbd_request *request,
         }
     }
     qemu_aio_set_fd_handler(s->sock, nbd_reply_ready, NULL,
-                            nbd_have_request, NULL, s);
+                            nbd_have_request, s);
     s->send_coroutine = NULL;
     qemu_co_mutex_unlock(&s->send_mutex);
     return rc;
@@ -274,7 +274,7 @@ static int nbd_establish_connection(BlockDriverState *bs)
      * kick the reply mechanism.  */
     socket_set_nonblock(sock);
     qemu_aio_set_fd_handler(s->sock, nbd_reply_ready, NULL,
-                            nbd_have_request, NULL, s);
+                            nbd_have_request, s);
 
     s->sock = sock;
     s->size = size;
@@ -294,7 +294,7 @@ static void nbd_teardown_connection(BlockDriverState *bs)
     request.len = 0;
     nbd_send_request(s->sock, &request);
 
-    qemu_aio_set_fd_handler(s->sock, NULL, NULL, NULL, NULL, NULL);
+    qemu_aio_set_fd_handler(s->sock, NULL, NULL, NULL, NULL);
     closesocket(s->sock);
 }
 
diff --git a/block/rbd.c b/block/rbd.c
index 46a8579..6cd8448 100644
--- a/block/rbd.c
+++ b/block/rbd.c
@@ -504,7 +504,7 @@ static int qemu_rbd_open(BlockDriverState *bs, const char *filename, int flags)
     fcntl(s->fds[0], F_SETFL, O_NONBLOCK);
     fcntl(s->fds[1], F_SETFL, O_NONBLOCK);
     qemu_aio_set_fd_handler(s->fds[RBD_FD_READ], qemu_rbd_aio_event_reader,
-                            NULL, qemu_rbd_aio_flush_cb, NULL, s);
+                            NULL, qemu_rbd_aio_flush_cb, s);
 
 
     return 0;
@@ -525,8 +525,7 @@ static void qemu_rbd_close(BlockDriverState *bs)
 
     close(s->fds[0]);
     close(s->fds[1]);
-    qemu_aio_set_fd_handler(s->fds[RBD_FD_READ], NULL , NULL, NULL, NULL,
-        NULL);
+    qemu_aio_set_fd_handler(s->fds[RBD_FD_READ], NULL, NULL, NULL, NULL);
 
     rbd_close(s->image);
     rados_ioctx_destroy(s->io_ctx);
diff --git a/block/sheepdog.c b/block/sheepdog.c
index 00276f6f..60091c0 100644
--- a/block/sheepdog.c
+++ b/block/sheepdog.c
@@ -742,8 +742,7 @@ static int get_sheep_fd(BDRVSheepdogState *s)
         return -1;
     }
 
-    qemu_aio_set_fd_handler(fd, co_read_response, NULL, aio_flush_request,
-                            NULL, s);
+    qemu_aio_set_fd_handler(fd, co_read_response, NULL, aio_flush_request, s);
     return fd;
 }
 
@@ -912,7 +911,7 @@ static int coroutine_fn add_aio_request(BDRVSheepdogState *s, AIOReq *aio_req,
     qemu_co_mutex_lock(&s->lock);
     s->co_send = qemu_coroutine_self();
     qemu_aio_set_fd_handler(s->fd, co_read_response, co_write_request,
-                            aio_flush_request, NULL, s);
+                            aio_flush_request, s);
     socket_set_cork(s->fd, 1);
 
     /* send a header */
@@ -934,7 +933,7 @@ static int coroutine_fn add_aio_request(BDRVSheepdogState *s, AIOReq *aio_req,
 
     socket_set_cork(s->fd, 0);
     qemu_aio_set_fd_handler(s->fd, co_read_response, NULL,
-                            aio_flush_request, NULL, s);
+                            aio_flush_request, s);
     qemu_co_mutex_unlock(&s->lock);
 
     return 0;
@@ -1056,7 +1055,7 @@ static int sd_open(BlockDriverState *bs, const char *filename, int flags)
     g_free(buf);
     return 0;
 out:
-    qemu_aio_set_fd_handler(s->fd, NULL, NULL, NULL, NULL, NULL);
+    qemu_aio_set_fd_handler(s->fd, NULL, NULL, NULL, NULL);
     if (s->fd >= 0) {
         closesocket(s->fd);
     }
@@ -1270,7 +1269,7 @@ static void sd_close(BlockDriverState *bs)
         error_report("%s, %s", sd_strerror(rsp->result), s->name);
     }
 
-    qemu_aio_set_fd_handler(s->fd, NULL, NULL, NULL, NULL, NULL);
+    qemu_aio_set_fd_handler(s->fd, NULL, NULL, NULL, NULL);
     closesocket(s->fd);
     g_free(s->addr);
 }
diff --git a/linux-aio.c b/linux-aio.c
index 15261ec..fa0fbf3 100644
--- a/linux-aio.c
+++ b/linux-aio.c
@@ -214,7 +214,7 @@ void *laio_init(void)
         goto out_close_efd;
 
     qemu_aio_set_fd_handler(s->efd, qemu_laio_completion_cb, NULL,
-        qemu_laio_flush_cb, NULL, s);
+        qemu_laio_flush_cb, s);
 
     return s;
 
diff --git a/posix-aio-compat.c b/posix-aio-compat.c
index 1066c60..68361f5 100644
--- a/posix-aio-compat.c
+++ b/posix-aio-compat.c
@@ -663,8 +663,7 @@ int paio_init(void)
     fcntl(s->rfd, F_SETFL, O_NONBLOCK);
     fcntl(s->wfd, F_SETFL, O_NONBLOCK);
 
-    qemu_aio_set_fd_handler(s->rfd, posix_aio_read, NULL, posix_aio_flush,
-        NULL, s);
+    qemu_aio_set_fd_handler(s->rfd, posix_aio_read, NULL, posix_aio_flush, s);
 
     ret = pthread_attr_init(&attr);
     if (ret)
diff --git a/qemu-aio.h b/qemu-aio.h
index 230c2f7..0fc8409 100644
--- a/qemu-aio.h
+++ b/qemu-aio.h
@@ -41,11 +41,6 @@ void qemu_aio_release(void *p);
 /* Returns 1 if there are still outstanding AIO requests; 0 otherwise */
 typedef int (AioFlushHandler)(void *opaque);
 
-/* Runs all currently allowed AIO callbacks of completed requests in the
- * respective AIO backend. Returns 0 if no requests was handled, non-zero
- * if at least one queued request was handled. */
-typedef int (AioProcessQueue)(void *opaque);
-
 /* Flush any pending AIO operation. This function will block until all
  * outstanding AIO operations have been completed or cancelled. */
 void qemu_aio_flush(void);
@@ -56,13 +51,6 @@ void qemu_aio_flush(void);
  * result of executing I/O completion or bh callbacks. */
 void qemu_aio_wait(void);
 
-/*
- * Runs all currently allowed AIO callbacks of completed requests. Returns 0
- * if no requests were handled, non-zero if at least one request was
- * processed.
- */
-int qemu_aio_process_queue(void);
-
 /* Register a file descriptor and associated callbacks.  Behaves very similarly
  * to qemu_set_fd_handler2.  Unlike qemu_set_fd_handler2, these callbacks will
  * be invoked when using either qemu_aio_wait() or qemu_aio_flush().
@@ -74,7 +62,6 @@ int qemu_aio_set_fd_handler(int fd,
                             IOHandler *io_read,
                             IOHandler *io_write,
                             AioFlushHandler *io_flush,
-                            AioProcessQueue *io_process_queue,
                             void *opaque);
 
 #endif
-- 
1.7.7.6