From: Yoshiaki Tamura
Subject: [Qemu-devel] [PATCH 09/21] Introduce event-tap.
Date: Thu, 25 Nov 2010 15:06:48 +0900

event-tap controls when to start an FT transaction, and provides proxy
functions to be called from net/block devices.  During an FT transaction,
it queues up net/block requests, and flushes them once the transaction
is completed.
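
For illustration, here is a minimal caller sketch (hypothetical code, not
part of this patch): a device model that used to call bdrv_aio_writev()
directly switches to the proxy, so that while event-tap is ON the write is
queued for the FT transaction and otherwise passes straight through.  The
names example_submit_write()/example_write_cb() are made up for the example.

    #include "qemu-common.h"
    #include "block.h"
    #include "event-tap.h"

    /* Completion callback; with event-tap ON it runs after the queued
     * request is flushed by event_tap_flush()/event_tap_flush_one(). */
    static void example_write_cb(void *opaque, int ret)
    {
    }

    static void example_submit_write(BlockDriverState *bs, int64_t sector,
                                     QEMUIOVector *qiov, int nb_sectors)
    {
        /* Queued when event_tap_register() has turned event-tap ON,
         * otherwise identical to calling bdrv_aio_writev().  The returned
         * aiocb is ignored here for brevity. */
        bdrv_aio_writev_proxy(bs, sector, qiov, nb_sectors,
                              example_write_cb, NULL);
    }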

Signed-off-by: Yoshiaki Tamura <address@hidden>
Signed-off-by: OHMURA Kei <address@hidden>
---
 Makefile.target |    1 +
 block.h         |    9 +
 event-tap.c     |  794 +++++++++++++++++++++++++++++++++++++++++++++++++++++++
 event-tap.h     |   34 +++
 net.h           |    4 +
 net/queue.c     |    1 +
 6 files changed, 843 insertions(+), 0 deletions(-)
 create mode 100644 event-tap.c
 create mode 100644 event-tap.h

diff --git a/Makefile.target b/Makefile.target
index 2800f47..3922d79 100644
--- a/Makefile.target
+++ b/Makefile.target
@@ -197,6 +197,7 @@ obj-y += rwhandler.o
 obj-$(CONFIG_KVM) += kvm.o kvm-all.o
 obj-$(CONFIG_NO_KVM) += kvm-stub.o
 LIBS+=-lz
+obj-y += event-tap.o
 
 QEMU_CFLAGS += $(VNC_TLS_CFLAGS)
 QEMU_CFLAGS += $(VNC_SASL_CFLAGS)
diff --git a/block.h b/block.h
index 78ecfac..0f07617 100644
--- a/block.h
+++ b/block.h
@@ -116,6 +116,12 @@ BlockDriverAIOCB *bdrv_aio_readv(BlockDriverState *bs, int64_t sector_num,
 BlockDriverAIOCB *bdrv_aio_writev(BlockDriverState *bs, int64_t sector_num,
                                   QEMUIOVector *iov, int nb_sectors,
                                   BlockDriverCompletionFunc *cb, void *opaque);
+
+BlockDriverAIOCB *bdrv_aio_writev_proxy(BlockDriverState *bs,
+                                        int64_t sector_num, QEMUIOVector *iov,
+                                        int nb_sectors,
+                                        BlockDriverCompletionFunc *cb,
+                                        void *opaque);
 BlockDriverAIOCB *bdrv_aio_flush(BlockDriverState *bs,
                                 BlockDriverCompletionFunc *cb, void *opaque);
 void bdrv_aio_cancel(BlockDriverAIOCB *acb);
@@ -134,6 +140,9 @@ typedef struct BlockRequest {
 
 int bdrv_aio_multiwrite(BlockDriverState *bs, BlockRequest *reqs,
     int num_reqs);
+int bdrv_aio_multiwrite_proxy(BlockDriverState *bs, BlockRequest *reqs,
+                              int num_reqs);
+
 
 /* sg packet commands */
 int bdrv_ioctl(BlockDriverState *bs, unsigned long int req, void *buf);
diff --git a/event-tap.c b/event-tap.c
new file mode 100644
index 0000000..cf7a38a
--- /dev/null
+++ b/event-tap.c
@@ -0,0 +1,794 @@
+/*
+ * Event Tap functions for QEMU
+ *
+ * Copyright (c) 2010 Nippon Telegraph and Telephone Corporation. 
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.  See
+ * the COPYING file in the top-level directory.
+ */
+
+#include "qemu-common.h"
+#include "block.h"
+#include "block_int.h"
+#include "ioport.h"
+#include "osdep.h"
+#include "sysemu.h"
+#include "hw/hw.h"
+#include "net.h"
+#include "event-tap.h"
+
+// #define DEBUG_EVENT_TAP
+
+#ifdef DEBUG_EVENT_TAP
+#define DPRINTF(fmt, ...) \
+    do { printf("event-tap: " fmt, ## __VA_ARGS__); } while (0)
+#else
+#define DPRINTF(fmt, ...) \
+    do { } while (0)
+#endif
+
+static enum EVENT_TAP_STATE event_tap_state = EVENT_TAP_OFF;
+static BlockDriverAIOCB dummy_acb; /* we may need a pool for dummies */
+
+typedef struct EventTapIOport {
+    uint32_t address;
+    uint32_t data;    
+    int      index;
+} EventTapIOport;
+
+#define MMIO_BUF_SIZE 8
+
+typedef struct EventTapMMIO {
+    uint64_t address;
+    uint8_t  buf[MMIO_BUF_SIZE];
+    int      len;
+} EventTapMMIO;
+
+typedef struct EventTapNetReq {
+    char *device_name;
+    int iovcnt;
+    struct iovec *iov;
+    int vlan_id;
+    bool vlan_needed;
+    bool async;
+} EventTapNetReq;
+
+#define MAX_BLOCK_REQUEST 32
+
+typedef struct EventTapBlkReq {
+    char *device_name;
+    int num_reqs;
+    int num_cbs;
+    bool is_multiwrite;
+    BlockRequest reqs[MAX_BLOCK_REQUEST];
+    BlockDriverCompletionFunc *cb[MAX_BLOCK_REQUEST];
+    void *opaque[MAX_BLOCK_REQUEST];
+} EventTapBlkReq;
+
+#define EVENT_TAP_IOPORT (1 << 0)
+#define EVENT_TAP_MMIO   (1 << 1)
+#define EVENT_TAP_NET    (1 << 2)
+#define EVENT_TAP_BLK    (1 << 3)
+
+#define EVENT_TAP_TYPE_MASK (EVENT_TAP_NET - 1)
+
+typedef struct EventTapLog {
+    int mode;
+    union {
+        EventTapIOport ioport;
+        EventTapMMIO mmio;
+    };
+    union {
+        EventTapNetReq net_req;
+        EventTapBlkReq blk_req;
+    };
+    QTAILQ_ENTRY(EventTapLog) node;
+} EventTapLog;
+
+static EventTapLog *last_event_tap;
+
+static QTAILQ_HEAD(, EventTapLog) event_list;
+static QTAILQ_HEAD(, EventTapLog) event_pool;
+
+static int (*event_tap_cb)(void);
+static QEMUBH *event_tap_bh;
+static VMChangeStateEntry *vmstate;
+
+static void event_tap_bh_cb(void *p)
+{
+    event_tap_cb();
+    qemu_bh_delete(event_tap_bh);
+    event_tap_bh = NULL;
+}
+
+static int event_tap_schedule_bh(void)
+{
+    /* if bh is already set, we ignore it for now */
+    if (event_tap_bh) {
+        DPRINTF("event_tap_bh is already scheduled\n");
+        return 0;
+    }
+
+    event_tap_bh = qemu_bh_new(event_tap_bh_cb, NULL);
+    qemu_bh_schedule(event_tap_bh);
+
+    return 0;
+}
+
+static int event_tap_alloc_net_req(EventTapNetReq *net_req, 
+                                   VLANClientState *vc,
+                                   const struct iovec *iov, int iovcnt,
+                                   NetPacketSent *sent_cb, bool async)
+{
+    int i, ret = 0;
+
+    net_req->iovcnt = iovcnt;
+    net_req->async = async;
+    net_req->device_name = qemu_strdup(vc->name);
+
+    if (vc->vlan) {
+        net_req->vlan_needed = 1;
+        net_req->vlan_id = vc->vlan->id;
+    } else {
+        net_req->vlan_needed = 0;
+    }
+
+    net_req->iov = qemu_malloc(sizeof(struct iovec) * iovcnt);
+
+    for (i = 0; i < iovcnt; i++) {
+        net_req->iov[i].iov_base = qemu_malloc(iov[i].iov_len);
+        memcpy(net_req->iov[i].iov_base, iov[i].iov_base, iov[i].iov_len);
+        net_req->iov[i].iov_len = iov[i].iov_len;
+        ret += iov[i].iov_len;
+    }
+
+    return ret;
+}
+
+static void event_tap_alloc_blk_req(EventTapBlkReq *blk_req,
+                                    BlockDriverState *bs, BlockRequest *reqs,
+                                    int num_reqs, BlockDriverCompletionFunc *cb,
+                                    void *opaque, bool is_multiwrite)
+{
+    int i;
+
+    blk_req->num_reqs = num_reqs;
+    blk_req->num_cbs = num_reqs;
+    blk_req->device_name = qemu_strdup(bs->device_name);
+    blk_req->is_multiwrite = is_multiwrite;
+
+    for (i = 0; i < num_reqs; i++) {
+        blk_req->reqs[i].sector = reqs[i].sector;
+        blk_req->reqs[i].nb_sectors = reqs[i].nb_sectors;
+        blk_req->reqs[i].qiov = reqs[i].qiov;
+        blk_req->reqs[i].cb = cb;
+        blk_req->reqs[i].opaque = opaque;
+        blk_req->cb[i] = reqs[i].cb;
+        blk_req->opaque[i] = reqs[i].opaque;
+    }
+}
+
+static void *event_tap_alloc_log(void)
+{
+    EventTapLog *log;
+
+    if (QTAILQ_EMPTY(&event_pool)) {
+        log = qemu_mallocz(sizeof(EventTapLog));
+    } else {
+        log = QTAILQ_FIRST(&event_pool);
+        QTAILQ_REMOVE(&event_pool, log, node);
+    }
+
+    return log;
+}
+
+static void event_tap_free_log(EventTapLog *log)
+{
+    int i, mode = log->mode & ~EVENT_TAP_TYPE_MASK;
+
+    if (mode == EVENT_TAP_NET) {
+        EventTapNetReq *net_req = &log->net_req;
+        for (i = 0; i < net_req->iovcnt; i++) {
+            qemu_free(net_req->iov[i].iov_base);
+        }
+        qemu_free(net_req->iov);
+        qemu_free(net_req->device_name);
+    } else if (mode == EVENT_TAP_BLK) {
+        EventTapBlkReq *blk_req = &log->blk_req;
+
+        if (event_tap_state >= EVENT_TAP_LOAD) {
+            for (i = 0; i < blk_req->num_reqs; i++) {
+                qemu_free(blk_req->reqs[i].qiov->iov);
+                qemu_free(blk_req->reqs[i].qiov);
+            }
+        }
+        qemu_free(blk_req->device_name);
+    }
+
+    log->mode = 0;
+
+    /* return the log to event_pool */
+    QTAILQ_INSERT_HEAD(&event_pool, log, node);
+}
+
+static void event_tap_free_pool(void)
+{
+    EventTapLog *log, *next;
+
+    QTAILQ_FOREACH_SAFE(log, &event_pool, node, next) {
+        QTAILQ_REMOVE(&event_pool, log, node);
+        qemu_free(log);
+    }
+}
+
+/* This func is called by qemu_net_queue_flush() when a packet is appended */
+static void event_tap_net_cb(VLANClientState *vc, ssize_t len)
+{
+    DPRINTF("%s: %zd byte packet was sent\n", vc->name, len);
+}
+
+static void event_tap_blk_cb(void *opaque, int ret)
+{
+    EventTapLog *log = container_of(opaque, EventTapLog, blk_req);
+    EventTapBlkReq *blk_req = opaque;
+    int i;
+
+    blk_req->num_cbs--;
+    if (blk_req->num_cbs == 0) {
+        /* all outstanding requests are flushed */
+        for (i = 0; i < blk_req->num_reqs; i++) {
+            blk_req->cb[i](blk_req->opaque[i], ret);
+        }
+        event_tap_free_log(log);
+    }
+}
+
+static int net_event_tap(VLANClientState *vc, const struct iovec *iov,
+                         int iovcnt, NetPacketSent *sent_cb, bool async)
+{
+    int ret = 0, empty;
+    EventTapLog *log = last_event_tap;
+
+    if (!log) {
+        DPRINTF("no last_event_tap\n");
+        log = event_tap_alloc_log();
+    }
+
+    if (log->mode & ~EVENT_TAP_TYPE_MASK) {
+        DPRINTF("last_event_tap already used %d\n",
+                log->mode & ~EVENT_TAP_TYPE_MASK);
+        return ret;
+    }
+
+    log->mode |= EVENT_TAP_NET;
+    ret = event_tap_alloc_net_req(&log->net_req, vc, iov, iovcnt, sent_cb,
+                                  async);
+
+    empty = QTAILQ_EMPTY(&event_list); 
+    QTAILQ_INSERT_TAIL(&event_list, log, node);
+    last_event_tap = NULL;
+
+    if (empty) {
+        event_tap_schedule_bh();
+    }
+
+    return ret;
+}
+
+static void bdrv_event_tap(BlockDriverState *bs, BlockRequest *reqs,
+                           int num_reqs, bool is_multiwrite)
+{
+    EventTapLog *log = last_event_tap;
+    int empty;
+
+    if (!log) {
+        DPRINTF("no last_event_tap\n");
+        log = event_tap_alloc_log();
+    }
+    if (log->mode & ~EVENT_TAP_TYPE_MASK) {
+        DPRINTF("last_event_tap already used\n");
+        return;
+    }
+
+    log->mode |= EVENT_TAP_BLK;
+    event_tap_alloc_blk_req(&log->blk_req, bs, reqs, num_reqs, event_tap_blk_cb,
+                            &log->blk_req, is_multiwrite);
+
+    empty = QTAILQ_EMPTY(&event_list); 
+    QTAILQ_INSERT_TAIL(&event_list, log, node);
+    last_event_tap = NULL;
+
+    if (empty) {
+        event_tap_schedule_bh();
+    }
+}
+
+BlockDriverAIOCB *bdrv_aio_writev_proxy(BlockDriverState *bs,
+                                        int64_t sector_num,
+                                        QEMUIOVector *iov,
+                                        int nb_sectors,
+                                        BlockDriverCompletionFunc *cb,
+                                        void *opaque)
+{
+    if (event_tap_state == EVENT_TAP_ON) {
+        BlockRequest req;
+
+        req.sector = sector_num;
+        req.nb_sectors = nb_sectors;
+        req.qiov = iov;
+        req.cb = cb;
+        req.opaque = opaque;
+        bdrv_event_tap(bs, &req, 1, 0);
+
+        /* return a dummy_acb pointer to keep the caller from failing */
+        return &dummy_acb;
+    }
+
+    return bdrv_aio_writev(bs, sector_num, iov, nb_sectors, cb, opaque);
+}
+
+int bdrv_aio_multiwrite_proxy(BlockDriverState *bs, BlockRequest *reqs,
+                              int num_reqs)
+{
+    if (event_tap_state == EVENT_TAP_ON) {
+        bdrv_event_tap(bs, reqs, num_reqs, 1);
+        return 0;
+    }
+
+    return bdrv_aio_multiwrite(bs, reqs, num_reqs);
+}
+
+void qemu_send_packet_proxy(VLANClientState *vc, const uint8_t *buf, int size)
+{
+    if (event_tap_state == EVENT_TAP_ON) {
+        struct iovec iov;
+        iov.iov_base = (uint8_t*)buf;
+        iov.iov_len = size;
+
+        net_event_tap(vc, &iov, 1, NULL, 0);
+        return;
+    }
+
+    return qemu_send_packet(vc, buf, size);
+}
+ssize_t qemu_sendv_packet_async_proxy(VLANClientState *vc,
+                                      const struct iovec *iov,
+                                      int iovcnt, NetPacketSent *sent_cb)
+{
+    if (event_tap_state == EVENT_TAP_ON) {
+        return net_event_tap(vc, iov, iovcnt, sent_cb, 1);
+    }
+
+    return qemu_sendv_packet_async(vc, iov, iovcnt, sent_cb);
+}
+
+int event_tap_register(int (*cb)(void))
+{
+    if (cb == NULL || event_tap_state != EVENT_TAP_OFF)
+        return -1;
+    if (event_tap_cb == NULL)
+        event_tap_cb = cb;
+
+    event_tap_state = EVENT_TAP_ON;
+
+    return 0;
+}
+
+int event_tap_unregister(void)
+{
+    if (event_tap_state == EVENT_TAP_OFF)
+        return -1;
+
+    event_tap_state = EVENT_TAP_OFF;
+    event_tap_cb = NULL;
+
+    event_tap_flush();
+    event_tap_free_pool();
+
+    return 0;
+}
+
+void event_tap_suspend(void)
+{
+    if (event_tap_state == EVENT_TAP_ON) {
+        event_tap_state = EVENT_TAP_SUSPEND;
+    }
+}
+
+void event_tap_resume(void)
+{
+    if (event_tap_state == EVENT_TAP_SUSPEND) {
+        event_tap_state = EVENT_TAP_ON;
+    }
+}
+
+int event_tap_get_state(void)
+{
+    return event_tap_state;
+}
+
+void event_tap_ioport(int index, uint32_t address, uint32_t data)
+{
+    if (event_tap_state != EVENT_TAP_ON) {
+        return;
+    }
+
+    if (!last_event_tap) {
+        last_event_tap = event_tap_alloc_log();
+    }
+
+    last_event_tap->mode = EVENT_TAP_IOPORT;
+    last_event_tap->ioport.index = index;
+    last_event_tap->ioport.address = address;
+    last_event_tap->ioport.data = data;
+}
+
+void event_tap_mmio(uint64_t address, uint8_t *buf, int len)
+{
+    if (event_tap_state != EVENT_TAP_ON || len > MMIO_BUF_SIZE) {
+        return;
+    }
+
+    if (!last_event_tap) {
+        last_event_tap = event_tap_alloc_log();
+    }
+
+    last_event_tap->mode = EVENT_TAP_MMIO;
+    last_event_tap->mmio.address = address;
+    last_event_tap->mmio.len = len;
+    memcpy(last_event_tap->mmio.buf, buf, len);
+}
+
+static void event_tap_net_flush(EventTapNetReq *net_req)
+{
+    VLANClientState *vc;
+    ssize_t len;
+
+    if (net_req->vlan_needed) {
+        vc = qemu_find_vlan_client_by_name(NULL, net_req->vlan_id,
+                                           net_req->device_name);
+    } else {
+        vc = qemu_find_netdev(net_req->device_name);
+    }
+
+    if (net_req->async) {
+        len = qemu_sendv_packet_async(vc, net_req->iov, net_req->iovcnt,
+                                      event_tap_net_cb);
+        if (len == 0) {
+            DPRINTF("This packet is appended\n");
+        }
+    } else {
+        qemu_send_packet(vc, net_req->iov[0].iov_base,
+                         net_req->iov[0].iov_len);
+    }
+}
+
+static void event_tap_blk_flush(EventTapBlkReq *blk_req)
+{
+    BlockDriverState *bs;
+
+    bs = bdrv_find(blk_req->device_name);
+
+    if (blk_req->is_multiwrite) {
+        bdrv_aio_multiwrite(bs, blk_req->reqs, blk_req->num_reqs);
+    } else {
+        bdrv_aio_writev(bs, blk_req->reqs[0].sector, blk_req->reqs[0].qiov,
+                        blk_req->reqs[0].nb_sectors, blk_req->cb[0],
+                        blk_req->opaque[0]);
+    }
+}
+
+/* returns 1 when the queue becomes empty */
+int event_tap_flush_one(void)
+{
+    EventTapLog *log;
+
+    if (QTAILQ_EMPTY(&event_list)) {
+        return 1;
+    }
+
+    log = QTAILQ_FIRST(&event_list);
+    switch (log->mode & ~EVENT_TAP_TYPE_MASK) {
+    case EVENT_TAP_NET:
+        event_tap_net_flush(&log->net_req);
+        QTAILQ_REMOVE(&event_list, log, node);
+        event_tap_free_log(log);
+        break;
+    case EVENT_TAP_BLK:
+        event_tap_blk_flush(&log->blk_req);
+        QTAILQ_REMOVE(&event_list, log, node);
+        break;
+    default:
+        fprintf(stderr, "Unknown state %d\n", log->mode);
+        return -1;
+    }
+
+    return QTAILQ_EMPTY(&event_list);
+}
+
+void event_tap_flush(void)
+{
+    int ret;
+    do {
+        ret = event_tap_flush_one();
+    } while (ret == 0);
+}
+
+static void event_tap_replay(void *opaque, int running, int reason)
+{
+    EventTapLog *log, *next;
+
+    if (!running) {
+        return;
+    }
+
+    if (event_tap_state != EVENT_TAP_LOAD) {
+        return;
+    }
+
+    event_tap_state = EVENT_TAP_REPLAY;
+
+    QTAILQ_FOREACH(log, &event_list, node) {
+        EventTapBlkReq *blk_req;
+
+        /* event resume */
+        switch (log->mode & ~EVENT_TAP_TYPE_MASK) {
+        case EVENT_TAP_NET:
+            event_tap_net_flush(&log->net_req);
+            break;
+        case EVENT_TAP_BLK:
+            blk_req = &log->blk_req;
+            if ((log->mode & EVENT_TAP_TYPE_MASK) == EVENT_TAP_IOPORT) {
+                switch (log->ioport.index) {
+                case 0:
+                    cpu_outb(log->ioport.address, log->ioport.data);
+                    break;
+                case 1:
+                    cpu_outw(log->ioport.address, log->ioport.data);
+                    break;
+                case 2:
+                    cpu_outl(log->ioport.address, log->ioport.data);
+                    break;
+                }
+            } else {
+                /* EVENT_TAP_MMIO */
+                cpu_physical_memory_rw(log->mmio.address,
+                                       log->mmio.buf,
+                                       log->mmio.len, 1);
+            }
+            break;
+        case 0:
+            DPRINTF("No event\n");
+            break;
+        default:
+            fprintf(stderr, "Unknown state %d\n", log->mode);
+            return;
+        }
+    }
+
+    /* remove event logs from queue */
+    QTAILQ_FOREACH_SAFE(log, &event_list, node, next) {
+        QTAILQ_REMOVE(&event_list, log, node);
+        event_tap_free_log(log);
+    }
+
+    event_tap_state = EVENT_TAP_OFF;
+    qemu_del_vm_change_state_handler(vmstate);
+}
+
+static inline void event_tap_ioport_save(QEMUFile *f, EventTapIOport *ioport)
+{
+    qemu_put_be32(f, ioport->index);
+    qemu_put_be32(f, ioport->address);
+    qemu_put_byte(f, ioport->data);
+}
+
+static inline void event_tap_ioport_load(QEMUFile *f,
+                                         EventTapIOport *ioport)
+{
+    ioport->index = qemu_get_be32(f);
+    ioport->address = qemu_get_be32(f);
+    ioport->data = qemu_get_byte(f);
+}
+
+static inline void event_tap_mmio_save(QEMUFile *f, EventTapMMIO *mmio)
+{
+    qemu_put_be64(f, mmio->address);
+    qemu_put_byte(f, mmio->len);
+    qemu_put_buffer(f, mmio->buf, mmio->len);
+}
+
+static inline void event_tap_mmio_load(QEMUFile *f, EventTapMMIO *mmio)
+{
+    mmio->address = qemu_get_be64(f);
+    mmio->len = qemu_get_byte(f);
+    qemu_get_buffer(f, mmio->buf, mmio->len);
+}
+
+static void event_tap_net_save(QEMUFile *f, EventTapNetReq *net_req)
+{
+    int i, len;
+
+    len = strlen(net_req->device_name);
+    qemu_put_byte(f, len);
+    qemu_put_buffer(f, (uint8_t *)net_req->device_name, len);
+    qemu_put_byte(f, net_req->vlan_id);
+    qemu_put_byte(f, net_req->vlan_needed);
+    qemu_put_byte(f, net_req->iovcnt);
+
+    for (i = 0; i < net_req->iovcnt; i++) {
+        qemu_put_be64(f, net_req->iov[i].iov_len);
+        qemu_put_buffer(f, (uint8_t *)net_req->iov[i].iov_base,
+                        net_req->iov[i].iov_len);
+    }
+}
+
+static void event_tap_net_load(QEMUFile *f, EventTapNetReq *net_req)
+{
+    int i, len;
+
+    len = qemu_get_byte(f);
+    net_req->device_name = qemu_malloc(len + 1);
+    qemu_get_buffer(f, (uint8_t *)net_req->device_name, len);
+    net_req->device_name[len] = '\0';
+    net_req->vlan_id = qemu_get_byte(f);
+    net_req->vlan_needed = qemu_get_byte(f);
+    net_req->iovcnt = qemu_get_byte(f);
+    net_req->iov = qemu_malloc(sizeof(struct iovec) * net_req->iovcnt);
+
+    for (i = 0; i < net_req->iovcnt; i++) {
+        net_req->iov[i].iov_len = qemu_get_be64(f);
+        net_req->iov[i].iov_base = qemu_malloc(net_req->iov[i].iov_len);
+        qemu_get_buffer(f, (uint8_t *)net_req->iov[i].iov_base,
+                        net_req->iov[i].iov_len);
+    }
+}
+
+static void event_tap_blk_save(QEMUFile *f, EventTapBlkReq *blk_req)
+{
+    BlockRequest *req;
+    ram_addr_t page_addr;
+    int i, j, len;
+
+    len = strlen(blk_req->device_name);
+    qemu_put_byte(f, len);
+    qemu_put_buffer(f, (uint8_t *)blk_req->device_name, len);
+    qemu_put_byte(f, blk_req->num_reqs);
+
+    for (i = 0; i < blk_req->num_reqs; i++) {
+        req = &blk_req->reqs[i];
+        qemu_put_be64(f, req->sector);
+        qemu_put_be32(f, req->nb_sectors);
+        qemu_put_byte(f, req->qiov->niov);
+        for (j = 0; j < req->qiov->niov; j++) {
+            page_addr =
+                qemu_ram_addr_from_host_nofail(req->qiov->iov[j].iov_base);
+            qemu_put_be64(f, page_addr);
+            qemu_put_be64(f, req->qiov->iov[j].iov_len);
+        }
+    }
+}
+
+static void event_tap_blk_load(QEMUFile *f, EventTapBlkReq *blk_req)
+{
+    BlockRequest *req;
+    ram_addr_t page_addr;
+    int i, j, len;
+
+    len = qemu_get_byte(f);
+    blk_req->device_name = qemu_malloc(len + 1);
+    qemu_get_buffer(f, (uint8_t *)blk_req->device_name, len);
+    blk_req->device_name[len] = '\0';
+    blk_req->num_reqs = qemu_get_byte(f);
+
+    for (i = 0; i < blk_req->num_reqs; i++) {
+        req = &blk_req->reqs[i];
+        req->sector = qemu_get_be64(f);
+        req->nb_sectors = qemu_get_be32(f);
+        req->qiov = qemu_malloc(sizeof(QEMUIOVector));
+        req->qiov->niov = qemu_get_byte(f);
+        req->qiov->iov = qemu_malloc(sizeof(struct iovec) * req->qiov->niov);
+        for (j = 0; j < req->qiov->niov; j++) {
+            page_addr = qemu_get_be64(f);
+            req->qiov->iov[j].iov_base = qemu_get_ram_ptr(page_addr);
+            req->qiov->iov[j].iov_len = qemu_get_be64(f);
+        }
+    }
+}
+
+static void event_tap_save(QEMUFile *f, void *opaque)
+{
+    EventTapLog *log;
+
+    QTAILQ_FOREACH(log, &event_list, node) {
+        qemu_put_byte(f, log->mode);
+        DPRINTF("log->mode=%d\n", log->mode);
+        switch (log->mode & EVENT_TAP_TYPE_MASK) {
+        case EVENT_TAP_IOPORT:
+            event_tap_ioport_save(f, &log->ioport);
+            break;
+        case EVENT_TAP_MMIO:
+            event_tap_mmio_save(f, &log->mmio);
+            break;
+        case 0:
+            DPRINTF("No event\n");
+            break;
+        default:
+            fprintf(stderr, "Unknown state %d\n", log->mode);
+            return;
+        }
+
+        switch (log->mode & ~EVENT_TAP_TYPE_MASK) {
+        case EVENT_TAP_NET:
+            event_tap_net_save(f, &log->net_req);
+            break;
+        case EVENT_TAP_BLK:
+            event_tap_blk_save(f, &log->blk_req);
+            break;
+        default:
+            fprintf(stderr, "Unknown state %d\n", log->mode);
+            return;
+        }
+    }
+
+    qemu_put_byte(f, 0); /* EOF */
+}
+
+static int event_tap_load(QEMUFile *f, void *opaque, int version_id)
+{
+    EventTapLog *log, *next;
+    int mode;
+
+    event_tap_state = EVENT_TAP_LOAD;
+
+    QTAILQ_FOREACH_SAFE(log, &event_list, node, next) {
+        QTAILQ_REMOVE(&event_list, log, node);
+        event_tap_free_log(log);
+    }
+
+    /* loop until EOF */
+    while ((mode = qemu_get_byte(f)) != 0) {
+        EventTapLog *log = event_tap_alloc_log();
+
+        log->mode = mode;
+        switch (log->mode & EVENT_TAP_TYPE_MASK) {
+        case EVENT_TAP_IOPORT:
+            event_tap_ioport_load(f, &log->ioport);
+            break;
+        case EVENT_TAP_MMIO:
+            event_tap_mmio_load(f, &log->mmio);
+            break;
+        case 0:
+            DPRINTF("No event\n");
+            break;
+        default:
+            fprintf(stderr, "Unknown state %d\n", log->mode);
+            return -1;
+        }
+
+        switch (log->mode & ~EVENT_TAP_TYPE_MASK) {
+        case EVENT_TAP_NET:
+            event_tap_net_load(f, &log->net_req);
+            break;
+        case EVENT_TAP_BLK:
+            event_tap_blk_load(f, &log->blk_req);
+            break;
+        default:
+            fprintf(stderr, "Unknown state %d\n", log->mode);
+            return -1;
+        }
+
+        QTAILQ_INSERT_TAIL(&event_list, log, node);
+    }
+
+    return 0;
+}
+
+void event_tap_init(void)
+{
+    QTAILQ_INIT(&event_list);
+    QTAILQ_INIT(&event_pool);    
+    register_savevm(NULL, "event-tap", 0, 1,
+                    event_tap_save, event_tap_load, &last_event_tap);
+    vmstate = qemu_add_vm_change_state_handler(event_tap_replay, NULL);
+}
diff --git a/event-tap.h b/event-tap.h
new file mode 100644
index 0000000..61b9bbc
--- /dev/null
+++ b/event-tap.h
@@ -0,0 +1,34 @@
+/*
+ * Event Tap functions for QEMU
+ *
+ * Copyright (c) 2010 Nippon Telegraph and Telephone Corporation. 
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.  See
+ * the COPYING file in the top-level directory.
+ */
+
+#ifndef EVENT_TAP_H
+#define EVENT_TAP_H
+
+#include "qemu-common.h"
+
+enum EVENT_TAP_STATE {
+    EVENT_TAP_OFF,
+    EVENT_TAP_ON,
+    EVENT_TAP_SUSPEND,
+    EVENT_TAP_LOAD,
+    EVENT_TAP_REPLAY,
+};
+
+int event_tap_register(int (*cb)(void));
+int event_tap_unregister(void);
+void event_tap_suspend(void);
+void event_tap_resume(void);
+int event_tap_get_state(void);
+void event_tap_ioport(int index, uint32_t address, uint32_t data);
+void event_tap_mmio(uint64_t address, uint8_t *buf, int len);
+void event_tap_init(void);
+void event_tap_flush(void);
+int event_tap_flush_one(void);
+
+#endif
diff --git a/net.h b/net.h
index 44c31a9..93fd403 100644
--- a/net.h
+++ b/net.h
@@ -105,6 +105,10 @@ ssize_t qemu_sendv_packet(VLANClientState *vc, const struct iovec *iov,
 ssize_t qemu_sendv_packet_async(VLANClientState *vc, const struct iovec *iov,
                                 int iovcnt, NetPacketSent *sent_cb);
 void qemu_send_packet(VLANClientState *vc, const uint8_t *buf, int size);
+void qemu_send_packet_proxy(VLANClientState *vc, const uint8_t *buf, int size);
+ssize_t qemu_sendv_packet_async_proxy(VLANClientState *vc,
+                                      const struct iovec *iov,
+                                      int iovcnt, NetPacketSent *sent_cb);
 ssize_t qemu_send_packet_raw(VLANClientState *vc, const uint8_t *buf, int size);
 ssize_t qemu_send_packet_async(VLANClientState *vc, const uint8_t *buf,
                                int size, NetPacketSent *sent_cb);
diff --git a/net/queue.c b/net/queue.c
index 2ea6cd0..e7a35b0 100644
--- a/net/queue.c
+++ b/net/queue.c
@@ -258,3 +258,4 @@ void qemu_net_queue_flush(NetQueue *queue)
         qemu_free(packet);
     }
 }
+
-- 
1.7.1.2



