Re: [Qemu-devel] [PATCH 10/12] virtio-net: multiqueue support
From: Blue Swirl
Subject: Re: [Qemu-devel] [PATCH 10/12] virtio-net: multiqueue support
Date: Fri, 28 Dec 2012 17:52:57 +0000
On Fri, Dec 28, 2012 at 10:32 AM, Jason Wang <address@hidden> wrote:
> This patch implements both userspace and vhost support for multiple queue
> virtio-net (VIRTIO_NET_F_MQ). This is done by introducing an array of
> VirtIONetQueue to VirtIONet.
>
> Signed-off-by: Jason Wang <address@hidden>
> ---
> hw/virtio-net.c | 318 ++++++++++++++++++++++++++++++++++++++++++-------------
> hw/virtio-net.h | 27 +++++-
> 2 files changed, 271 insertions(+), 74 deletions(-)
>
> diff --git a/hw/virtio-net.c b/hw/virtio-net.c
> index c6f0915..aaeef1b 100644
> --- a/hw/virtio-net.c
> +++ b/hw/virtio-net.c
> @@ -45,7 +45,7 @@ typedef struct VirtIONet
> VirtIODevice vdev;
> uint8_t mac[ETH_ALEN];
> uint16_t status;
> - VirtIONetQueue vq;
> + VirtIONetQueue vqs[MAX_QUEUE_NUM];
> VirtQueue *ctrl_vq;
> NICState *nic;
> uint32_t tx_timeout;
> @@ -70,14 +70,23 @@ typedef struct VirtIONet
> } mac_table;
> uint32_t *vlans;
> DeviceState *qdev;
> + int multiqueue;
> + uint16_t max_queues;
> + uint16_t curr_queues;
> } VirtIONet;
>
> -static VirtIONetQueue *virtio_net_get_queue(NetClientState *nc)
> +static VirtIONetQueue *virtio_net_get_subqueue(NetClientState *nc)
> {
> VirtIONet *n = qemu_get_nic_opaque(nc);
>
> - return &n->vq;
> + return &n->vqs[nc->queue_index];
> }
> +
> +static int vq2q(int queue_index)
> +{
> + return queue_index / 2;
> +}
> +
> /* TODO
> * - we could suppress RX interrupt if we were so inclined.
> */
> @@ -93,6 +102,7 @@ static void virtio_net_get_config(VirtIODevice *vdev, uint8_t *config)
> struct virtio_net_config netcfg;
>
> stw_p(&netcfg.status, n->status);
> + stw_p(&netcfg.max_virtqueue_pairs, n->max_queues);
> memcpy(netcfg.mac, n->mac, ETH_ALEN);
> memcpy(config, &netcfg, sizeof(netcfg));
> }
> @@ -116,31 +126,33 @@ static bool virtio_net_started(VirtIONet *n, uint8_t status)
> (n->status & VIRTIO_NET_S_LINK_UP) && n->vdev.vm_running;
> }
>
> -static void virtio_net_vhost_status(VirtIONet *n, uint8_t status)
> +static void virtio_net_vhost_status(VirtIONet *n, int queue_index,
> + uint8_t status)
> {
> - VirtIONetQueue *q = &n->vq;
> + NetClientState *nc = qemu_get_subqueue(n->nic, queue_index);
> + VirtIONetQueue *q = &n->vqs[queue_index];
>
> - if (!qemu_get_queue(n->nic)->peer) {
> + if (!nc->peer) {
> return;
> }
> - if (qemu_get_queue(n->nic)->peer->info->type != NET_CLIENT_OPTIONS_KIND_TAP) {
> + if (nc->peer->info->type != NET_CLIENT_OPTIONS_KIND_TAP) {
> return;
> }
>
> - if (!tap_get_vhost_net(qemu_get_queue(n->nic)->peer)) {
> + if (!tap_get_vhost_net(nc->peer)) {
> return;
> }
> - if (!!q->vhost_started == virtio_net_started(n, status) &&
> - !qemu_get_queue(n->nic)->peer->link_down) {
> + if (!!q->vhost_started ==
> + (virtio_net_started(n, status) && !nc->peer->link_down)) {
> return;
> }
> if (!q->vhost_started) {
> int r;
> - if (!vhost_net_query(tap_get_vhost_net(qemu_get_queue(n->nic)->peer), &n->vdev)) {
> + if (!vhost_net_query(tap_get_vhost_net(nc->peer), &n->vdev)) {
> return;
> }
> - r = vhost_net_start(tap_get_vhost_net(qemu_get_queue(n->nic)->peer),
> - &n->vdev, 0);
> + r = vhost_net_start(tap_get_vhost_net(nc->peer), &n->vdev,
> + queue_index * 2);
> if (r < 0) {
> error_report("unable to start vhost net: %d: "
> "falling back on userspace virtio", -r);
> @@ -148,7 +160,7 @@ static void virtio_net_vhost_status(VirtIONet *n, uint8_t status)
> q->vhost_started = 1;
> }
> } else {
> - vhost_net_stop(tap_get_vhost_net(qemu_get_queue(n->nic)->peer), &n->vdev);
> + vhost_net_stop(tap_get_vhost_net(nc->peer), &n->vdev);
> q->vhost_started = 0;
> }
> }
> @@ -156,26 +168,35 @@ static void virtio_net_vhost_status(VirtIONet *n, uint8_t status)
> static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status)
> {
> VirtIONet *n = to_virtio_net(vdev);
> - VirtIONetQueue *q = &n->vq;
> + int i;
>
> - virtio_net_vhost_status(n, status);
> + for (i = 0; i < n->max_queues; i++) {
> + VirtIONetQueue *q = &n->vqs[i];
> + uint8_t queue_status = status;
>
> - if (!q->tx_waiting) {
> - return;
> - }
> + if ((!n->multiqueue && i != 0) || i >= n->curr_queues) {
> + queue_status = 0;
> + }
>
> - if (virtio_net_started(n, status) && !q->vhost_started) {
> - if (q->tx_timer) {
> - qemu_mod_timer(q->tx_timer,
> - qemu_get_clock_ns(vm_clock) + n->tx_timeout);
> - } else {
> - qemu_bh_schedule(q->tx_bh);
> + virtio_net_vhost_status(n, i, queue_status);
> +
> + if (!q->tx_waiting) {
> + continue;
> }
> - } else {
> - if (q->tx_timer) {
> - qemu_del_timer(q->tx_timer);
> +
> + if (virtio_net_started(n, status) && !q->vhost_started) {
> + if (q->tx_timer) {
> + qemu_mod_timer(q->tx_timer,
> + qemu_get_clock_ns(vm_clock) + n->tx_timeout);
> + } else {
> + qemu_bh_schedule(q->tx_bh);
> + }
> } else {
> - qemu_bh_cancel(q->tx_bh);
> + if (q->tx_timer) {
> + qemu_del_timer(q->tx_timer);
> + } else {
> + qemu_bh_cancel(q->tx_bh);
> + }
> }
> }
> }
> @@ -207,6 +228,8 @@ static void virtio_net_reset(VirtIODevice *vdev)
> n->nomulti = 0;
> n->nouni = 0;
> n->nobcast = 0;
> + /* multiqueue is disabled by default */
> + n->curr_queues = 1;
>
> /* Flush any MAC and VLAN filter table state */
> n->mac_table.in_use = 0;
> @@ -245,18 +268,72 @@ static int peer_has_ufo(VirtIONet *n)
>
> static void virtio_net_set_mrg_rx_bufs(VirtIONet *n, int mergeable_rx_bufs)
> {
> + int i;
> + NetClientState *nc;
> +
> n->mergeable_rx_bufs = mergeable_rx_bufs;
>
> n->guest_hdr_len = n->mergeable_rx_bufs ?
> sizeof(struct virtio_net_hdr_mrg_rxbuf) : sizeof(struct virtio_net_hdr);
>
> - if (peer_has_vnet_hdr(n) &&
> - tap_has_vnet_hdr_len(qemu_get_queue(n->nic)->peer, n->guest_hdr_len)) {
> - tap_set_vnet_hdr_len(qemu_get_queue(n->nic)->peer, n->guest_hdr_len);
> - n->host_hdr_len = n->guest_hdr_len;
> + for (i = 0; i < n->max_queues; i++) {
> + nc = qemu_get_subqueue(n->nic, i);
> +
> + if (peer_has_vnet_hdr(n) &&
> + tap_has_vnet_hdr_len(nc->peer, n->guest_hdr_len)) {
> + tap_set_vnet_hdr_len(nc->peer, n->guest_hdr_len);
> + n->host_hdr_len = n->guest_hdr_len;
> + }
> }
> }
>
> +static int peer_attach(VirtIONet *n, int index)
> +{
> + NetClientState *nc = qemu_get_subqueue(n->nic, index);
> + int ret;
> +
> + if (!nc->peer) {
> + ret = -1;
> + } else if (nc->peer->info->type != NET_CLIENT_OPTIONS_KIND_TAP) {
> + ret = -1;
> + } else {
> + ret = tap_attach(nc->peer);
> + }
> +
> + return ret;
> +}
> +
> +static int peer_detach(VirtIONet *n, int index)
> +{
> + NetClientState *nc = qemu_get_subqueue(n->nic, index);
> + int ret;
> +
> + if (!nc->peer) {
> + ret = -1;
> + } else if (nc->peer->info->type != NET_CLIENT_OPTIONS_KIND_TAP) {
> + ret = -1;
> + } else {
> + ret = tap_detach(nc->peer);
> + }
> +
> + return ret;
> +}
> +
> +static void virtio_net_set_queues(VirtIONet *n)
> +{
> + int i;
> +
> + for (i = 0; i < n->max_queues; i++) {
> + if (i < n->curr_queues) {
> + assert(!peer_attach(n, i));
> + } else {
> + assert(!peer_detach(n, i));
> + }
> + }
> +}
> +
> +static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue, int ctrl);
> +
> static uint32_t virtio_net_get_features(VirtIODevice *vdev, uint32_t features)
> {
> VirtIONet *n = to_virtio_net(vdev);
> @@ -308,25 +385,33 @@ static uint32_t virtio_net_bad_features(VirtIODevice *vdev)
> static void virtio_net_set_features(VirtIODevice *vdev, uint32_t features)
> {
> VirtIONet *n = to_virtio_net(vdev);
> + int i;
> +
> + virtio_net_set_multiqueue(n, !!(features & (1 << VIRTIO_NET_F_MQ)),
> + !!(features & (1 << VIRTIO_NET_F_CTRL_VQ)));
>
> virtio_net_set_mrg_rx_bufs(n, !!(features & (1 << VIRTIO_NET_F_MRG_RXBUF)));
>
> if (n->has_vnet_hdr) {
> - tap_set_offload(qemu_get_queue(n->nic)->peer,
> + tap_set_offload(qemu_get_subqueue(n->nic, 0)->peer,
> (features >> VIRTIO_NET_F_GUEST_CSUM) & 1,
> (features >> VIRTIO_NET_F_GUEST_TSO4) & 1,
> (features >> VIRTIO_NET_F_GUEST_TSO6) & 1,
> (features >> VIRTIO_NET_F_GUEST_ECN) & 1,
> (features >> VIRTIO_NET_F_GUEST_UFO) & 1);
> }
> - if (!qemu_get_queue(n->nic)->peer ||
> - qemu_get_queue(n->nic)->peer->info->type != NET_CLIENT_OPTIONS_KIND_TAP) {
> - return;
> - }
> - if (!tap_get_vhost_net(qemu_get_queue(n->nic)->peer)) {
> - return;
> +
> + for (i = 0; i < n->max_queues; i++) {
> + NetClientState *nc = qemu_get_subqueue(n->nic, i);
> +
> + if (!nc->peer || nc->peer->info->type != NET_CLIENT_OPTIONS_KIND_TAP) {
> + continue;
> + }
> + if (!tap_get_vhost_net(nc->peer)) {
> + continue;
> + }
> + vhost_net_ack_features(tap_get_vhost_net(nc->peer), features);
> }
> - vhost_net_ack_features(tap_get_vhost_net(qemu_get_queue(n->nic)->peer), features);
> }
>
> static int virtio_net_handle_rx_mode(VirtIONet *n, uint8_t cmd,
> @@ -436,6 +521,35 @@ static int virtio_net_handle_vlan_table(VirtIONet *n, uint8_t cmd,
> return VIRTIO_NET_OK;
> }
>
> +static int virtio_net_handle_mq(VirtIONet *n, uint8_t cmd,
> + VirtQueueElement *elem)
> +{
> + struct virtio_net_ctrl_mq s;
> +
> + if (elem->out_num != 2 ||
> + elem->out_sg[1].iov_len != sizeof(struct virtio_net_ctrl_mq)) {
> + error_report("virtio-net ctrl invalid steering command");
> + return VIRTIO_NET_ERR;
> + }
> +
> + if (cmd != VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
> + return VIRTIO_NET_ERR;
> + }
> +
> + memcpy(&s, elem->out_sg[1].iov_base, sizeof(struct virtio_net_ctrl_mq));
> +
> + if (s.virtqueue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
> + s.virtqueue_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX ||
> + s.virtqueue_pairs > n->max_queues) {
> + return VIRTIO_NET_ERR;
> + }
> +
> + n->curr_queues = s.virtqueue_pairs;
> + virtio_net_set_queues(n);
> + virtio_net_set_status(&n->vdev, n->vdev.status);
> +
> + return VIRTIO_NET_OK;
> +}
> static void virtio_net_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
> {
> VirtIONet *n = to_virtio_net(vdev);
> @@ -464,6 +578,8 @@ static void virtio_net_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
> status = virtio_net_handle_mac(n, ctrl.cmd, &elem);
> else if (ctrl.class == VIRTIO_NET_CTRL_VLAN)
> status = virtio_net_handle_vlan_table(n, ctrl.cmd, &elem);
> + else if (ctrl.class == VIRTIO_NET_CTRL_MQ)
Please add braces.
> + status = virtio_net_handle_mq(n, ctrl.cmd, &elem);
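For reference, a braced form of the new branch would look roughly like the snippet below (QEMU's CODING_STYLE wants braces even around single-statement bodies, so the other branches of this if/else chain would want the same treatment):

    } else if (ctrl.class == VIRTIO_NET_CTRL_MQ) {
        status = virtio_net_handle_mq(n, ctrl.cmd, &elem);
    }
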
>
> stb_p(elem.in_sg[elem.in_num - 1].iov_base, status);
>
> @@ -477,19 +593,24 @@ static void virtio_net_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
> static void virtio_net_handle_rx(VirtIODevice *vdev, VirtQueue *vq)
> {
> VirtIONet *n = to_virtio_net(vdev);
> + int queue_index = vq2q(virtio_get_queue_index(vq));
>
> - qemu_flush_queued_packets(qemu_get_queue(n->nic));
> + qemu_flush_queued_packets(qemu_get_subqueue(n->nic, queue_index));
> }
>
> static int virtio_net_can_receive(NetClientState *nc)
> {
> VirtIONet *n = qemu_get_nic_opaque(nc);
> - VirtIONetQueue *q = virtio_net_get_queue(nc);
> + VirtIONetQueue *q = virtio_net_get_subqueue(nc);
>
> if (!n->vdev.vm_running) {
> return 0;
> }
>
> + if (nc->queue_index >= n->curr_queues) {
> + return 0;
> + }
> +
> if (!virtio_queue_ready(q->rx_vq) ||
> !(n->vdev.status & VIRTIO_CONFIG_S_DRIVER_OK)) {
> return 0;
> @@ -620,14 +741,15 @@ static int receive_filter(VirtIONet *n, const uint8_t *buf, int size)
> static ssize_t virtio_net_receive(NetClientState *nc, const uint8_t *buf,
> size_t size)
> {
> VirtIONet *n = qemu_get_nic_opaque(nc);
> - VirtIONetQueue *q = virtio_net_get_queue(nc);
> + VirtIONetQueue *q = virtio_net_get_subqueue(nc);
> struct iovec mhdr_sg[VIRTQUEUE_MAX_SIZE];
> struct virtio_net_hdr_mrg_rxbuf mhdr;
> unsigned mhdr_cnt = 0;
> size_t offset, i, guest_offset;
>
> - if (!virtio_net_can_receive(qemu_get_queue(n->nic)))
> + if (!virtio_net_can_receive(nc)) {
> return -1;
> + }
>
> /* hdr_len refers to the header we supply to the guest */
> if (!virtio_net_has_buffers(q, size + n->guest_hdr_len -
> n->host_hdr_len)) {
> @@ -720,7 +842,7 @@ static int32_t virtio_net_flush_tx(VirtIONetQueue *q);
> static void virtio_net_tx_complete(NetClientState *nc, ssize_t len)
> {
> VirtIONet *n = qemu_get_nic_opaque(nc);
> - VirtIONetQueue *q = virtio_net_get_queue(nc);
> + VirtIONetQueue *q = virtio_net_get_subqueue(nc);
>
> virtqueue_push(q->tx_vq, &q->async_tx.elem, 0);
> virtio_notify(&n->vdev, q->tx_vq);
> @@ -737,6 +859,7 @@ static int32_t virtio_net_flush_tx(VirtIONetQueue *q)
> VirtIONet *n = q->n;
> VirtQueueElement elem;
> int32_t num_packets = 0;
> + int queue_index = vq2q(virtio_get_queue_index(q->tx_vq));
> if (!(n->vdev.status & VIRTIO_CONFIG_S_DRIVER_OK)) {
> return num_packets;
> }
> @@ -778,8 +901,8 @@ static int32_t virtio_net_flush_tx(VirtIONetQueue *q)
>
> len = n->guest_hdr_len;
>
> - ret = qemu_sendv_packet_async(qemu_get_queue(n->nic), out_sg, out_num,
> - virtio_net_tx_complete);
> + ret = qemu_sendv_packet_async(qemu_get_subqueue(n->nic, queue_index),
> + out_sg, out_num, virtio_net_tx_complete);
> if (ret == 0) {
> virtio_queue_set_notification(q->tx_vq, 0);
> q->async_tx.elem = elem;
> @@ -802,7 +925,7 @@ static int32_t virtio_net_flush_tx(VirtIONetQueue *q)
> static void virtio_net_handle_tx_timer(VirtIODevice *vdev, VirtQueue *vq)
> {
> VirtIONet *n = to_virtio_net(vdev);
> - VirtIONetQueue *q = &n->vq;
> + VirtIONetQueue *q = &n->vqs[vq2q(virtio_get_queue_index(vq))];
>
> /* This happens when device was stopped but VCPU wasn't. */
> if (!n->vdev.vm_running) {
> @@ -826,7 +949,7 @@ static void virtio_net_handle_tx_timer(VirtIODevice *vdev, VirtQueue *vq)
> static void virtio_net_handle_tx_bh(VirtIODevice *vdev, VirtQueue *vq)
> {
> VirtIONet *n = to_virtio_net(vdev);
> - VirtIONetQueue *q = &n->vq;
> + VirtIONetQueue *q = &n->vqs[vq2q(virtio_get_queue_index(vq))];
>
> if (unlikely(q->tx_waiting)) {
> return;
> @@ -894,10 +1017,49 @@ static void virtio_net_tx_bh(void *opaque)
> }
> }
>
> +static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue, int ctrl)
> +{
> + VirtIODevice *vdev = &n->vdev;
> + int i;
> +
> + n->multiqueue = multiqueue;
> +
> + if (!multiqueue)
> + n->curr_queues = 1;
Ditto. Didn't checkpatch.pl catch these or did you not check?
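That is, just the minimal braced form along these lines:

    if (!multiqueue) {
        n->curr_queues = 1;
    }
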
> +
> + for (i = 2; i <= n->max_queues * 2 + 1; i++) {
> + virtio_del_queue(vdev, i);
> + }
> +
> + for (i = 1; i < n->max_queues; i++) {
> + n->vqs[i].rx_vq = virtio_add_queue(vdev, 256, virtio_net_handle_rx);
> + if (n->vqs[i].tx_timer) {
> + n->vqs[i].tx_vq =
> + virtio_add_queue(vdev, 256, virtio_net_handle_tx_timer);
> + n->vqs[i].tx_timer = qemu_new_timer_ns(vm_clock,
> + virtio_net_tx_timer,
> + &n->vqs[i]);
> + } else {
> + n->vqs[i].tx_vq =
> + virtio_add_queue(vdev, 256, virtio_net_handle_tx_bh);
> + n->vqs[i].tx_bh = qemu_bh_new(virtio_net_tx_bh, &n->vqs[i]);
> + }
> +
> + n->vqs[i].tx_waiting = 0;
> + n->vqs[i].n = n;
> + }
> +
> + if (ctrl) {
> + n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl);
> + }
> +
> + virtio_net_set_queues(n);
> +}
> +
> static void virtio_net_save(QEMUFile *f, void *opaque)
> {
> VirtIONet *n = opaque;
> - VirtIONetQueue *q = &n->vq;
> + VirtIONetQueue *q = &n->vqs[0];
>
> /* At this point, backend must be stopped, otherwise
> * it might keep writing to memory. */
> @@ -926,9 +1088,8 @@ static void virtio_net_save(QEMUFile *f, void *opaque)
> static int virtio_net_load(QEMUFile *f, void *opaque, int version_id)
> {
> VirtIONet *n = opaque;
> - VirtIONetQueue *q = &n->vq;
> - int i;
> - int ret;
> + VirtIONetQueue *q = &n->vqs[0];
> + int ret, i;
>
> if (version_id < 2 || version_id > VIRTIO_NET_VM_VERSION)
> return -EINVAL;
> @@ -1044,6 +1205,7 @@ VirtIODevice *virtio_net_init(DeviceState *dev, NICConf *conf,
> virtio_net_conf *net)
> {
> VirtIONet *n;
> + int i;
>
> n = (VirtIONet *)virtio_common_init("virtio-net", VIRTIO_ID_NET,
> sizeof(struct virtio_net_config),
> @@ -1056,8 +1218,11 @@ VirtIODevice *virtio_net_init(DeviceState *dev, NICConf *conf,
> n->vdev.bad_features = virtio_net_bad_features;
> n->vdev.reset = virtio_net_reset;
> n->vdev.set_status = virtio_net_set_status;
> - n->vq.rx_vq = virtio_add_queue(&n->vdev, 256, virtio_net_handle_rx);
> - n->vq.n = n;
> + n->vqs[0].rx_vq = virtio_add_queue(&n->vdev, 256, virtio_net_handle_rx);
> + n->max_queues = conf->queues;
> + n->curr_queues = 1;
> + n->vqs[0].n = n;
> + n->tx_timeout = net->txtimer;
>
> if (net->tx && strcmp(net->tx, "timer") && strcmp(net->tx, "bh")) {
> error_report("virtio-net: "
> @@ -1067,14 +1232,14 @@ VirtIODevice *virtio_net_init(DeviceState *dev, NICConf *conf,
> }
>
> if (net->tx && !strcmp(net->tx, "timer")) {
> - n->vq.tx_vq = virtio_add_queue(&n->vdev, 256,
> - virtio_net_handle_tx_timer);
> - n->vq.tx_timer = qemu_new_timer_ns(vm_clock,
> - virtio_net_tx_timer, &n->vq);
> - n->tx_timeout = net->txtimer;
> + n->vqs[0].tx_vq = virtio_add_queue(&n->vdev, 256,
> + virtio_net_handle_tx_timer);
> + n->vqs[0].tx_timer = qemu_new_timer_ns(vm_clock, virtio_net_tx_timer,
> + &n->vqs[0]);
> } else {
> - n->vq.tx_vq = virtio_add_queue(&n->vdev, 256, virtio_net_handle_tx_bh);
> - n->vq.tx_bh = qemu_bh_new(virtio_net_tx_bh, &n->vq);
> + n->vqs[0].tx_vq = virtio_add_queue(&n->vdev, 256,
> + virtio_net_handle_tx_bh);
> + n->vqs[0].tx_bh = qemu_bh_new(virtio_net_tx_bh, &n->vqs[0]);
> }
> n->ctrl_vq = virtio_add_queue(&n->vdev, 64, virtio_net_handle_ctrl);
> qemu_macaddr_default_if_unset(&conf->macaddr);
> @@ -1084,7 +1249,9 @@ VirtIODevice *virtio_net_init(DeviceState *dev, NICConf *conf,
> n->nic = qemu_new_nic(&net_virtio_info, conf,
> object_get_typename(OBJECT(dev)), dev->id, n);
> peer_test_vnet_hdr(n);
> if (peer_has_vnet_hdr(n)) {
> - tap_using_vnet_hdr(qemu_get_queue(n->nic)->peer, 1);
> + for (i = 0; i < n->max_queues; i++) {
> + tap_using_vnet_hdr(qemu_get_subqueue(n->nic, i)->peer, 1);
> + }
> n->host_hdr_len = sizeof(struct virtio_net_hdr);
> } else {
> n->host_hdr_len = 0;
> @@ -1092,7 +1259,7 @@ VirtIODevice *virtio_net_init(DeviceState *dev, NICConf *conf,
>
> qemu_format_nic_info_str(qemu_get_queue(n->nic), conf->macaddr.a);
>
> - n->vq.tx_waiting = 0;
> + n->vqs[0].tx_waiting = 0;
> n->tx_burst = net->txburst;
> virtio_net_set_mrg_rx_bufs(n, 0);
> n->promisc = 1; /* for compatibility */
> @@ -1113,23 +1280,28 @@ VirtIODevice *virtio_net_init(DeviceState *dev, NICConf *conf,
> void virtio_net_exit(VirtIODevice *vdev)
> {
> VirtIONet *n = DO_UPCAST(VirtIONet, vdev, vdev);
> - VirtIONetQueue *q = &n->vq;
> + int i;
>
> /* This will stop vhost backend if appropriate. */
> virtio_net_set_status(vdev, 0);
>
> - qemu_purge_queued_packets(qemu_get_queue(n->nic));
> -
> unregister_savevm(n->qdev, "virtio-net", n);
>
> g_free(n->mac_table.macs);
> g_free(n->vlans);
>
> - if (q->tx_timer) {
> - qemu_del_timer(q->tx_timer);
> - qemu_free_timer(q->tx_timer);
> - } else {
> - qemu_bh_delete(q->tx_bh);
> + for (i = 0; i < n->max_queues; i++) {
> + VirtIONetQueue *q = &n->vqs[i];
> + NetClientState *nc = qemu_get_subqueue(n->nic, i);
> +
> + qemu_purge_queued_packets(nc);
> +
> + if (q->tx_timer) {
> + qemu_del_timer(q->tx_timer);
> + qemu_free_timer(q->tx_timer);
> + } else {
> + qemu_bh_delete(q->tx_bh);
> + }
> }
>
> qemu_del_nic(n->nic);
> diff --git a/hw/virtio-net.h b/hw/virtio-net.h
> index 36aa463..bc5857a 100644
> --- a/hw/virtio-net.h
> +++ b/hw/virtio-net.h
> @@ -44,6 +44,8 @@
> #define VIRTIO_NET_F_CTRL_RX 18 /* Control channel RX mode support */
> #define VIRTIO_NET_F_CTRL_VLAN 19 /* Control channel VLAN filtering */
> #define VIRTIO_NET_F_CTRL_RX_EXTRA 20 /* Extra RX mode control support */
> +#define VIRTIO_NET_F_MQ 22 /* Device supports Receive Flow
> + * Steering */
>
> #define VIRTIO_NET_S_LINK_UP 1 /* Link is up */
>
> @@ -72,6 +74,8 @@ struct virtio_net_config
> uint8_t mac[ETH_ALEN];
> /* See VIRTIO_NET_F_STATUS and VIRTIO_NET_S_* above */
> uint16_t status;
> + /* Max virtqueue pairs supported by the device */
> + uint16_t max_virtqueue_pairs;
> } QEMU_PACKED;
>
> /* This is the first element of the scatter-gather list. If you don't
> @@ -168,6 +172,26 @@ struct virtio_net_ctrl_mac {
> #define VIRTIO_NET_CTRL_VLAN_ADD 0
> #define VIRTIO_NET_CTRL_VLAN_DEL 1
>
> +/*
> + * Control Multiqueue
> + *
> + * The command VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET
> + * enables multiqueue, specifying the number of the transmit and
> + * receive queues that will be used. After the command is consumed and acked by
> + * the device, the device will not steer new packets on receive virtqueues
> + * other than specified nor read from transmit virtqueues other than specified.
> + * Accordingly, driver should not transmit new packets on virtqueues other than
> + * specified.
> + */
> +struct virtio_net_ctrl_mq {
VirtIONetCtrlMQ and please don't forget the typedef.
> + uint16_t virtqueue_pairs;
> +};
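A rough sketch of what that could look like, assuming the usual QEMU CamelCase typedef convention:

    typedef struct VirtIONetCtrlMQ {
        uint16_t virtqueue_pairs;
    } VirtIONetCtrlMQ;
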
> +
> +#define VIRTIO_NET_CTRL_MQ 4
> + #define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET 0
> + #define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN 1
> + #define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX 0x8000
> +
> #define DEFINE_VIRTIO_NET_FEATURES(_state, _field) \
> DEFINE_VIRTIO_COMMON_FEATURES(_state, _field), \
> DEFINE_PROP_BIT("csum", _state, _field, VIRTIO_NET_F_CSUM, true), \
> @@ -186,5 +210,6 @@ struct virtio_net_ctrl_mac {
> DEFINE_PROP_BIT("ctrl_vq", _state, _field, VIRTIO_NET_F_CTRL_VQ,
> true), \
> DEFINE_PROP_BIT("ctrl_rx", _state, _field, VIRTIO_NET_F_CTRL_RX,
> true), \
> DEFINE_PROP_BIT("ctrl_vlan", _state, _field, VIRTIO_NET_F_CTRL_VLAN,
> true), \
> - DEFINE_PROP_BIT("ctrl_rx_extra", _state, _field,
> VIRTIO_NET_F_CTRL_RX_EXTRA, true)
> + DEFINE_PROP_BIT("ctrl_rx_extra", _state, _field,
> VIRTIO_NET_F_CTRL_RX_EXTRA, true), \
> + DEFINE_PROP_BIT("mq", _state, _field, VIRTIO_NET_F_MQ, true)
> #endif
> --
> 1.7.1
>
>