[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[Qemu-devel] [PATCH v2 7/9] xen/9pfs: implement in/out_iov_from_pdu and
From: |
Stefano Stabellini |
Subject: |
[Qemu-devel] [PATCH v2 7/9] xen/9pfs: implement in/out_iov_from_pdu and vmarshal/vunmarshal |
Date: |
Mon, 13 Mar 2017 16:55:58 -0700 |
Implement xen_9pfs_init_in/out_iov_from_pdu and
xen_9pfs_pdu_vmarshal/vunmarshal by creating a new sg pointing to the
data on the ring.
This is safe as we only handle one request per ring at any given time.
Signed-off-by: Stefano Stabellini <address@hidden>
CC: address@hidden
CC: address@hidden
CC: Aneesh Kumar K.V <address@hidden>
CC: Greg Kurz <address@hidden>
---
hw/9pfs/xen-9p-backend.c | 91 ++++++++++++++++++++++++++++++++++++++++++++++--
1 file changed, 89 insertions(+), 2 deletions(-)
diff --git a/hw/9pfs/xen-9p-backend.c b/hw/9pfs/xen-9p-backend.c
index 741dd31..d72a749 100644
--- a/hw/9pfs/xen-9p-backend.c
+++ b/hw/9pfs/xen-9p-backend.c
@@ -48,12 +48,77 @@ typedef struct Xen9pfsDev {
struct Xen9pfsRing *rings;
} Xen9pfsDev;
+/*
+ * Build an iovec array describing the currently free area of the "in"
+ * ring (backend -> frontend), where a 9p reply can be written.
+ * The free area runs from the masked producer index up to the masked
+ * consumer index; when it wraps past the end of the ring buffer it is
+ * returned as two iovecs, otherwise as one.  *num is set to 1 or 2.
+ * NOTE(review): the idx and size parameters are not referenced in this
+ * body — presumably reserved for later bounds checking; confirm.
+ */
+static void xen_9pfs_in_sg(struct Xen9pfsRing *ring,
+ struct iovec *in_sg,
+ int *num,
+ uint32_t idx,
+ uint32_t size)
+{
+ RING_IDX cons, prod, masked_prod, masked_cons;
+
+ cons = ring->intf->in_cons;
+ prod = ring->intf->in_prod;
+ /* Read barrier: fetch the indices before using them below. */
+ xen_rmb();
+ /* Reduce the free-running indices to offsets within the ring buffer. */
+ masked_prod = xen_9pfs_mask(prod, XEN_9PFS_RING_SIZE);
+ masked_cons = xen_9pfs_mask(cons, XEN_9PFS_RING_SIZE);
+
+ if (masked_prod < masked_cons) {
+ /* Free space is one contiguous span: [masked_prod, masked_cons). */
+ in_sg[0].iov_base = ring->ring.in + masked_prod;
+ in_sg[0].iov_len = masked_cons - masked_prod;
+ *num = 1;
+ } else {
+ /* Free space wraps: tail of the buffer, then its head. */
+ in_sg[0].iov_base = ring->ring.in + masked_prod;
+ in_sg[0].iov_len = XEN_9PFS_RING_SIZE - masked_prod;
+ in_sg[1].iov_base = ring->ring.in;
+ in_sg[1].iov_len = masked_cons;
+ *num = 2;
+ }
+}
+
+/*
+ * Build an iovec array describing the pending request data on the "out"
+ * ring (frontend -> backend).  The data starts at the masked consumer
+ * index and is ring->out_size bytes long; if it wraps past the end of
+ * the ring buffer it is returned as two iovecs.  *num is set to 1 or 2.
+ * NOTE(review): idx is not referenced in this body; and the first branch
+ * assumes out_size <= masked_prod - masked_cons — presumably guaranteed
+ * by the caller that computed out_size; confirm.
+ */
+static void xen_9pfs_out_sg(struct Xen9pfsRing *ring,
+ struct iovec *out_sg,
+ int *num,
+ uint32_t idx)
+{
+ RING_IDX cons, prod, masked_prod, masked_cons;
+
+ cons = ring->intf->out_cons;
+ prod = ring->intf->out_prod;
+ /* Read barrier: fetch the indices before using them below. */
+ xen_rmb();
+ masked_prod = xen_9pfs_mask(prod, XEN_9PFS_RING_SIZE);
+ masked_cons = xen_9pfs_mask(cons, XEN_9PFS_RING_SIZE);
+
+ if (masked_cons < masked_prod) {
+ /* Request data is one contiguous span starting at masked_cons. */
+ out_sg[0].iov_base = ring->ring.out + masked_cons;
+ out_sg[0].iov_len = ring->out_size;
+ *num = 1;
+ } else {
+ if (ring->out_size > (XEN_9PFS_RING_SIZE - masked_cons)) {
+ /* Data wraps: tail of the buffer, then the remainder at its head. */
+ out_sg[0].iov_base = ring->ring.out + masked_cons;
+ out_sg[0].iov_len = XEN_9PFS_RING_SIZE - masked_cons;
+ out_sg[1].iov_base = ring->ring.out;
+ /* NOTE: next statement is wrapped by the mail archive; it reads
+ "out_sg[1].iov_len = ring->out_size - (XEN_9PFS_RING_SIZE - masked_cons);" */
+ out_sg[1].iov_len = ring->out_size - (XEN_9PFS_RING_SIZE -
masked_cons);
+ *num = 2;
+ } else {
+ /* Data fits before the end of the buffer despite cons >= prod. */
+ out_sg[0].iov_base = ring->ring.out + masked_cons;
+ out_sg[0].iov_len = ring->out_size;
+ *num = 1;
+ }
+ }
+}
+
/*
 * Transport hook: marshal a 9p reply directly into the "in" ring.
 * The ring is picked by pdu->tag % num_rings, so a given tag always
 * maps to the same ring.  The commit message states only one request
 * is handled per ring at a time, which makes writing in place safe.
 */
static ssize_t xen_9pfs_pdu_vmarshal(V9fsPDU *pdu,
size_t offset,
const char *fmt,
va_list ap)
{
-    return 0;
+ /* NOTE: next statement is wrapped by the mail archive ("..., state);"). */
+ struct Xen9pfsDev *xen_9pfs = container_of(pdu->s, struct Xen9pfsDev,
state);
+ struct iovec in_sg[2];
+ int num;
+
+ /* ROUND_UP(offset + 128, 512): presumably headroom for the reply
+ * header beyond the current offset — TODO confirm the 128/512 choice. */
+ xen_9pfs_in_sg(&xen_9pfs->rings[pdu->tag % xen_9pfs->num_rings],
+ in_sg, &num, pdu->idx, ROUND_UP(offset + 128, 512));
+ return v9fs_iov_vmarshal(in_sg, num, offset, 0, fmt, ap);
}
/*
 * Transport hook: unmarshal request arguments straight from the "out"
 * ring of the ring selected by pdu->tag % num_rings — no intermediate
 * copy of the request data is made.
 */
static ssize_t xen_9pfs_pdu_vunmarshal(V9fsPDU *pdu,
@@ -61,13 +126,27 @@ static ssize_t xen_9pfs_pdu_vunmarshal(V9fsPDU *pdu,
const char *fmt,
va_list ap)
{
- return 0;
+ /* NOTE: next statement is wrapped by the mail archive ("..., state);"). */
+ struct Xen9pfsDev *xen_9pfs = container_of(pdu->s, struct Xen9pfsDev,
state);
+ struct iovec out_sg[2];
+ int num;
+
+ xen_9pfs_out_sg(&xen_9pfs->rings[pdu->tag % xen_9pfs->num_rings],
+ out_sg, &num, pdu->idx);
+ return v9fs_iov_vunmarshal(out_sg, num, offset, 0, fmt, ap);
}
/*
 * Transport hook: hand the 9p core an iovec view of this PDU's request
 * data on the "out" ring.  A 2-element iovec array is heap-allocated
 * because the ring data may be split in two by wrap-around.
 * NOTE(review): ownership of the g_malloc0'd array passes to the caller
 * via *piov — presumably freed once the PDU completes; confirm against
 * the 9p core and push_and_notify, otherwise this leaks per request.
 */
static void xen_9pfs_init_out_iov_from_pdu(V9fsPDU *pdu,
struct iovec **piov,
unsigned int *pniov)
{
+ /* NOTE: the next two statements are wrapped by the mail archive. */
+ struct Xen9pfsDev *xen_9pfs = container_of(pdu->s, struct Xen9pfsDev,
state);
+ struct Xen9pfsRing *ring = &xen_9pfs->rings[pdu->tag %
xen_9pfs->num_rings];
+ struct iovec *sg = g_malloc0(sizeof(*sg)*2);
+ int num;
+
+ xen_9pfs_out_sg(ring, sg, &num, pdu->idx);
+ *piov = sg;
+ *pniov = num;
}
/*
 * Transport hook: hand the 9p core an iovec view of the free space on
 * the "in" ring, into which the reply (at most `size` bytes) will be
 * written.  Mirrors xen_9pfs_init_out_iov_from_pdu for the reply path.
 * NOTE(review): same ownership question as the out variant — the
 * g_malloc0'd array is returned via *piov; confirm who frees it.
 */
static void xen_9pfs_init_in_iov_from_pdu(V9fsPDU *pdu,
@@ -75,6 +154,14 @@ static void xen_9pfs_init_in_iov_from_pdu(V9fsPDU *pdu,
unsigned int *pniov,
size_t size)
{
+ /* NOTE: the next two statements are wrapped by the mail archive. */
+ struct Xen9pfsDev *xen_9pfs = container_of(pdu->s, struct Xen9pfsDev,
state);
+ struct Xen9pfsRing *ring = &xen_9pfs->rings[pdu->tag %
xen_9pfs->num_rings];
+ struct iovec *sg = g_malloc0(sizeof(*sg)*2);
+ int num;
+
+ xen_9pfs_in_sg(ring, sg, &num, pdu->idx, size);
+ *piov = sg;
+ *pniov = num;
}
static void xen_9pfs_push_and_notify(V9fsPDU *pdu)
--
1.9.1
- Re: [Qemu-devel] [PATCH v2 2/9] xen: import ring.h from xen, (continued)
[Qemu-devel] [PATCH v2 4/9] xen/9pfs: introduce Xen 9pfs backend, Stefano Stabellini, 2017/03/13
[Qemu-devel] [PATCH v2 5/9] xen/9pfs: connect to the frontend, Stefano Stabellini, 2017/03/13
Re: [Qemu-devel] [PATCH v2 5/9] xen/9pfs: connect to the frontend, Greg Kurz, 2017/03/15
Re: [Qemu-devel] [PATCH v2 5/9] xen/9pfs: connect to the frontend, Stefano Stabellini, 2017/03/15
[Qemu-devel] [PATCH v2 7/9] xen/9pfs: implement in/out_iov_from_pdu and vmarshal/vunmarshal,
Stefano Stabellini <=
[Qemu-devel] [PATCH v2 9/9] xen/9pfs: build and register Xen 9pfs backend, Stefano Stabellini, 2017/03/13
[Qemu-devel] [PATCH v2 6/9] xen/9pfs: receive requests from the frontend, Stefano Stabellini, 2017/03/13
Re: [Qemu-devel] [PATCH v2 6/9] xen/9pfs: receive requests from the frontend, Greg Kurz, 2017/03/15
Re: [Qemu-devel] [PATCH v2 6/9] xen/9pfs: receive requests from the frontend, Stefano Stabellini, 2017/03/15
[Qemu-devel] [PATCH v2 8/9] xen/9pfs: send responses back to the frontend, Stefano Stabellini, 2017/03/13