Re: [Qemu-devel] [PATCH] vmxnet3: Fix reading/writing guest memory specially when behind an IOMMU


From: Dmitry Fleytman
Subject: Re: [Qemu-devel] [PATCH] vmxnet3: Fix reading/writing guest memory specially when behind an IOMMU
Date: Mon, 20 Jun 2016 10:19:52 +0300

> On 18 Jun 2016, at 13:24, KarimAllah Ahmed <address@hidden> wrote:
> 
> When a PCI device lives behind an IOMMU, it should use the 'pci_dma_*' family
> of functions for any transfer from/to guest memory, while the
> 'cpu_physical_memory_*' family of functions completely bypasses any MMU/IOMMU
> in the system.
> 
> vmxnet3 was using the 'cpu_physical_memory_*' functions exclusively, which
> works fine with the default QEMU setup where no IOMMU is enabled, but fails
> miserably once an IOMMU is enabled. This commit converts all such instances
> to 'pci_dma_*'.
> 
> Cc: Dmitry Fleytman <address@hidden>

Acked-by: Dmitry Fleytman <address@hidden>

> Cc: Jason Wang <address@hidden>
> Cc: address@hidden
> Cc: Anthony Liguori <address@hidden>
> Signed-off-by: KarimAllah Ahmed <address@hidden>
> ---
> hw/net/vmware_utils.h  |  55 +++++++------
> hw/net/vmxnet3.c       | 207 +++++++++++++++++++++++++++----------------------
> hw/net/vmxnet_tx_pkt.c |  19 ++---
> hw/net/vmxnet_tx_pkt.h |   8 +-
> 4 files changed, 161 insertions(+), 128 deletions(-)
> 
> diff --git a/hw/net/vmware_utils.h b/hw/net/vmware_utils.h
> index c0dbb2f..5500601 100644
> --- a/hw/net/vmware_utils.h
> +++ b/hw/net/vmware_utils.h
> @@ -26,97 +26,104 @@
>  *
>  */
> static inline void
> -vmw_shmem_read(hwaddr addr, void *buf, int len)
> +vmw_shmem_read(PCIDevice *d, hwaddr addr, void *buf, int len)
> {
>     VMW_SHPRN("SHMEM r: %" PRIx64 ", len: %d to %p", addr, len, buf);
> -    cpu_physical_memory_read(addr, buf, len);
> +    pci_dma_read(d, addr, buf, len);
> }
> 
> static inline void
> -vmw_shmem_write(hwaddr addr, void *buf, int len)
> +vmw_shmem_write(PCIDevice *d, hwaddr addr, void *buf, int len)
> {
>     VMW_SHPRN("SHMEM w: %" PRIx64 ", len: %d to %p", addr, len, buf);
> -    cpu_physical_memory_write(addr, buf, len);
> +    pci_dma_write(d, addr, buf, len);
> }
> 
> static inline void
> -vmw_shmem_rw(hwaddr addr, void *buf, int len, int is_write)
> +vmw_shmem_rw(PCIDevice *d, hwaddr addr, void *buf, int len, int is_write)
> {
>     VMW_SHPRN("SHMEM r/w: %" PRIx64 ", len: %d (to %p), is write: %d",
>               addr, len, buf, is_write);
> 
> -    cpu_physical_memory_rw(addr, buf, len, is_write);
> +    if (is_write)
> +        pci_dma_write(d, addr, buf, len);
> +    else
> +        pci_dma_read(d, addr, buf, len);
> }
> 
> static inline void
> -vmw_shmem_set(hwaddr addr, uint8_t val, int len)
> +vmw_shmem_set(PCIDevice *d, hwaddr addr, uint8_t val, int len)
> {
>     int i;
>     VMW_SHPRN("SHMEM set: %" PRIx64 ", len: %d (value 0x%X)", addr, len, val);
> 
>     for (i = 0; i < len; i++) {
> -        cpu_physical_memory_write(addr + i, &val, 1);
> +        pci_dma_write(d, addr + i, &val, 1);
>     }
> }
> 
> static inline uint32_t
> -vmw_shmem_ld8(hwaddr addr)
> +vmw_shmem_ld8(PCIDevice *d, hwaddr addr)
> {
> -    uint8_t res = ldub_phys(&address_space_memory, addr);
> +    uint8_t res;
> +    pci_dma_read(d, addr, &res, 1);
>     VMW_SHPRN("SHMEM load8: %" PRIx64 " (value 0x%X)", addr, res);
>     return res;
> }
> 
> static inline void
> -vmw_shmem_st8(hwaddr addr, uint8_t value)
> +vmw_shmem_st8(PCIDevice *d, hwaddr addr, uint8_t value)
> {
>     VMW_SHPRN("SHMEM store8: %" PRIx64 " (value 0x%X)", addr, value);
> -    stb_phys(&address_space_memory, addr, value);
> +    pci_dma_write(d, addr, &value, 1);
> }
> 
> static inline uint32_t
> -vmw_shmem_ld16(hwaddr addr)
> +vmw_shmem_ld16(PCIDevice *d, hwaddr addr)
> {
> -    uint16_t res = lduw_le_phys(&address_space_memory, addr);
> +    uint16_t res;
> +    pci_dma_read(d, addr, &res, 2);
>     VMW_SHPRN("SHMEM load16: %" PRIx64 " (value 0x%X)", addr, res);
>     return res;
> }
> 
> static inline void
> -vmw_shmem_st16(hwaddr addr, uint16_t value)
> +vmw_shmem_st16(PCIDevice *d, hwaddr addr, uint16_t value)
> {
>     VMW_SHPRN("SHMEM store16: %" PRIx64 " (value 0x%X)", addr, value);
> -    stw_le_phys(&address_space_memory, addr, value);
> +    pci_dma_write(d, addr, &value, 2);
> }
> 
> static inline uint32_t
> -vmw_shmem_ld32(hwaddr addr)
> +vmw_shmem_ld32(PCIDevice *d, hwaddr addr)
> {
> -    uint32_t res = ldl_le_phys(&address_space_memory, addr);
> +    uint32_t res;
> +    pci_dma_read(d, addr, &res, 4);
>     VMW_SHPRN("SHMEM load32: %" PRIx64 " (value 0x%X)", addr, res);
>     return res;
> }
> 
> static inline void
> -vmw_shmem_st32(hwaddr addr, uint32_t value)
> +vmw_shmem_st32(PCIDevice *d, hwaddr addr, uint32_t value)
> {
>     VMW_SHPRN("SHMEM store32: %" PRIx64 " (value 0x%X)", addr, value);
> -    stl_le_phys(&address_space_memory, addr, value);
> +    pci_dma_write(d, addr, &value, 4);
> }
> 
> static inline uint64_t
> -vmw_shmem_ld64(hwaddr addr)
> +vmw_shmem_ld64(PCIDevice *d, hwaddr addr)
> {
> -    uint64_t res = ldq_le_phys(&address_space_memory, addr);
> +    uint64_t res;
> +    pci_dma_read(d, addr, &res, 8);
>     VMW_SHPRN("SHMEM load64: %" PRIx64 " (value %" PRIx64 ")", addr, res);
>     return res;
> }
> 
> static inline void
> -vmw_shmem_st64(hwaddr addr, uint64_t value)
> +vmw_shmem_st64(PCIDevice *d, hwaddr addr, uint64_t value)
> {
>     VMW_SHPRN("SHMEM store64: %" PRIx64 " (value %" PRIx64 ")", addr, value);
> -    stq_le_phys(&address_space_memory, addr, value);
> +    pci_dma_write(d, addr, &value, 8);
> }
> 
> /* Macros for simplification of operations on array-style registers */
> diff --git a/hw/net/vmxnet3.c b/hw/net/vmxnet3.c
> index 20f26b7..367f775 100644
> --- a/hw/net/vmxnet3.c
> +++ b/hw/net/vmxnet3.c
> @@ -74,54 +74,54 @@
> #define VMXNET3_MAX_NMSIX_INTRS   (1)
> 
> /* Macros for rings descriptors access */
> -#define VMXNET3_READ_TX_QUEUE_DESCR8(dpa, field) \
> -    (vmw_shmem_ld8(dpa + offsetof(struct Vmxnet3_TxQueueDesc, field)))
> +#define VMXNET3_READ_TX_QUEUE_DESCR8(_d, dpa, field) \
> +    (vmw_shmem_ld8(_d, dpa + offsetof(struct Vmxnet3_TxQueueDesc, field)))
> 
> -#define VMXNET3_WRITE_TX_QUEUE_DESCR8(dpa, field, value) \
> -    (vmw_shmem_st8(dpa + offsetof(struct Vmxnet3_TxQueueDesc, field, value)))
> +#define VMXNET3_WRITE_TX_QUEUE_DESCR8(_d, dpa, field, value) \
> +    (vmw_shmem_st8(_d, dpa + offsetof(struct Vmxnet3_TxQueueDesc, field, value)))
> 
> -#define VMXNET3_READ_TX_QUEUE_DESCR32(dpa, field) \
> -    (vmw_shmem_ld32(dpa + offsetof(struct Vmxnet3_TxQueueDesc, field)))
> +#define VMXNET3_READ_TX_QUEUE_DESCR32(_d, dpa, field) \
> +    (vmw_shmem_ld32(_d, dpa + offsetof(struct Vmxnet3_TxQueueDesc, field)))
> 
> -#define VMXNET3_WRITE_TX_QUEUE_DESCR32(dpa, field, value) \
> -    (vmw_shmem_st32(dpa + offsetof(struct Vmxnet3_TxQueueDesc, field), value))
> +#define VMXNET3_WRITE_TX_QUEUE_DESCR32(_d, dpa, field, value) \
> +    (vmw_shmem_st32(_d, dpa + offsetof(struct Vmxnet3_TxQueueDesc, field), value))
> 
> -#define VMXNET3_READ_TX_QUEUE_DESCR64(dpa, field) \
> -    (vmw_shmem_ld64(dpa + offsetof(struct Vmxnet3_TxQueueDesc, field)))
> +#define VMXNET3_READ_TX_QUEUE_DESCR64(_d, dpa, field) \
> +    (vmw_shmem_ld64(_d, dpa + offsetof(struct Vmxnet3_TxQueueDesc, field)))
> 
> -#define VMXNET3_WRITE_TX_QUEUE_DESCR64(dpa, field, value) \
> -    (vmw_shmem_st64(dpa + offsetof(struct Vmxnet3_TxQueueDesc, field), value))
> +#define VMXNET3_WRITE_TX_QUEUE_DESCR64(_d, dpa, field, value) \
> +    (vmw_shmem_st64(_d, dpa + offsetof(struct Vmxnet3_TxQueueDesc, field), value))
> 
> -#define VMXNET3_READ_RX_QUEUE_DESCR64(dpa, field) \
> -    (vmw_shmem_ld64(dpa + offsetof(struct Vmxnet3_RxQueueDesc, field)))
> +#define VMXNET3_READ_RX_QUEUE_DESCR64(_d, dpa, field) \
> +    (vmw_shmem_ld64(_d, dpa + offsetof(struct Vmxnet3_RxQueueDesc, field)))
> 
> -#define VMXNET3_READ_RX_QUEUE_DESCR32(dpa, field) \
> -    (vmw_shmem_ld32(dpa + offsetof(struct Vmxnet3_RxQueueDesc, field)))
> +#define VMXNET3_READ_RX_QUEUE_DESCR32(_d, dpa, field) \
> +    (vmw_shmem_ld32(_d, dpa + offsetof(struct Vmxnet3_RxQueueDesc, field)))
> 
> -#define VMXNET3_WRITE_RX_QUEUE_DESCR64(dpa, field, value) \
> -    (vmw_shmem_st64(dpa + offsetof(struct Vmxnet3_RxQueueDesc, field), value))
> +#define VMXNET3_WRITE_RX_QUEUE_DESCR64(_d, dpa, field, value) \
> +    (vmw_shmem_st64(_d, dpa + offsetof(struct Vmxnet3_RxQueueDesc, field), value))
> 
> -#define VMXNET3_WRITE_RX_QUEUE_DESCR8(dpa, field, value) \
> -    (vmw_shmem_st8(dpa + offsetof(struct Vmxnet3_RxQueueDesc, field), value))
> +#define VMXNET3_WRITE_RX_QUEUE_DESCR8(_d, dpa, field, value) \
> +    (vmw_shmem_st8(_d, dpa + offsetof(struct Vmxnet3_RxQueueDesc, field), value))
> 
> /* Macros for guest driver shared area access */
> -#define VMXNET3_READ_DRV_SHARED64(shpa, field) \
> -    (vmw_shmem_ld64(shpa + offsetof(struct Vmxnet3_DriverShared, field)))
> +#define VMXNET3_READ_DRV_SHARED64(_d, shpa, field) \
> +    (vmw_shmem_ld64(_d, shpa + offsetof(struct Vmxnet3_DriverShared, field)))
> 
> -#define VMXNET3_READ_DRV_SHARED32(shpa, field) \
> -    (vmw_shmem_ld32(shpa + offsetof(struct Vmxnet3_DriverShared, field)))
> +#define VMXNET3_READ_DRV_SHARED32(_d, shpa, field) \
> +    (vmw_shmem_ld32(_d, shpa + offsetof(struct Vmxnet3_DriverShared, field)))
> 
> -#define VMXNET3_WRITE_DRV_SHARED32(shpa, field, val) \
> -    (vmw_shmem_st32(shpa + offsetof(struct Vmxnet3_DriverShared, field), val))
> +#define VMXNET3_WRITE_DRV_SHARED32(_d, shpa, field, val) \
> +    (vmw_shmem_st32(_d, shpa + offsetof(struct Vmxnet3_DriverShared, field), val))
> 
> -#define VMXNET3_READ_DRV_SHARED16(shpa, field) \
> -    (vmw_shmem_ld16(shpa + offsetof(struct Vmxnet3_DriverShared, field)))
> +#define VMXNET3_READ_DRV_SHARED16(_d, shpa, field) \
> +    (vmw_shmem_ld16(_d, shpa + offsetof(struct Vmxnet3_DriverShared, field)))
> 
> -#define VMXNET3_READ_DRV_SHARED8(shpa, field) \
> -    (vmw_shmem_ld8(shpa + offsetof(struct Vmxnet3_DriverShared, field)))
> +#define VMXNET3_READ_DRV_SHARED8(_d, shpa, field) \
> +    (vmw_shmem_ld8(_d, shpa + offsetof(struct Vmxnet3_DriverShared, field)))
> 
> -#define VMXNET3_READ_DRV_SHARED(shpa, field, b, l) \
> -    (vmw_shmem_read(shpa + offsetof(struct Vmxnet3_DriverShared, field), b, l))
> +#define VMXNET3_READ_DRV_SHARED(_d, shpa, field, b, l) \
> +    (vmw_shmem_read(_d, shpa + offsetof(struct Vmxnet3_DriverShared, field), b, l))
> 
> #define VMXNET_FLAG_IS_SET(field, flag) (((field) & (flag)) == (flag))
> 
> @@ -147,7 +147,8 @@ typedef struct {
>     uint8_t gen;
> } Vmxnet3Ring;
> 
> -static inline void vmxnet3_ring_init(Vmxnet3Ring *ring,
> +static inline void vmxnet3_ring_init(PCIDevice *d,
> +                                  Vmxnet3Ring *ring,
>                                      hwaddr pa,
>                                      size_t size,
>                                      size_t cell_size,
> @@ -160,7 +161,7 @@ static inline void vmxnet3_ring_init(Vmxnet3Ring *ring,
>     ring->next = 0;
> 
>     if (zero_region) {
> -        vmw_shmem_set(pa, 0, size * cell_size);
> +        vmw_shmem_set(d, pa, 0, size * cell_size);
>     }
> }
> 
> @@ -190,14 +191,16 @@ static inline hwaddr vmxnet3_ring_curr_cell_pa(Vmxnet3Ring *ring)
>     return ring->pa + ring->next * ring->cell_size;
> }
> 
> -static inline void vmxnet3_ring_read_curr_cell(Vmxnet3Ring *ring, void *buff)
> +static inline void vmxnet3_ring_read_curr_cell(PCIDevice *d, Vmxnet3Ring *ring,
> +                                            void *buff)
> {
> -    vmw_shmem_read(vmxnet3_ring_curr_cell_pa(ring), buff, ring->cell_size);
> +    vmw_shmem_read(d, vmxnet3_ring_curr_cell_pa(ring), buff, ring->cell_size);
> }
> 
> -static inline void vmxnet3_ring_write_curr_cell(Vmxnet3Ring *ring, void *buff)
> +static inline void vmxnet3_ring_write_curr_cell(PCIDevice *d, Vmxnet3Ring *ring,
> +                                             void *buff)
> {
> -    vmw_shmem_write(vmxnet3_ring_curr_cell_pa(ring), buff, ring->cell_size);
> +    vmw_shmem_write(d, vmxnet3_ring_curr_cell_pa(ring), buff, ring->cell_size);
> }
> 
> static inline size_t vmxnet3_ring_curr_cell_idx(Vmxnet3Ring *ring)
> @@ -456,9 +459,9 @@ vmxnet3_on_interrupt_mask_changed(VMXNET3State *s, int lidx, bool is_masked)
>     vmxnet3_update_interrupt_line_state(s, lidx);
> }
> 
> -static bool vmxnet3_verify_driver_magic(hwaddr dshmem)
> +static bool vmxnet3_verify_driver_magic(PCIDevice *d, hwaddr dshmem)
> {
> -    return (VMXNET3_READ_DRV_SHARED32(dshmem, magic) == VMXNET3_REV1_MAGIC);
> +    return (VMXNET3_READ_DRV_SHARED32(d, dshmem, magic) == VMXNET3_REV1_MAGIC);
> }
> 
> #define VMXNET3_GET_BYTE(x, byte_num) (((x) >> (byte_num)*8) & 0xFF)
> @@ -526,13 +529,14 @@ vmxnet3_dec_rx_completion_counter(VMXNET3State *s, int qidx)
> static void vmxnet3_complete_packet(VMXNET3State *s, int qidx, uint32_t tx_ridx)
> {
>     struct Vmxnet3_TxCompDesc txcq_descr;
> +    PCIDevice *d = PCI_DEVICE(s);
> 
>     VMXNET3_RING_DUMP(VMW_RIPRN, "TXC", qidx, &s->txq_descr[qidx].comp_ring);
> 
>     txcq_descr.txdIdx = tx_ridx;
>     txcq_descr.gen = vmxnet3_ring_curr_gen(&s->txq_descr[qidx].comp_ring);
> 
> -    vmxnet3_ring_write_curr_cell(&s->txq_descr[qidx].comp_ring, &txcq_descr);
> +    vmxnet3_ring_write_curr_cell(d, &s->txq_descr[qidx].comp_ring, &txcq_descr);
> 
>     /* Flush changes in TX descriptor before changing the counter value */
>     smp_wmb();
> @@ -688,13 +692,14 @@ vmxnet3_pop_next_tx_descr(VMXNET3State *s,
>                           uint32_t *descr_idx)
> {
>     Vmxnet3Ring *ring = &s->txq_descr[qidx].tx_ring;
> +    PCIDevice *d = PCI_DEVICE(s);
> 
> -    vmxnet3_ring_read_curr_cell(ring, txd);
> +    vmxnet3_ring_read_curr_cell(d, ring, txd);
>     if (txd->gen == vmxnet3_ring_curr_gen(ring)) {
>         /* Only read after generation field verification */
>         smp_rmb();
>         /* Re-read to be sure we got the latest version */
> -        vmxnet3_ring_read_curr_cell(ring, txd);
> +        vmxnet3_ring_read_curr_cell(d, ring, txd);
>         VMXNET3_RING_DUMP(VMW_RIPRN, "TX", qidx, ring);
>         *descr_idx = vmxnet3_ring_curr_cell_idx(ring);
>         vmxnet3_inc_tx_consumption_counter(s, qidx);
> @@ -731,6 +736,7 @@ func_exit:
> static void vmxnet3_process_tx_queue(VMXNET3State *s, int qidx)
> {
>     struct Vmxnet3_TxDesc txd;
> +    PCIDevice *d = PCI_DEVICE(s);
>     uint32_t txd_idx;
>     uint32_t data_len;
>     hwaddr data_pa;
> @@ -746,7 +752,7 @@ static void vmxnet3_process_tx_queue(VMXNET3State *s, int qidx)
>             data_len = (txd.len > 0) ? txd.len : VMXNET3_MAX_TX_BUF_SIZE;
>             data_pa = le64_to_cpu(txd.addr);
> 
> -            if (!vmxnet_tx_pkt_add_raw_fragment(s->tx_pkt,
> +            if (!vmxnet_tx_pkt_add_raw_fragment(d, s->tx_pkt,
>                                                 data_pa,
>                                                 data_len)) {
>                 s->skip_current_tx_pkt = true;
> @@ -773,7 +779,7 @@ static void vmxnet3_process_tx_queue(VMXNET3State *s, int qidx)
>             vmxnet3_complete_packet(s, qidx, txd_idx);
>             s->tx_sop = true;
>             s->skip_current_tx_pkt = false;
> -            vmxnet_tx_pkt_reset(s->tx_pkt);
> +            vmxnet_tx_pkt_reset(d, s->tx_pkt);
>         }
>     }
> }
> @@ -782,9 +788,11 @@ static inline void
> vmxnet3_read_next_rx_descr(VMXNET3State *s, int qidx, int ridx,
>                            struct Vmxnet3_RxDesc *dbuf, uint32_t *didx)
> {
> +    PCIDevice *d = PCI_DEVICE(s);
> +
>     Vmxnet3Ring *ring = &s->rxq_descr[qidx].rx_ring[ridx];
>     *didx = vmxnet3_ring_curr_cell_idx(ring);
> -    vmxnet3_ring_read_curr_cell(ring, dbuf);
> +    vmxnet3_ring_read_curr_cell(d, ring, dbuf);
> }
> 
> static inline uint8_t
> @@ -797,12 +805,13 @@ static inline hwaddr
> vmxnet3_pop_rxc_descr(VMXNET3State *s, int qidx, uint32_t *descr_gen)
> {
>     uint8_t ring_gen;
> +    PCIDevice *d = PCI_DEVICE(s);
>     struct Vmxnet3_RxCompDesc rxcd;
> 
>     hwaddr daddr =
>         vmxnet3_ring_curr_cell_pa(&s->rxq_descr[qidx].comp_ring);
> 
> -    cpu_physical_memory_read(daddr, &rxcd, sizeof(struct Vmxnet3_RxCompDesc));
> +    pci_dma_read(d, daddr, &rxcd, sizeof(struct Vmxnet3_RxCompDesc));
>     ring_gen = vmxnet3_ring_curr_gen(&s->rxq_descr[qidx].comp_ring);
> 
>     if (rxcd.gen != ring_gen) {
> @@ -1023,7 +1032,7 @@ nocsum:
> }
> 
> static void
> -vmxnet3_physical_memory_writev(const struct iovec *iov,
> +vmxnet3_physical_memory_writev(PCIDevice *d, const struct iovec *iov,
>                                size_t start_iov_off,
>                                hwaddr target_addr,
>                                size_t bytes_to_copy)
> @@ -1036,7 +1045,7 @@ vmxnet3_physical_memory_writev(const struct iovec *iov,
>             size_t chunk_len =
>                 MIN((curr_off + iov->iov_len) - start_iov_off, bytes_to_copy);
> 
> -            cpu_physical_memory_write(target_addr + copied,
> +            pci_dma_write(d, target_addr + copied,
>                                       iov->iov_base + start_iov_off - curr_off,
>                                       chunk_len);
> 
> @@ -1055,6 +1064,7 @@ static bool
> vmxnet3_indicate_packet(VMXNET3State *s)
> {
>     struct Vmxnet3_RxDesc rxd;
> +    PCIDevice *d = PCI_DEVICE(s);
>     bool is_head = true;
>     uint32_t rxd_idx;
>     uint32_t rx_ridx = 0;
> @@ -1088,7 +1098,7 @@ vmxnet3_indicate_packet(VMXNET3State *s)
>         }
> 
>         chunk_size = MIN(bytes_left, rxd.len);
> -        vmxnet3_physical_memory_writev(data, bytes_copied,
> +        vmxnet3_physical_memory_writev(d, data, bytes_copied,
>                                        le64_to_cpu(rxd.addr), chunk_size);
>         bytes_copied += chunk_size;
>         bytes_left -= chunk_size;
> @@ -1096,7 +1106,7 @@ vmxnet3_indicate_packet(VMXNET3State *s)
>         vmxnet3_dump_rx_descr(&rxd);
> 
>         if (ready_rxcd_pa != 0) {
> -            cpu_physical_memory_write(ready_rxcd_pa, &rxcd, sizeof(rxcd));
> +            pci_dma_write(d, ready_rxcd_pa, &rxcd, sizeof(rxcd));
>         }
> 
>         memset(&rxcd, 0, sizeof(struct Vmxnet3_RxCompDesc));
> @@ -1127,7 +1137,7 @@ vmxnet3_indicate_packet(VMXNET3State *s)
>     if (ready_rxcd_pa != 0) {
>         rxcd.eop = 1;
>         rxcd.err = (bytes_left != 0);
> -        cpu_physical_memory_write(ready_rxcd_pa, &rxcd, sizeof(rxcd));
> +        pci_dma_write(d, ready_rxcd_pa, &rxcd, sizeof(rxcd));
> 
>         /* Flush RX descriptor changes */
>         smp_wmb();
> @@ -1224,9 +1234,10 @@ static void vmxnet3_reset_mac(VMXNET3State *s)
> 
> static void vmxnet3_deactivate_device(VMXNET3State *s)
> {
> +    PCIDevice *d = PCI_DEVICE(s);
>     if (s->device_active) {
>         VMW_CBPRN("Deactivating vmxnet3...");
> -        vmxnet_tx_pkt_reset(s->tx_pkt);
> +        vmxnet_tx_pkt_reset(d, s->tx_pkt);
>         vmxnet_tx_pkt_uninit(s->tx_pkt);
>         vmxnet_rx_pkt_uninit(s->rx_pkt);
>         s->device_active = false;
> @@ -1246,7 +1257,9 @@ static void vmxnet3_reset(VMXNET3State *s)
> 
> static void vmxnet3_update_rx_mode(VMXNET3State *s)
> {
> -    s->rx_mode = VMXNET3_READ_DRV_SHARED32(s->drv_shmem,
> +    PCIDevice *d = PCI_DEVICE(s);
> +
> +    s->rx_mode = VMXNET3_READ_DRV_SHARED32(d, s->drv_shmem,
>                                            devRead.rxFilterConf.rxMode);
>     VMW_CFPRN("RX mode: 0x%08X", s->rx_mode);
> }
> @@ -1254,9 +1267,10 @@ static void vmxnet3_update_rx_mode(VMXNET3State *s)
> static void vmxnet3_update_vlan_filters(VMXNET3State *s)
> {
>     int i;
> +    PCIDevice *d = PCI_DEVICE(s);
> 
>     /* Copy configuration from shared memory */
> -    VMXNET3_READ_DRV_SHARED(s->drv_shmem,
> +    VMXNET3_READ_DRV_SHARED(d, s->drv_shmem,
>                             devRead.rxFilterConf.vfTable,
>                             s->vlan_table,
>                             sizeof(s->vlan_table));
> @@ -1277,8 +1291,10 @@ static void vmxnet3_update_vlan_filters(VMXNET3State *s)
> 
> static void vmxnet3_update_mcast_filters(VMXNET3State *s)
> {
> +    PCIDevice *d = PCI_DEVICE(s);
> +
>     uint16_t list_bytes =
> -        VMXNET3_READ_DRV_SHARED16(s->drv_shmem,
> +        VMXNET3_READ_DRV_SHARED16(d, s->drv_shmem,
>                                   devRead.rxFilterConf.mfTableLen);
> 
>     s->mcast_list_len = list_bytes / sizeof(s->mcast_list[0]);
> @@ -1295,10 +1311,10 @@ static void vmxnet3_update_mcast_filters(VMXNET3State *s)
>     } else {
>         int i;
>         hwaddr mcast_list_pa =
> -            VMXNET3_READ_DRV_SHARED64(s->drv_shmem,
> +            VMXNET3_READ_DRV_SHARED64(d, s->drv_shmem,
>                                       devRead.rxFilterConf.mfTablePA);
> 
> -        cpu_physical_memory_read(mcast_list_pa, s->mcast_list, list_bytes);
> +        pci_dma_read(d, mcast_list_pa, s->mcast_list, list_bytes);
>         VMW_CFPRN("Current multicast list len is %d:", s->mcast_list_len);
>         for (i = 0; i < s->mcast_list_len; i++) {
>             VMW_CFPRN("\t" VMXNET_MF, VMXNET_MA(s->mcast_list[i].a));
> @@ -1323,18 +1339,19 @@ static uint32_t vmxnet3_get_interrupt_config(VMXNET3State *s)
> static void vmxnet3_fill_stats(VMXNET3State *s)
> {
>     int i;
> +    PCIDevice *d = PCI_DEVICE(s);
> 
>     if (!s->device_active)
>         return;
> 
>     for (i = 0; i < s->txq_num; i++) {
> -        cpu_physical_memory_write(s->txq_descr[i].tx_stats_pa,
> +        pci_dma_write(d, s->txq_descr[i].tx_stats_pa,
>                                   &s->txq_descr[i].txq_stats,
>                                   sizeof(s->txq_descr[i].txq_stats));
>     }
> 
>     for (i = 0; i < s->rxq_num; i++) {
> -        cpu_physical_memory_write(s->rxq_descr[i].rx_stats_pa,
> +        pci_dma_write(d, s->rxq_descr[i].rx_stats_pa,
>                                   &s->rxq_descr[i].rxq_stats,
>                                   sizeof(s->rxq_descr[i].rxq_stats));
>     }
> @@ -1343,8 +1360,9 @@ static void vmxnet3_fill_stats(VMXNET3State *s)
> static void vmxnet3_adjust_by_guest_type(VMXNET3State *s)
> {
>     struct Vmxnet3_GOSInfo gos;
> +    PCIDevice *d = PCI_DEVICE(s);
> 
> -    VMXNET3_READ_DRV_SHARED(s->drv_shmem, devRead.misc.driverInfo.gos,
> +    VMXNET3_READ_DRV_SHARED(d, s->drv_shmem, devRead.misc.driverInfo.gos,
>                             &gos, sizeof(gos));
>     s->rx_packets_compound =
>         (gos.gosType == VMXNET3_GOS_TYPE_WIN) ? false : true;
> @@ -1364,13 +1382,14 @@ vmxnet3_dump_conf_descr(const char *name,
> static void vmxnet3_update_pm_state(VMXNET3State *s)
> {
>     struct Vmxnet3_VariableLenConfDesc pm_descr;
> +    PCIDevice *d = PCI_DEVICE(s);
> 
>     pm_descr.confLen =
> -        VMXNET3_READ_DRV_SHARED32(s->drv_shmem, devRead.pmConfDesc.confLen);
> +        VMXNET3_READ_DRV_SHARED32(d, s->drv_shmem, devRead.pmConfDesc.confLen);
>     pm_descr.confVer =
> -        VMXNET3_READ_DRV_SHARED32(s->drv_shmem, devRead.pmConfDesc.confVer);
> +        VMXNET3_READ_DRV_SHARED32(d, s->drv_shmem, devRead.pmConfDesc.confVer);
>     pm_descr.confPA =
> -        VMXNET3_READ_DRV_SHARED64(s->drv_shmem, devRead.pmConfDesc.confPA);
> +        VMXNET3_READ_DRV_SHARED64(d, s->drv_shmem, devRead.pmConfDesc.confPA);
> 
>     vmxnet3_dump_conf_descr("PM State", &pm_descr);
> }
> @@ -1379,8 +1398,9 @@ static void vmxnet3_update_features(VMXNET3State *s)
> {
>     uint32_t guest_features;
>     int rxcso_supported;
> +    PCIDevice *d = PCI_DEVICE(s);
> 
> -    guest_features = VMXNET3_READ_DRV_SHARED32(s->drv_shmem,
> +    guest_features = VMXNET3_READ_DRV_SHARED32(d, s->drv_shmem,
>                                                devRead.misc.uptFeatures);
> 
>     rxcso_supported = VMXNET_FLAG_IS_SET(guest_features, UPT1_F_RXCSUM);
> @@ -1455,12 +1475,13 @@ static void vmxnet3_activate_device(VMXNET3State *s)
> {
>     int i;
>     static const uint32_t VMXNET3_DEF_TX_THRESHOLD = 1;
> +    PCIDevice *d = PCI_DEVICE(s);
>     hwaddr qdescr_table_pa;
>     uint64_t pa;
>     uint32_t size;
> 
>     /* Verify configuration consistency */
> -    if (!vmxnet3_verify_driver_magic(s->drv_shmem)) {
> +    if (!vmxnet3_verify_driver_magic(d, s->drv_shmem)) {
>         VMW_ERPRN("Device configuration received from driver is invalid");
>         return;
>     }
> @@ -1476,11 +1497,11 @@ static void vmxnet3_activate_device(VMXNET3State *s)
>     vmxnet3_update_pm_state(s);
>     vmxnet3_setup_rx_filtering(s);
>     /* Cache fields from shared memory */
> -    s->mtu = VMXNET3_READ_DRV_SHARED32(s->drv_shmem, devRead.misc.mtu);
> +    s->mtu = VMXNET3_READ_DRV_SHARED32(d, s->drv_shmem, devRead.misc.mtu);
>     VMW_CFPRN("MTU is %u", s->mtu);
> 
>     s->max_rx_frags =
> -        VMXNET3_READ_DRV_SHARED16(s->drv_shmem, devRead.misc.maxNumRxSG);
> +        VMXNET3_READ_DRV_SHARED16(d, s->drv_shmem, devRead.misc.maxNumRxSG);
> 
>     if (s->max_rx_frags == 0) {
>         s->max_rx_frags = 1;
> @@ -1489,24 +1510,24 @@ static void vmxnet3_activate_device(VMXNET3State *s)
>     VMW_CFPRN("Max RX fragments is %u", s->max_rx_frags);
> 
>     s->event_int_idx =
> -        VMXNET3_READ_DRV_SHARED8(s->drv_shmem, devRead.intrConf.eventIntrIdx);
> +        VMXNET3_READ_DRV_SHARED8(d, s->drv_shmem, devRead.intrConf.eventIntrIdx);
>     assert(vmxnet3_verify_intx(s, s->event_int_idx));
>     VMW_CFPRN("Events interrupt line is %u", s->event_int_idx);
> 
>     s->auto_int_masking =
> -        VMXNET3_READ_DRV_SHARED8(s->drv_shmem, devRead.intrConf.autoMask);
> +        VMXNET3_READ_DRV_SHARED8(d, s->drv_shmem, devRead.intrConf.autoMask);
>     VMW_CFPRN("Automatic interrupt masking is %d", (int)s->auto_int_masking);
> 
>     s->txq_num =
> -        VMXNET3_READ_DRV_SHARED8(s->drv_shmem, devRead.misc.numTxQueues);
> +        VMXNET3_READ_DRV_SHARED8(d, s->drv_shmem, devRead.misc.numTxQueues);
>     s->rxq_num =
> -        VMXNET3_READ_DRV_SHARED8(s->drv_shmem, devRead.misc.numRxQueues);
> +        VMXNET3_READ_DRV_SHARED8(d, s->drv_shmem, devRead.misc.numRxQueues);
> 
>     VMW_CFPRN("Number of TX/RX queues %u/%u", s->txq_num, s->rxq_num);
>     vmxnet3_validate_queues(s);
> 
>     qdescr_table_pa =
> -        VMXNET3_READ_DRV_SHARED64(s->drv_shmem, devRead.misc.queueDescPA);
> +        VMXNET3_READ_DRV_SHARED64(d, s->drv_shmem, devRead.misc.queueDescPA);
>     VMW_CFPRN("TX queues descriptors table is at 0x%" PRIx64, 
> qdescr_table_pa);
> 
>     /*
> @@ -1522,25 +1543,25 @@ static void vmxnet3_activate_device(VMXNET3State *s)
> 
>         /* Read interrupt number for this TX queue */
>         s->txq_descr[i].intr_idx =
> -            VMXNET3_READ_TX_QUEUE_DESCR8(qdescr_pa, conf.intrIdx);
> +            VMXNET3_READ_TX_QUEUE_DESCR8(d, qdescr_pa, conf.intrIdx);
>         assert(vmxnet3_verify_intx(s, s->txq_descr[i].intr_idx));
> 
>         VMW_CFPRN("TX Queue %d interrupt: %d", i, s->txq_descr[i].intr_idx);
> 
>         /* Read rings memory locations for TX queues */
> -        pa = VMXNET3_READ_TX_QUEUE_DESCR64(qdescr_pa, conf.txRingBasePA);
> -        size = VMXNET3_READ_TX_QUEUE_DESCR32(qdescr_pa, conf.txRingSize);
> +        pa = VMXNET3_READ_TX_QUEUE_DESCR64(d, qdescr_pa, conf.txRingBasePA);
> +        size = VMXNET3_READ_TX_QUEUE_DESCR32(d, qdescr_pa, conf.txRingSize);
> 
> -        vmxnet3_ring_init(&s->txq_descr[i].tx_ring, pa, size,
> +        vmxnet3_ring_init(d, &s->txq_descr[i].tx_ring, pa, size,
>                           sizeof(struct Vmxnet3_TxDesc), false);
>         VMXNET3_RING_DUMP(VMW_CFPRN, "TX", i, &s->txq_descr[i].tx_ring);
> 
>         s->max_tx_frags += size;
> 
>         /* TXC ring */
> -        pa = VMXNET3_READ_TX_QUEUE_DESCR64(qdescr_pa, conf.compRingBasePA);
> -        size = VMXNET3_READ_TX_QUEUE_DESCR32(qdescr_pa, conf.compRingSize);
> -        vmxnet3_ring_init(&s->txq_descr[i].comp_ring, pa, size,
> +        pa = VMXNET3_READ_TX_QUEUE_DESCR64(d, qdescr_pa, conf.compRingBasePA);
> +        size = VMXNET3_READ_TX_QUEUE_DESCR32(d, qdescr_pa, conf.compRingSize);
> +        vmxnet3_ring_init(d, &s->txq_descr[i].comp_ring, pa, size,
>                           sizeof(struct Vmxnet3_TxCompDesc), true);
>         VMXNET3_RING_DUMP(VMW_CFPRN, "TXC", i, &s->txq_descr[i].comp_ring);
> 
> @@ -1551,7 +1572,7 @@ static void vmxnet3_activate_device(VMXNET3State *s)
>                sizeof(s->txq_descr[i].txq_stats));
> 
>         /* Fill device-managed parameters for queues */
> -        VMXNET3_WRITE_TX_QUEUE_DESCR32(qdescr_pa,
> +        VMXNET3_WRITE_TX_QUEUE_DESCR32(d, qdescr_pa,
>                                        ctrl.txThreshold,
>                                        VMXNET3_DEF_TX_THRESHOLD);
>     }
> @@ -1570,7 +1591,7 @@ static void vmxnet3_activate_device(VMXNET3State *s)
> 
>         /* Read interrupt number for this RX queue */
>         s->rxq_descr[i].intr_idx =
> -            VMXNET3_READ_TX_QUEUE_DESCR8(qd_pa, conf.intrIdx);
> +            VMXNET3_READ_TX_QUEUE_DESCR8(d, qd_pa, conf.intrIdx);
>         assert(vmxnet3_verify_intx(s, s->rxq_descr[i].intr_idx));
> 
>         VMW_CFPRN("RX Queue %d interrupt: %d", i, s->rxq_descr[i].intr_idx);
> @@ -1578,18 +1599,18 @@ static void vmxnet3_activate_device(VMXNET3State *s)
>         /* Read rings memory locations */
>         for (j = 0; j < VMXNET3_RX_RINGS_PER_QUEUE; j++) {
>             /* RX rings */
> -            pa = VMXNET3_READ_RX_QUEUE_DESCR64(qd_pa, conf.rxRingBasePA[j]);
> -            size = VMXNET3_READ_RX_QUEUE_DESCR32(qd_pa, conf.rxRingSize[j]);
> -            vmxnet3_ring_init(&s->rxq_descr[i].rx_ring[j], pa, size,
> +            pa = VMXNET3_READ_RX_QUEUE_DESCR64(d, qd_pa, conf.rxRingBasePA[j]);
> +            size = VMXNET3_READ_RX_QUEUE_DESCR32(d, qd_pa, conf.rxRingSize[j]);
> +            vmxnet3_ring_init(d, &s->rxq_descr[i].rx_ring[j], pa, size,
>                               sizeof(struct Vmxnet3_RxDesc), false);
>             VMW_CFPRN("RX queue %d:%d: Base: %" PRIx64 ", Size: %d",
>                       i, j, pa, size);
>         }
> 
>         /* RXC ring */
> -        pa = VMXNET3_READ_RX_QUEUE_DESCR64(qd_pa, conf.compRingBasePA);
> -        size = VMXNET3_READ_RX_QUEUE_DESCR32(qd_pa, conf.compRingSize);
> -        vmxnet3_ring_init(&s->rxq_descr[i].comp_ring, pa, size,
> +        pa = VMXNET3_READ_RX_QUEUE_DESCR64(d, qd_pa, conf.compRingBasePA);
> +        size = VMXNET3_READ_RX_QUEUE_DESCR32(d, qd_pa, conf.compRingSize);
> +        vmxnet3_ring_init(d, &s->rxq_descr[i].comp_ring, pa, size,
>                           sizeof(struct Vmxnet3_RxCompDesc), true);
>         VMW_CFPRN("RXC queue %d: Base: %" PRIx64 ", Size: %d", i, pa, size);
> 
> @@ -1756,19 +1777,21 @@ static uint64_t vmxnet3_get_command_status(VMXNET3State *s)
> static void vmxnet3_set_events(VMXNET3State *s, uint32_t val)
> {
>     uint32_t events;
> +    PCIDevice *d = PCI_DEVICE(s);
> 
>     VMW_CBPRN("Setting events: 0x%x", val);
> -    events = VMXNET3_READ_DRV_SHARED32(s->drv_shmem, ecr) | val;
> -    VMXNET3_WRITE_DRV_SHARED32(s->drv_shmem, ecr, events);
> +    events = VMXNET3_READ_DRV_SHARED32(d, s->drv_shmem, ecr) | val;
> +    VMXNET3_WRITE_DRV_SHARED32(d, s->drv_shmem, ecr, events);
> }
> 
> static void vmxnet3_ack_events(VMXNET3State *s, uint32_t val)
> {
> +    PCIDevice *d = PCI_DEVICE(s);
>     uint32_t events;
> 
>     VMW_CBPRN("Clearing events: 0x%x", val);
> -    events = VMXNET3_READ_DRV_SHARED32(s->drv_shmem, ecr) & ~val;
> -    VMXNET3_WRITE_DRV_SHARED32(s->drv_shmem, ecr, events);
> +    events = VMXNET3_READ_DRV_SHARED32(d, s->drv_shmem, ecr) & ~val;
> +    VMXNET3_WRITE_DRV_SHARED32(d, s->drv_shmem, ecr, events);
> }
> 
> static void
> diff --git a/hw/net/vmxnet_tx_pkt.c b/hw/net/vmxnet_tx_pkt.c
> index 91e1e08..ec18e47 100644
> --- a/hw/net/vmxnet_tx_pkt.c
> +++ b/hw/net/vmxnet_tx_pkt.c
> @@ -17,13 +17,14 @@
> 
> #include "qemu/osdep.h"
> #include "hw/hw.h"
> -#include "vmxnet_tx_pkt.h"
> +#include "hw/pci/pci.h"
> +#include "net/checksum.h"
> #include "net/eth.h"
> +#include "net/net.h"
> +#include "net/tap.h"
> #include "qemu-common.h"
> #include "qemu/iov.h"
> -#include "net/checksum.h"
> -#include "net/tap.h"
> -#include "net/net.h"
> +#include "vmxnet_tx_pkt.h"
> 
> enum {
>     VMXNET_TX_PKT_VHDR_FRAG = 0,
> @@ -338,8 +339,8 @@ void vmxnet_tx_pkt_setup_vlan_header(struct VmxnetTxPkt *pkt, uint16_t vlan)
>     }
> }
> 
> -bool vmxnet_tx_pkt_add_raw_fragment(struct VmxnetTxPkt *pkt, hwaddr pa,
> -    size_t len)
> +bool vmxnet_tx_pkt_add_raw_fragment(PCIDevice *d, struct VmxnetTxPkt *pkt,
> +    hwaddr pa, size_t len)
> {
>     hwaddr mapped_len = 0;
>     struct iovec *ventry;
> @@ -353,7 +354,7 @@ bool vmxnet_tx_pkt_add_raw_fragment(struct VmxnetTxPkt *pkt, hwaddr pa,
>     ventry = &pkt->raw[pkt->raw_frags];
>     mapped_len = len;
> 
> -    ventry->iov_base = cpu_physical_memory_map(pa, &mapped_len, false);
> +    ventry->iov_base = pci_dma_map(d, pa, &mapped_len, DMA_DIRECTION_TO_DEVICE);
>     ventry->iov_len = mapped_len;
>     pkt->raw_frags += !!ventry->iov_base;
> 
> @@ -390,7 +391,7 @@ void vmxnet_tx_pkt_dump(struct VmxnetTxPkt *pkt)
> #endif
> }
> 
> -void vmxnet_tx_pkt_reset(struct VmxnetTxPkt *pkt)
> +void vmxnet_tx_pkt_reset(PCIDevice *d, struct VmxnetTxPkt *pkt)
> {
>     int i;
> 
> @@ -415,7 +416,7 @@ void vmxnet_tx_pkt_reset(struct VmxnetTxPkt *pkt)
>     assert(pkt->raw);
>     for (i = 0; i < pkt->raw_frags; i++) {
>         assert(pkt->raw[i].iov_base);
> -        cpu_physical_memory_unmap(pkt->raw[i].iov_base, pkt->raw[i].iov_len,
> +        pci_dma_unmap(d, pkt->raw[i].iov_base, pkt->raw[i].iov_len,
>                                   false, pkt->raw[i].iov_len);
>         pkt->raw[i].iov_len = 0;
>     }
> diff --git a/hw/net/vmxnet_tx_pkt.h b/hw/net/vmxnet_tx_pkt.h
> index f51e98a..b61b181 100644
> --- a/hw/net/vmxnet_tx_pkt.h
> +++ b/hw/net/vmxnet_tx_pkt.h
> @@ -75,13 +75,14 @@ void vmxnet_tx_pkt_setup_vlan_header(struct VmxnetTxPkt *pkt, uint16_t vlan);
> /**
>  * populate data fragment into pkt context.
>  *
> + * @d:              PCI device
>  * @pkt:            packet
>  * @pa:             physical address of fragment
>  * @len:            length of fragment
>  *
>  */
> -bool vmxnet_tx_pkt_add_raw_fragment(struct VmxnetTxPkt *pkt, hwaddr pa,
> -    size_t len);
> +bool vmxnet_tx_pkt_add_raw_fragment(struct PCIDevice *d,
> +    struct VmxnetTxPkt *pkt, hwaddr pa, size_t len);
> 
> /**
>  * fix ip header fields and calculate checksums needed.
> @@ -120,10 +121,11 @@ void vmxnet_tx_pkt_dump(struct VmxnetTxPkt *pkt);
> /**
>  * reset tx packet private context (needed to be called between packets)
>  *
> + * @d:              PCI device
>  * @pkt:            packet
>  *
>  */
> -void vmxnet_tx_pkt_reset(struct VmxnetTxPkt *pkt);
> +void vmxnet_tx_pkt_reset(PCIDevice *d, struct VmxnetTxPkt *pkt);
> 
> /**
>  * Send packet to qemu. handles sw offloads if vhdr is not supported.
> -- 
> 2.8.2
> 


