[Qemu-devel] Re: [PATCH 4/7] ide: use the PCI memory access interface


From: Michael S. Tsirkin
Subject: [Qemu-devel] Re: [PATCH 4/7] ide: use the PCI memory access interface
Date: Thu, 2 Sep 2010 08:19:11 +0300
User-agent: Mutt/1.5.20 (2009-12-10)

On Sat, Aug 28, 2010 at 05:54:55PM +0300, Eduard - Gabriel Munteanu wrote:
> Emulated PCI IDE controllers now use the memory access interface. This
> also allows an emulated IOMMU to translate and check accesses.
> 
> Map invalidation results in cancelling in-flight DMA transfers. Since the
> guest OS can't reliably recover the DMA results once a mapping has changed,
> this is a fairly good approximation.
> 
> Signed-off-by: Eduard - Gabriel Munteanu <address@hidden>
> ---
>  dma-helpers.c     |   46 +++++++++++++++++++++++++++++++++++++++++-----
>  dma.h             |   21 ++++++++++++++++++++-
>  hw/ide/core.c     |   15 ++++++++-------
>  hw/ide/internal.h |   39 +++++++++++++++++++++++++++++++++++++++
>  hw/ide/macio.c    |    4 ++--
>  hw/ide/pci.c      |    7 +++++++
>  6 files changed, 117 insertions(+), 15 deletions(-)
> 
> diff --git a/dma-helpers.c b/dma-helpers.c
> index 712ed89..a0dcdb8 100644
> --- a/dma-helpers.c
> +++ b/dma-helpers.c
> @@ -10,12 +10,36 @@
>  #include "dma.h"
>  #include "block_int.h"
>  
> -void qemu_sglist_init(QEMUSGList *qsg, int alloc_hint)
> +static void *qemu_sglist_default_map(void *opaque,
> +                                     QEMUSGInvalMapFunc *inval_cb,
> +                                     void *inval_opaque,
> +                                     target_phys_addr_t addr,
> +                                     target_phys_addr_t *len,
> +                                     int is_write)
> +{
> +    return cpu_physical_memory_map(addr, len, is_write);
> +}
> +
> +static void qemu_sglist_default_unmap(void *opaque,
> +                                      void *buffer,
> +                                      target_phys_addr_t len,
> +                                      int is_write,
> +                                      target_phys_addr_t access_len)
> +{
> +    cpu_physical_memory_unmap(buffer, len, is_write, access_len);
> +}
> +
> +void qemu_sglist_init(QEMUSGList *qsg, int alloc_hint,
> +                      QEMUSGMapFunc *map, QEMUSGUnmapFunc *unmap, void *opaque)
>  {
>      qsg->sg = qemu_malloc(alloc_hint * sizeof(ScatterGatherEntry));
>      qsg->nsg = 0;
>      qsg->nalloc = alloc_hint;
>      qsg->size = 0;
> +
> +    qsg->map = map ? map : qemu_sglist_default_map;
> +    qsg->unmap = unmap ? unmap : qemu_sglist_default_unmap;
> +    qsg->opaque = opaque;
>  }
>  
>  void qemu_sglist_add(QEMUSGList *qsg, target_phys_addr_t base,
> @@ -73,12 +97,23 @@ static void dma_bdrv_unmap(DMAAIOCB *dbs)
>      int i;
>  
>      for (i = 0; i < dbs->iov.niov; ++i) {
> -        cpu_physical_memory_unmap(dbs->iov.iov[i].iov_base,
> -                                  dbs->iov.iov[i].iov_len, !dbs->is_write,
> -                                  dbs->iov.iov[i].iov_len);
> +        dbs->sg->unmap(dbs->sg->opaque,
> +                       dbs->iov.iov[i].iov_base,
> +                       dbs->iov.iov[i].iov_len, !dbs->is_write,
> +                       dbs->iov.iov[i].iov_len);
>      }
>  }
>  
> +static void dma_bdrv_cancel(void *opaque)
> +{
> +    DMAAIOCB *dbs = opaque;
> +
> +    bdrv_aio_cancel(dbs->acb);
> +    dma_bdrv_unmap(dbs);
> +    qemu_iovec_destroy(&dbs->iov);
> +    qemu_aio_release(dbs);
> +}
> +
>  static void dma_bdrv_cb(void *opaque, int ret)
>  {
>      DMAAIOCB *dbs = (DMAAIOCB *)opaque;
> @@ -100,7 +135,8 @@ static void dma_bdrv_cb(void *opaque, int ret)
>      while (dbs->sg_cur_index < dbs->sg->nsg) {
>          cur_addr = dbs->sg->sg[dbs->sg_cur_index].base + dbs->sg_cur_byte;
>          cur_len = dbs->sg->sg[dbs->sg_cur_index].len - dbs->sg_cur_byte;
> -        mem = cpu_physical_memory_map(cur_addr, &cur_len, !dbs->is_write);
> +        mem = dbs->sg->map(dbs->sg->opaque, dma_bdrv_cancel, dbs,
> +                           cur_addr, &cur_len, !dbs->is_write);
>          if (!mem)
>              break;
>          qemu_iovec_add(&dbs->iov, mem, cur_len);
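
(For readers following the invalidation path above: a sketch of how an
IOMMU-side map implementation might record inval_cb/inval_opaque so that a
later mapping change ends up calling dma_bdrv_cancel().  IOMMUState,
iommu_translate() and iommu_invalidate_range() are hypothetical names, not
part of this patch.)

typedef struct IOMMUMapping IOMMUMapping;
struct IOMMUMapping {
    target_phys_addr_t iova;         /* guest DMA address handed to map()  */
    target_phys_addr_t len;          /* length actually mapped             */
    QEMUSGInvalMapFunc *inval_cb;    /* here: dma_bdrv_cancel              */
    void *inval_opaque;              /* here: the DMAAIOCB                 */
    QLIST_ENTRY(IOMMUMapping) list;
};

typedef struct IOMMUState {
    QLIST_HEAD(, IOMMUMapping) mappings;
    /* translation tables etc. */
} IOMMUState;

/* hypothetical: translate a device (bus) address to a physical address */
static int iommu_translate(IOMMUState *s, target_phys_addr_t iova,
                           target_phys_addr_t len, int is_write,
                           target_phys_addr_t *paddr);

static void *iommu_map(void *opaque, QEMUSGInvalMapFunc *inval_cb,
                       void *inval_opaque, target_phys_addr_t addr,
                       target_phys_addr_t *len, int is_write)
{
    IOMMUState *s = opaque;
    target_phys_addr_t paddr;
    IOMMUMapping *m;

    if (iommu_translate(s, addr, *len, is_write, &paddr) < 0) {
        return NULL;                 /* translation fault: refuse the map  */
    }

    /* Remember who to notify if this translation goes away. */
    m = qemu_mallocz(sizeof(*m));
    m->iova = addr;
    m->len = *len;
    m->inval_cb = inval_cb;
    m->inval_opaque = inval_opaque;
    QLIST_INSERT_HEAD(&s->mappings, m, list);

    return cpu_physical_memory_map(paddr, len, is_write);
}

/* Called when the guest changes its I/O page tables / flushes the IOTLB. */
static void iommu_invalidate_range(IOMMUState *s, target_phys_addr_t iova,
                                   target_phys_addr_t len)
{
    IOMMUMapping *m, *next;

    QLIST_FOREACH_SAFE(m, &s->mappings, list, next) {
        if (m->iova < iova + len && iova < m->iova + m->len) {
            m->inval_cb(m->inval_opaque);   /* cancels the in-flight DMA */
            QLIST_REMOVE(m, list);
            qemu_free(m);
        }
    }
}
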
> diff --git a/dma.h b/dma.h
> index f3bb275..d48f35c 100644
> --- a/dma.h
> +++ b/dma.h
> @@ -15,6 +15,19 @@
>  #include "hw/hw.h"
>  #include "block.h"
>  
> +typedef void QEMUSGInvalMapFunc(void *opaque);
> +typedef void *QEMUSGMapFunc(void *opaque,
> +                            QEMUSGInvalMapFunc *inval_cb,
> +                            void *inval_opaque,
> +                            target_phys_addr_t addr,
> +                            target_phys_addr_t *len,
> +                            int is_write);
> +typedef void QEMUSGUnmapFunc(void *opaque,
> +                             void *buffer,
> +                             target_phys_addr_t len,
> +                             int is_write,
> +                             target_phys_addr_t access_len);
> +
>  typedef struct {
>      target_phys_addr_t base;
>      target_phys_addr_t len;
> @@ -25,9 +38,15 @@ typedef struct {
>      int nsg;
>      int nalloc;
>      target_phys_addr_t size;
> +
> +    QEMUSGMapFunc *map;
> +    QEMUSGUnmapFunc *unmap;
> +    void *opaque;
>  } QEMUSGList;
>  
> -void qemu_sglist_init(QEMUSGList *qsg, int alloc_hint);
> +void qemu_sglist_init(QEMUSGList *qsg, int alloc_hint,
> +                      QEMUSGMapFunc *map, QEMUSGUnmapFunc *unmap,
> +                      void *opaque);
>  void qemu_sglist_add(QEMUSGList *qsg, target_phys_addr_t base,
>                       target_phys_addr_t len);
>  void qemu_sglist_destroy(QEMUSGList *qsg);
> diff --git a/hw/ide/core.c b/hw/ide/core.c
> index af52c2c..024a125 100644
> --- a/hw/ide/core.c
> +++ b/hw/ide/core.c
> @@ -436,7 +436,8 @@ static int dma_buf_prepare(BMDMAState *bm, int is_write)
>      } prd;
>      int l, len;
>  
> -    qemu_sglist_init(&s->sg, s->nsector / (IDE_PAGE_SIZE / 512) + 1);
> +    qemu_sglist_init(&s->sg, s->nsector / (IDE_PAGE_SIZE / 512) + 1,
> +                     bm->map, bm->unmap, bm->opaque);
>      s->io_buffer_size = 0;
>      for(;;) {
>          if (bm->cur_prd_len == 0) {
> @@ -444,7 +445,7 @@ static int dma_buf_prepare(BMDMAState *bm, int is_write)
>              if (bm->cur_prd_last ||
>                  (bm->cur_addr - bm->addr) >= IDE_PAGE_SIZE)
>                  return s->io_buffer_size != 0;
> -            cpu_physical_memory_read(bm->cur_addr, (uint8_t *)&prd, 8);
> +            bmdma_memory_read(bm, bm->cur_addr, (uint8_t *)&prd, 8);
>              bm->cur_addr += 8;
>              prd.addr = le32_to_cpu(prd.addr);
>              prd.size = le32_to_cpu(prd.size);
> @@ -527,7 +528,7 @@ static int dma_buf_rw(BMDMAState *bm, int is_write)
>              if (bm->cur_prd_last ||
>                  (bm->cur_addr - bm->addr) >= IDE_PAGE_SIZE)
>                  return 0;
> -            cpu_physical_memory_read(bm->cur_addr, (uint8_t *)&prd, 8);
> +            bmdma_memory_read(bm, bm->cur_addr, (uint8_t *)&prd, 8);
>              bm->cur_addr += 8;
>              prd.addr = le32_to_cpu(prd.addr);
>              prd.size = le32_to_cpu(prd.size);
> @@ -542,11 +543,11 @@ static int dma_buf_rw(BMDMAState *bm, int is_write)
>              l = bm->cur_prd_len;
>          if (l > 0) {
>              if (is_write) {
> -                cpu_physical_memory_write(bm->cur_prd_addr,
> -                                          s->io_buffer + s->io_buffer_index, l);
> +                bmdma_memory_write(bm, bm->cur_prd_addr,
> +                                   s->io_buffer + s->io_buffer_index, l);
>              } else {
> -                cpu_physical_memory_read(bm->cur_prd_addr,
> -                                          s->io_buffer + s->io_buffer_index, l);
> +                bmdma_memory_read(bm, bm->cur_prd_addr,
> +                                  s->io_buffer + s->io_buffer_index, l);
>              }
>              bm->cur_prd_addr += l;
>              bm->cur_prd_len -= l;
> diff --git a/hw/ide/internal.h b/hw/ide/internal.h
> index 4165543..f686d38 100644
> --- a/hw/ide/internal.h
> +++ b/hw/ide/internal.h
> @@ -477,6 +477,24 @@ struct IDEDeviceInfo {
>  #define BM_CMD_START     0x01
>  #define BM_CMD_READ      0x08
>  
> +typedef void BMDMAInvalMapFunc(void *opaque);
> +typedef void BMDMARWFunc(void *opaque,
> +                         target_phys_addr_t addr,
> +                         uint8_t *buf,
> +                         target_phys_addr_t len,
> +                         int is_write);
> +typedef void *BMDMAMapFunc(void *opaque,
> +                           BMDMAInvalMapFunc *inval_cb,
> +                           void *inval_opaque,
> +                           target_phys_addr_t addr,
> +                           target_phys_addr_t *len,
> +                           int is_write);
> +typedef void BMDMAUnmapFunc(void *opaque,
> +                            void *buffer,
> +                            target_phys_addr_t len,
> +                            int is_write,
> +                            target_phys_addr_t access_len);
> +
>  struct BMDMAState {
>      uint8_t cmd;
>      uint8_t status;
> @@ -496,8 +514,29 @@ struct BMDMAState {
>      int64_t sector_num;
>      uint32_t nsector;
>      QEMUBH *bh;
> +
> +    BMDMARWFunc *rw;
> +    BMDMAMapFunc *map;
> +    BMDMAUnmapFunc *unmap;
> +    void *opaque;
>  };
>  
> +static inline void bmdma_memory_read(BMDMAState *bm,
> +                                     target_phys_addr_t addr,
> +                                     uint8_t *buf,
> +                                     target_phys_addr_t len)
> +{
> +    bm->rw(bm->opaque, addr, buf, len, 0);
> +}
> +
> +static inline void bmdma_memory_write(BMDMAState *bm,
> +                                      target_phys_addr_t addr,
> +                                      uint8_t *buf,
> +                                      target_phys_addr_t len)
> +{
> +    bm->rw(bm->opaque, addr, buf, len, 1);
> +}
> +

Here again, I am concerned about the indirection and pointer chasing on the
data path.  Can we have an IOMMU pointer in the device, and do a fast path in
case there is no IOMMU?
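
A minimal sketch of the kind of fast path meant here, assuming a hypothetical
bm->iommu pointer and a hypothetical iommu_memory_rw() helper (neither exists
in this series):

static inline void bmdma_memory_read(BMDMAState *bm,
                                     target_phys_addr_t addr,
                                     uint8_t *buf,
                                     target_phys_addr_t len)
{
    if (likely(!bm->iommu)) {
        /* Common case: no IOMMU configured, go straight to guest RAM. */
        cpu_physical_memory_read(addr, buf, len);
    } else {
        /* Slow path: translate through the IOMMU. */
        iommu_memory_rw(bm->iommu, addr, buf, len, 0);
    }
}

That way the common no-IOMMU configuration pays only a NULL check instead of
an indirect call per PRD access.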

>  static inline IDEState *idebus_active_if(IDEBus *bus)
>  {
>      return bus->ifs + bus->unit;
> diff --git a/hw/ide/macio.c b/hw/ide/macio.c
> index bd1c73e..962ae13 100644
> --- a/hw/ide/macio.c
> +++ b/hw/ide/macio.c
> @@ -79,7 +79,7 @@ static void pmac_ide_atapi_transfer_cb(void *opaque, int ret)
>  
>      s->io_buffer_size = io->len;
>  
> -    qemu_sglist_init(&s->sg, io->len / MACIO_PAGE_SIZE + 1);
> +    qemu_sglist_init(&s->sg, io->len / MACIO_PAGE_SIZE + 1, NULL, NULL, NULL);
>      qemu_sglist_add(&s->sg, io->addr, io->len);
>      io->addr += io->len;
>      io->len = 0;
> @@ -141,7 +141,7 @@ static void pmac_ide_transfer_cb(void *opaque, int ret)
>      s->io_buffer_index = 0;
>      s->io_buffer_size = io->len;
>  
> -    qemu_sglist_init(&s->sg, io->len / MACIO_PAGE_SIZE + 1);
> +    qemu_sglist_init(&s->sg, io->len / MACIO_PAGE_SIZE + 1, NULL, NULL, NULL);
>      qemu_sglist_add(&s->sg, io->addr, io->len);
>      io->addr += io->len;
>      io->len = 0;
> diff --git a/hw/ide/pci.c b/hw/ide/pci.c
> index 4d95cc5..5879044 100644
> --- a/hw/ide/pci.c
> +++ b/hw/ide/pci.c
> @@ -183,4 +183,11 @@ void pci_ide_create_devs(PCIDevice *dev, DriveInfo **hd_table)
>              continue;
>          ide_create_drive(d->bus+bus[i], unit[i], hd_table[i]);
>      }
> +
> +    for (i = 0; i < 2; i++) {
> +        d->bmdma[i].rw = (void *) pci_memory_rw;
> +        d->bmdma[i].map = (void *) pci_memory_map;
> +        d->bmdma[i].unmap = (void *) pci_memory_unmap;
> +        d->bmdma[i].opaque = dev;
> +    }
>  }

These casts show something is wrong with the API, IMO.
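
One cast-free alternative (sketch only; it assumes pci_memory_rw() and friends
take the PCIDevice * as their first argument, which is what the existing cast
implies) is a set of thin wrappers with the exact BMDMA* signatures:

static void bmdma_pci_rw(void *opaque, target_phys_addr_t addr,
                         uint8_t *buf, target_phys_addr_t len, int is_write)
{
    PCIDevice *dev = opaque;

    pci_memory_rw(dev, addr, buf, len, is_write);
}

    /* ... and in pci_ide_create_devs(): ... */
    d->bmdma[i].rw = bmdma_pci_rw;
    d->bmdma[i].opaque = dev;

Calling a function through a pointer of an incompatible type is undefined
behaviour even when the ABI happens to make it work, so the wrappers are more
than a cosmetic fix.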

> -- 
> 1.7.1
> 


