qemu-devel
[Top][All Lists]
Advanced

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Qemu-devel] [PATCH 3/4] hw/i386: Introduce AMD IOMMU


From: David Kiarie
Subject: [Qemu-devel] [PATCH 3/4] hw/i386: Introduce AMD IOMMU
Date: Fri, 9 Oct 2015 05:53:56 +0300

From: David <address@hidden>

Introduce basic AMD IOMMU emulation in QEMU. The IOMMU implements event logging and
host translation, which should allow nested PCI passthrough. It also implements
a very basic IOTLB.

Signed-off-by: David Kiarie <address@hidden>
---
 hw/i386/Makefile.objs |    1 +
 hw/i386/amd_iommu.c   | 1266 +++++++++++++++++++++++++++++++++++++++++++++++++
 hw/i386/amd_iommu.h   |  363 ++++++++++++++
 3 files changed, 1630 insertions(+)
 create mode 100644 hw/i386/amd_iommu.c
 create mode 100644 hw/i386/amd_iommu.h

diff --git a/hw/i386/Makefile.objs b/hw/i386/Makefile.objs
index c250deb..85fa90f 100644
--- a/hw/i386/Makefile.objs
+++ b/hw/i386/Makefile.objs
@@ -3,6 +3,7 @@ obj-y += multiboot.o
 obj-y += pc.o pc_piix.o pc_q35.o
 obj-y += pc_sysfw.o
 obj-y += intel_iommu.o
+obj-y += amd_iommu.o
 obj-$(CONFIG_XEN) += ../xenpv/ xen/
 
 obj-y += kvmvapic.o
diff --git a/hw/i386/amd_iommu.c b/hw/i386/amd_iommu.c
new file mode 100644
index 0000000..092f64e
--- /dev/null
+++ b/hw/i386/amd_iommu.c
@@ -0,0 +1,1266 @@
+/*
+ * QEMU emulation of AMD IOMMU (AMD-Vi)
+ *
+ * Copyright (C) 2011 Eduard - Gabriel Munteanu
+ * Copyright (C) 2015 David Kiarie, <address@hidden>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Cache implementation inspired by hw/i386/intel_iommu.c
+ *
+ */
+#include "hw/i386/amd_iommu.h"
+
+#define DEBUG_AMD_IOMMU
+#ifdef DEBUG_AMD_IOMMU
+enum {
+    DEBUG_GENERAL, DEBUG_CAPAB, DEBUG_MMIO, DEBUG_ELOG,
+    DEBUG_CACHE, DEBUG_COMMAND, DEBUG_MMU
+};
+
+#define IOMMU_DBGBIT(x)   (1 << DEBUG_##x)
+static int iommu_dbgflags = IOMMU_DBGBIT(GENERAL) | IOMMU_DBGBIT(MMU) | 
IOMMU_DBGBIT(MMIO);
+//| IOMMU_DBGBIT(CAPAB) | IOMMU_DBGBIT(ELOG) | IOMMU_DBGBIT(CACHE) | 
IOMMU_DBGBIT(COMMAND);
+
+#define IOMMU_DPRINTF(what, fmt, ...) do { \
+    if (iommu_dbgflags & IOMMU_DBGBIT(what)) { \
+        fprintf(stderr, "(amd-iommu)%s: " fmt "\n", __func__, \
+                ## __VA_ARGS__); } \
+    } while (0)
+#else
+#define IOMMU_DPRINTF(what, fmt, ...) do {} while (0)
+#endif
+
+/* helper functions - FIXME - provide for reading one byte */
/* read a 16-bit little-endian value from the MMIO register file at @addr */
static uint16_t amd_iommu_readw(AMDIOMMUState *s, hwaddr addr)
{
    return lduw_le_p(&s->mmior[addr]);
}
+
/* read a 32-bit little-endian value from the MMIO register file at @addr */
static uint32_t amd_iommu_readl(AMDIOMMUState *s, hwaddr addr)
{
    return ldl_le_p(&s->mmior[addr]);
}
+
/* read a 64-bit little-endian value from the MMIO register file at @addr */
static uint64_t amd_iommu_readq(AMDIOMMUState *s, hwaddr addr)
{
    return ldq_le_p(&s->mmior[addr]);
}
+
/* store a 16-bit value little-endian into the MMIO register file at @addr */
static void amd_iommu_writew(AMDIOMMUState *s, hwaddr addr, uint16_t val)
{
    stw_le_p(&s->mmior[addr], val);
}
+
/* store a 32-bit value little-endian into the MMIO register file at @addr */
static void amd_iommu_writel(AMDIOMMUState *s, hwaddr addr, uint32_t val)
{
    stl_le_p(&s->mmior[addr], val);
}
+
/* store a 64-bit value little-endian into the MMIO register file at @addr */
static void amd_iommu_writeq(AMDIOMMUState *s, hwaddr addr, uint64_t val)
{
    stq_le_p(&s->mmior[addr], val);
}
+
+static void amd_iommu_log_event(AMDIOMMUState *s, uint16_t *evt)
+{
+    /* event logging not enabled */
+    if(!s->evtlog_enabled || *(uint64_t*)&s->mmior[MMIO_STATUS] 
+       | MMIO_STATUS_EVTLOG_OF){
+        return;
+    }
+
+    /* event log buffer full */
+    if(s->evtlog_tail >= s->evtlog_len) {
+        *(uint64_t*)&s->mmior[MMIO_STATUS] |= MMIO_STATUS_EVTLOG_OF;
+        /* generate interrupt */
+    }
+
+    if(dma_memory_write(&address_space_memory, s->evtlog_len + s->evtlog_tail,
+       &evt, EVENT_LEN)){
+        IOMMU_DPRINTF(ELOG, "error: fail to write at address 0x%"PRIx64
+                      " + offset 0x%"PRIx32, s->evtlog, s->evtlog_tail);
+    }
+
+     s->evtlog_tail += EVENT_LEN;
+     *(uint64_t*)&s->mmior[MMIO_STATUS] |= MMIO_STATUS_EVTLOG_INTR;
+}
+
+/* log an error encountered page-walking 
+ *
+ * @addr: virtual address in translation request
+ */
+static void amd_iommu_page_fault(AMDIOMMUState *s, uint16_t devid,
+                                 dma_addr_t addr, uint8_t info)
+{
+    IOMMU_DPRINTF(ELOG, "");
+
+    uint16_t evt[8];
+    uint8_t status;
+
+    info |= EVENT_IOPF_I;
+
+    /* encode information */
+    *(uint16_t*)&evt[0] = devid;
+    *(uint16_t*)&evt[3] = info;
+    *(uint64_t*)&evt[4] = cpu_to_be64(addr);
+
+    /* log a page fault */
+    amd_iommu_log_event(s, evt);
+
+    /* Abort the translation */
+    status = pci_get_word(s->dev.config + PCI_STATUS);
+    pci_set_word(s->dev.config + PCI_STATUS, 
+                 status | PCI_STATUS_SIG_TARGET_ABORT);
+}
+/*
+ * log a master abort accessing device table 
+ *  @devtab : address of device table entry
+ *  @info : error flags
+ */
+static void amd_iommu_log_devtab_error(AMDIOMMUState *s, uint16_t devid,
+                                       dma_addr_t devtab, uint8_t info)
+{
+
+    IOMMU_DPRINTF(ELOG, "");
+
+    uint16_t evt[8];
+    uint8_t status;
+
+    info |= EVENT_DEV_TAB_HW_ERROR;
+
+    /* encode information */
+    *(uint16_t*)&evt[0] = devid;
+    *(uint8_t*)&evt[3]  = info;
+    *(uint64_t*)&evt[4] = cpu_to_be64(devtab);
+
+    amd_iommu_log_event(s, evt);
+
+    /* Abort the translation */
+    status = pci_get_word(s->dev.config + PCI_STATUS);
+    pci_set_word(s->dev.config + PCI_STATUS,
+                 status | PCI_STATUS_SIG_TARGET_ABORT);
+}
+
+/* log a master abort encountered during a page-walk
+ *  @addr : address that couldn't be accessed
+ */
+static void amd_iommu_log_pagetab_error(AMDIOMMUState *s, uint16_t devid,
+                                        dma_addr_t addr, uint16_t info)
+{
+    IOMMU_DPRINTF(ELOG, "");
+
+    uint16_t evt[8];
+    uint8_t status;
+
+    info |= EVENT_PAGE_TAB_HW_ERROR;
+
+    /* encode information */
+    *(uint16_t*)&evt[0] = devid;
+    *(uint8_t*)&evt[3]  = info;
+    *(uint64_t*)&evt[4] = (cpu_to_be64(addr) >> 3);
+
+    amd_iommu_log_event(s, evt);
+
+    /* Abort the translation */
+    status = pci_get_word(s->dev.config + PCI_STATUS);
+    pci_set_word(s->dev.config + PCI_STATUS,
+                status | PCI_STATUS_SIG_TARGET_ABORT);
+
+}
+
+/* log an event trying to access command buffer 
+ *   @addr : address that couldn't be accessed 
+ */
+static void amd_iommu_log_command_error(AMDIOMMUState *s, dma_addr_t addr)
+{
+    IOMMU_DPRINTF(ELOG, "");
+
+    uint16_t evt[8];
+
+    /* encode information */
+    *(uint8_t*)&evt[3]  = (uint8_t)EVENT_COMMAND_HW_ERROR;
+    *(uint64_t*)&evt[4] = (cpu_to_be64(addr) >> 3);
+
+    amd_iommu_log_event(s, evt);
+
+    /* Abort the translation */
+    uint8_t status = pci_get_word(s->dev.config + PCI_STATUS);
+    pci_set_word(s->dev.config + PCI_STATUS,
+                 status | PCI_STATUS_SIG_TARGET_ABORT);
+}
+
+/* log an illegal comand event  
+ *   @addr : address of illegal command  
+ */
+static void amd_iommu_log_illegalcom_error(AMDIOMMUState *s, uint8_t info,
+                                           dma_addr_t addr)
+{
+    IOMMU_DPRINTF(ELOG, "");
+
+    uint16_t evt[8];
+
+    /* encode information */
+    *(uint8_t*)&evt[3]  = (uint8_t)EVENT_ILLEGAL_COMMAND_ERROR;
+    *(uint64_t*)&evt[4] = (cpu_to_be64(addr) >> 3);
+
+    amd_iommu_log_event(s, evt);
+}
+
+/* log an error accessing device table
+ *
+ *  @devid : device owning the table entry
+ *  @devtab : address of device table entry
+ *  @info : error flags
+ */
+static void amd_iommu_log_illegaldevtab_error(AMDIOMMUState *s, uint16_t devid,
+                                              dma_addr_t addr, uint16_t info)
+{
+    IOMMU_DPRINTF(ELOG, "");
+
+    uint16_t evt[8];
+
+    info |= EVENT_ILLEGAL_DEVTAB_ENTRY;
+
+    *(uint16_t*)&evt[0] = devid;
+    *(uint8_t*)&evt[3]  = info;
+    *(uint64_t*)&evt[4] = (cpu_to_be64(addr) >> 3);
+
+    amd_iommu_log_event(s, evt);
+}
+
+static gboolean amd_iommu_uint64_equal(gconstpointer v1, gconstpointer v2)
+{
+    return *((const uint64_t *)v1) == *((const uint64_t *)v2);
+}
+
+static guint amd_iommu_uint64_hash(gconstpointer v)
+{
+    return (guint)*(const uint64_t *)v;
+}
+
/* look up a cached translation for @addr issued by device @devid
 *
 * Key layout: page frame number in the low bits, devid shifted above it.
 * Inserters must build keys the same way. Returns NULL on a cache miss.
 */
static IOMMUIOTLBEntry* amd_iommu_iotlb_lookup(AMDIOMMUState *s, hwaddr addr,
                                             uint64_t devid)
{
    uint64_t key = (addr >> IOMMU_PAGE_SHIFT_4K) |
                   ((uint64_t)(devid) << IOMMU_DEVID_SHIFT);

    return g_hash_table_lookup(s->iotlb, &key);
}
+
+static void amd_iommu_iotlb_reset(AMDIOMMUState *s)
+{
+    g_hash_table_remove_all(s->iotlb);
+}
+
+static gboolean amd_iommu_iotlb_remove_by_devid(gpointer key, gpointer value,
+                                                gpointer user_data)
+{
+    IOMMUIOTLBEntry *entry = (IOMMUIOTLBEntry *)value;
+    uint16_t devid = *(uint16_t*)user_data;
+    return entry->devid == devid;
+}
+
+static void amd_iommu_iotlb_remove_page(AMDIOMMUState *s, hwaddr addr,
+                                        uint64_t devid)
+{
+    uint64_t key = (addr >> IOMMU_PAGE_SHIFT_4K) |
+                   ((uint64_t)(devid) << IOMMU_DEVID_SHIFT);
+
+    g_hash_table_remove(s->iotlb, &key);
+}
+
/* handle an INVALIDATE_IOTLB_PAGES command: evict either every cached
 * translation of a device or a single page of it
 *
 * @cmd: the raw 128-bit command, viewed as four 32-bit words
 */
static void amd_iommu_invalidate_iotlb(AMDIOMMUState *s, uint64_t *cmd)
{
    /* extract device id */
    uint16_t devid = cmd[2] & INVAL_DEV_ID_MASK;
    /* if invalidation of more than one page requested */
    if(INVAL_ALL(cmd[3])){
            g_hash_table_foreach_remove(s->iotlb, amd_iommu_iotlb_remove_by_devid,
                                        &devid);
    } else {
        hwaddr addr = (hwaddr)(cmd[3] & INVAL_ADDR_MASK);
        amd_iommu_iotlb_remove_page(s, addr, devid);
    }
}
+
+static void amd_iommu_update_iotlb(AMDIOMMUState *s, uint16_t devid,
+                                   uint64_t gpa, uint64_t spa, uint64_t perms)
+{
+    IOMMUIOTLBEntry *entry = g_malloc(sizeof(*entry));
+    uint64_t *key = g_malloc(sizeof(key));
+    uint64_t gfn = spa >> IOMMU_PAGE_SHIFT_4K;
+
+    IOMMU_DPRINTF(CACHE, " update iotlb devid: %02x:%02x.%x gpa 0x%"PRIx64 
+                  "hpa 0x%"PRIx64, PCI_BUS_NUM(devid), PCI_SLOT(devid),
+                  PCI_FUNC(devid), gpa, spa);
+
+    entry->gfn = gfn;
+    entry->perms = perms;
+    *key = gfn | ((uint64_t)(devid));
+    g_hash_table_replace(s->iotlb, key, entry);
+}
+
+/* execute a completion wait command */
+static void amd_iommu_completion_wait(AMDIOMMUState *s, uint8_t *cmd)
+{
+    unsigned int addr;
+
+    /* completion store */
+    if(cmd[0] & COM_COMPLETION_STORE_MASK){
+        addr = le64_to_cpu(*(uint64_t*)cmd) & COM_STORE_ADDRESS_MASK;
+        if(dma_memory_write(&address_space_memory, addr, cmd + 8, 8)){
+            IOMMU_DPRINTF(ELOG, "error: fail to write at address 0%x"PRIx64,
+                          addr);
+        }
+
+    }
+
+    /* set completion interrupt */
+    if (cmd[0] & COM_COMPLETION_INTR){
+        s->mmior[MMIO_STATUS] |= MMIO_STATUS_COMWAIT_INTR;
+    }
+}
+
/* fetch and execute the command at the current command buffer head
 *
 * Per spec, not honouring reserved bits is regarded as an illegal command:
 * each case first checks the command's reserved-bit masks and logs an
 * ILLEGAL_COMMAND_ERROR event on violation. Most invalidation commands are
 * no-ops here because the only internal cache is the IOTLB hash table.
 */
static void amd_iommu_cmdbuf_exec(AMDIOMMUState *s)
{
    unsigned type;
    uint8_t cmd[IOMMU_COMMAND_SIZE];

    IOMMU_DPRINTF(COMMAND, "");
    memset(cmd, 0, IOMMU_COMMAND_SIZE);

    /* fetch the 128-bit command entry from guest memory */
    if(dma_memory_read(&address_space_memory, s->cmdbuf + s->cmdbuf_head, cmd,
       IOMMU_COMMAND_SIZE)){
        IOMMU_DPRINTF(COMMAND, "error: fail to access memory at 0x%"PRIx64
                      " + %"PRIu8, s->cmdbuf, s->cmdbuf_head);
        amd_iommu_log_command_error(s, s->cmdbuf + s->cmdbuf_head);
        return;
    }

    /* command opcode lives in the high bits of byte CMDBUF_ID_BYTE */
    type = cmd[CMDBUF_ID_BYTE] >> CMDBUF_ID_RSHIFT;
    switch(type){
        case CMD_COMPLETION_WAIT:
            if(*(uint32_t*)&cmd[1] & COMPLETION_WAIT_RSVD){
                amd_iommu_log_illegalcom_error(s, type,
                                               s->cmdbuf + s->cmdbuf_head);
                return;
            }
            /* pretend to wait for command execution to complete */
            IOMMU_DPRINTF(COMMAND, "completion wait requested");
            amd_iommu_completion_wait(s, cmd);
            break;

        case CMD_INVAL_DEVTAB_ENTRY:
            /* would invalidate internal device-table caches, of which
             * there are none in this model */
            if(*(uint64_t*)&cmd[0] & CMD_INVAL_DEV_RSVD || *(uint64_t*)&cmd[2]){
                amd_iommu_log_illegalcom_error(s, type,
                                               s->cmdbuf + s->cmdbuf_head);
                return;
            }

            IOMMU_DPRINTF(COMMAND, "IOTLB entries for device --- invalidated");
            break;

        case CMD_INVAL_IOMMU_PAGES:
            if(*(uint64_t*)&cmd[0] & INVAL_IOMMU_PAGES_RSVD
               || *(uint32_t*)&cmd[2] & 0x00000ff0){
                amd_iommu_log_illegalcom_error(s, type,
                                               s->cmdbuf + s->cmdbuf_head);
                return;
            }

            IOMMU_DPRINTF(COMMAND, "IOMMU pages invalidated");
            break;

        case CMD_INVAL_IOTLB_PAGES:
            if(*(uint32_t*)&cmd[2] & INVAL_IOTLB_PAGES_RSVD){
                amd_iommu_log_illegalcom_error(s, type,
                                               s->cmdbuf + s->cmdbuf_head);
                return;
            }

            /* the one command with a real effect: drop IOTLB entries */
            amd_iommu_invalidate_iotlb(s, (uint64_t*)cmd);
            IOMMU_DPRINTF(COMMAND, "IOTLB pages invalidated");
            break;

        case CMD_INVAL_INTR_TABLE:
            if((*(uint64_t*)&cmd[0] & INVAL_INTR_TABLE_RSVD)
               || *(uint64_t*)&cmd[2]){
                amd_iommu_log_illegalcom_error(s, type,
                                               s->cmdbuf + s->cmdbuf_head);
                return;
            }

            IOMMU_DPRINTF(COMMAND, "interrupt table invalidated");
            break;

        case CMD_PREFETCH_IOMMU_PAGES:
            if((*(uint64_t*)&cmd[0] & PRF_IOMMU_PAGES_RSVD)
              || (*(uint32_t*)&cmd[3] & 0x00000fd4)){
                amd_iommu_log_illegalcom_error(s, type,
                                               s->cmdbuf + s->cmdbuf_head);
                return;
            }

            IOMMU_DPRINTF(COMMAND, "Pre-fetch of IOMMU pages requested");
            break;

        case CMD_COMPLETE_PPR_REQUEST:
            if((*(uint64_t*)&cmd[0] & COMPLETE_PPR_RQ_RSVD)
              || *(uint64_t*)&cmd[3] & 0xffff000000000000){
                amd_iommu_log_illegalcom_error(s, type,
                                               s->cmdbuf + s->cmdbuf_head);
                return;
            }

            IOMMU_DPRINTF(COMMAND, "Execution of PPR queue requested");
            break;

        case CMD_INVAL_IOMMU_ALL:
            if((*(uint64_t*)&cmd[0] & INVAL_IOMMU_ALL_RSVD)
               || *(uint64_t*)&cmd[2]){
                amd_iommu_log_illegalcom_error(s, type,
                                               s->cmdbuf + s->cmdbuf_head);
                return;
            }

            /* flush the whole IOTLB cache */
            amd_iommu_iotlb_reset(s);
            IOMMU_DPRINTF(COMMAND, "Invalidation of all IOMMU cache requested");
            break;

        default:
            IOMMU_DPRINTF(COMMAND, "unhandled command %d", type);
            /* log illegal command */
            amd_iommu_log_illegalcom_error(s, type,
                                           s->cmdbuf + s->cmdbuf_head);
            break;
    }

}
+
+static void amd_iommu_cmdbuf_run(AMDIOMMUState *s)
+{
+    IOMMU_DPRINTF(COMMAND, "");
+
+    uint64_t *mmio_cmdbuf_head = (uint64_t*)s->mmior + MMIO_COMMAND_HEAD;
+
+    if(!s->cmdbuf_enabled){
+        IOMMU_DPRINTF(COMMAND, "Command buffer not enabled");
+        return;
+    }
+
+    while(s->cmdbuf_head != s->cmdbuf_tail) {
+        /* check if there is work to do. */
+        IOMMU_DPRINTF(COMMAND, "COMMAND BUFFER head at %x COMMAND BUFFER "
+                               "tail at %x", s->cmdbuf_head, s->cmdbuf_tail);
+         amd_iommu_cmdbuf_exec(s);
+         s->cmdbuf_head += CMDBUF_ENTRY_SIZE;
+
+        /* wrap head pointer */
+        if (s->cmdbuf_head >= s->cmdbuf_len * CMDBUF_ENTRY_SIZE) {
+            s->cmdbuf_head = 0;
+        }
+    }
+
+    *mmio_cmdbuf_head = cpu_to_le64(s->cmdbuf_head);
+}
+
+/* System Software might never read from some of this fields but anyways */
+static uint64_t amd_iommu_mmio_read(void *opaque, hwaddr addr, unsigned size)
+{
+    AMDIOMMUState *s = opaque;
+
+    uint64_t val = -1;
+    if(addr + size > MMIO_SIZE) {
+        IOMMU_DPRINTF(MMIO, "error: addr outside region: max 0x%"PRIX64
+                      ", got 0x%"PRIx64 " %d", (uint64_t)MMIO_SIZE, addr, 
size);
+        return (uint64_t) - 1;
+    }
+
+    if(size == 2){
+        val = amd_iommu_readw(s, addr);
+    } else if(size == 4){
+        val = amd_iommu_readl(s, addr);
+    } else if(size == 8){
+        val = amd_iommu_readq(s, addr);
+    }
+
+    switch(addr & ~0x07){
+        case MMIO_DEVICE_TABLE:
+            IOMMU_DPRINTF(MMIO, "MMIO_DEVICE_TABLE read addr 0x%"PRIx64
+                          ", size %d offset 0x%"PRIx64, addr, size,
+                          addr & ~0x07);
+            break;
+
+        case MMIO_COMMAND_BASE:
+            IOMMU_DPRINTF(MMIO, "MMIO_COMMAND_BASE read addr 0x%"PRIx64
+                          ", size %d offset 0x%"PRIx64, addr, size,
+                          addr & ~0x07);
+            break;
+
+        case MMIO_EVENT_BASE:
+            IOMMU_DPRINTF(MMIO, "MMIO_EVENT_BASE read addr 0x%"PRIx64
+                          ", size %d offset 0x%"PRIx64, addr, size,
+                          addr & ~0x07);
+            break;
+
+        case MMIO_CONTROL:
+            IOMMU_DPRINTF(MMIO, "MMIO_CONTROL read addr 0x%"PRIx64
+                          ", size %d offset 0x%"PRIx64, addr, size,
+                          addr & ~0x07);
+            break;
+
+        case MMIO_EXCL_BASE:
+            IOMMU_DPRINTF(MMIO, "MMIO_EXCL_BASE read addr 0x%"PRIx64
+                          ", size %d offset 0x%"PRIx64, addr, size,
+                          addr & ~0x07);
+            break;
+
+        case MMIO_EXCL_LIMIT:
+            IOMMU_DPRINTF(MMIO, "MMIO_EXCL_LIMIT read addr 0x%"PRIx64
+                          ", size %d offset 0x%"PRIx64, addr, size,
+                          addr & ~0x07);
+            break;
+
+        case MMIO_COMMAND_HEAD:
+            IOMMU_DPRINTF(MMIO, "MMIO_COMMAND_HEAD read addr 0x%"PRIx64
+                          ", size %d offset 0x%"PRIx64, addr, size,
+                          addr & ~0x07);
+            break;
+
+        case MMIO_COMMAND_TAIL:
+            IOMMU_DPRINTF(MMIO, "MMIO_COMMAND_TAIL read addr 0x%"PRIx64
+                          ", size %d offset 0x%"PRIx64, addr, size,
+                          addr & ~0x07);
+            break;
+
+        case MMIO_EVENT_HEAD:
+            IOMMU_DPRINTF(MMIO, "MMIO_EVENT_HEAD read addr 0x%"PRIx64
+                          ", size %d offset 0x%"PRIx64, addr, size,
+                          addr & ~0x07);
+            break;
+
+        case MMIO_EVENT_TAIL:
+            IOMMU_DPRINTF(MMIO, "MMIO_EVENT_TAIL read addr 0x%"PRIx64
+                          ", size %d offset 0x%"PRIx64, addr, size,
+                          addr & ~0x07);
+            break;
+
+        case MMIO_STATUS:
+            IOMMU_DPRINTF(MMIO, "MMIO_STATUS read addr 0x%"PRIx64
+                          ", size %d offset 0x%"PRIx64, addr, size,
+                          addr & ~0x07);
+            break;
+
+        case MMIO_EXT_FEATURES:
+            IOMMU_DPRINTF(MMIO, "MMIO_EXT_FEATURES read addr 0x%"PRIx64
+                          ", size %d offset 0x%"PRIx64, addr, size,
+                          addr & ~0x07);
+            break;
+
+        default:
+            IOMMU_DPRINTF(MMIO, "UNHANDLED MMIO read addr 0x%"PRIx64
+                          ", size %d offset 0x%"PRIx64, addr, size,
+                          addr & ~0x07);
+        }
+
+    return val;
+}
+
+static void iommu_handle_control_write(AMDIOMMUState *s)
+{
+    /* 
+     * read whatever is already written in case
+     * software is writing in chucks less than 8 bytes 
+     */
+    unsigned long control = amd_iommu_readq(s, MMIO_CONTROL);
+    s->enabled = !!(control & MMIO_CONTROL_IOMMUEN);
+
+    s->ats_enabled = !!(control & MMIO_CONTROL_HTTUNEN);
+    s->evtlog_enabled = s->enabled && !!(control & MMIO_CONTROL_EVENTLOGEN);
+
+    s->evtlog_intr = !!(control & MMIO_CONTROL_EVENTINTEN);
+    s->completion_wait_intr = !!(control & MMIO_CONTROL_COMWAITINTEN);
+    s->cmdbuf_enabled = s->enabled && !!(control & MMIO_CONTROL_CMDBUFLEN);
+
+    /* update the flags depending on the control register */
+    if(s->cmdbuf_enabled) {
+        (*(uint64_t*)&s->mmior[MMIO_STATUS]) |= MMIO_STATUS_CMDBUF_RUN;
+    } else {
+        (*(uint64_t*)&s->mmior[MMIO_STATUS]) &= ~MMIO_STATUS_CMDBUF_RUN;
+    }
+    if (s->evtlog_enabled) {
+        (*(uint64_t*)&s->mmior[MMIO_STATUS]) |= MMIO_STATUS_EVTLOG_RUN;
+    } else {
+        (*(uint64_t*)&s->mmior[MMIO_STATUS]) &= ~MMIO_STATUS_EVTLOG_RUN;
+    }
+
+    IOMMU_DPRINTF(MMIO, "MMIO_STATUS state 0x%"PRIx64, control);
+
+    amd_iommu_cmdbuf_run(s);
+}
+
+static inline void iommu_handle_devtab_write(AMDIOMMUState *s)
+{
+    uint64_t device_base = amd_iommu_readq(s, MMIO_DEVICE_TABLE);
+    s->devtab = (dma_addr_t)(device_base & MMIO_CMDBUF_BASE_MASK);
+
+    /* set device table length */
+    s->devtab_len = ((device_base & MMIO_DEVTAB_SIZE_MASK) + 1 *
+                    (MMIO_DEVTAB_SIZE_UNIT / MMIO_DEVTAB_ENTRY_SIZE));
+}
+
+static inline void iommu_handle_cmdhead_write(AMDIOMMUState *s)
+{
+    uint64_t cmdbuf = amd_iommu_readq(s, MMIO_COMMAND_HEAD);
+    s->cmdbuf_head = cmdbuf & MMIO_CMDBUF_HEAD_MASK;
+}
+
+static inline void iommu_handle_cmdbase_write(AMDIOMMUState *s)
+{
+    uint64_t command_base = amd_iommu_readq(s, MMIO_COMMAND_BASE);
+    s->cmdbuf = (dma_addr_t)(command_base & MMIO_CMDBUF_BASE_MASK);
+    s->cmdbuf_len = 1UL << (s->mmior[MMIO_CMDBUF_SIZE_BYTE] 
+                    & MMIO_CMDBUF_SIZE_MASK);
+    s->cmdbuf_head = s->cmdbuf_tail = 0;
+}
+
+static inline void iommu_handle_cmdtail_write(AMDIOMMUState *s)
+{
+    uint64_t val = amd_iommu_readq(s, MMIO_COMMAND_TAIL);
+    s->cmdbuf_tail = val & MMIO_CMDBUF_TAIL_MASK;
+}
+
+static inline void iommu_handle_excllim_write(AMDIOMMUState *s)
+{
+    uint64_t val = amd_iommu_readq(s, MMIO_EXCL_LIMIT);
+    s->excl_limit = (val & MMIO_EXCL_LIMIT_MASK ) | MMIO_EXCL_LIMIT_LOW;
+}
+
+static inline void iommu_handle_evtbase_write(AMDIOMMUState *s)
+{
+    uint64_t val = amd_iommu_readq(s, MMIO_EVENT_BASE);
+    s->evtlog = val & MMIO_EVTLOG_BASE_MASK;
+    s->evtlog_len = 1UL << (*(uint64_t*)&s->mmior[MMIO_EVTLOG_SIZE_BYTE]
+                    & MMIO_EVTLOG_SIZE_MASK);
+}           
+
+static inline void iommu_handle_evttail_write(AMDIOMMUState *s)
+{
+    uint64_t val = amd_iommu_readq(s, MMIO_EVENT_TAIL);
+    s->evtlog_tail = val & MMIO_EVTLOG_TAIL_MASK;
+}
+
+static inline void iommu_handle_evthead_write(AMDIOMMUState *s)
+{
+    uint64_t val = amd_iommu_readq(s, MMIO_EVENT_HEAD);
+    s->evtlog_head = val & MMIO_EVTLOG_HEAD_MASK;
+}
+
+static inline void iommu_handle_pprbase_write(AMDIOMMUState *s)
+{
+    uint64_t val = amd_iommu_readq(s, MMIO_PPR_BASE);
+    s->ppr_log = val & MMIO_PPRLOG_BASE_MASK;
+    s->pprlog_len = 1UL << (*(uint64_t*)&s->mmior[MMIO_PPRLOG_SIZE_BYTE]
+                    & MMIO_PPRLOG_SIZE_MASK);
+}
+
+static inline void iommu_handle_pprhead_write(AMDIOMMUState *s)
+{
+    uint64_t val = amd_iommu_readq(s, MMIO_PPR_HEAD);
+    s->pprlog_head = val & MMIO_PPRLOG_HEAD_MASK;
+}
+
+static inline void iommu_handle_pprtail_write(AMDIOMMUState *s)
+{
+    uint64_t val = amd_iommu_readq(s, MMIO_PPR_TAIL);
+    s->pprlog_tail = val & MMIO_PPRLOG_TAIL_MASK;
+}
+
+/* FIXME: something might go wrong if System Software writes in chunks
+ * of one byte but linux writes in chunks of 4 bytes so currently it
+ * works correctly with linux but will definitely be busted if software
+ * reads/writes 8 bytes
+ */
+static void amd_iommu_mmio_write(void *opaque, hwaddr addr, uint64_t val,
+                                 unsigned size)
+{
+
+    AMDIOMMUState *s = opaque;
+    unsigned long offset = addr & 0x07;
+
+    if(addr + size > MMIO_SIZE) {
+        IOMMU_DPRINTF(MMIO, "error: addr outside region: max 0x%"PRIx64
+                      ", got 0x%"PRIx64 " %d",(uint64_t)MMIO_SIZE, addr, size);
+        return;
+    }
+
+    switch(addr & ~0x07){
+        case MMIO_CONTROL:
+            if(size == 2){
+                amd_iommu_writew(s, addr, val);
+            } else if(size == 4){
+                amd_iommu_writel(s, addr,  val);
+            } else if(size == 8){
+                amd_iommu_writeq(s, addr, val);
+            }
+
+            IOMMU_DPRINTF(MMIO, "MMIO_CONTROL write addr 0x%"PRIx64
+                          ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
+                          addr, size, val, offset);
+            iommu_handle_control_write(s);
+            break;
+
+        case MMIO_DEVICE_TABLE:
+            IOMMU_DPRINTF(MMIO, "MMIO_DEVICE_TABLE write addr 0x%"PRIx64
+                          ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
+                          addr, size, val, offset);
+            if(size == 2){
+                amd_iommu_writew(s, addr, val);
+            } else if(size == 4){
+                amd_iommu_writel(s, addr, val);
+            } else if(size == 8){
+                amd_iommu_writeq(s, addr, val);
+            }
+
+            /* set device table address
+             *   This also suffers from inability to tell whether software 
+             *   is done writing
+             */
+            if(offset || (size == 8)){
+                iommu_handle_devtab_write(s);
+            }
+            break;
+
+        case MMIO_COMMAND_HEAD:
+            IOMMU_DPRINTF(MMIO, "MMIO_COMMAND_HEAD write addr 0x%"PRIx64
+                          ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
+                          addr, size, val, offset);
+            if(size == 2){
+                amd_iommu_writew(s, addr, val);
+            } else if(size == 4){
+                amd_iommu_writel(s, addr, val);
+            } else if(size == 8){
+                amd_iommu_writeq(s, addr, val);
+            }
+
+            iommu_handle_cmdhead_write(s);
+            amd_iommu_cmdbuf_run(s);
+            break;
+
+        case MMIO_COMMAND_BASE:
+            IOMMU_DPRINTF(MMIO, "MMIO_COMMAND_BASE write addr 0x%"PRIx64
+                          ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
+                          addr, size, val, offset);
+            if(size == 2){
+                amd_iommu_writew(s, addr, val);
+            } else if(size == 4){
+                amd_iommu_writel(s, addr, val);
+            } else if(size == 8){
+                amd_iommu_writeq(s, addr, val);
+            }
+
+            /* FIXME - make sure System Software has finished writing incase 
+             * it writes in chucks less than 8 bytes in a robust way.As for 
+             * now, this hacks works for the linux driver
+             */
+            if(offset || (size == 8)){
+                iommu_handle_cmdbase_write(s);
+            }
+            break;
+
+        case MMIO_COMMAND_TAIL:
+            IOMMU_DPRINTF(MMIO, "MMIO_COMMAND_TAIL write addr 0x%"PRIx64
+                          ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
+                          addr, size, val, offset);
+            if(size == 2){
+                amd_iommu_writew(s, addr, val);
+            } else if(size == 4){
+                amd_iommu_writel(s, addr, val);
+            } else if(size == 8){
+                amd_iommu_writeq(s, addr, val);
+            }
+            iommu_handle_cmdtail_write(s);
+            break;
+
+        case MMIO_EVENT_BASE:
+            IOMMU_DPRINTF(MMIO, "MMIO_EVENT_BASE write addr 0x%"PRIx64
+                          ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
+                          addr, size, val, offset);
+            if(size == 2){
+                amd_iommu_writew(s, addr, val);
+            } else if(size == 4){
+                amd_iommu_writel(s, addr, val);
+            } else if(size == 8){
+                amd_iommu_writeq(s, addr, val);
+            }
+            iommu_handle_evtbase_write(s);
+            break;
+
+        case MMIO_EVENT_HEAD:
+            IOMMU_DPRINTF(MMIO, "MMIO_EVENT_HEAD write addr 0x%"PRIx64
+                          ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
+                          addr, size, val, offset);
+            if(size == 2){
+                amd_iommu_writew(s, addr, val);
+            } else if(size == 4){
+                amd_iommu_writel(s, addr, val);
+            } else if(size == 8){
+                amd_iommu_writeq(s, addr, val);
+            }
+            iommu_handle_evthead_write(s);
+            break;
+
+        case MMIO_EVENT_TAIL:
+            IOMMU_DPRINTF(MMIO, "MMIO_EVENT_TAIL write addr 0x%"PRIx64
+                          ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
+                          addr, size, val, offset);
+            if(size == 2){
+                amd_iommu_writew(s, addr, val);
+            } else if(size == 4){
+                amd_iommu_writel(s, addr, val);
+            } else if(size == 8){
+                amd_iommu_writeq(s, addr, val);
+            }
+            iommu_handle_evttail_write(s);
+            break;
+
+        case MMIO_EXCL_LIMIT:
+            IOMMU_DPRINTF(MMIO, "MMIO_EXCL_LIMIT write addr 0x%"PRIx64
+                          ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
+                          addr, size, val, offset);
+            if(size == 2){
+                amd_iommu_writew(s, addr, val);
+            } else if(size == 4){
+                amd_iommu_writel(s, addr, val);
+            } else if(size == 8){
+                amd_iommu_writeq(s, addr, val);
+            }
+            iommu_handle_excllim_write(s);
+            break;
+
+        /* PPR log base - unused for now */
+        case MMIO_PPR_BASE:
+            IOMMU_DPRINTF(MMIO, "MMIO_PPR_BASE write addr 0x%"PRIx64
+                          ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
+                          addr, size, val, offset);
+            if(size == 2){
+                amd_iommu_writew(s, addr, val);
+            } else if(size == 4){
+                amd_iommu_writel(s, addr, val);
+            } else if(size == 8){
+                amd_iommu_writeq(s, addr, val);
+            }
+            iommu_handle_pprbase_write(s);
+            break;
+        /* PPR log head - also unused for now */
+        case MMIO_PPR_HEAD:
+            IOMMU_DPRINTF(MMIO, "MMIO_PPR_HEAD write addr 0x%"PRIx64
+                          ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
+                          addr, size, val, offset);
+            if(size == 2){
+                amd_iommu_writew(s, addr, val);
+            } else if(size == 4){
+                amd_iommu_writel(s, addr, val);
+            } else if(size == 8){
+                amd_iommu_writeq(s, addr, val);
+            }
+            iommu_handle_pprhead_write(s);
+            break;
+        /* PPR log tail - unused for now */
+        case MMIO_PPR_TAIL:
+            IOMMU_DPRINTF(MMIO, "MMIO_PPR_TAIL write addr 0x%"PRIx64
+                          ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
+                          addr, size, val, offset);
+            if(size == 2){
+                amd_iommu_writew(s, addr, val);
+            } else if(size == 4){
+                amd_iommu_writel(s, addr, val);
+            } else if(size == 8){
+                amd_iommu_writeq(s, addr, val);
+            }
+            iommu_handle_pprtail_write(s);
+            break;
+
+        default:
+            IOMMU_DPRINTF(MMIO, "UNHANDLED MMIO write addr 0x%"PRIx64
+                          ", size %d, val 0x%"PRIx64 ", offset 0x%"PRIx64,
+                          addr, size, val, offset);
+            ;
+    }
+
+}
+
+/* extract the DTE/PTE read/write permission bits as IOMMU_PERM_* flags */
+static inline uint64_t amd_iommu_get_perms(uint64_t entry)
+{
+    /* shift IR/IW (bits 61/62) down to bit positions 0 and 1 */
+    return (entry >> DEV_PERM_SHIFT) & (IOMMU_PERM_READ | IOMMU_PERM_WRITE);
+}
+
+/* Return (creating on first use) the IOMMU address space for a device.
+ *
+ * Installed as the PCI DMA address-space hook: @opaque is the
+ * AMDIOMMUState, @devfn the device/function number on @bus.  Address
+ * spaces are allocated lazily, one per (bus, devfn).
+ */
+AddressSpace *bridge_host_amd_iommu(PCIBus *bus, void *opaque, int devfn)
+{
+    AMDIOMMUState *s = opaque;
+    AMDIOMMUAddressSpace **iommu_as;
+    int bus_num = pci_bus_num(bus);
+
+    /* bus_num and devfn index arrays of PCI_BUS_MAX and PCI_DEVFN_MAX
+     * entries respectively, so both must be strictly below the limit;
+     * the previous "<=" allowed an off-by-one out-of-bounds access.
+     */
+    assert(0 <= bus_num && bus_num < PCI_BUS_MAX);
+    assert(0 <= devfn && devfn < PCI_DEVFN_MAX);
+
+    iommu_as = s->address_spaces[bus_num];
+
+    /* allocate the per-bus table during the first run */
+    if (!iommu_as) {
+        iommu_as = g_malloc0(sizeof(AMDIOMMUAddressSpace *) * PCI_DEVFN_MAX);
+        s->address_spaces[bus_num] = iommu_as;
+    }
+
+    /* set up the IOMMU region for this device on first access */
+    if (!iommu_as[devfn]) {
+        iommu_as[devfn] = g_malloc0(sizeof(AMDIOMMUAddressSpace));
+        iommu_as[devfn]->bus_num = (uint8_t)bus_num;
+        iommu_as[devfn]->devfn = (uint8_t)devfn;
+        iommu_as[devfn]->iommu_state = s;
+
+        memory_region_init_iommu(&iommu_as[devfn]->iommu, OBJECT(s),
+                                 &s->iommu_ops, "amd-iommu", UINT64_MAX);
+        address_space_init(&iommu_as[devfn]->as, &iommu_as[devfn]->iommu,
+                           "amd-iommu");
+    }
+    return &iommu_as[devfn]->as;
+}
+
+/* Validate a device table entry (not a page table entry).
+ *
+ * Any reserved bit set in the entry's four quadwords makes it illegal:
+ * the error is logged and the entry rejected.  Otherwise the entry is
+ * usable only when both the V and TV bits of quadword 0 are set.
+ */
+static bool amd_iommu_validate_dte(AMDIOMMUState *s, uint16_t devid, uint64_t *dte)
+{
+    if((dte[0] & DTE_LOWER_QUAD_RESERVED) || (dte[1] & DTE_MIDDLE_QUAD_RESERVED)
+        || (dte[2] & DTE_UPPER_QUAD_RESERVED) || dte[3]){
+        /* log the address of the offending entry in the device table */
+        amd_iommu_log_illegaldevtab_error(s, devid,
+                                s->devtab + devid * DEVTAB_ENTRY_SIZE, 0);
+        return false;
+    }
+
+    return (dte[0] & DEV_VALID) && (dte[0] & DEV_TRANSLATION_VALID);
+}
+
+/* Fetch the 256-bit device table entry for @devid into @entry.
+ *
+ * @entry must point to four uint64_t (DEVTAB_ENTRY_SIZE bytes).
+ * Returns true when the entry was read and passed validation; false
+ * otherwise (the corresponding error has already been logged).
+ */
+static bool amd_iommu_get_dte(AMDIOMMUState *s, int devid, uint64_t *entry)
+{
+    uint32_t offset = devid * DEVTAB_ENTRY_SIZE;
+    if(dma_memory_read(&address_space_memory, s->devtab + offset, entry,
+                       DEVTAB_ENTRY_SIZE)){
+        IOMMU_DPRINTF(MMU, "error: fail to access Device Entry devtab 0x%"PRIx64
+                      "offset 0x%"PRIx32, s->devtab, offset);
+        /* log error accessing the dte */
+        amd_iommu_log_devtab_error(s, devid, s->devtab + offset, 0);
+        return false;
+    }
+
+    if(!amd_iommu_validate_dte(s, devid, entry)){
+        /* NOTE(review): the message says "Pte" but this is a DTE, and it
+         * prints the entry's first quadword (a value, not an address) -
+         * consider rewording */
+        IOMMU_DPRINTF(MMU,
+                      "Pte entry at 0x%"PRIx64" is invalid", entry[0]);
+        return false;
+    }
+
+    return true;
+}
+
+/* extract the paging-mode (translation level) field from a DTE/PTE */
+static uint8_t get_pte_translation_mode(uint64_t pte)
+{
+    uint64_t mode = pte >> DEV_MODE_RSHIFT;
+
+    return mode & DEV_MODE_MASK;
+}
+
+/* Walk the I/O page table rooted in @dte and translate @addr.
+ *
+ * @perms is the requested access (IOMMU_PERM_READ/WRITE).  On success
+ * fills @ret with a 4K translation and returns 0; returns -1 when the
+ * paging mode is unsupported or a page fault occurs (the fault is
+ * logged and raised before returning).
+ */
+static int amd_iommu_page_walk(AMDIOMMUAddressSpace *as, uint64_t *dte,
+                               IOMMUTLBEntry *ret, unsigned perms,
+                               hwaddr addr)
+{
+    unsigned level, present;
+    uint64_t pte, pte_addr;
+    uint64_t pte_perms;
+    /* the walk starts from the DTE's first quadword, which carries the
+     * page table root pointer and the paging mode */
+    pte = dte[0];
+
+    level = get_pte_translation_mode(pte);
+
+    /* mode 0 = no translation, mode 7 = reserved.
+     * NOTE(review): only the root level is checked here - a malformed
+     * PTE reporting next-level 7 inside the loop below is not rejected;
+     * confirm against the AMD IOMMU specification. */
+    if(level >= 7 || level == 0)
+        return -1;
+
+    while(level > 0){
+        pte_perms = amd_iommu_get_perms(pte);
+        present = pte & 1;
+        /* fault if not present or the request needs a permission the
+         * PTE does not grant */
+        if(!present || perms != (perms & pte_perms)){
+            amd_iommu_page_fault(as->iommu_state, as->devfn, addr, perms);
+            IOMMU_DPRINTF(MMU, "error: page fault accessing virtual addr 0x%"
+                          PRIx64, addr);
+            return -1;
+        }
+
+        /* go to the next lower level; the index for level N is taken
+         * from addr bits [9N+3 .. 9N+11], kept 8-byte aligned by 0xff8 */
+        pte_addr = pte & DEV_PT_ROOT_MASK;
+        pte_addr += ((addr >> ( 9 * level)) & 0xff8);
+        pte = ldq_phys(&address_space_memory, pte_addr);
+        /* the final PTE's next-level field is 0, ending the loop */
+        level = (pte >> DEV_MODE_RSHIFT) & DEV_MODE_MASK;
+    }
+
+    ret->iova = addr & IOMMU_PAGE_MASK_4K;
+    ret->translated_addr = (pte & DEV_PT_ROOT_MASK) & IOMMU_PAGE_MASK_4K;
+    ret->addr_mask = ~IOMMU_PAGE_MASK_4K;
+    /* NOTE(review): grants RW regardless of the PTE permissions checked
+     * during the walk - TODO confirm this is intended */
+    ret->perm = IOMMU_RW;
+    return 0;
+}
+
+/* Translate @addr for device @as: try the IOTLB first, otherwise fetch
+ * the device table entry and walk the page tables, caching the result.
+ *
+ * On any failure an identity mapping is returned so the access still
+ * completes untranslated.
+ *
+ * TODO : Mark addresses as Accessed and Dirty.
+ */
+static void amd_iommu_do_translate(AMDIOMMUAddressSpace *as, hwaddr addr,
+                                   bool is_write, IOMMUTLBEntry *ret)
+{
+    AMDIOMMUState *s = as->iommu_state;
+    /* TODO: Get the correct devid instead */
+    uint16_t devid = as->devfn;
+    IOMMUIOTLBEntry *iotlb_entry;
+    int err;
+    uint64_t entry[4];
+
+    /* try getting a cache entry first */
+    iotlb_entry = amd_iommu_iotlb_lookup(s, addr, as->devfn);
+
+    if(iotlb_entry){
+        IOMMU_DPRINTF(CACHE, "hit  iotlb devid: %02x:%02x.%x gpa 0x%"PRIx64
+                      "hpa 0x%"PRIx64, PCI_BUS_NUM(devid), PCI_SLOT(devid),
+                      PCI_FUNC(devid), addr, iotlb_entry->hwaddr);
+        ret->iova = addr & IOMMU_PAGE_MASK_4K;
+        ret->translated_addr = iotlb_entry->hwaddr;
+        ret->addr_mask = ~IOMMU_PAGE_MASK_4K;
+        ret->perm = iotlb_entry->perms;
+        return;
+    } else {
+        if(!amd_iommu_get_dte(s, devid, entry))
+            goto out;
+
+        err = amd_iommu_page_walk(as, entry, ret,
+                                  is_write ? IOMMU_PERM_WRITE : IOMMU_PERM_READ,
+                                  addr);
+        /* amd_iommu_page_walk() returns 0 on success and -1 on failure.
+         * The previous "if(!err)" had this inverted: every successful
+         * walk logged a page table error and fell back to the identity
+         * map, while failed walks were cached in the IOTLB. */
+        if(err){
+            IOMMU_DPRINTF(MMU, "error: hardware error accessing page tables"
+                          "while translating addr 0x%"PRIx64, addr);
+            amd_iommu_log_pagetab_error(s, as->devfn, addr, 0);
+            goto out;
+        }
+
+        /* cache the successful translation */
+        amd_iommu_update_iotlb(s, as->devfn, addr, ret->translated_addr,
+                               ret->perm);
+        return;
+    }
+
+out:
+    /* fall back to an identity mapping */
+    ret->iova = addr;
+    ret->translated_addr = addr & IOMMU_PAGE_MASK_4K;
+    ret->addr_mask = ~IOMMU_PAGE_MASK_4K;
+    ret->perm = IOMMU_RW;
+    return;
+}
+
+/* MemoryRegionIOMMUOps translate callback.
+ *
+ * Returns an identity RW mapping while the IOMMU is disabled, otherwise
+ * defers to amd_iommu_do_translate().
+ */
+static IOMMUTLBEntry amd_iommu_translate(MemoryRegion *iommu, hwaddr addr,
+                                         bool is_write)
+{
+
+    IOMMU_DPRINTF(GENERAL, "");
+
+    AMDIOMMUAddressSpace *as = container_of(iommu, AMDIOMMUAddressSpace, iommu);
+    AMDIOMMUState *s = as->iommu_state;
+
+    /* default: no translation, no permissions */
+    IOMMUTLBEntry ret = {
+        .target_as = &address_space_memory,
+        .iova = addr,
+        .translated_addr = 0,
+        .addr_mask = ~(hwaddr)0,
+        .perm = IOMMU_NONE,
+    };
+
+    if(!s->enabled){
+        /* IOMMU disabled - corresponds to iommu=off not
+         * failure to provide any parameter
+         */
+        ret.iova = addr & IOMMU_PAGE_MASK_4K;
+        ret.translated_addr = addr & IOMMU_PAGE_MASK_4K;
+        ret.addr_mask = ~IOMMU_PAGE_MASK_4K;
+        ret.perm = IOMMU_RW;
+        return ret;
+    }
+
+    amd_iommu_do_translate(as, addr, is_write, &ret);
+
+    IOMMU_DPRINTF(MMU, "devid: %02x:%02x.%x gpa 0x%"PRIx64 " hpa 0x%"PRIx64,
+                  as->bus_num, PCI_SLOT(as->devfn), PCI_FUNC(as->devfn), addr,
+                  ret.translated_addr);
+
+    return ret;
+}
+
+static const MemoryRegionOps mmio_mem_ops = {
+    .read = amd_iommu_mmio_read,
+    .write = amd_iommu_mmio_write,
+    .endianness = DEVICE_LITTLE_ENDIAN,
+    /* guest-visible access restrictions */
+    .valid = {
+        .min_access_size = 1,
+        .max_access_size = 8,
+        .unaligned = false,
+    },
+    /* implementation access sizes: the handlers dispatch on 2/4/8-byte
+     * accesses themselves.  The original initialized .valid twice; the
+     * second designated initializer silently overrode the first and
+     * restricted valid accesses to 8 bytes only - it was meant to be
+     * .impl. */
+    .impl = {
+        .min_access_size = 1,
+        .max_access_size = 8,
+    },
+};
+
+/* Fold the maximum address-width encodings into the MISC capability
+ * register value @reg.
+ *
+ * NOTE(review): the three size parameters are currently ignored - the
+ * fixed MAX_*_ADDR encodings are ORed in unconditionally, and the only
+ * caller (amd_iommu_init) passes its arguments in (pa, va, gva) order
+ * against this (va, pa, gva) prototype.  Harmless today, worth tidying.
+ */
+static void amd_iommu_set_misc_capab(uint32_t host_va, uint32_t host_pa,
+                                     uint32_t guest_va, uint32_t *reg)
+{
+    *reg |= MAX_VA_ADDR | MAX_PH_ADDR | MAX_GVA_ADDR;
+}
+
+/* (Re)initialize all IOMMU state; called from both realize and reset */
+static void amd_iommu_init(AMDIOMMUState *s)
+{
+    uint32_t capab_misc = 0;
+
+    amd_iommu_iotlb_reset(s);
+
+    s->iommu_ops.translate = amd_iommu_translate;
+
+    s->devtab_len = 0;
+    s->cmdbuf_len = 0;
+    s->cmdbuf_head = 0;
+    s->cmdbuf_tail = 0;
+    s->evtlog_head = 0;
+    s->evtlog_tail = 0;
+    s->excl_enabled = false;
+    s->excl_allow = false;
+    s->mmio_enabled = false;
+    s->enabled = false;
+    s->ats_enabled = false;
+    s->cmdbuf_enabled = false;
+
+    /* reset MMIO */
+    memset(s->mmior, 0, MMIO_SIZE);
+    amd_iommu_writeq(s, MMIO_EXT_FEATURES, EXT_FEATURES);
+    /* NOTE(review): arguments are (pa, va, gva) but the prototype is
+     * (va, pa, gva); currently harmless because they are unused */
+    amd_iommu_set_misc_capab(MAX_PH_ADDR, MAX_VA_ADDR, MAX_GVA_ADDR,
+                             &capab_misc);
+
+    /* reset device ident */
+    pci_config_set_vendor_id(s->dev.config, PCI_VENDOR_ID_AMD);
+    pci_config_set_device_id(s->dev.config, PCI_DEVICE_ID_RD890_IOMMU);
+    pci_config_set_prog_interface(s->dev.config, 00);
+    pci_config_set_class(s->dev.config, 0x0806);
+
+    /* add msi and hypertransport capabilities.
+     * NOTE(review): since this function also runs on device reset,
+     * every reset appends fresh (duplicate) MSI/HT capabilities to
+     * config space - capability registration belongs in realize only */
+    pci_add_capability(&s->dev, PCI_CAP_ID_MSI, 0, CAPAB_REG_SIZE);
+    pci_add_capability(&s->dev, PCI_CAP_ID_HT, 0, CAPAB_REG_SIZE);
+
+    /* reset IOMMU specific capabilities.
+     * NOTE(review): the BAR is split with ~0xffff0000 / ~0xffff masks,
+     * i.e. the LOW 16 bits go into BAR_LOW and the rest into BAR_HIGH -
+     * TODO confirm this matches the capability register layout */
+    pci_set_long(s->dev.config + s->capab_offset, CAPAB_FEATURES);
+    pci_set_long(s->dev.config + s->capab_offset + CAPAB_BAR_LOW,
+                 s->mmio.addr & ~(0xffff0000));
+    pci_set_long(s->dev.config + s->capab_offset + CAPAB_BAR_HIGH,
+                (s->mmio.addr & ~(0xffff)) >> 16);
+    pci_set_long(s->dev.config + s->capab_offset + CAPAB_RANGE, 0xff000000);
+    pci_set_long(s->dev.config + s->capab_offset + CAPAB_MISC, capab_misc);
+}
+
+/* Map the IOMMU MMIO region at @addr (no-op if already mapped there).
+ *
+ * NOTE(review): remapping to a different address would first need
+ * memory_region_del_subregion() - see the disabled code below.  With
+ * the single fixed-address caller (realize) this cannot happen yet,
+ * but the TODO stands: find the proper way to reserve/relocate MMIO.
+ */
+static void amd_iommu_mmio_map(AMDIOMMUState *s, hwaddr addr)
+{
+    if(s->mmio.addr == addr){
+        return;
+    }
+
+//    if(s->mmio.addr != (hwaddr)-1) {
+//        memory_region_del_subregion(get_system_memory(), &s->mmio);
+//    }
+
+    s->mmio.addr = addr;
+    memory_region_add_subregion(get_system_memory(), addr, &s->mmio);
+
+}
+
+/* DeviceClass reset hook: reinitialize the whole IOMMU state */
+static void amd_iommu_reset(DeviceState *dev)
+{
+    amd_iommu_init(AMD_IOMMU_DEVICE(dev));
+}
+
+/* PCIDeviceClass realize hook: register the IOMMU capability, set up
+ * the MMIO region and create the IOTLB cache. */
+static void amd_iommu_realize(PCIDevice *dev, Error **error)
+{
+    /* use the checked QOM cast, consistent with amd_iommu_reset() */
+    AMDIOMMUState *s = AMD_IOMMU_DEVICE(dev);
+
+    s->capab_offset = pci_add_capability(dev, PCI_CAP_ID_SEC, 0, CAPAB_SIZE);
+
+    /* set up MMIO */
+    memory_region_init_io(&s->mmio, OBJECT(s), &mmio_mem_ops, s, "amd-iommu",
+                         MMIO_SIZE);
+    amd_iommu_mmio_map(s, BUS_AMD_IOMMU_ADDR);
+
+    /* the table frees keys and values itself (g_free destructors) */
+    s->iotlb = g_hash_table_new_full(amd_iommu_uint64_hash,
+                                     amd_iommu_uint64_equal, g_free, g_free);
+
+    amd_iommu_init(s);
+}
+
+/* Migration state: only the PCI device core is migrated for now */
+static const VMStateDescription vmstate_amd_iommu = {
+    .name = "amd-iommu",
+    .fields  = (VMStateField[]) {
+        VMSTATE_PCI_DEVICE(dev, AMDIOMMUState),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+/* user-settable properties: "version" (default 2) */
+static Property amd_iommu_properties[] = {
+    DEFINE_PROP_UINT32("version", AMDIOMMUState, version, 2),
+    DEFINE_PROP_END_OF_LIST(),
+};
+
+/* PCIDeviceClass exit hook.
+ *
+ * NOTE(review): this only resets the IOTLB contents; the hash table
+ * allocated in realize (s->iotlb) is apparently never destroyed -
+ * verify amd_iommu_iotlb_reset() or add g_hash_table_destroy() here.
+ */
+static void amd_iommu_uninit(PCIDevice *dev)
+{
+    AMDIOMMUState *s = container_of(dev, AMDIOMMUState, dev);
+
+    amd_iommu_iotlb_reset(s);
+}
+
+/* Class initialization: wire up realize/exit, reset, migration state
+ * and user properties. */
+static void amd_iommu_class_init(ObjectClass *klass, void* data)
+{
+    DeviceClass *dc = DEVICE_CLASS(klass);
+    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
+
+    k->realize = amd_iommu_realize;
+    k->exit = amd_iommu_uninit;
+
+    dc->reset = amd_iommu_reset;
+    dc->vmsd = &vmstate_amd_iommu;
+    dc->props = amd_iommu_properties;
+}
+
+/* QOM registration data for TYPE_AMD_IOMMU_DEVICE */
+static const TypeInfo amd_iommu = {
+    .name = TYPE_AMD_IOMMU_DEVICE,
+    .parent = TYPE_PCI_DEVICE,
+    .instance_size = sizeof(AMDIOMMUState),
+    .class_init = amd_iommu_class_init
+};
+
+/* register the device type with QOM */
+static void amd_iommu_register_types(void)
+{
+    type_register_static(&amd_iommu);
+}
+
+/* no trailing semicolon: type_init() already expands to a full
+ * definition (QEMU checkpatch flags the stray ';') */
+type_init(amd_iommu_register_types)
diff --git a/hw/i386/amd_iommu.h b/hw/i386/amd_iommu.h
new file mode 100644
index 0000000..c67ec4a
--- /dev/null
+++ b/hw/i386/amd_iommu.h
@@ -0,0 +1,363 @@
+/*
+ * QEMU emulation of an AMD IOMMU (AMD-Vi)
+ *
+ * Copyright (C) 2011 Eduard - Gabriel Munteanu
+ * Copyright (C) 2015 David Kiarie, <address@hidden>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef AMD_IOMMU_H_
+#define AMD_IOMMU_H_
+
+#include "hw/hw.h"
+#include "hw/pci/pci.h"
+#include "hw/sysbus.h"
+#include "sysemu/dma.h"
+#include "qemu/osdep.h"
+#include "qemu/event_notifier.h"
+
+/* Capability registers */
+#define CAPAB_HEADER            0x00
+#define   CAPAB_REV_TYPE        0x02
+#define   CAPAB_FLAGS           0x03
+#define CAPAB_BAR_LOW           0x04
+#define CAPAB_BAR_HIGH          0x08
+#define CAPAB_RANGE             0x0C
+#define CAPAB_MISC              0x10
+#define CAPAB_MISC1             0x14
+
+#define CAPAB_SIZE              0x18
+#define CAPAB_REG_SIZE          0x04
+
+/* Capability header data */
+#define CAPAB_FLAT_EXT          (1 << 28)
+#define CAPAB_EFR_SUP           (1 << 27)
+#define CAPAB_FLAG_NPCACHE      (1 << 26)
+#define CAPAB_FLAG_HTTUNNEL     (1 << 25)
+#define CAPAB_FLAG_IOTLBSUP     (1 << 24)
+#define CAPAB_INIT_REV          (1 << 19)
+#define CAPAB_INIT_TYPE         (3 << 16)
+/* CAPAB_REV / CAPAB_TYPE are defined nowhere; the intended operands
+ * are the CAPAB_INIT_* values defined just above */
+#define CAPAB_INIT_REV_TYPE     (CAPAB_INIT_REV | CAPAB_INIT_TYPE)
+#define CAPAB_INIT_FLAGS        (CAPAB_FLAG_NPCACHE | CAPAB_FLAG_HTTUNNEL)
+#define CAPAB_INIT_MISC         ((64 << 15) | (48 << 8))
+#define CAPAB_BAR_MASK          (~((1UL << 14) - 1))
+
+/* MMIO registers */
+#define MMIO_DEVICE_TABLE       0x0000
+#define MMIO_COMMAND_BASE       0x0008
+#define MMIO_EVENT_BASE         0x0010
+#define MMIO_CONTROL            0x0018
+#define MMIO_EXCL_BASE          0x0020
+#define MMIO_EXCL_LIMIT         0x0028
+#define MMIO_EXT_FEATURES       0x0030
+#define MMIO_COMMAND_HEAD       0x2000
+#define MMIO_COMMAND_TAIL       0x2008
+#define MMIO_EVENT_HEAD         0x2010
+#define MMIO_EVENT_TAIL         0x2018
+#define MMIO_STATUS             0x2020
+#define MMIO_PPR_BASE           0x0038
+#define MMIO_PPR_HEAD           0x2030
+#define MMIO_PPR_TAIL           0x2038
+
+#define MMIO_SIZE               0x4000
+
+#define MMIO_DEVTAB_SIZE_MASK   ((1ULL << 12) - 1)
+#define MMIO_DEVTAB_BASE_MASK   (((1ULL << 52) - 1) & ~MMIO_DEVTAB_SIZE_MASK)
+#define MMIO_DEVTAB_ENTRY_SIZE  32
+#define MMIO_DEVTAB_SIZE_UNIT   4096
+
+/* some of these are identical to the masks above, but are named
+ * separately for readability */
+#define MMIO_CMDBUF_SIZE_BYTE       (MMIO_COMMAND_BASE + 7)
+#define MMIO_CMDBUF_SIZE_MASK       0x0F
+#define MMIO_CMDBUF_BASE_MASK       MMIO_DEVTAB_BASE_MASK
+#define MMIO_CMDBUF_DEFAULT_SIZE    8
+#define MMIO_CMDBUF_HEAD_MASK       (((1ULL << 19) - 1) & ~0x0F)
+#define MMIO_CMDBUF_TAIL_MASK       MMIO_EVTLOG_HEAD_MASK
+
+#define MMIO_EVTLOG_SIZE_BYTE       (MMIO_EVENT_BASE + 7)
+#define MMIO_EVTLOG_SIZE_MASK       MMIO_CMDBUF_SIZE_MASK
+#define MMIO_EVTLOG_BASE_MASK       MMIO_CMDBUF_BASE_MASK
+#define MMIO_EVTLOG_DEFAULT_SIZE    MMIO_CMDBUF_DEFAULT_SIZE
+#define MMIO_EVTLOG_HEAD_MASK       (((1ULL << 19) - 1) & ~0x0F)
+#define MMIO_EVTLOG_TAIL_MASK       MMIO_EVTLOG_HEAD_MASK
+
+/* was (MMIO_EVENT_BASE + 7) - copy-paste from the event log block;
+ * the PPR log size byte lives in the PPR log base register */
+#define MMIO_PPRLOG_SIZE_BYTE       (MMIO_PPR_BASE + 7)
+#define MMIO_PPRLOG_HEAD_MASK       MMIO_EVTLOG_HEAD_MASK
+#define MMIO_PPRLOG_TAIL_MASK       MMIO_EVTLOG_HEAD_MASK
+#define MMIO_PPRLOG_BASE_MASK       MMIO_EVTLOG_BASE_MASK
+#define MMIO_PPRLOG_SIZE_MASK       MMIO_EVTLOG_SIZE_MASK
+
+#define MMIO_EXCL_BASE_MASK         MMIO_DEVTAB_BASE_MASK
+#define MMIO_EXCL_ENABLED_MASK      (1ULL << 0)
+#define MMIO_EXCL_ALLOW_MASK        (1ULL << 1)
+#define MMIO_EXCL_LIMIT_MASK        MMIO_DEVTAB_BASE_MASK
+#define MMIO_EXCL_LIMIT_LOW         0xFFF
+
+#define MMIO_CONTROL_IOMMUEN        (1ULL << 0)
+#define MMIO_CONTROL_HTTUNEN        (1ULL << 1)
+#define MMIO_CONTROL_EVENTLOGEN     (1ULL << 2)
+#define MMIO_CONTROL_EVENTINTEN     (1ULL << 3)
+#define MMIO_CONTROL_COMWAITINTEN   (1ULL << 4)
+#define MMIO_CONTROL_CMDBUFLEN       (1ULL << 12)
+
+#define MMIO_STATUS_EVTLOG_OF       (1ULL << 0)
+#define MMIO_STATUS_EVTLOG_INTR     (1ULL << 1)
+#define MMIO_STATUS_COMWAIT_INTR    (1ULL << 2)
+#define MMIO_STATUS_EVTLOG_RUN      (1ULL << 3)
+#define MMIO_STATUS_CMDBUF_RUN      (1ULL << 4)
+
+#define CMDBUF_ID_BYTE              0x07
+#define CMDBUF_ID_RSHIFT            4
+#define CMDBUF_ENTRY_SIZE           0x10
+
+#define CMD_COMPLETION_WAIT         0x01
+#define CMD_INVAL_DEVTAB_ENTRY      0x02
+#define CMD_INVAL_IOMMU_PAGES       0x03
+#define CMD_INVAL_IOTLB_PAGES       0x04
+#define CMD_INVAL_INTR_TABLE        0x05
+#define CMD_PREFETCH_IOMMU_PAGES    0x06
+#define CMD_COMPLETE_PPR_REQUEST    0x07
+#define CMD_INVAL_IOMMU_ALL         0x08
+
+#define DEVTAB_ENTRY_SIZE           32
+
+/* Device table entry bits 0:63 */
+#define DEV_VALID                   (1ULL << 0)
+#define DEV_TRANSLATION_VALID       (1ULL << 1)
+#define DEV_MODE_MASK               0x7
+#define DEV_MODE_RSHIFT             9
+#define DEV_PT_ROOT_MASK            0xFFFFFFFFFF000
+#define DEV_PT_ROOT_RSHIFT          12
+#define DEV_PERM_SHIFT              61
+#define DEV_PERM_READ               (1ULL << 61)
+#define DEV_PERM_WRITE              (1ULL << 62)
+
+/* Device table entry bits 64:127 */
+#define DEV_DOMID_ID_MASK          ((1ULL << 16) - 1)
+#define DEV_IOTLB_SUPPORT           (1ULL << 17)
+#define DEV_SUPPRESS_PF             (1ULL << 18)
+#define DEV_SUPPRESS_ALL_PF         (1ULL << 19)
+#define DEV_IOCTL_MASK              (~3)
+#define DEV_IOCTL_RSHIFT            20
+#define   DEV_IOCTL_DENY            0
+#define   DEV_IOCTL_PASSTHROUGH     1
+#define   DEV_IOCTL_TRANSLATE       2
+#define DEV_CACHE                   (1ULL << 37)
+#define DEV_SNOOP_DISABLE           (1ULL << 38)
+#define DEV_EXCL                    (1ULL << 39)
+
+/* Event codes and flags, as stored in the info field */
+#define EVENT_ILLEGAL_DEVTAB_ENTRY  (0x1U << 12)
+#define EVENT_IOPF                  (0x2U << 12)
+#define   EVENT_IOPF_I              (1U << 3)
+#define   EVENT_IOPF_PR             (1U << 4)
+#define   EVENT_IOPF_RW             (1U << 5)
+#define   EVENT_IOPF_PE             (1U << 6)
+#define   EVENT_IOPF_RZ             (1U << 7)
+#define   EVENT_IOPF_TR             (1U << 8)
+#define EVENT_DEV_TAB_HW_ERROR      (0x3U << 12)
+#define EVENT_PAGE_TAB_HW_ERROR     (0x4U << 12)
+#define EVENT_ILLEGAL_COMMAND_ERROR (0x5U << 12)
+#define EVENT_COMMAND_HW_ERROR      (0x6U << 12)
+#define EVENT_IOTLB_INV_TIMEOUT     (0x7U << 12)
+#define EVENT_INVALID_DEV_REQUEST   (0x8U << 12)
+
+#define EVENT_LEN                   16
+
+#define IOMMU_PERM_READ             (1 << 0)
+#define IOMMU_PERM_WRITE            (1 << 1)
+#define IOMMU_PERM_RW               (IOMMU_PERM_READ | IOMMU_PERM_WRITE)
+
+/* AMD RD890 Chipset */
+#define PCI_DEVICE_ID_RD890_IOMMU   0x20
+
+#define PCI_CAP_ID_SEC                0xf /* IOMMU capability header register  
   */
+#define PCI_CAP_ID_MMIO_LOW           0x0 /* MMIO base address low register    
   */
+#define PCI_CAP_ID_MMIO_HIGH          0x0 /* MMIO base address high register   
   */
+#define PCI_CAP_ID_RANGE              0x0 /* Device range register             
   */
+#define PCI_CAP_ID_MISC               0x0 /* miscellaneous Information 
register 0 */
+#define PCI_CAP_ID_MISC1              0x0 /* miscellaneous Information 
register 1 */
+
+#define FEATURE_PREFETCH            (1ULL<<0)
+#define FEATURE_PPR                 (1ULL<<1)
+#define FEATURE_NX                  (1ULL<<3)
+#define FEATURE_GT                  (1ULL<<4)
+#define FEATURE_IA                  (1ULL<<6)
+#define FEATURE_GA                  (1ULL<<7)
+#define FEATURE_HE                  (1ULL<<8)
+#define FEATURE_PC                  (1ULL<<9)
+
+/* reserved DTE bits */
+#define DTE_LOWER_QUAD_RESERVED  0x80300000000000fc
+#define DTE_MIDDLE_QUAD_RESERVED 0x0000000000000100
+#define DTE_UPPER_QUAD_RESERVED  0x08f0000000000000
+
+/* IOMMU paging mode */
+#define GATS_MODE                 (6ULL <<  12)
+#define HATS_MODE                 (6ULL <<  10)
+
+/* PCI SIG constants */
+#define PCI_BUS_MAX 256
+#define PCI_SLOT_MAX 32
+#define PCI_FUNC_MAX 8
+#define PCI_DEVFN_MAX 256
+
+/* utility */
+#define PCI_BUS_NUM(x) (((x) >> 8) & 0xff)
+
+/* IOTLB */
+#define IOMMU_IOTLB_MAX_SIZE 1024
+#define IOMMU_DEVID_SHIFT    36
+
+/* extended feature support */
+#define EXT_FEATURES (FEATURE_PREFETCH | FEATURE_PPR | FEATURE_NX | FEATURE_GT 
| FEATURE_IA | FEATURE_GA | FEATURE_HE | GATS_MODE | HATS_MODE )
+
+/* capabilities header */
+#define CAPAB_FEATURES (CAPAB_FLAT_EXT | CAPAB_FLAG_NPCACHE | 
CAPAB_FLAG_IOTLBSUP | PCI_CAP_ID_SEC | CAPAB_INIT_TYPE | CAPAB_FLAG_HTTUNNEL |  
CAPAB_EFR_SUP)
+
+/* command constants */
+#define COM_STORE_ADDRESS_MASK 0xffffffffffff8
+#define COM_COMPLETION_STORE_MASK 0x1
+#define COM_COMPLETION_INTR 0x2
+#define COM_COMPLETION_DATA_OFF 0x8
+#define IOMMU_COMMAND_SIZE 0x10
+
+/* IOMMU default address */
+#define BUS_AMD_IOMMU_ADDR 0xfeb00000
+
+/* page management constants */
+#define IOMMU_PAGE_SHIFT 12
+#define IOMMU_PAGE_SIZE  (1ULL << IOMMU_PAGE_SHIFT)
+
+#define IOMMU_PAGE_SHIFT_4K 12
+#define IOMMU_PAGE_MASK_4K  (~((1ULL << IOMMU_PAGE_SHIFT_4K) - 1))
+#define IOMMU_PAGE_SHIFT_2M 21
+#define IOMMU_PAGE_MASK_2M  (~((1ULL << IOMMU_PAGE_SHIFT_2M) -1))
+#define IOMMU_PAGE_SHIFT_1G 30
+#define IOMMU_PAGE_MASK_1G (~((1ULL << IOMMU_PAGE_SHIFT_1G) - 1))
+
+#define PCI_SLOT(devfn)      (((devfn) >> 3) & 0x1f)
+#define PCI_FUNC(devfn)      ((devfn) & 0x07)
+
+#define MAX_VA_ADDR          (48UL << 5)
+#define MAX_PH_ADDR          (40UL << 8)
+#define MAX_GVA_ADDR         (48UL << 15)
+
+/* invalidation command device id */
+#define INVAL_DEV_ID_SHIFT  32
+#define INVAL_DEV_ID_MASK   (~((1UL << INVAL_DEV_ID_SHIFT) - 1))
+
+/* invalidation address */
+#define INVAL_ADDR_MASK_SHIFT 12
+#define INVAL_ADDR_MASK     (~((1UL << INVAL_ADDR_MASK_SHIFT)-1))
+
+/* invalidation S bit mask */
+#define INVAL_ALL(val) ((val) & (0x1))
+
+/* reserved bits */
+#define COMPLETION_WAIT_RSVD    0x0ff000000
+#define CMD_INVAL_DEV_RSVD      0xffff00000fffffff
+#define INVAL_IOMMU_PAGES_RSVD  0xfff000000fff0000
+#define INVAL_IOTLB_PAGES_RSVD  0x00000ff4
+#define INVAL_INTR_TABLE_RSVD   0xffff00000fffffff
+#define PRF_IOMMU_PAGES_RSVD    0x00ff00000ff00000
+#define COMPLETE_PPR_RQ_RSVD    0xffff00000ff00000
+#define INVAL_IOMMU_ALL_RSVD    0x0fffffff00000000
+
+#define TYPE_AMD_IOMMU_DEVICE "amd-iommu"
+#define AMD_IOMMU_DEVICE(obj)\
+    OBJECT_CHECK(AMDIOMMUState, (obj), TYPE_AMD_IOMMU_DEVICE)
+
+typedef struct AMDIOMMUState AMDIOMMUState;
+
+/* per-device wrapper tying a PCI (bus, devfn) to its IOMMU region and
+ * address space; created lazily by bridge_host_amd_iommu() */
+typedef struct AMDIOMMUAddressSpace{
+    uint8_t bus_num;            /* bus number                           */
+    uint8_t devfn;              /* device function                      */
+    AMDIOMMUState *iommu_state; /* IOMMU - one per machine              */
+    MemoryRegion iommu;         /* Device's iommu region                */
+    AddressSpace as;            /* device's corresponding address space */
+} AMDIOMMUAddressSpace;
+
+/* device state for the emulated AMD IOMMU (one instance per machine) */
+struct AMDIOMMUState {
+    PCIDevice dev;               /* The PCI device itself        */
+
+    uint32_t version;            /* "version" property           */
+
+    uint32_t capab_offset;       /* capability offset pointer    */
+    uint8_t *capab;              /* capabilities registers       */
+
+    bool enabled;                /* IOMMU enabled                */
+    bool ats_enabled;            /* address translation enabled  */
+    bool cmdbuf_enabled;         /* command buffer enabled       */
+    bool evtlog_enabled;         /* event log enabled            */
+    bool excl_enabled;           /* exclusion range enabled      */
+
+    dma_addr_t devtab;           /* base address device table    */
+    size_t devtab_len;           /* device table length          */
+
+    dma_addr_t cmdbuf;           /* command buffer base address  */
+    uint64_t cmdbuf_len;         /* command buffer length        */
+    uint32_t cmdbuf_head;        /* current IOMMU read position  */
+    uint32_t cmdbuf_tail;        /* next Software write position */
+    bool completion_wait_intr;
+
+    dma_addr_t evtlog;           /* base address event log       */
+    bool evtlog_intr;
+    uint32_t evtlog_len;         /* event log length             */
+    uint32_t evtlog_head;        /* current IOMMU write position */
+    uint32_t evtlog_tail;        /* current Software read position */
+
+    /* unused for now */
+    dma_addr_t excl_base;        /* base DVA - IOMMU exclusion range */
+    dma_addr_t excl_limit;       /* limit of IOMMU exclusion range   */
+    bool excl_allow;             /* translate accesses to the exclusion range */
+    bool excl_enable;            /* exclusion range enabled.
+                                  * NOTE(review): duplicates excl_enabled
+                                  * above (amd_iommu_init() only resets
+                                  * excl_enabled) - one should go */
+
+    dma_addr_t ppr_log;          /* base address ppr log */
+    uint32_t pprlog_len;         /* ppr log len  */
+    uint32_t pprlog_head;        /* ppr log head */
+    uint32_t pprlog_tail;        /* ppr log tail */
+
+    MemoryRegion mmio;           /* MMIO region                  */
+    uint8_t mmior[MMIO_SIZE];    /* read/write MMIO              */
+    uint8_t mmiow1c[MMIO_SIZE];  /* read/write 1 clear MMIO      */
+    bool mmio_enabled;
+
+    /* IOMMU function */
+    MemoryRegionIOMMUOps iommu_ops;
+
+    /* per-bus tables of per-devfn address spaces, allocated lazily
+     * by bridge_host_amd_iommu() */
+    AMDIOMMUAddressSpace **address_spaces[PCI_BUS_MAX];
+
+    /* IOTLB cache, created in realize */
+    GHashTable *iotlb;
+};
+
+/* cached translation, stored as a value in AMDIOMMUState.iotlb */
+typedef struct IOMMUIOTLBEntry{
+    uint64_t gfn;        /* guest frame number */
+    uint64_t devid;      /* originating device id */
+    uint64_t perms;      /* cached access permissions (IOMMU_PERM_*) */
+    dma_addr_t hwaddr;   /* cached translated address */
+} IOMMUIOTLBEntry;
+
+/* parameters for an IOTLB page invalidation request */
+typedef struct IOMMUTLBPageInvInfo {
+    uint64_t devid;      /* target device id */
+    uint64_t gfn;        /* guest frame number to invalidate */
+    uint64_t mask;       /* presumably selects the invalidation range -
+                          * verify against the users of this struct */
+} IOMMUTLBPageInvInfo;
+
+AddressSpace *bridge_host_amd_iommu(PCIBus *bus, void *opaque, int devfn);
+
+#endif
-- 
2.1.4




reply via email to

[Prev in Thread] Current Thread [Next in Thread]