From: Keith Busch
Subject: [Qemu-devel] [PATCH 10/11] QEMU NVMe: Add logging
Date: Tue, 26 Feb 2013 17:47:13 -0700

Adds logging at various levels of detail throughout the code so that
a developer may see what is occurring within the device as it interacts
with their NVMe drivers and utilities.
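
As an aside for readers of this patch: NVME_LOG() gates each message on a
per-level bit in the debug_flags mask, prefixes it with the calling
function's name, and compiles away to a no-op when NVME_DEBUG is not
defined. A minimal illustrative sketch of the intended use; the handler
below and its arguments are hypothetical, not code from this patch:

    /* in hw/nvme.c: trace admin commands and errors only */
    int debug_flags = DBGBIT(ADM_DBG) | DBGBIT(ERR);

    static void example_handler(NvmeCtrl *n, NvmeCmd *cmd)
    {
        /* with ADM_DBG set this prints, e.g., "example_handler: ctrl:0
         * opcode:6" to stdout; without NVME_DEBUG it expands to nothing */
        NVME_LOG(ADM_DBG, "ctrl:%u opcode:%x", n->instance, cmd->opcode);
    }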

Signed-off-by: Keith Busch <address@hidden>
---
 hw/nvme.c |  266 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++--
 1 files changed, 260 insertions(+), 6 deletions(-)

diff --git a/hw/nvme.c b/hw/nvme.c
index 2945406..aec1ef7 100644
--- a/hw/nvme.c
+++ b/hw/nvme.c
@@ -45,12 +45,32 @@
 #include "block-common.h"
 #include "hw.h"
 #include "pci/msix.h"
+#include "pci/msi.h"
 #include "pci/pci.h"
 #include "qemu/bitmap.h"
 #include "qemu/bitops.h"
 
 #include "nvme.h"
 
+#define NVME_DEBUG
+#ifdef NVME_DEBUG
+enum {
+    IO_DBG_VER, IO_DBG, ADM_DBG, DBG, INFO, ERR
+};
+
+#define DBGBIT(x) (1 << x)
+int debug_flags = DBGBIT(IO_DBG) | DBGBIT(DBG) | DBGBIT(INFO) | DBGBIT(ERR) | DBGBIT(ADM_DBG);
+#define NVME_LOG(level, fmt, ...) \
+    do {\
+        if (debug_flags & DBGBIT(level)) { \
+            fprintf(stdout, "%s: " fmt "\n", __func__, ##__VA_ARGS__);\
+        } \
+    } while (0)
+#else
+#define DBGBIT(x) 0
+#define NVME_LOG(level, fmt, ...) do {} while (0)
+#endif
+
 #define NVME_MAX_QS             PCI_MSIX_FLAGS_QSIZE
 #define NVME_MAX_QUEUE_ENTRIES  0xffff
 #define NVME_MAX_STRIDE         12
@@ -104,6 +124,8 @@ static void nvme_isr_notify(NvmeCtrl *n, NvmeCQueue *cq)
     if (cq->irq_enabled) {
         if (msix_enabled(&(n->dev))) {
             msix_notify(&(n->dev), cq->vector);
+        } else if (msi_enabled(&(n->dev))) {
+            msi_notify(&(n->dev), cq->vector);
         } else {
             qemu_irq_pulse(n->dev.irq[0]);
         }
@@ -119,9 +141,16 @@ static uint64_t *nvme_setup_discontig(NvmeCtrl *n, uint64_t prp_addr,
     uint16_t total_prps = DIV_ROUND_UP(queue_depth * entry_size, n->page_size);
     uint64_t *prp_list = g_malloc0(total_prps * sizeof(*prp_list));
 
+    NVME_LOG(DBG,
+        "queue depth:%u page size:%u entry size:%u prps per page:%u total:%u",
+        queue_depth, n->page_size, entry_size, prps_per_page, total_prps);
+
     for (i = 0; i < total_prps; i++) {
         if (i % prps_per_page == 0 && i < total_prps - 1) {
             if (!prp_addr || prp_addr & (n->page_size - 1)) {
+                NVME_LOG(ERR,
+                    "invalid prp list address for discontig queue:%"PRIx64"",
+                    prp_addr);
                 g_free(prp_list);
                 return NULL;
             }
@@ -129,7 +158,10 @@ static uint64_t *nvme_setup_discontig(NvmeCtrl *n, uint64_t prp_addr,
             prp_addr = prp[prps_per_page - 1];
         }
         prp_list[i] = prp[i % prps_per_page];
+        NVME_LOG(DBG, "map prp:%"PRIx64"", prp_list[i]);
         if (!prp_list[i] || prp_list[i] & (n->page_size - 1)) {
+            NVME_LOG(ERR, "invalid prp for discontig queue:%"PRIx64"",
+                prp_list[i]);
             g_free(prp_list);
             return NULL;
         }
@@ -143,6 +175,9 @@ static hwaddr nvme_discontig(uint64_t *dma_addr, uint16_t page_size,
     uint16_t entries_per_page = page_size / entry_size;
     uint16_t prp_index = queue_idx / entries_per_page;
     uint16_t index_in_prp = queue_idx % entries_per_page;
+    NVME_LOG(IO_DBG,
+        "dma_addr:%"PRIx64" page size:%u queue index:%u entry size:%u",
+        *dma_addr, page_size, queue_idx, entry_size);
     return dma_addr[prp_index] + index_in_prp * entry_size;
 }
 
@@ -155,13 +190,19 @@ static uint16_t nvme_map_prp(QEMUSGList *qsg, uint64_t prp1, uint64_t prp2,
 
     qemu_sglist_init(qsg, num_prps, pci_dma_context(&n->dev));
     if (!prp1) {
+        NVME_LOG(ERR, "null prp1");
         goto unmap;
     }
 
+    NVME_LOG(IO_DBG,
+        "ctrl:%u page:%u prp1:%"PRIx64" prp2:%"PRIx64" len:%u",
+        n->instance, n->page_size, prp1, prp2, len);
+
     qemu_sglist_add(qsg, prp1, trans_len);
     len -= trans_len;
     if (len) {
         if (!prp2) {
+            NVME_LOG(ERR, "null prp2");
             goto unmap;
         }
         if (len > n->page_size) {
@@ -175,6 +216,9 @@ static uint16_t nvme_map_prp(QEMUSGList *qsg, uint64_t prp1, uint64_t prp2,
             while (len != 0) {
                 if (i == n->max_prp_ents - 1 && len > n->page_size) {
                     if (!prp_list[i] || prp_list[i] & (n->page_size - 1)) {
+                        NVME_LOG(ERR,
+                            "null or unaligned prp chain:%u entry %"PRIx64"", 
i,
+                            prp_list[i]);
                         goto unmap;
                     }
                     nents = (len + n->page_size - 1) >> n->page_bits;
@@ -184,9 +228,13 @@ static uint16_t nvme_map_prp(QEMUSGList *qsg, uint64_t prp1, uint64_t prp2,
                     i = 0;
                 }
                 if (!prp_list[i] || prp_list[i] & (n->page_size - 1)) {
+                    NVME_LOG(ERR,
+                        "null or unaligned prp list:%u entry %"PRIx64"",
+                        i, prp_list[i]);
                     goto unmap;
                 }
 
+                NVME_LOG(IO_DBG, "prp[%u]:%"PRIx64"", i, prp_list[i]);
                 trans_len = MIN(len, n->page_size);
                 qemu_sglist_add(qsg, prp_list[i], trans_len);
                 len -= trans_len;
@@ -194,6 +242,7 @@ static uint16_t nvme_map_prp(QEMUSGList *qsg, uint64_t prp1, uint64_t prp2,
             }
         } else {
             if (prp2 & (n->page_size - 1)) {
+                NVME_LOG(ERR, "prp2 alignment");
                 goto unmap;
             }
             qemu_sglist_add(qsg, prp2, len);
@@ -209,6 +258,10 @@ static uint16_t nvme_dma_prp(uint64_t prp1, uint64_t prp2, uint32_t len,
     NvmeCtrl *n, uint8_t *ptr, DMADirection dir)
 {
     QEMUSGList qsg;
+
+    NVME_LOG(IO_DBG,
+        "instance:%d prp1:%"PRIx64" prp2:%"PRIx64" len:%u ptr:%p dir:%d",
+        n->instance, prp1, prp2, len, ptr, dir);
     if (nvme_map_prp(&qsg, prp1, prp2, len, n)) {
         return NVME_INVALID_FIELD | NVME_DNR;
     }
@@ -237,6 +290,8 @@ static void nvme_post_cqes(void *opaque)
     int coalesce = (n->features.int_vector_config[cq->vector] >> 16) & 1;
     int thresh = NVME_INTC_THR(n->features.int_coalescing) + 1;
 
+    NVME_LOG(IO_DBG, "ctrl:%d cq:%u thresh:%d coalesce:%d", n->instance, 
cq->id,
+        thresh, coalesce);
     QTAILQ_FOREACH_SAFE(req, &cq->req_list, entry, next) {
         hwaddr addr;
         if (nvme_cq_full(cq)) {
@@ -245,6 +300,10 @@ static void nvme_post_cqes(void *opaque)
 
         QTAILQ_REMOVE(&cq->req_list, req, entry);
         sq = req->sq;
+
+        NVME_LOG(IO_DBG, "ctrl:%u cq:%u sq:%u cid:%d cq head:%u cq tail:%u",
+            n->instance, cq->id, sq->id, req->cqe.cid, cq->head, cq->tail);
+
         req->cqe.status |= cq->phase;
         req->cqe.sq_id = sq->id;
         req->cqe.sq_head = sq->head;
@@ -255,6 +314,10 @@ static void nvme_post_cqes(void *opaque)
                 n->cqe_size);
         }
         nvme_inc_cq_tail(cq);
+
+        NVME_LOG(IO_DBG_VER, "ctrl:%u cq:%u cq tail:%u addr:%"PRIx64"",
+            n->instance, cq->id, cq->tail, addr);
+
         pci_dma_write(&n->dev, addr, (void *)&req->cqe, sizeof(req->cqe));
         QTAILQ_INSERT_TAIL(&sq->req_list, req, entry);
         if (coalesce && ++processed % thresh == 0) {
@@ -269,6 +332,8 @@ static void nvme_post_cqes(void *opaque)
 
 static void nvme_enqueue_req_completion(NvmeCQueue *cq, NvmeRequest *req)
 {
+    NVME_LOG(IO_DBG, "cq:%u sq:%d cid:%u req:%p", cq->id, req->sq->id,
+        req->cqe.cid, req);
     assert(cq->id == req->sq->cqid);
     QTAILQ_REMOVE(&req->sq->out_req_list, req, entry);
     QTAILQ_INSERT_TAIL(&cq->req_list, req, entry);
@@ -298,6 +363,8 @@ static void nvme_enqueue_event(NvmeCtrl *n, uint8_t event_type,
     event->result.event_info = event_info;
     event->result.log_page   = log_page;
     QSIMPLEQ_INSERT_TAIL(&(n->aer_queue), event, entry);
+    NVME_LOG(INFO, "ctrl:%u type:%x info:%x page:%x", n->instance,
+        event_type, event_info, log_page);
     qemu_mod_timer(n->aer_timer, qemu_get_clock_ns(vm_clock) + 10000);
 }
 
@@ -307,6 +374,10 @@ static void nvme_aer_process_cb(void *param)
     NvmeRequest *req;
     AerResult *result;
     AsyncEvent *event, *next;
+
+    NVME_LOG(DBG, "ctrl:%u outstanding aers:%u mask:%x queue empty:%u",
+        n->instance, n->outstanding_aers, n->aer_mask,
+        QSIMPLEQ_EMPTY(&n->aer_queue));
     QSIMPLEQ_FOREACH_SAFE(event, &n->aer_queue, entry, next) {
         if (n->outstanding_aers <= 0) {
             break;
@@ -380,10 +451,11 @@ static void nvme_rw_cb(void *opaque, int ret)
     NvmeCtrl *n = sq->ctrl;
     NvmeCQueue *cq = n->cq[sq->cqid];
     NvmeNamespace *ns = req->ns;
-    n = sq->ctrl;
-    cq = n->cq[sq->cqid];
-    qemu_sglist_destroy(&req->qsg);
 
+    NVME_LOG(IO_DBG_VER,
+        "ctrl:%u sq:%u cid:%x nsid:%u nlb:%u slba:%"PRIu64" ret:%d",
+        n->instance, sq->id, req->cqe.cid, ns->id, req->nlb, req->slba, ret);
+    qemu_sglist_destroy(&req->qsg);
     nvme_update_stats(ns, req->nlb, req->rw);
     if (!req->rw) {
         nvme_update_ns_util(ns, req->slba, req->nlb);
@@ -392,6 +464,8 @@ static void nvme_rw_cb(void *opaque, int ret)
     if (!ret) {
         req->cqe.status = NVME_SUCCESS << 1;
     } else {
+        NVME_LOG(ERR, "nsid:%u nlb:%u slba:%"PRIu64" ret:%d", ns->id, req->nlb,
+            req->slba, ret);
         req->cqe.status = NVME_INTERNAL_DEV_ERROR << 1;
     }
     nvme_enqueue_req_completion(cq, req);
@@ -407,12 +481,19 @@ static uint16_t nvme_rw(NvmeCtrl *n, NvmeNamespace *ns, NvmeCmd *cmd,
     int data_dir = rw->opcode == NVME_CMD_WRITE ? 0 : 1;
     uint16_t ret;
 
+    NVME_LOG(IO_DBG,
+        "nsid:%u slba:%"PRIu64" nlb:%u data size:%"PRIu64"",
+        ns->id, rw->slba, rw->nlb, data_size);
     if ((rw->slba + rw->nlb) > ns->id_ns.nsze) {
+        NVME_LOG(ERR, "range error, slba:%ld nlb:%u size:%ld", rw->slba,
+            rw->nlb, ns->id_ns.nsze);
         nvme_set_error_page(n, req->sq->id, cmd->cid, NVME_LBA_RANGE,
             offsetof(NvmeRwCmd, slba), rw->slba + rw->nlb, ns->id);
         return NVME_LBA_RANGE | NVME_DNR;
     }
     if (n->id_ctrl.mdts && data_size > n->page_size * (1 << n->id_ctrl.mdts)) {
+        NVME_LOG(ERR, "transfer size error, mdts:%u data size:%"PRIu64"",
+            n->id_ctrl.mdts, data_size);
         nvme_set_error_page(n, req->sq->id, cmd->cid, NVME_INVALID_FIELD,
             offsetof(NvmeRwCmd, nlb), rw->slba + rw->nlb, ns->id);
         return NVME_INVALID_FIELD | NVME_DNR;
@@ -431,6 +512,10 @@ static uint16_t nvme_rw(NvmeCtrl *n, NvmeNamespace *ns, NvmeCmd *cmd,
         req->aiocb = data_dir ?
             dma_bdrv_read(n->conf.bs, &req->qsg, slba, nvme_rw_cb, req) :
             dma_bdrv_write(n->conf.bs, &req->qsg, slba, nvme_rw_cb, req);
+        NVME_LOG(IO_DBG,
+            "req:%p aiocb:%p ns:%u sq:%u cid:%x nlb:%u slba:%"PRIu64" "\
+            "aio slba:%"PRIu64" aio nlb:%u", req, req->aiocb, ns->id,
+            req->sq->id, req->cqe.cid, rw->nlb, rw->slba, slba, nlb);
 
         ret = NVME_NO_COMPLETE;
     }
@@ -457,6 +542,7 @@ static void nvme_dsm_cb(void *opaque, int ret)
     NvmeCtrl *n = sq->ctrl;
     NvmeCQueue *cq = n->cq[sq->cqid];
 
+    NVME_LOG(IO_DBG, "ctrl:%u ret:%d", n->instance, ret);
     if (ret && !req->cqe.status) {
         req->cqe.status = NVME_INTERNAL_DEV_ERROR << 1;
     }
@@ -474,6 +560,7 @@ static uint16_t nvme_dsm(NvmeCtrl *n, NvmeNamespace *ns, NvmeCmd *cmd,
     uint8_t data_shift = ns->id_ns.lbaf[lba_index].ds;
     NvmeDsmRange range[nr];
 
+    NVME_LOG(IO_DBG, "ctrl:%u nsid:%u", n->instance, ns->id);
     if (nvme_dma_prp(cmd->prp1, cmd->prp2, sizeof(range), n, (uint8_t *)range,
             DMA_DIRECTION_TO_DEVICE)) {
         nvme_set_error_page(n, req->sq->id, cmd->cid, NVME_INVALID_FIELD,
@@ -494,6 +581,9 @@ static uint16_t nvme_dsm(NvmeCtrl *n, NvmeNamespace *ns, NvmeCmd *cmd,
                 req->aio_count -= (nr - i);
                 nvme_set_error_page(n, req->sq->id, cmd->cid, NVME_LBA_RANGE,
                     offsetof(NvmeCmd, cdw10), slba + nlb, ns->id);
+                NVME_LOG(ERR,
+                    "range error, slba:%"PRIu64" nlb:%u size:%"PRIu64"",
+                    slba, nlb, ns->id_ns.ncap);
                 if (req->aio_count) {
                     req->cqe.status = NVME_LBA_RANGE | NVME_DNR;
                     break;
@@ -522,6 +612,7 @@ static void nvme_flush_cb(void *opaque, int ret)
     NvmeCtrl *n = sq->ctrl;
     NvmeCQueue *cq = n->cq[sq->cqid];
 
+    NVME_LOG(IO_DBG, "ctrl:%u ret:%d", n->instance, ret);
     if (!ret) {
         req->cqe.status = NVME_SUCCESS << 1;
     } else {
@@ -533,6 +624,7 @@ static void nvme_flush_cb(void *opaque, int ret)
 static uint16_t nvme_flush(NvmeCtrl *n, NvmeNamespace *ns, NvmeCmd *cmd,
     NvmeRequest *req)
 {
+    NVME_LOG(IO_DBG, "ctrl:%u nsid:%u", n->instance, ns->id);
     req->ns = ns;
     bdrv_aio_flush(n->conf.bs, nvme_flush_cb, req);
     return NVME_NO_COMPLETE;
@@ -542,12 +634,15 @@ static uint16_t nvme_io_cmd(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
 {
     NvmeNamespace *ns;
     if (cmd->nsid == 0 || cmd->nsid > n->num_namespaces) {
+        NVME_LOG(ERR, "invalid nsid:%u", cmd->nsid);
         nvme_set_error_page(n, req->sq->id, cmd->cid, NVME_INVALID_NSID,
                 offsetof(NvmeCmd, nsid), 0, cmd->nsid);
         return NVME_INVALID_NSID | NVME_DNR;
     }
 
     ns = &n->namespaces[cmd->nsid - 1];
+    NVME_LOG(IO_DBG, "ctrl:%u nsid:%u opcode:%x", n->instance, ns->id,
+        cmd->opcode);
     switch (cmd->opcode) {
     case NVME_CMD_FLUSH:
         return nvme_flush(n, ns, cmd, req);
@@ -583,10 +678,13 @@ static uint16_t nvme_del_sq(NvmeCtrl *n, NvmeCmd *cmd)
     NvmeSQueue *sq;
 
     if (!c->qid || nvme_check_sqid(n, c->qid)) {
+        NVME_LOG(ERR, "invalid sq:%u", c->qid);
         return NVME_INVALID_QID | NVME_DNR;
     }
 
     sq = n->sq[c->qid];
+    NVME_LOG(INFO, "ctrl:%u sq:%u cq:%u io completed:%lu", n->instance,
+        c->qid, sq->cqid, sq->completed);
     while (!QTAILQ_EMPTY(&sq->out_req_list)) {
         req = QTAILQ_FIRST(&sq->out_req_list);
         if (req->aiocb) {
@@ -603,6 +701,9 @@ static uint16_t nvme_del_sq(NvmeCtrl *n, NvmeCmd *cmd)
                 QTAILQ_INSERT_TAIL(&sq->req_list, req, entry);
             }
         }
+    } else {
+        NVME_LOG(ERR, "sq:%u does not contain valid cq:%u", sq->id,
+            sq->cqid);
     }
 
     nvme_free_sq(sq, n);
@@ -631,6 +732,7 @@ static uint16_t nvme_init_sq(NvmeSQueue *sq, NvmeCtrl *n, uint64_t dma_addr,
         sq->prp_list = nvme_setup_discontig(n, dma_addr, size,
             n->sqe_size);
         if (!sq->prp_list) {
+            NVME_LOG(ERR, "failed to create discontiguous queue");
             return NVME_INVALID_FIELD | NVME_DNR;
         }
     }
@@ -671,26 +773,35 @@ static uint16_t nvme_create_sq(NvmeCtrl *n, NvmeCmd *cmd)
     NvmeSQueue *sq;
     NvmeCreateSq *c = (NvmeCreateSq *)cmd;
     if (!c->cqid || nvme_check_cqid(n, c->cqid)) {
+        NVME_LOG(ERR, "invalid cq:%u", c->cqid);
         return NVME_INVALID_CQID | NVME_DNR;
     }
     if (!c->sqid || (c->sqid && !nvme_check_sqid(n, c->sqid))) {
+        NVME_LOG(ERR, "invalid sq:%u", c->sqid);
         return NVME_INVALID_QID | NVME_DNR;
     }
     if (!c->qsize || c->qsize > NVME_CAP_MQES(n->bar.cap)) {
+        NVME_LOG(ERR, "invalid size:%u max:%"PRIu64"", c->qsize,
+            NVME_CAP_MQES(n->bar.cap));
         return NVME_MAX_QSIZE_EXCEEDED | NVME_DNR;
     }
     if (!c->prp1 || c->prp1 & (n->page_size - 1)) {
+        NVME_LOG(ERR, "invalid prp:%"PRIx64"", c->prp1);
         return NVME_INVALID_FIELD | NVME_DNR;
     }
     if (!(NVME_SQ_FLAGS_PC(c->sq_flags)) && NVME_CAP_CQR(n->bar.cap)) {
+        NVME_LOG(ERR, "invalid prp discontinuity sq_flags:%x", c->sq_flags);
         return NVME_INVALID_FIELD | NVME_DNR;
     }
+
     sq = g_malloc0(sizeof(*sq));
     if (nvme_init_sq(sq, n, c->prp1, c->sqid, c->cqid, c->qsize + 1,
             NVME_SQ_FLAGS_QPRIO(c->sq_flags), NVME_SQ_FLAGS_PC(c->sq_flags))) {
         g_free(sq);
         return NVME_INVALID_FIELD | NVME_DNR;
     }
+    NVME_LOG(INFO, "ctrl:%u sq:%u cq:%u size:%u burst:%u dma:%"PRIx64"",
+        n->instance, c->sqid, c->cqid, c->qsize, sq->arb_burst, sq->dma_addr);
     return NVME_SUCCESS;
 }
 
@@ -715,13 +826,19 @@ static uint16_t nvme_del_cq(NvmeCtrl *n, NvmeCmd *cmd)
     NvmeCQueue *cq;
 
     if (!c->qid || nvme_check_cqid(n, c->qid)) {
+        NVME_LOG(ERR, "invalid cq:%u", c->qid);
         return NVME_INVALID_CQID | NVME_DNR;
     }
 
     cq = n->cq[c->qid];
     if (!QTAILQ_EMPTY(&cq->sq_list)) {
+        NVME_LOG(ERR, "error, cq:%u sq(s) are still connected to CQ",
+            c->qid);
         return NVME_INVALID_QUEUE_DEL;
     }
+
+    NVME_LOG(INFO, "ctrl:%u cq:%u irq vector:%u", n->instance, c->qid,
+        cq->vector);
     nvme_free_cq(cq, n);
     return NVME_SUCCESS;
 }
@@ -740,9 +857,11 @@ static uint16_t nvme_init_cq(NvmeCQueue *cq, NvmeCtrl *n, uint64_t dma_addr,
     if (cq->phys_contig) {
         cq->dma_addr = dma_addr;
     } else {
+        printf("setting up non contiguous!\n");
         cq->prp_list = nvme_setup_discontig(n, dma_addr, size,
             n->cqe_size);
         if (!cq->prp_list) {
+            NVME_LOG(ERR, "failed to create discontiguous queue");
             return NVME_INVALID_FIELD | NVME_DNR;
         }
     }
@@ -759,18 +878,23 @@ static uint16_t nvme_create_cq(NvmeCtrl *n, NvmeCmd *cmd)
     NvmeCreateCq *c = (NvmeCreateCq *)cmd;
     NvmeCQueue *cq;
     if (!c->cqid || (c->cqid && !nvme_check_cqid(n, c->cqid))) {
+        NVME_LOG(ERR, "invalid cq:%u", c->cqid);
         return NVME_INVALID_CQID | NVME_DNR;
     }
     if (!c->qsize || c->qsize > NVME_CAP_MQES(n->bar.cap)) {
+        NVME_LOG(ERR, "invalid size:%u", c->qsize);
         return NVME_MAX_QSIZE_EXCEEDED | NVME_DNR;
     }
     if (!c->prp1) {
+        NVME_LOG(ERR, "invalid null prp");
         return NVME_INVALID_FIELD | NVME_DNR;
     }
     if (c->irq_vector > n->num_queues) {
+        NVME_LOG(ERR, "invalid irq vector:%u", c->irq_vector);
         return NVME_INVALID_IRQ_VECTOR;
     }
     if (!(NVME_CQ_FLAGS_PC(c->cq_flags)) && NVME_CAP_CQR(n->bar.cap)) {
+        NVME_LOG(ERR, "invalid prp discontinuity");
         return NVME_INVALID_FIELD | NVME_DNR;
     }
 
@@ -780,6 +904,10 @@ static uint16_t nvme_create_cq(NvmeCtrl *n, NvmeCmd *cmd)
         g_free(cq);
         return NVME_INVALID_FIELD | NVME_DNR;
     }
+
+    NVME_LOG(INFO,
+        "ctrl:%u cq:%u size:%u irq vector:%u dma:%"PRIx64"", n->instance,
+        c->cqid, c->qsize, cq->vector, cq->dma_addr);
     return NVME_SUCCESS;
 }
 
@@ -787,12 +915,16 @@ static uint16_t nvme_identify(NvmeCtrl *n, NvmeCmd *cmd)
 {
     NvmeIdentify *c = (NvmeIdentify *)cmd;
     NvmeNamespace *ns;
+
+    NVME_LOG(ADM_DBG, "ctrl:%u cns:%u nsid:%u", n->instance, c->cns, c->nsid);
     if (c->cns) {
         return nvme_dma_prp(cmd->prp1, cmd->prp2, sizeof(n->id_ctrl), n,
             (uint8_t *)&n->id_ctrl, DMA_DIRECTION_TO_DEVICE);
     }
 
     if (c->nsid == 0 || c->nsid > n->num_namespaces) {
+        NVME_LOG(ERR, "ctrl:%u invalid nsid:%u, namespaces:%u",
+            n->instance, c->nsid, n->num_namespaces);
         return NVME_INVALID_NSID | NVME_DNR;
     }
     ns = &n->namespaces[c->nsid - 1];
@@ -803,6 +935,7 @@ static uint16_t nvme_identify(NvmeCtrl *n, NvmeCmd *cmd)
 static uint16_t nvme_get_feature(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
 {
     uint32_t dw11 = cmd->cdw11;
+    NVME_LOG(ADM_DBG, "ctrl:%u feature id:%x", n->instance, cmd->cdw10);
     switch (cmd->cdw10) {
     case NVME_ARBITRATION:
         req->cqe.result = n->features.arbitration;
@@ -855,6 +988,7 @@ static uint16_t nvme_get_feature(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
 static uint16_t nvme_set_feature(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
 {
     uint32_t dw11 = cmd->cdw11;
+    NVME_LOG(ADM_DBG, "ctrl:%u feature id:%x", n->instance, cmd->cdw10);
     switch (cmd->cdw10) {
     case NVME_ARBITRATION:
         req->cqe.result = n->features.arbitration;
@@ -916,6 +1050,11 @@ static uint16_t nvme_fw_log_info(NvmeCtrl *n, NvmeCmd *cmd, uint32_t buf_len)
     uint32_t trans_len;
     NvmeFwSlotInfoLog fw_log;
     trans_len = MIN(sizeof(fw_log), buf_len);
+    if (buf_len < sizeof(fw_log)) {
+        NVME_LOG(ERR, "not enough memory, needs %ld, has %u bytes",
+            sizeof(fw_log), buf_len);
+    }
+
     return nvme_dma_prp(cmd->prp1, cmd->prp2, trans_len, n, (uint8_t *)&fw_log,
         DMA_DIRECTION_FROM_DEVICE);
 }
@@ -924,6 +1063,11 @@ static uint16_t nvme_error_log_info(NvmeCtrl *n, NvmeCmd *cmd, uint32_t buf_len)
 {
     uint32_t trans_len;
     trans_len = MIN(sizeof(*n->elpes) * n->elpe, buf_len);
+    if (buf_len % sizeof(*n->elpes)) {
+        NVME_LOG(ERR, "buffer not multiple of log page size:%"PRIu64"",
+            sizeof(*n->elpes));
+    }
+
     n->aer_mask &= ~(1 << NVME_AER_TYPE_ERROR);
     if (!QSIMPLEQ_EMPTY(&n->aer_queue)) {
         qemu_mod_timer(n->aer_timer, qemu_get_clock_ns(vm_clock) + 10000);
@@ -950,6 +1094,11 @@ static uint16_t nvme_smart_info(NvmeCtrl *n, NvmeCmd *cmd, uint32_t buf_len)
     NvmeSmartLog smart_log;
 
     trans_len = MIN(sizeof(smart_log), buf_len);
+    if (buf_len < sizeof(smart_log)) {
+        NVME_LOG(ERR, "not enough memory for log, needs %ld, has %u bytes",
+            sizeof(smart_log), buf_len);
+    }
+
     memset(&smart_log, 0x0, sizeof(smart_log));
     if (cmd->nsid == 0xffffffff || !(n->id_ctrl.lpa & 0x1)) {
         int i;
@@ -1052,6 +1201,7 @@ static uint16_t nvme_get_log(NvmeCtrl *n, NvmeCmd *cmd)
 {
     uint16_t lid = cmd->cdw10 & 0xffff;
     uint32_t len = ((cmd->cdw10 >> 16) & 0xff) << 2;
+    NVME_LOG(ADM_DBG, "ctrl:%u log id:%u len:%u", n->instance, lid, len);
     switch (lid) {
     case NVME_LOG_ERROR_INFO:
         return nvme_error_log_info(n, cmd, len);
@@ -1072,6 +1222,7 @@ static uint16_t nvme_abort_req(NvmeCtrl *n, NvmeCmd *cmd, uint32_t *result)
     NvmeSQueue *sq;
 
     *result = 1;
+    NVME_LOG(INFO, "sq:%u cid:%x", sqid, cid);
     if (nvme_check_sqid(n, sqid)) {
         return NVME_SUCCESS;
     }
@@ -1090,6 +1241,7 @@ static uint16_t nvme_abort_req(NvmeCtrl *n, NvmeCmd *cmd, uint32_t *result)
         pci_dma_read(&n->dev, addr, (void *)&abort_cmd, sizeof(abort_cmd));
         if (abort_cmd.cid == cid) {
             NvmeRequest *req = QTAILQ_FIRST(&sq->req_list);
+            NVME_LOG(INFO, "found sq:%u cid:%x to abort", sqid, cid);
             QTAILQ_REMOVE(&sq->req_list, req, entry);
             QTAILQ_INSERT_TAIL(&sq->out_req_list, req, entry);
             memset(&req->cqe, 0, sizeof(req->cqe));
@@ -1103,6 +1255,9 @@ static uint16_t nvme_abort_req(NvmeCtrl *n, NvmeCmd *cmd, uint32_t *result)
         }
         ++index;
     }
+    if (*result) {
+        NVME_LOG(ERR, "failed to find req to abort sq:%u cid:%x", sqid, cid);
+    }
     return NVME_SUCCESS;
 }
 
@@ -1113,28 +1268,43 @@ static uint16_t nvme_format_namespace(NvmeNamespace *ns, uint8_t lba_idx,
     uint8_t lbaf = NVME_ID_NS_FLBAS_INDEX(ns->id_ns.flbas);
 
     if (lba_idx > ns->id_ns.nlbaf) {
+        NVME_LOG(ERR, "invalid lba index:%u", lba_idx);
         return NVME_INVALID_FORMAT | NVME_DNR;
     }
     if (pi) {
         if (pil && !NVME_ID_NS_DPC_LAST_EIGHT(ns->id_ns.dpc)) {
+            NVME_LOG(ERR, "pi requested as last 8 bytes, dpc:%x",
+                ns->id_ns.dpc);
             return NVME_INVALID_FORMAT | NVME_DNR;
         }
         if (!pil && !NVME_ID_NS_DPC_FIRST_EIGHT(ns->id_ns.dpc)) {
+            NVME_LOG(ERR, "pi requested as first 8 bytes, dpc:%x",
+                ns->id_ns.dpc);
             return NVME_INVALID_FORMAT | NVME_DNR;
         }
         if (!((ns->id_ns.dpc & 0x7) & (1 << (pi - 1)))) {
+            NVME_LOG(ERR, "invalid pi type:%u, dpc:%x",
+                pi, ns->id_ns.dpc);
             return NVME_INVALID_FORMAT | NVME_DNR;
         }
     }
     if (meta_loc && ns->id_ns.lbaf[lba_idx].ms &&
             !NVME_ID_NS_MC_EXTENDED(ns->id_ns.mc)) {
+        NVME_LOG(ERR, "invalid meta location:%x, mc:%x",
+            meta_loc, ns->id_ns.mc);
         return NVME_INVALID_FORMAT | NVME_DNR;
     }
     if (!meta_loc && ns->id_ns.lbaf[lba_idx].ms &&
             !NVME_ID_NS_MC_SEPARATE(ns->id_ns.mc)) {
+        NVME_LOG(ERR, "invalid meta location:%x, mc:%x",
+            meta_loc, ns->id_ns.mc);
         return NVME_INVALID_FORMAT | NVME_DNR;
     }
 
+    NVME_LOG(DBG,
+        "ctrl:%u nsid:%x lba index:%u meta loc:%u pil:%u pi:%u erase:%u",
+        ns->ctrl->instance, ns->id, lba_idx, meta_loc, pil, pi, sec_erase);
+
     g_free(ns->util);
     old_size = ns->id_ns.nsze * (1 << ns->id_ns.lbaf[lbaf].ds);
     ns->id_ns.nuse = 0;
@@ -1179,6 +1349,7 @@ static uint16_t nvme_format(NvmeCtrl *n, NvmeCmd *cmd)
     }
 
     if (nsid == 0 || nsid > n->num_namespaces) {
+        NVME_LOG(ERR, "invalid nsid:%u", nsid);
         return NVME_INVALID_NSID | NVME_DNR;
     }
 
@@ -1189,6 +1360,8 @@ static uint16_t nvme_format(NvmeCtrl *n, NvmeCmd *cmd)
 
 static uint16_t nvme_admin_cmd(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
 {
+    NVME_LOG(IO_DBG, "opcode:%x ctrl:%u sq:%u head:%u tail:%u", cmd->opcode,
+        n->instance, req->sq->id, req->sq->head, req->sq->tail);
     switch (cmd->opcode) {
     case NVME_ADM_CMD_DELETE_SQ:
         return nvme_del_sq(n, cmd);
@@ -1217,6 +1390,7 @@ static uint16_t nvme_admin_cmd(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
     case NVME_ADM_CMD_SECURITY_SEND:
     case NVME_ADM_CMD_SECURITY_RECV:
     default:
+        NVME_LOG(ERR, "invalid opcode:%x", cmd->opcode);
         return NVME_INVALID_OPCODE | NVME_DNR;
     }
 }
@@ -1232,6 +1406,8 @@ static void nvme_sq_process(void *opaque)
     NvmeCQueue *cq = n->cq[sq->cqid];
     int processed = 0;
 
+    NVME_LOG(IO_DBG, "ctrl:%u sq:%u cq:%u burst:%u head:%u tail:%u",
+        n->instance, sq->id, cq->id, sq->arb_burst, sq->head, sq->tail);
     while (!(nvme_sq_empty(sq) || QTAILQ_EMPTY(&sq->req_list)) &&
             processed++ < sq->arb_burst) {
         if (sq->phys_contig) {
@@ -1251,6 +1427,7 @@ static void nvme_sq_process(void *opaque)
         req->aiocb = NULL;
         req->aiocb_dsm = NULL;
 
+        printf("about to call nvme command handler\n");
         status = sq->id ? nvme_io_cmd(n, &cmd, req) :
             nvme_admin_cmd(n, &cmd, req);
         if (status != NVME_NO_COMPLETE) {
@@ -1258,6 +1435,8 @@ static void nvme_sq_process(void *opaque)
             nvme_enqueue_req_completion(cq, req);
         }
     }
+    sq->completed += processed;
+    NVME_LOG(IO_DBG, "sq:%u cq:%u processed:%d", sq->id, cq->id, processed);
 
     if (!nvme_sq_empty(sq)) {
         qemu_mod_timer(sq->timer, qemu_get_clock_ns(vm_clock) + 500);
@@ -1272,11 +1451,14 @@ static void nvme_clear_ctrl(NvmeCtrl *n)
 
     for (i = 0; i < n->num_queues; i++) {
         if (n->sq[i] != NULL) {
+            NVME_LOG(INFO, "ctrl:%u sq:%u cq:%u io completed:%lu", n->instance,
+                n->sq[i]->id, n->sq[i]->cqid, n->sq[i]->completed);
             nvme_free_sq(n->sq[i], n);
         }
     }
     for (i = 0; i < n->num_queues; i++) {
         if (n->cq[i] != NULL) {
+            NVME_LOG(INFO, "ctrl:%u cq:%u", n->instance, n->cq[i]->id);
             nvme_free_cq(n->cq[i], n);
         }
     }
@@ -1297,30 +1479,45 @@ static int nvme_start_ctrl(NvmeCtrl *n)
     uint32_t page_bits = NVME_CC_MPS(n->bar.cc) + 12;
     uint32_t page_size = 1 << page_bits;
     if (n->cq[0] || n->sq[0]) {
+        NVME_LOG(ERR, "admin sq/cq already set");
         return -1;
     }
     if (!n->bar.asq || !n->bar.acq) {
+        NVME_LOG(ERR, "initialize attempted before admin queues created");
         return -1;
     }
     if (n->bar.asq & (page_size - 1) || n->bar.acq & (page_size - 1)) {
+        NVME_LOG(ERR, "asq:%"PRIx64" acq:%"PRIx64" not page aligned:%x",
+            n->bar.asq, n->bar.acq, page_size);
         return -1;
     }
     if (NVME_CC_MPS(n->bar.cc) < NVME_CAP_MPSMIN(n->bar.cap) ||
             NVME_CC_MPS(n->bar.cc) > NVME_CAP_MPSMAX(n->bar.cap)) {
+        NVME_LOG(ERR, "bad page size:%u min:%u max:%u",
+            NVME_CC_MPS(n->bar.cc), (uint32_t)NVME_CAP_MPSMIN(n->bar.cap),
+            (uint32_t)NVME_CAP_MPSMAX(n->bar.cap));
         return -1;
     }
     if (NVME_CC_IOCQES(n->bar.cc) < NVME_CTRL_CQES_MIN(n->id_ctrl.cqes) ||
             NVME_CC_IOCQES(n->bar.cc) > NVME_CTRL_CQES_MAX(n->id_ctrl.cqes)) {
+        NVME_LOG(ERR, "bad cq entry size:%u min:%x max:%u",
+            NVME_CC_IOCQES(n->bar.cc), NVME_CTRL_CQES_MIN(n->id_ctrl.cqes),
+            NVME_CTRL_CQES_MAX(n->id_ctrl.cqes));
         return -1;
     }
     if (NVME_CC_IOSQES(n->bar.cc) < NVME_CTRL_SQES_MIN(n->id_ctrl.cqes) ||
             NVME_CC_IOSQES(n->bar.cc) > NVME_CTRL_SQES_MAX(n->id_ctrl.cqes)) {
+        NVME_LOG(ERR, "bad sq entry size:%u min:%x max:%u",
+            NVME_CC_IOSQES(n->bar.cc), NVME_CTRL_SQES_MIN(n->id_ctrl.cqes),
+            NVME_CTRL_SQES_MAX(n->id_ctrl.cqes));
         return -1;
     }
     if (NVME_AQA_ASQS(n->bar.aqa) == 0 || NVME_AQA_ASQS(n->bar.aqa) > 4095) {
+        NVME_LOG(ERR, "bad asq:%u", NVME_AQA_ASQS(n->bar.aqa));
         return -1;
     }
     if (NVME_AQA_ACQS(n->bar.aqa) == 0 || NVME_AQA_ACQS(n->bar.aqa) > 4095) {
+        NVME_LOG(ERR, "bad acq:%u", NVME_AQA_ACQS(n->bar.aqa));
         return -1;
     }
 
@@ -1345,6 +1542,8 @@ static int nvme_start_ctrl(NvmeCtrl *n)
 static void nvme_write_bar(NvmeCtrl *n, hwaddr offset, uint64_t data,
     unsigned size)
 {
+    NVME_LOG(IO_DBG, "ctrl:%u offset:%"PRIx64" data:%"PRIx64"", n->instance,
+        offset, data);
     switch (offset) {
     case 0xc:
         n->bar.intms |= data & 0xffffffff;
@@ -1357,16 +1556,19 @@ static void nvme_write_bar(NvmeCtrl *n, hwaddr offset, uint64_t data,
     case 0x14:
         if (NVME_CC_EN(data) && !NVME_CC_EN(n->bar.cc)) {
             n->bar.cc = data;
+            NVME_LOG(ERR, "start ctrl:%u", n->instance);
             if (nvme_start_ctrl(n)) {
                 n->bar.csts = NVME_CSTS_FAILED;
             } else {
                 n->bar.csts = NVME_CSTS_READY;
             }
         } else if (!NVME_CC_EN(data) && NVME_CC_EN(n->bar.cc)) {
+            NVME_LOG(ERR, "disable ctrl:%u", n->instance);
             nvme_clear_ctrl(n);
             n->bar.csts &= ~NVME_CSTS_READY;
         }
         if (NVME_CC_SHN(data) && !(NVME_CC_SHN(n->bar.cc))) {
+                NVME_LOG(ERR, "shutdown ctrl:%u", n->instance);
                 nvme_clear_ctrl(n);
                 n->bar.cc = data;
                 n->bar.csts |= NVME_CSTS_SHST_COMPLETE;
@@ -1400,6 +1602,8 @@ static uint64_t nvme_mmio_read(void *opaque, hwaddr addr, unsigned size)
     uint64_t val = 0;
     if (addr < sizeof(n->bar)) {
         memcpy(&val, ptr + addr, size);
+    } else {
+        NVME_LOG(ERR, "bad bar offset %ld", addr);
     }
     return val;
 }
@@ -1407,7 +1611,10 @@ static uint64_t nvme_mmio_read(void *opaque, hwaddr addr, unsigned size)
 static void nvme_process_db(NvmeCtrl *n, hwaddr addr, int val)
 {
     uint32_t qid;
+    NVME_LOG(IO_DBG, "ctrl:%u addr:%"PRIx64" val:%u", n->instance, addr, val);
     if (addr & ((1 << (2 + n->db_stride)) - 1)) {
+        NVME_LOG(ERR, "invalid doorbell:%"PRIx64", within stride:%u", addr,
+            n->db_stride);
         nvme_enqueue_event(n, NVME_AER_TYPE_ERROR,
             NVME_AER_INFO_ERR_INVALID_DB, NVME_LOG_ERROR_INFO);
         return;
@@ -1421,6 +1628,7 @@ static void nvme_process_db(NvmeCtrl *n, hwaddr addr, int val)
         qid = (addr - (0x1000 + (1 << (2 + n->db_stride)))) >>
             (3 + n->db_stride);
         if (nvme_check_cqid(n, qid)) {
+            NVME_LOG(ERR, "invalid cq:%u for addr:%"PRIx64"", qid, addr);
             nvme_enqueue_event(n, NVME_AER_TYPE_ERROR,
                 NVME_AER_INFO_ERR_INVALID_DB, NVME_LOG_ERROR_INFO);
             return;
@@ -1428,11 +1636,15 @@ static void nvme_process_db(NvmeCtrl *n, hwaddr addr, int val)
 
         cq = n->cq[qid];
         if (new_head >= cq->size) {
+            NVME_LOG(ERR, "invalid head:%u size:%u", new_head, cq->size);
             nvme_enqueue_event(n, NVME_AER_TYPE_ERROR,
                 NVME_AER_INFO_ERR_INVALID_DB, NVME_LOG_ERROR_INFO);
             return;
         }
 
+        NVME_LOG(IO_DBG,
+            "ctrl:%u cq:%u tail:%u current head:%u new head:%u",
+            n->instance, qid, cq->tail, cq->head, new_head);
         start_sqs = nvme_cq_full(cq) ? 1 : 0;
         cq->head = new_head;
         if (start_sqs) {
@@ -1451,6 +1663,7 @@ static void nvme_process_db(NvmeCtrl *n, hwaddr addr, int val)
 
         qid = (addr - 0x1000) >> (3 + n->db_stride);
         if (nvme_check_sqid(n, qid)) {
+            NVME_LOG(ERR, "invalid sq:%u", qid);
             nvme_enqueue_event(n, NVME_AER_TYPE_ERROR,
                 NVME_AER_INFO_ERR_INVALID_SQ, NVME_LOG_ERROR_INFO);
             return;
@@ -1458,10 +1671,15 @@ static void nvme_process_db(NvmeCtrl *n, hwaddr addr, int val)
 
         sq = n->sq[qid];
         if (new_tail >= sq->size) {
+            NVME_LOG(ERR, "invalid tail:%u size:%u", new_tail, sq->size);
             nvme_enqueue_event(n, NVME_AER_TYPE_ERROR,
                 NVME_AER_INFO_ERR_INVALID_DB, NVME_LOG_ERROR_INFO);
             return;
         }
+
+        NVME_LOG(IO_DBG,
+            "ctrl:%u sq:%u head:%u current tail:%u new tail:%u",
+            n->instance, qid, sq->head, sq->tail, new_tail);
         sq->tail = new_tail;
         qemu_mod_timer(sq->timer, qemu_get_clock_ns(vm_clock) + 500);
     }
@@ -1475,6 +1693,8 @@ static void nvme_mmio_write(void *opaque, hwaddr addr, uint64_t data,
         nvme_write_bar(n, addr, data, size);
     } else if (addr >= 0x1000) {
         nvme_process_db(n, addr, data);
+    } else {
+        NVME_LOG(ERR, "bad bar offset %ld", addr);
     }
 }
 
@@ -1496,32 +1716,49 @@ static int nvme_init(PCIDevice *pci_dev)
     int64_t bs_size;
     int i, j;
 
+    NVME_LOG(DBG, "new controller B:D.f: %02x:%02x.%u",
+        pci_bus_num(pci_dev->bus), PCI_SLOT(pci_dev->devfn),
+        PCI_FUNC(pci_dev->devfn));
     if (!n->conf.bs) {
+        NVME_LOG(ERR, "drive property not set");
         return -1;
     }
     if (n->num_namespaces == 0 || n->num_namespaces > NVME_MAX_NUM_NAMESPACES) {
+        NVME_LOG(ERR, "requested invalid number of namespace:%u max:%u",
+            n->num_namespaces, NVME_MAX_NUM_NAMESPACES);
         return -1;
     }
     if (n->num_queues < 1 || n->num_queues > NVME_MAX_QS) {
+        NVME_LOG(ERR, "requested invalid number of queues:%u max:%u",
+            n->num_queues, NVME_MAX_QS);
         return -1;
     }
     if (n->db_stride > NVME_MAX_STRIDE) {
+        NVME_LOG(ERR, "requested invalid stride:%u max:%u",
+            n->db_stride, NVME_MAX_STRIDE);
         return -1;
     }
     if (n->max_q_ents < 1 || n->max_q_ents > NVME_MAX_QUEUE_ENTRIES) {
+        NVME_LOG(ERR, "requested invalid queue entries:%u, max:%u",
+            n->max_q_ents, NVME_MAX_QUEUE_ENTRIES);
         return -1;
     }
     if (n->max_sqes > NVME_MAX_QUEUE_ES || n->max_cqes > NVME_MAX_QUEUE_ES ||
         n->max_sqes < NVME_MIN_SQUEUE_ES || n->max_cqes < NVME_MIN_CQUEUE_ES) {
+        NVME_LOG(ERR, "requested max queue entry size sq:%d cq:%d", 
n->max_sqes,
+            n->max_cqes);
         return -1;
     }
     if (n->cqr > 1) {
+        NVME_LOG(ERR, "requested invalid contiguous regions:%u", n->cqr);
         return -1;
     }
     if (n->vwc > 1) {
+        NVME_LOG(ERR, "requested invalid volatile write cache:%u", n->vwc);
         return -1;
     }
     if (n->lba_index > 3) {
+        NVME_LOG(ERR, "requested invalid lba index:%u", n->lba_index);
         return -1;
     }
 
@@ -1553,7 +1790,13 @@ static int nvme_init(PCIDevice *pci_dev)
     pci_register_bar(&n->dev, 0,
         PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_TYPE_64,
         &n->iomem);
-    msix_init_exclusive_bar(&n->dev, n->num_queues, 4);
+
+    if (msix_init_exclusive_bar(&n->dev, n->num_queues, 4)) {
+        NVME_LOG(ERR, "ctrl:%u msix init failed", n->instance);
+    }
+    if (msi_init(&n->dev, 0x50, 32, true, false) < 0) {
+        NVME_LOG(ERR, "ctrl:%u msi init failed", n->instance);
+    }
 
     id->vid     = PCI_VENDOR_ID_INTEL;
     id->ssvid   = 0x0111;
@@ -1608,22 +1851,31 @@ static int nvme_init(PCIDevice *pci_dev)
     n->bar.vs = 0x00010001;
     n->bar.intmc = n->bar.intms = 0;
 
+    NVME_LOG(DBG, "ctrl:%u cap:%016lx", n->instance, n->bar.cap);
+
     for (i = 0; i < n->num_namespaces; i++) {
         NvmeNamespace *ns = &n->namespaces[i];
         NvmeIdNs *id_ns = &ns->id_ns;
-        id_ns->ncap  = id_ns->nsze = (n->ns_size) >> 9;
         id_ns->nlbaf = 0x4;
         id_ns->flbas = n->lba_index;
 
         for (j = 0; j <= id_ns->nlbaf; j++) {
             id_ns->lbaf[j].ds = 9 + j;
         }
+        id_ns->ncap  = id_ns->nsze = (n->ns_size) >> id_ns->lbaf[id_ns->flbas].ds;
         ns->id = i + 1;
         ns->ctrl = n;
-        ns->start_block = id_ns->nsze * i;
+        ns->start_block = (n->ns_size / BDRV_SECTOR_SIZE) * i;
         ns->util = bitmap_new(id_ns->nsze);
     }
 
+    NVME_LOG(DBG, "NVMe controller properties:\n"
+        "\tctrl:%u namespaces:%u ns size:%u queues:%u stride:%u bar size:%u\n"\
+        "\tqueue entries:%u cqr:%u mdts:%u aerl:%u acl:%u elpe:%u\n"\
+        "\tfile size:%"PRIu64"", instance, n->num_namespaces, n->ns_size,
+        n->num_queues, n->db_stride, n->reg_size, n->max_q_ents, n->cqr,
+        n->mdts, n->aerl, n->acl, n->elpe, bdrv_getlength(n->conf.bs));
+
     return 0;
 }
 
@@ -1631,6 +1883,7 @@ static void nvme_exit(PCIDevice *pci_dev)
 {
     int i;
     NvmeCtrl *n = DO_UPCAST(NvmeCtrl, dev, pci_dev);
+    NVME_LOG(DBG, "ctrl:%u", n->instance);
     nvme_clear_ctrl(n);
     for (i = 0; i < n->num_namespaces; i++) {
         g_free(n->namespaces[i].util);
@@ -1648,6 +1901,7 @@ static void nvme_exit(PCIDevice *pci_dev)
 static void nvme_reset(DeviceState *dev)
 {
     NvmeCtrl *n = DO_UPCAST(NvmeCtrl, dev.qdev, dev);
+    NVME_LOG(DBG, "ctrl:%u", n->instance);
     (void)n;
 }
 
-- 
1.7.0.4



