author    Doug Gale <doug16k@gmail.com>  2017-11-03 09:37:53 -0400
committer Kevin Wolf <kwolf@redhat.com>  2017-12-22 15:03:41 +0100
commit    1ee24514aed34760fb2863d98bea3a1b705d9c9f (patch)
tree      ea234f8d9d62179f07dfafbca7682eeb625da872 /hw/block/nvme.c
parent    6b4738ce4d32d551b37afb387813a37a24b6de8f (diff)
download  qemu-1ee24514aed34760fb2863d98bea3a1b705d9c9f.tar.gz
nvme: Add tracing
Add trace output for commands, errors, and undefined behavior.
Add guest error log output for undefined behavior.
Report invalid undefined accesses to MMIO.
Annotate unlikely error checks with unlikely.

Signed-off-by: Doug Gale <doug16k@gmail.com>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
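Note: the patch centralizes the "fire a trace point, then log a guest error" pattern in the NVME_GUEST_ERR macro added near the top of nvme.c. The following is a minimal, standalone sketch of that pattern for illustration only; the trace_nvme_ub_example() and qemu_log_mask() stubs are stand-ins (in QEMU the trace_* helpers are generated from the trace-events file and qemu_log_mask() comes from qemu/log.h), not the actual implementation.

    /* Illustrative sketch: standalone demo of the trace-plus-guest-error
     * pattern introduced by this patch.  The stubs below are NOT QEMU code. */
    #include <stdio.h>
    #include <stdarg.h>
    #include <inttypes.h>

    #define LOG_GUEST_ERROR 1  /* stand-in for the flag from qemu/log.h */

    static void qemu_log_mask(int mask, const char *fmt, ...)
    {
        va_list ap;
        va_start(ap, fmt);
        vfprintf(stderr, fmt, ap);  /* real QEMU routes this through its log backend */
        va_end(ap);
    }

    /* Stand-in for a generated trace point such as trace_nvme_ub_mmiowr_invalid(). */
    static void trace_nvme_ub_example(uint64_t offset, uint64_t data)
    {
        fprintf(stderr, "trace: nvme_ub_example offset=0x%" PRIx64
                " data=0x%" PRIx64 "\n", offset, data);
    }

    /* Same shape as the macro in the patch: emit the trace event, then log
     * the event name plus a formatted message as a guest error. */
    #define NVME_GUEST_ERR(trace, fmt, ...) \
        do { \
            (trace_##trace)(__VA_ARGS__); \
            qemu_log_mask(LOG_GUEST_ERROR, #trace \
                " in %s: " fmt "\n", __func__, ## __VA_ARGS__); \
        } while (0)

    int main(void)
    {
        uint64_t offset = 0x4242, data = 0xdeadbeef;
        NVME_GUEST_ERR(nvme_ub_example,
                       "invalid MMIO write, offset=0x%"PRIx64", data=%"PRIx64"",
                       offset, data);
        return 0;
    }

This mirrors how the real call sites (for example the default case of nvme_write_bar) report undefined guest accesses both to the tracing subsystem and to the -d guest_errors log.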
Diffstat (limited to 'hw/block/nvme.c')
-rw-r--r--  hw/block/nvme.c  349
1 file changed, 297 insertions(+), 52 deletions(-)
diff --git a/hw/block/nvme.c b/hw/block/nvme.c
index e529e88e4e..1ac356d3a5 100644
--- a/hw/block/nvme.c
+++ b/hw/block/nvme.c
@@ -34,8 +34,17 @@
#include "qapi/visitor.h"
#include "sysemu/block-backend.h"
+#include "qemu/log.h"
+#include "trace.h"
#include "nvme.h"
+#define NVME_GUEST_ERR(trace, fmt, ...) \
+ do { \
+ (trace_##trace)(__VA_ARGS__); \
+ qemu_log_mask(LOG_GUEST_ERROR, #trace \
+ " in %s: " fmt "\n", __func__, ## __VA_ARGS__); \
+ } while (0)
+
static void nvme_process_sq(void *opaque);
static void nvme_addr_read(NvmeCtrl *n, hwaddr addr, void *buf, int size)
@@ -86,10 +95,14 @@ static void nvme_isr_notify(NvmeCtrl *n, NvmeCQueue *cq)
{
if (cq->irq_enabled) {
if (msix_enabled(&(n->parent_obj))) {
+ trace_nvme_irq_msix(cq->vector);
msix_notify(&(n->parent_obj), cq->vector);
} else {
+ trace_nvme_irq_pin();
pci_irq_pulse(&n->parent_obj);
}
+ } else {
+ trace_nvme_irq_masked();
}
}
@@ -100,7 +113,8 @@ static uint16_t nvme_map_prp(QEMUSGList *qsg, QEMUIOVector *iov, uint64_t prp1,
trans_len = MIN(len, trans_len);
int num_prps = (len >> n->page_bits) + 1;
- if (!prp1) {
+ if (unlikely(!prp1)) {
+ trace_nvme_err_invalid_prp();
return NVME_INVALID_FIELD | NVME_DNR;
} else if (n->cmbsz && prp1 >= n->ctrl_mem.addr &&
prp1 < n->ctrl_mem.addr + int128_get64(n->ctrl_mem.size)) {
@@ -113,7 +127,8 @@ static uint16_t nvme_map_prp(QEMUSGList *qsg, QEMUIOVector *iov, uint64_t prp1,
}
len -= trans_len;
if (len) {
- if (!prp2) {
+ if (unlikely(!prp2)) {
+ trace_nvme_err_invalid_prp2_missing();
goto unmap;
}
if (len > n->page_size) {
@@ -128,7 +143,8 @@ static uint16_t nvme_map_prp(QEMUSGList *qsg, QEMUIOVector *iov, uint64_t prp1,
uint64_t prp_ent = le64_to_cpu(prp_list[i]);
if (i == n->max_prp_ents - 1 && len > n->page_size) {
- if (!prp_ent || prp_ent & (n->page_size - 1)) {
+ if (unlikely(!prp_ent || prp_ent & (n->page_size - 1))) {
+ trace_nvme_err_invalid_prplist_ent(prp_ent);
goto unmap;
}
@@ -140,7 +156,8 @@ static uint16_t nvme_map_prp(QEMUSGList *qsg, QEMUIOVector *iov, uint64_t prp1,
prp_ent = le64_to_cpu(prp_list[i]);
}
- if (!prp_ent || prp_ent & (n->page_size - 1)) {
+ if (unlikely(!prp_ent || prp_ent & (n->page_size - 1))) {
+ trace_nvme_err_invalid_prplist_ent(prp_ent);
goto unmap;
}
@@ -154,7 +171,8 @@ static uint16_t nvme_map_prp(QEMUSGList *qsg, QEMUIOVector *iov, uint64_t prp1,
i++;
}
} else {
- if (prp2 & (n->page_size - 1)) {
+ if (unlikely(prp2 & (n->page_size - 1))) {
+ trace_nvme_err_invalid_prp2_align(prp2);
goto unmap;
}
if (qsg->nsg) {
@@ -178,16 +196,20 @@ static uint16_t nvme_dma_read_prp(NvmeCtrl *n, uint8_t *ptr, uint32_t len,
QEMUIOVector iov;
uint16_t status = NVME_SUCCESS;
+ trace_nvme_dma_read(prp1, prp2);
+
if (nvme_map_prp(&qsg, &iov, prp1, prp2, len, n)) {
return NVME_INVALID_FIELD | NVME_DNR;
}
if (qsg.nsg > 0) {
- if (dma_buf_read(ptr, len, &qsg)) {
+ if (unlikely(dma_buf_read(ptr, len, &qsg))) {
+ trace_nvme_err_invalid_dma();
status = NVME_INVALID_FIELD | NVME_DNR;
}
qemu_sglist_destroy(&qsg);
} else {
- if (qemu_iovec_to_buf(&iov, 0, ptr, len) != len) {
+ if (unlikely(qemu_iovec_to_buf(&iov, 0, ptr, len) != len)) {
+ trace_nvme_err_invalid_dma();
status = NVME_INVALID_FIELD | NVME_DNR;
}
qemu_iovec_destroy(&iov);
@@ -273,7 +295,8 @@ static uint16_t nvme_write_zeros(NvmeCtrl *n, NvmeNamespace *ns, NvmeCmd *cmd,
uint64_t aio_slba = slba << (data_shift - BDRV_SECTOR_BITS);
uint32_t aio_nlb = nlb << (data_shift - BDRV_SECTOR_BITS);
- if (slba + nlb > ns->id_ns.nsze) {
+ if (unlikely(slba + nlb > ns->id_ns.nsze)) {
+ trace_nvme_err_invalid_lba_range(slba, nlb, ns->id_ns.nsze);
return NVME_LBA_RANGE | NVME_DNR;
}
@@ -301,8 +324,11 @@ static uint16_t nvme_rw(NvmeCtrl *n, NvmeNamespace *ns, NvmeCmd *cmd,
int is_write = rw->opcode == NVME_CMD_WRITE ? 1 : 0;
enum BlockAcctType acct = is_write ? BLOCK_ACCT_WRITE : BLOCK_ACCT_READ;
- if ((slba + nlb) > ns->id_ns.nsze) {
+ trace_nvme_rw(is_write ? "write" : "read", nlb, data_size, slba);
+
+ if (unlikely((slba + nlb) > ns->id_ns.nsze)) {
block_acct_invalid(blk_get_stats(n->conf.blk), acct);
+ trace_nvme_err_invalid_lba_range(slba, nlb, ns->id_ns.nsze);
return NVME_LBA_RANGE | NVME_DNR;
}
@@ -336,7 +362,8 @@ static uint16_t nvme_io_cmd(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
NvmeNamespace *ns;
uint32_t nsid = le32_to_cpu(cmd->nsid);
- if (nsid == 0 || nsid > n->num_namespaces) {
+ if (unlikely(nsid == 0 || nsid > n->num_namespaces)) {
+ trace_nvme_err_invalid_ns(nsid, n->num_namespaces);
return NVME_INVALID_NSID | NVME_DNR;
}
@@ -350,6 +377,7 @@ static uint16_t nvme_io_cmd(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
case NVME_CMD_READ:
return nvme_rw(n, ns, cmd, req);
default:
+ trace_nvme_err_invalid_opc(cmd->opcode);
return NVME_INVALID_OPCODE | NVME_DNR;
}
}
@@ -373,10 +401,13 @@ static uint16_t nvme_del_sq(NvmeCtrl *n, NvmeCmd *cmd)
NvmeCQueue *cq;
uint16_t qid = le16_to_cpu(c->qid);
- if (!qid || nvme_check_sqid(n, qid)) {
+ if (unlikely(!qid || nvme_check_sqid(n, qid))) {
+ trace_nvme_err_invalid_del_sq(qid);
return NVME_INVALID_QID | NVME_DNR;
}
+ trace_nvme_del_sq(qid);
+
sq = n->sq[qid];
while (!QTAILQ_EMPTY(&sq->out_req_list)) {
req = QTAILQ_FIRST(&sq->out_req_list);
@@ -439,19 +470,26 @@ static uint16_t nvme_create_sq(NvmeCtrl *n, NvmeCmd *cmd)
uint16_t qflags = le16_to_cpu(c->sq_flags);
uint64_t prp1 = le64_to_cpu(c->prp1);
- if (!cqid || nvme_check_cqid(n, cqid)) {
+ trace_nvme_create_sq(prp1, sqid, cqid, qsize, qflags);
+
+ if (unlikely(!cqid || nvme_check_cqid(n, cqid))) {
+ trace_nvme_err_invalid_create_sq_cqid(cqid);
return NVME_INVALID_CQID | NVME_DNR;
}
- if (!sqid || !nvme_check_sqid(n, sqid)) {
+ if (unlikely(!sqid || !nvme_check_sqid(n, sqid))) {
+ trace_nvme_err_invalid_create_sq_sqid(sqid);
return NVME_INVALID_QID | NVME_DNR;
}
- if (!qsize || qsize > NVME_CAP_MQES(n->bar.cap)) {
+ if (unlikely(!qsize || qsize > NVME_CAP_MQES(n->bar.cap))) {
+ trace_nvme_err_invalid_create_sq_size(qsize);
return NVME_MAX_QSIZE_EXCEEDED | NVME_DNR;
}
- if (!prp1 || prp1 & (n->page_size - 1)) {
+ if (unlikely(!prp1 || prp1 & (n->page_size - 1))) {
+ trace_nvme_err_invalid_create_sq_addr(prp1);
return NVME_INVALID_FIELD | NVME_DNR;
}
- if (!(NVME_SQ_FLAGS_PC(qflags))) {
+ if (unlikely(!(NVME_SQ_FLAGS_PC(qflags)))) {
+ trace_nvme_err_invalid_create_sq_qflags(NVME_SQ_FLAGS_PC(qflags));
return NVME_INVALID_FIELD | NVME_DNR;
}
sq = g_malloc0(sizeof(*sq));
@@ -476,14 +514,17 @@ static uint16_t nvme_del_cq(NvmeCtrl *n, NvmeCmd *cmd)
NvmeCQueue *cq;
uint16_t qid = le16_to_cpu(c->qid);
- if (!qid || nvme_check_cqid(n, qid)) {
+ if (unlikely(!qid || nvme_check_cqid(n, qid))) {
+ trace_nvme_err_invalid_del_cq_cqid(qid);
return NVME_INVALID_CQID | NVME_DNR;
}
cq = n->cq[qid];
- if (!QTAILQ_EMPTY(&cq->sq_list)) {
+ if (unlikely(!QTAILQ_EMPTY(&cq->sq_list))) {
+ trace_nvme_err_invalid_del_cq_notempty(qid);
return NVME_INVALID_QUEUE_DEL;
}
+ trace_nvme_del_cq(qid);
nvme_free_cq(cq, n);
return NVME_SUCCESS;
}
@@ -516,19 +557,27 @@ static uint16_t nvme_create_cq(NvmeCtrl *n, NvmeCmd *cmd)
uint16_t qflags = le16_to_cpu(c->cq_flags);
uint64_t prp1 = le64_to_cpu(c->prp1);
- if (!cqid || !nvme_check_cqid(n, cqid)) {
+ trace_nvme_create_cq(prp1, cqid, vector, qsize, qflags,
+ NVME_CQ_FLAGS_IEN(qflags) != 0);
+
+ if (unlikely(!cqid || !nvme_check_cqid(n, cqid))) {
+ trace_nvme_err_invalid_create_cq_cqid(cqid);
return NVME_INVALID_CQID | NVME_DNR;
}
- if (!qsize || qsize > NVME_CAP_MQES(n->bar.cap)) {
+ if (unlikely(!qsize || qsize > NVME_CAP_MQES(n->bar.cap))) {
+ trace_nvme_err_invalid_create_cq_size(qsize);
return NVME_MAX_QSIZE_EXCEEDED | NVME_DNR;
}
- if (!prp1) {
+ if (unlikely(!prp1)) {
+ trace_nvme_err_invalid_create_cq_addr(prp1);
return NVME_INVALID_FIELD | NVME_DNR;
}
- if (vector > n->num_queues) {
+ if (unlikely(vector > n->num_queues)) {
+ trace_nvme_err_invalid_create_cq_vector(vector);
return NVME_INVALID_IRQ_VECTOR | NVME_DNR;
}
- if (!(NVME_CQ_FLAGS_PC(qflags))) {
+ if (unlikely(!(NVME_CQ_FLAGS_PC(qflags)))) {
+ trace_nvme_err_invalid_create_cq_qflags(NVME_CQ_FLAGS_PC(qflags));
return NVME_INVALID_FIELD | NVME_DNR;
}
@@ -543,6 +592,8 @@ static uint16_t nvme_identify_ctrl(NvmeCtrl *n, NvmeIdentify *c)
uint64_t prp1 = le64_to_cpu(c->prp1);
uint64_t prp2 = le64_to_cpu(c->prp2);
+ trace_nvme_identify_ctrl();
+
return nvme_dma_read_prp(n, (uint8_t *)&n->id_ctrl, sizeof(n->id_ctrl),
prp1, prp2);
}
@@ -554,11 +605,15 @@ static uint16_t nvme_identify_ns(NvmeCtrl *n, NvmeIdentify *c)
uint64_t prp1 = le64_to_cpu(c->prp1);
uint64_t prp2 = le64_to_cpu(c->prp2);
- if (nsid == 0 || nsid > n->num_namespaces) {
+ trace_nvme_identify_ns(nsid);
+
+ if (unlikely(nsid == 0 || nsid > n->num_namespaces)) {
+ trace_nvme_err_invalid_ns(nsid, n->num_namespaces);
return NVME_INVALID_NSID | NVME_DNR;
}
ns = &n->namespaces[nsid - 1];
+
return nvme_dma_read_prp(n, (uint8_t *)&ns->id_ns, sizeof(ns->id_ns),
prp1, prp2);
}
@@ -573,6 +628,8 @@ static uint16_t nvme_identify_nslist(NvmeCtrl *n, NvmeIdentify *c)
uint16_t ret;
int i, j = 0;
+ trace_nvme_identify_nslist(min_nsid);
+
list = g_malloc0(data_len);
for (i = 0; i < n->num_namespaces; i++) {
if (i < min_nsid) {
@@ -601,6 +658,7 @@ static uint16_t nvme_identify(NvmeCtrl *n, NvmeCmd *cmd)
case 0x02:
return nvme_identify_nslist(n, c);
default:
+ trace_nvme_err_invalid_identify_cns(le32_to_cpu(c->cns));
return NVME_INVALID_FIELD | NVME_DNR;
}
}
@@ -613,11 +671,14 @@ static uint16_t nvme_get_feature(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
switch (dw10) {
case NVME_VOLATILE_WRITE_CACHE:
result = blk_enable_write_cache(n->conf.blk);
+ trace_nvme_getfeat_vwcache(result ? "enabled" : "disabled");
break;
case NVME_NUMBER_OF_QUEUES:
result = cpu_to_le32((n->num_queues - 2) | ((n->num_queues - 2) << 16));
+ trace_nvme_getfeat_numq(result);
break;
default:
+ trace_nvme_err_invalid_getfeat(dw10);
return NVME_INVALID_FIELD | NVME_DNR;
}
@@ -635,10 +696,14 @@ static uint16_t nvme_set_feature(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
blk_set_enable_write_cache(n->conf.blk, dw11 & 1);
break;
case NVME_NUMBER_OF_QUEUES:
+ trace_nvme_setfeat_numq((dw11 & 0xFFFF) + 1,
+ ((dw11 >> 16) & 0xFFFF) + 1,
+ n->num_queues - 1, n->num_queues - 1);
req->cqe.result =
cpu_to_le32((n->num_queues - 2) | ((n->num_queues - 2) << 16));
break;
default:
+ trace_nvme_err_invalid_setfeat(dw10);
return NVME_INVALID_FIELD | NVME_DNR;
}
return NVME_SUCCESS;
@@ -662,6 +727,7 @@ static uint16_t nvme_admin_cmd(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
case NVME_ADM_CMD_GET_FEATURES:
return nvme_get_feature(n, cmd, req);
default:
+ trace_nvme_err_invalid_admin_opc(cmd->opcode);
return NVME_INVALID_OPCODE | NVME_DNR;
}
}
@@ -721,15 +787,78 @@ static int nvme_start_ctrl(NvmeCtrl *n)
uint32_t page_bits = NVME_CC_MPS(n->bar.cc) + 12;
uint32_t page_size = 1 << page_bits;
- if (n->cq[0] || n->sq[0] || !n->bar.asq || !n->bar.acq ||
- n->bar.asq & (page_size - 1) || n->bar.acq & (page_size - 1) ||
- NVME_CC_MPS(n->bar.cc) < NVME_CAP_MPSMIN(n->bar.cap) ||
- NVME_CC_MPS(n->bar.cc) > NVME_CAP_MPSMAX(n->bar.cap) ||
- NVME_CC_IOCQES(n->bar.cc) < NVME_CTRL_CQES_MIN(n->id_ctrl.cqes) ||
- NVME_CC_IOCQES(n->bar.cc) > NVME_CTRL_CQES_MAX(n->id_ctrl.cqes) ||
- NVME_CC_IOSQES(n->bar.cc) < NVME_CTRL_SQES_MIN(n->id_ctrl.sqes) ||
- NVME_CC_IOSQES(n->bar.cc) > NVME_CTRL_SQES_MAX(n->id_ctrl.sqes) ||
- !NVME_AQA_ASQS(n->bar.aqa) || !NVME_AQA_ACQS(n->bar.aqa)) {
+ if (unlikely(n->cq[0])) {
+ trace_nvme_err_startfail_cq();
+ return -1;
+ }
+ if (unlikely(n->sq[0])) {
+ trace_nvme_err_startfail_sq();
+ return -1;
+ }
+ if (unlikely(!n->bar.asq)) {
+ trace_nvme_err_startfail_nbarasq();
+ return -1;
+ }
+ if (unlikely(!n->bar.acq)) {
+ trace_nvme_err_startfail_nbaracq();
+ return -1;
+ }
+ if (unlikely(n->bar.asq & (page_size - 1))) {
+ trace_nvme_err_startfail_asq_misaligned(n->bar.asq);
+ return -1;
+ }
+ if (unlikely(n->bar.acq & (page_size - 1))) {
+ trace_nvme_err_startfail_acq_misaligned(n->bar.acq);
+ return -1;
+ }
+ if (unlikely(NVME_CC_MPS(n->bar.cc) <
+ NVME_CAP_MPSMIN(n->bar.cap))) {
+ trace_nvme_err_startfail_page_too_small(
+ NVME_CC_MPS(n->bar.cc),
+ NVME_CAP_MPSMIN(n->bar.cap));
+ return -1;
+ }
+ if (unlikely(NVME_CC_MPS(n->bar.cc) >
+ NVME_CAP_MPSMAX(n->bar.cap))) {
+ trace_nvme_err_startfail_page_too_large(
+ NVME_CC_MPS(n->bar.cc),
+ NVME_CAP_MPSMAX(n->bar.cap));
+ return -1;
+ }
+ if (unlikely(NVME_CC_IOCQES(n->bar.cc) <
+ NVME_CTRL_CQES_MIN(n->id_ctrl.cqes))) {
+ trace_nvme_err_startfail_cqent_too_small(
+ NVME_CC_IOCQES(n->bar.cc),
+ NVME_CTRL_CQES_MIN(n->bar.cap));
+ return -1;
+ }
+ if (unlikely(NVME_CC_IOCQES(n->bar.cc) >
+ NVME_CTRL_CQES_MAX(n->id_ctrl.cqes))) {
+ trace_nvme_err_startfail_cqent_too_large(
+ NVME_CC_IOCQES(n->bar.cc),
+ NVME_CTRL_CQES_MAX(n->bar.cap));
+ return -1;
+ }
+ if (unlikely(NVME_CC_IOSQES(n->bar.cc) <
+ NVME_CTRL_SQES_MIN(n->id_ctrl.sqes))) {
+ trace_nvme_err_startfail_sqent_too_small(
+ NVME_CC_IOSQES(n->bar.cc),
+ NVME_CTRL_SQES_MIN(n->bar.cap));
+ return -1;
+ }
+ if (unlikely(NVME_CC_IOSQES(n->bar.cc) >
+ NVME_CTRL_SQES_MAX(n->id_ctrl.sqes))) {
+ trace_nvme_err_startfail_sqent_too_large(
+ NVME_CC_IOSQES(n->bar.cc),
+ NVME_CTRL_SQES_MAX(n->bar.cap));
+ return -1;
+ }
+ if (unlikely(!NVME_AQA_ASQS(n->bar.aqa))) {
+ trace_nvme_err_startfail_asqent_sz_zero();
+ return -1;
+ }
+ if (unlikely(!NVME_AQA_ACQS(n->bar.aqa))) {
+ trace_nvme_err_startfail_acqent_sz_zero();
return -1;
}
@@ -749,16 +878,48 @@ static int nvme_start_ctrl(NvmeCtrl *n)
static void nvme_write_bar(NvmeCtrl *n, hwaddr offset, uint64_t data,
unsigned size)
{
+ if (unlikely(offset & (sizeof(uint32_t) - 1))) {
+ NVME_GUEST_ERR(nvme_ub_mmiowr_misaligned32,
+ "MMIO write not 32-bit aligned,"
+ " offset=0x%"PRIx64"", offset);
+ /* should be ignored, fall through for now */
+ }
+
+ if (unlikely(size < sizeof(uint32_t))) {
+ NVME_GUEST_ERR(nvme_ub_mmiowr_toosmall,
+ "MMIO write smaller than 32-bits,"
+ " offset=0x%"PRIx64", size=%u",
+ offset, size);
+ /* should be ignored, fall through for now */
+ }
+
switch (offset) {
- case 0xc:
+ case 0xc: /* INTMS */
+ if (unlikely(msix_enabled(&(n->parent_obj)))) {
+ NVME_GUEST_ERR(nvme_ub_mmiowr_intmask_with_msix,
+ "undefined access to interrupt mask set"
+ " when MSI-X is enabled");
+ /* should be ignored, fall through for now */
+ }
n->bar.intms |= data & 0xffffffff;
n->bar.intmc = n->bar.intms;
+ trace_nvme_mmio_intm_set(data & 0xffffffff,
+ n->bar.intmc);
break;
- case 0x10:
+ case 0x10: /* INTMC */
+ if (unlikely(msix_enabled(&(n->parent_obj)))) {
+ NVME_GUEST_ERR(nvme_ub_mmiowr_intmask_with_msix,
+ "undefined access to interrupt mask clr"
+ " when MSI-X is enabled");
+ /* should be ignored, fall through for now */
+ }
n->bar.intms &= ~(data & 0xffffffff);
n->bar.intmc = n->bar.intms;
+ trace_nvme_mmio_intm_clr(data & 0xffffffff,
+ n->bar.intmc);
break;
- case 0x14:
+ case 0x14: /* CC */
+ trace_nvme_mmio_cfg(data & 0xffffffff);
/* Windows first sends data, then sends enable bit */
if (!NVME_CC_EN(data) && !NVME_CC_EN(n->bar.cc) &&
!NVME_CC_SHN(data) && !NVME_CC_SHN(n->bar.cc))
@@ -768,40 +929,82 @@ static void nvme_write_bar(NvmeCtrl *n, hwaddr offset, uint64_t data,
if (NVME_CC_EN(data) && !NVME_CC_EN(n->bar.cc)) {
n->bar.cc = data;
- if (nvme_start_ctrl(n)) {
+ if (unlikely(nvme_start_ctrl(n))) {
+ trace_nvme_err_startfail();
n->bar.csts = NVME_CSTS_FAILED;
} else {
+ trace_nvme_mmio_start_success();
n->bar.csts = NVME_CSTS_READY;
}
} else if (!NVME_CC_EN(data) && NVME_CC_EN(n->bar.cc)) {
+ trace_nvme_mmio_stopped();
nvme_clear_ctrl(n);
n->bar.csts &= ~NVME_CSTS_READY;
}
if (NVME_CC_SHN(data) && !(NVME_CC_SHN(n->bar.cc))) {
- nvme_clear_ctrl(n);
- n->bar.cc = data;
- n->bar.csts |= NVME_CSTS_SHST_COMPLETE;
+ trace_nvme_mmio_shutdown_set();
+ nvme_clear_ctrl(n);
+ n->bar.cc = data;
+ n->bar.csts |= NVME_CSTS_SHST_COMPLETE;
} else if (!NVME_CC_SHN(data) && NVME_CC_SHN(n->bar.cc)) {
- n->bar.csts &= ~NVME_CSTS_SHST_COMPLETE;
- n->bar.cc = data;
+ trace_nvme_mmio_shutdown_cleared();
+ n->bar.csts &= ~NVME_CSTS_SHST_COMPLETE;
+ n->bar.cc = data;
+ }
+ break;
+ case 0x1C: /* CSTS */
+ if (data & (1 << 4)) {
+ NVME_GUEST_ERR(nvme_ub_mmiowr_ssreset_w1c_unsupported,
+ "attempted to W1C CSTS.NSSRO"
+ " but CAP.NSSRS is zero (not supported)");
+ } else if (data != 0) {
+ NVME_GUEST_ERR(nvme_ub_mmiowr_ro_csts,
+ "attempted to set a read only bit"
+ " of controller status");
+ }
+ break;
+ case 0x20: /* NSSR */
+ if (data == 0x4E564D65) {
+ trace_nvme_ub_mmiowr_ssreset_unsupported();
+ } else {
+ /* The spec says that writes of other values have no effect */
+ return;
}
break;
- case 0x24:
+ case 0x24: /* AQA */
n->bar.aqa = data & 0xffffffff;
+ trace_nvme_mmio_aqattr(data & 0xffffffff);
break;
- case 0x28:
+ case 0x28: /* ASQ */
n->bar.asq = data;
+ trace_nvme_mmio_asqaddr(data);
break;
- case 0x2c:
+ case 0x2c: /* ASQ hi */
n->bar.asq |= data << 32;
+ trace_nvme_mmio_asqaddr_hi(data, n->bar.asq);
break;
- case 0x30:
+ case 0x30: /* ACQ */
+ trace_nvme_mmio_acqaddr(data);
n->bar.acq = data;
break;
- case 0x34:
+ case 0x34: /* ACQ hi */
n->bar.acq |= data << 32;
+ trace_nvme_mmio_acqaddr_hi(data, n->bar.acq);
break;
+ case 0x38: /* CMBLOC */
+ NVME_GUEST_ERR(nvme_ub_mmiowr_cmbloc_reserved,
+ "invalid write to reserved CMBLOC"
+ " when CMBSZ is zero, ignored");
+ return;
+ case 0x3C: /* CMBSZ */
+ NVME_GUEST_ERR(nvme_ub_mmiowr_cmbsz_readonly,
+ "invalid write to read only CMBSZ, ignored");
+ return;
default:
+ NVME_GUEST_ERR(nvme_ub_mmiowr_invalid,
+ "invalid MMIO write,"
+ " offset=0x%"PRIx64", data=%"PRIx64"",
+ offset, data);
break;
}
}
@@ -812,9 +1015,26 @@ static uint64_t nvme_mmio_read(void *opaque, hwaddr addr, unsigned size)
uint8_t *ptr = (uint8_t *)&n->bar;
uint64_t val = 0;
+ if (unlikely(addr & (sizeof(uint32_t) - 1))) {
+ NVME_GUEST_ERR(nvme_ub_mmiord_misaligned32,
+ "MMIO read not 32-bit aligned,"
+ " offset=0x%"PRIx64"", addr);
+ /* should RAZ, fall through for now */
+ } else if (unlikely(size < sizeof(uint32_t))) {
+ NVME_GUEST_ERR(nvme_ub_mmiord_toosmall,
+ "MMIO read smaller than 32-bits,"
+ " offset=0x%"PRIx64"", addr);
+ /* should RAZ, fall through for now */
+ }
+
if (addr < sizeof(n->bar)) {
memcpy(&val, ptr + addr, size);
+ } else {
+ NVME_GUEST_ERR(nvme_ub_mmiord_invalid_ofs,
+ "MMIO read beyond last register,"
+ " offset=0x%"PRIx64", returning 0", addr);
}
+
return val;
}
@@ -822,22 +1042,36 @@ static void nvme_process_db(NvmeCtrl *n, hwaddr addr, int val)
{
uint32_t qid;
- if (addr & ((1 << 2) - 1)) {
+ if (unlikely(addr & ((1 << 2) - 1))) {
+ NVME_GUEST_ERR(nvme_ub_db_wr_misaligned,
+ "doorbell write not 32-bit aligned,"
+ " offset=0x%"PRIx64", ignoring", addr);
return;
}
if (((addr - 0x1000) >> 2) & 1) {
+ /* Completion queue doorbell write */
+
uint16_t new_head = val & 0xffff;
int start_sqs;
NvmeCQueue *cq;
qid = (addr - (0x1000 + (1 << 2))) >> 3;
- if (nvme_check_cqid(n, qid)) {
+ if (unlikely(nvme_check_cqid(n, qid))) {
+ NVME_GUEST_ERR(nvme_ub_db_wr_invalid_cq,
+ "completion queue doorbell write"
+ " for nonexistent queue,"
+ " sqid=%"PRIu32", ignoring", qid);
return;
}
cq = n->cq[qid];
- if (new_head >= cq->size) {
+ if (unlikely(new_head >= cq->size)) {
+ NVME_GUEST_ERR(nvme_ub_db_wr_invalid_cqhead,
+ "completion queue doorbell write value"
+ " beyond queue size, sqid=%"PRIu32","
+ " new_head=%"PRIu16", ignoring",
+ qid, new_head);
return;
}
@@ -855,16 +1089,27 @@ static void nvme_process_db(NvmeCtrl *n, hwaddr addr, int val)
nvme_isr_notify(n, cq);
}
} else {
+ /* Submission queue doorbell write */
+
uint16_t new_tail = val & 0xffff;
NvmeSQueue *sq;
qid = (addr - 0x1000) >> 3;
- if (nvme_check_sqid(n, qid)) {
+ if (unlikely(nvme_check_sqid(n, qid))) {
+ NVME_GUEST_ERR(nvme_ub_db_wr_invalid_sq,
+ "submission queue doorbell write"
+ " for nonexistent queue,"
+ " sqid=%"PRIu32", ignoring", qid);
return;
}
sq = n->sq[qid];
- if (new_tail >= sq->size) {
+ if (unlikely(new_tail >= sq->size)) {
+ NVME_GUEST_ERR(nvme_ub_db_wr_invalid_sqtail,
+ "submission queue doorbell write value"
+ " beyond queue size, sqid=%"PRIu32","
+ " new_tail=%"PRIu16", ignoring",
+ qid, new_tail);
return;
}