@@ -34,8 +34,17 @@
 #include "qapi/visitor.h"
 #include "sysemu/block-backend.h"
 
+#include "qemu/log.h"
+#include "trace.h"
 #include "nvme.h"
 
+#define NVME_GUEST_ERR(trace, fmt, ...) \
+    do { \
+        (trace_##trace)(__VA_ARGS__); \
+        qemu_log_mask(LOG_GUEST_ERROR, #trace \
+            " in %s: " fmt "\n", __func__, ## __VA_ARGS__); \
+    } while (0)
+
 static void nvme_process_sq(void *opaque);
 
 static void nvme_addr_read(NvmeCtrl *n, hwaddr addr, void *buf, int size)
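
The macro pairs every guest-error trace point with a qemu_log_mask(LOG_GUEST_ERROR, ...) message, so the event is visible both through the tracing backends (-trace 'nvme_*') and through the guest_errors debug class (-d guest_errors). As an illustrative expansion (not part of the patch), a call made later in this series, NVME_GUEST_ERR(nvme_ub_db_wr_misaligned, "doorbell write not 32-bit aligned," " offset=0x%"PRIx64", ignoring", addr), preprocesses to roughly:

    do {
        /* the generated trace helper for the event named by the first arg */
        (trace_nvme_ub_db_wr_misaligned)(addr);
        /* #trace stringizes the event name; ## __VA_ARGS__ forwards addr */
        qemu_log_mask(LOG_GUEST_ERROR, "nvme_ub_db_wr_misaligned"
            " in %s: " "doorbell write not 32-bit aligned,"
            " offset=0x%"PRIx64", ignoring" "\n", __func__, addr);
    } while (0);
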
@@ -86,10 +95,14 @@ static void nvme_isr_notify(NvmeCtrl *n, NvmeCQueue *cq)
 {
     if (cq->irq_enabled) {
         if (msix_enabled(&(n->parent_obj))) {
+            trace_nvme_irq_msix(cq->vector);
             msix_notify(&(n->parent_obj), cq->vector);
         } else {
+            trace_nvme_irq_pin();
             pci_irq_pulse(&n->parent_obj);
         }
+    } else {
+        trace_nvme_irq_masked();
     }
 }
 
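
Each trace_nvme_*() call introduced by this patch needs a matching declaration in the hw/block trace-events file so that tracetool can generate the trace_ helper; for the three interrupt events above, the declarations would look like this (a sketch, the exact format strings live in the trace-events half of the patch, which is not shown here):

    nvme_irq_msix(uint32_t vector) "raising MSI-X IRQ vector %u"
    nvme_irq_pin(void) "pulsing IRQ pin"
    nvme_irq_masked(void) "IRQ is masked"
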
@@ -100,7 +113,8 @@ static uint16_t nvme_map_prp(QEMUSGList *qsg, QEMUIOVector *iov, uint64_t prp1,
     trans_len = MIN(len, trans_len);
     int num_prps = (len >> n->page_bits) + 1;
 
-    if (!prp1) {
+    if (unlikely(!prp1)) {
+        trace_nvme_err_invalid_prp();
         return NVME_INVALID_FIELD | NVME_DNR;
     } else if (n->cmbsz && prp1 >= n->ctrl_mem.addr &&
                prp1 < n->ctrl_mem.addr + int128_get64(n->ctrl_mem.size)) {
@@ -113,7 +127,8 @@ static uint16_t nvme_map_prp(QEMUSGList *qsg, QEMUIOVector *iov, uint64_t prp1,
     }
     len -= trans_len;
     if (len) {
-        if (!prp2) {
+        if (unlikely(!prp2)) {
+            trace_nvme_err_invalid_prp2_missing();
             goto unmap;
         }
         if (len > n->page_size) {
@@ -128,7 +143,8 @@ static uint16_t nvme_map_prp(QEMUSGList *qsg, QEMUIOVector *iov, uint64_t prp1,
             uint64_t prp_ent = le64_to_cpu(prp_list[i]);
 
             if (i == n->max_prp_ents - 1 && len > n->page_size) {
-                if (!prp_ent || prp_ent & (n->page_size - 1)) {
+                if (unlikely(!prp_ent || prp_ent & (n->page_size - 1))) {
+                    trace_nvme_err_invalid_prplist_ent(prp_ent);
                     goto unmap;
                 }
 
@@ -140,7 +156,8 @@ static uint16_t nvme_map_prp(QEMUSGList *qsg, QEMUIOVector *iov, uint64_t prp1,
                 prp_ent = le64_to_cpu(prp_list[i]);
             }
 
-            if (!prp_ent || prp_ent & (n->page_size - 1)) {
+            if (unlikely(!prp_ent || prp_ent & (n->page_size - 1))) {
+                trace_nvme_err_invalid_prplist_ent(prp_ent);
                 goto unmap;
             }
 
@@ -154,7 +171,8 @@ static uint16_t nvme_map_prp(QEMUSGList *qsg, QEMUIOVector *iov, uint64_t prp1,
             i++;
         }
     } else {
-        if (prp2 & (n->page_size - 1)) {
+        if (unlikely(prp2 & (n->page_size - 1))) {
+            trace_nvme_err_invalid_prp2_align(prp2);
             goto unmap;
         }
         if (qsg->nsg) {
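
The four goto unmap paths above all enforce the same NVMe rule: a PRP (Physical Region Page) pointer must be non-zero, and PRP list entries and PRP2 must be page aligned. Restated as a standalone predicate (a sketch for clarity, not a helper that exists in nvme.c):

    #include <stdbool.h>
    #include <stdint.h>

    /* true iff ent can be used as a PRP list entry; page_size must be a
     * power of two (it is 1 << n->page_bits in this device model) */
    static inline bool nvme_prp_ent_valid(uint64_t ent, uint64_t page_size)
    {
        return ent != 0 && (ent & (page_size - 1)) == 0;
    }
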
@@ -178,16 +196,20 @@ static uint16_t nvme_dma_read_prp(NvmeCtrl *n, uint8_t *ptr, uint32_t len,
     QEMUIOVector iov;
     uint16_t status = NVME_SUCCESS;
 
+    trace_nvme_dma_read(prp1, prp2);
+
     if (nvme_map_prp(&qsg, &iov, prp1, prp2, len, n)) {
         return NVME_INVALID_FIELD | NVME_DNR;
     }
     if (qsg.nsg > 0) {
-        if (dma_buf_read(ptr, len, &qsg)) {
+        if (unlikely(dma_buf_read(ptr, len, &qsg))) {
+            trace_nvme_err_invalid_dma();
             status = NVME_INVALID_FIELD | NVME_DNR;
         }
         qemu_sglist_destroy(&qsg);
     } else {
-        if (qemu_iovec_to_buf(&iov, 0, ptr, len) != len) {
+        if (unlikely(qemu_iovec_to_buf(&iov, 0, ptr, len) != len)) {
+            trace_nvme_err_invalid_dma();
             status = NVME_INVALID_FIELD | NVME_DNR;
         }
         qemu_iovec_destroy(&iov);
@@ -273,7 +295,8 @@ static uint16_t nvme_write_zeros(NvmeCtrl *n, NvmeNamespace *ns, NvmeCmd *cmd,
     uint64_t aio_slba = slba << (data_shift - BDRV_SECTOR_BITS);
     uint32_t aio_nlb = nlb << (data_shift - BDRV_SECTOR_BITS);
 
-    if (slba + nlb > ns->id_ns.nsze) {
+    if (unlikely(slba + nlb > ns->id_ns.nsze)) {
+        trace_nvme_err_invalid_lba_range(slba, nlb, ns->id_ns.nsze);
         return NVME_LBA_RANGE | NVME_DNR;
     }
 
@@ -301,8 +324,11 @@ static uint16_t nvme_rw(NvmeCtrl *n, NvmeNamespace *ns, NvmeCmd *cmd,
     int is_write = rw->opcode == NVME_CMD_WRITE ? 1 : 0;
     enum BlockAcctType acct = is_write ? BLOCK_ACCT_WRITE : BLOCK_ACCT_READ;
 
-    if ((slba + nlb) > ns->id_ns.nsze) {
+    trace_nvme_rw(is_write ? "write" : "read", nlb, data_size, slba);
+
+    if (unlikely((slba + nlb) > ns->id_ns.nsze)) {
         block_acct_invalid(blk_get_stats(n->conf.blk), acct);
+        trace_nvme_err_invalid_lba_range(slba, nlb, ns->id_ns.nsze);
         return NVME_LBA_RANGE | NVME_DNR;
     }
 
@@ -336,7 +362,8 @@ static uint16_t nvme_io_cmd(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
     NvmeNamespace *ns;
     uint32_t nsid = le32_to_cpu(cmd->nsid);
 
-    if (nsid == 0 || nsid > n->num_namespaces) {
+    if (unlikely(nsid == 0 || nsid > n->num_namespaces)) {
+        trace_nvme_err_invalid_ns(nsid, n->num_namespaces);
         return NVME_INVALID_NSID | NVME_DNR;
     }
 
@@ -350,6 +377,7 @@ static uint16_t nvme_io_cmd(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
     case NVME_CMD_READ:
         return nvme_rw(n, ns, cmd, req);
     default:
+        trace_nvme_err_invalid_opc(cmd->opcode);
         return NVME_INVALID_OPCODE | NVME_DNR;
     }
 }
@@ -373,10 +401,13 @@ static uint16_t nvme_del_sq(NvmeCtrl *n, NvmeCmd *cmd)
     NvmeCQueue *cq;
     uint16_t qid = le16_to_cpu(c->qid);
 
-    if (!qid || nvme_check_sqid(n, qid)) {
+    if (unlikely(!qid || nvme_check_sqid(n, qid))) {
+        trace_nvme_err_invalid_del_sq(qid);
         return NVME_INVALID_QID | NVME_DNR;
     }
 
+    trace_nvme_del_sq(qid);
+
     sq = n->sq[qid];
     while (!QTAILQ_EMPTY(&sq->out_req_list)) {
         req = QTAILQ_FIRST(&sq->out_req_list);
@@ -439,19 +470,26 @@ static uint16_t nvme_create_sq(NvmeCtrl *n, NvmeCmd *cmd)
     uint16_t qflags = le16_to_cpu(c->sq_flags);
     uint64_t prp1 = le64_to_cpu(c->prp1);
 
-    if (!cqid || nvme_check_cqid(n, cqid)) {
+    trace_nvme_create_sq(prp1, sqid, cqid, qsize, qflags);
+
+    if (unlikely(!cqid || nvme_check_cqid(n, cqid))) {
+        trace_nvme_err_invalid_create_sq_cqid(cqid);
         return NVME_INVALID_CQID | NVME_DNR;
     }
-    if (!sqid || !nvme_check_sqid(n, sqid)) {
+    if (unlikely(!sqid || !nvme_check_sqid(n, sqid))) {
+        trace_nvme_err_invalid_create_sq_sqid(sqid);
         return NVME_INVALID_QID | NVME_DNR;
     }
-    if (!qsize || qsize > NVME_CAP_MQES(n->bar.cap)) {
+    if (unlikely(!qsize || qsize > NVME_CAP_MQES(n->bar.cap))) {
+        trace_nvme_err_invalid_create_sq_size(qsize);
         return NVME_MAX_QSIZE_EXCEEDED | NVME_DNR;
     }
-    if (!prp1 || prp1 & (n->page_size - 1)) {
+    if (unlikely(!prp1 || prp1 & (n->page_size - 1))) {
+        trace_nvme_err_invalid_create_sq_addr(prp1);
         return NVME_INVALID_FIELD | NVME_DNR;
     }
-    if (!(NVME_SQ_FLAGS_PC(qflags))) {
+    if (unlikely(!(NVME_SQ_FLAGS_PC(qflags)))) {
+        trace_nvme_err_invalid_create_sq_qflags(NVME_SQ_FLAGS_PC(qflags));
         return NVME_INVALID_FIELD | NVME_DNR;
     }
     sq = g_malloc0(sizeof(*sq));
@@ -476,14 +514,17 @@ static uint16_t nvme_del_cq(NvmeCtrl *n, NvmeCmd *cmd)
     NvmeCQueue *cq;
     uint16_t qid = le16_to_cpu(c->qid);
 
-    if (!qid || nvme_check_cqid(n, qid)) {
+    if (unlikely(!qid || nvme_check_cqid(n, qid))) {
+        trace_nvme_err_invalid_del_cq_cqid(qid);
         return NVME_INVALID_CQID | NVME_DNR;
     }
 
     cq = n->cq[qid];
-    if (!QTAILQ_EMPTY(&cq->sq_list)) {
+    if (unlikely(!QTAILQ_EMPTY(&cq->sq_list))) {
+        trace_nvme_err_invalid_del_cq_notempty(qid);
         return NVME_INVALID_QUEUE_DEL;
     }
+    trace_nvme_del_cq(qid);
     nvme_free_cq(cq, n);
     return NVME_SUCCESS;
 }
@@ -516,19 +557,27 @@ static uint16_t nvme_create_cq(NvmeCtrl *n, NvmeCmd *cmd)
     uint16_t qflags = le16_to_cpu(c->cq_flags);
     uint64_t prp1 = le64_to_cpu(c->prp1);
 
-    if (!cqid || !nvme_check_cqid(n, cqid)) {
+    trace_nvme_create_cq(prp1, cqid, vector, qsize, qflags,
+                         NVME_CQ_FLAGS_IEN(qflags) != 0);
+
+    if (unlikely(!cqid || !nvme_check_cqid(n, cqid))) {
+        trace_nvme_err_invalid_create_cq_cqid(cqid);
         return NVME_INVALID_CQID | NVME_DNR;
     }
-    if (!qsize || qsize > NVME_CAP_MQES(n->bar.cap)) {
+    if (unlikely(!qsize || qsize > NVME_CAP_MQES(n->bar.cap))) {
+        trace_nvme_err_invalid_create_cq_size(qsize);
         return NVME_MAX_QSIZE_EXCEEDED | NVME_DNR;
     }
-    if (!prp1) {
+    if (unlikely(!prp1)) {
+        trace_nvme_err_invalid_create_cq_addr(prp1);
         return NVME_INVALID_FIELD | NVME_DNR;
     }
-    if (vector > n->num_queues) {
+    if (unlikely(vector > n->num_queues)) {
+        trace_nvme_err_invalid_create_cq_vector(vector);
         return NVME_INVALID_IRQ_VECTOR | NVME_DNR;
     }
-    if (!(NVME_CQ_FLAGS_PC(qflags))) {
+    if (unlikely(!(NVME_CQ_FLAGS_PC(qflags)))) {
+        trace_nvme_err_invalid_create_cq_qflags(NVME_CQ_FLAGS_PC(qflags));
         return NVME_INVALID_FIELD | NVME_DNR;
     }
 
@@ -543,6 +592,8 @@ static uint16_t nvme_identify_ctrl(NvmeCtrl *n, NvmeIdentify *c)
     uint64_t prp1 = le64_to_cpu(c->prp1);
     uint64_t prp2 = le64_to_cpu(c->prp2);
 
+    trace_nvme_identify_ctrl();
+
     return nvme_dma_read_prp(n, (uint8_t *)&n->id_ctrl, sizeof(n->id_ctrl),
         prp1, prp2);
 }
@@ -554,11 +605,15 @@ static uint16_t nvme_identify_ns(NvmeCtrl *n, NvmeIdentify *c)
     uint64_t prp1 = le64_to_cpu(c->prp1);
     uint64_t prp2 = le64_to_cpu(c->prp2);
 
-    if (nsid == 0 || nsid > n->num_namespaces) {
+    trace_nvme_identify_ns(nsid);
+
+    if (unlikely(nsid == 0 || nsid > n->num_namespaces)) {
+        trace_nvme_err_invalid_ns(nsid, n->num_namespaces);
         return NVME_INVALID_NSID | NVME_DNR;
     }
 
     ns = &n->namespaces[nsid - 1];
+
     return nvme_dma_read_prp(n, (uint8_t *)&ns->id_ns, sizeof(ns->id_ns),
         prp1, prp2);
 }
@@ -573,6 +628,8 @@ static uint16_t nvme_identify_nslist(NvmeCtrl *n, NvmeIdentify *c)
     uint16_t ret;
     int i, j = 0;
 
+    trace_nvme_identify_nslist(min_nsid);
+
     list = g_malloc0(data_len);
     for (i = 0; i < n->num_namespaces; i++) {
         if (i < min_nsid) {
@@ -601,6 +658,7 @@ static uint16_t nvme_identify(NvmeCtrl *n, NvmeCmd *cmd)
     case 0x02:
         return nvme_identify_nslist(n, c);
     default:
+        trace_nvme_err_invalid_identify_cns(le32_to_cpu(c->cns));
         return NVME_INVALID_FIELD | NVME_DNR;
     }
 }
@@ -613,11 +671,14 @@ static uint16_t nvme_get_feature(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
     switch (dw10) {
     case NVME_VOLATILE_WRITE_CACHE:
         result = blk_enable_write_cache(n->conf.blk);
+        trace_nvme_getfeat_vwcache(result ? "enabled" : "disabled");
         break;
     case NVME_NUMBER_OF_QUEUES:
         result = cpu_to_le32((n->num_queues - 2) | ((n->num_queues - 2) << 16));
+        trace_nvme_getfeat_numq(result);
         break;
     default:
+        trace_nvme_err_invalid_getfeat(dw10);
         return NVME_INVALID_FIELD | NVME_DNR;
     }
 
@@ -635,10 +696,14 @@ static uint16_t nvme_set_feature(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
         blk_set_enable_write_cache(n->conf.blk, dw11 & 1);
         break;
     case NVME_NUMBER_OF_QUEUES:
+        trace_nvme_setfeat_numq((dw11 & 0xFFFF) + 1,
+                                ((dw11 >> 16) & 0xFFFF) + 1,
+                                n->num_queues - 1, n->num_queues - 1);
         req->cqe.result =
             cpu_to_le32((n->num_queues - 2) | ((n->num_queues - 2) << 16));
         break;
     default:
+        trace_nvme_err_invalid_setfeat(dw10);
         return NVME_INVALID_FIELD | NVME_DNR;
     }
     return NVME_SUCCESS;
@@ -662,6 +727,7 @@ static uint16_t nvme_admin_cmd(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
     case NVME_ADM_CMD_GET_FEATURES:
         return nvme_get_feature(n, cmd, req);
     default:
+        trace_nvme_err_invalid_admin_opc(cmd->opcode);
         return NVME_INVALID_OPCODE | NVME_DNR;
     }
 }
@@ -721,15 +787,78 @@ static int nvme_start_ctrl(NvmeCtrl *n)
     uint32_t page_bits = NVME_CC_MPS(n->bar.cc) + 12;
     uint32_t page_size = 1 << page_bits;
 
-    if (n->cq[0] || n->sq[0] || !n->bar.asq || !n->bar.acq ||
-        n->bar.asq & (page_size - 1) || n->bar.acq & (page_size - 1) ||
-        NVME_CC_MPS(n->bar.cc) < NVME_CAP_MPSMIN(n->bar.cap) ||
-        NVME_CC_MPS(n->bar.cc) > NVME_CAP_MPSMAX(n->bar.cap) ||
-        NVME_CC_IOCQES(n->bar.cc) < NVME_CTRL_CQES_MIN(n->id_ctrl.cqes) ||
-        NVME_CC_IOCQES(n->bar.cc) > NVME_CTRL_CQES_MAX(n->id_ctrl.cqes) ||
-        NVME_CC_IOSQES(n->bar.cc) < NVME_CTRL_SQES_MIN(n->id_ctrl.sqes) ||
-        NVME_CC_IOSQES(n->bar.cc) > NVME_CTRL_SQES_MAX(n->id_ctrl.sqes) ||
-        !NVME_AQA_ASQS(n->bar.aqa) || !NVME_AQA_ACQS(n->bar.aqa)) {
+    if (unlikely(n->cq[0])) {
+        trace_nvme_err_startfail_cq();
+        return -1;
+    }
+    if (unlikely(n->sq[0])) {
+        trace_nvme_err_startfail_sq();
+        return -1;
+    }
+    if (unlikely(!n->bar.asq)) {
+        trace_nvme_err_startfail_nbarasq();
+        return -1;
+    }
+    if (unlikely(!n->bar.acq)) {
+        trace_nvme_err_startfail_nbaracq();
+        return -1;
+    }
+    if (unlikely(n->bar.asq & (page_size - 1))) {
+        trace_nvme_err_startfail_asq_misaligned(n->bar.asq);
+        return -1;
+    }
+    if (unlikely(n->bar.acq & (page_size - 1))) {
+        trace_nvme_err_startfail_acq_misaligned(n->bar.acq);
+        return -1;
+    }
+    if (unlikely(NVME_CC_MPS(n->bar.cc) <
+                 NVME_CAP_MPSMIN(n->bar.cap))) {
+        trace_nvme_err_startfail_page_too_small(
+                    NVME_CC_MPS(n->bar.cc),
+                    NVME_CAP_MPSMIN(n->bar.cap));
+        return -1;
+    }
+    if (unlikely(NVME_CC_MPS(n->bar.cc) >
+                 NVME_CAP_MPSMAX(n->bar.cap))) {
+        trace_nvme_err_startfail_page_too_large(
+                    NVME_CC_MPS(n->bar.cc),
+                    NVME_CAP_MPSMAX(n->bar.cap));
+        return -1;
+    }
+    if (unlikely(NVME_CC_IOCQES(n->bar.cc) <
+                 NVME_CTRL_CQES_MIN(n->id_ctrl.cqes))) {
+        trace_nvme_err_startfail_cqent_too_small(
+                    NVME_CC_IOCQES(n->bar.cc),
+                    NVME_CTRL_CQES_MIN(n->id_ctrl.cqes));
+        return -1;
+    }
+    if (unlikely(NVME_CC_IOCQES(n->bar.cc) >
+                 NVME_CTRL_CQES_MAX(n->id_ctrl.cqes))) {
+        trace_nvme_err_startfail_cqent_too_large(
+                    NVME_CC_IOCQES(n->bar.cc),
+                    NVME_CTRL_CQES_MAX(n->id_ctrl.cqes));
+        return -1;
+    }
+    if (unlikely(NVME_CC_IOSQES(n->bar.cc) <
+                 NVME_CTRL_SQES_MIN(n->id_ctrl.sqes))) {
+        trace_nvme_err_startfail_sqent_too_small(
+                    NVME_CC_IOSQES(n->bar.cc),
+                    NVME_CTRL_SQES_MIN(n->id_ctrl.sqes));
+        return -1;
+    }
+    if (unlikely(NVME_CC_IOSQES(n->bar.cc) >
+                 NVME_CTRL_SQES_MAX(n->id_ctrl.sqes))) {
+        trace_nvme_err_startfail_sqent_too_large(
+                    NVME_CC_IOSQES(n->bar.cc),
+                    NVME_CTRL_SQES_MAX(n->id_ctrl.sqes));
+        return -1;
+    }
+    if (unlikely(!NVME_AQA_ASQS(n->bar.aqa))) {
+        trace_nvme_err_startfail_asqent_sz_zero();
+        return -1;
+    }
+    if (unlikely(!NVME_AQA_ACQS(n->bar.aqa))) {
+        trace_nvme_err_startfail_acqent_sz_zero();
         return -1;
     }
 
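
Unrolling the original compound condition into one check per failure mode is what makes these trace points useful: a failed controller start now reports its exact cause instead of a bare -1. The page-size limits involved come straight from CC.MPS (a worked example, matching the page_bits computation at the top of the function):

    /* CC.MPS selects the memory page size as a power of two above 4 KiB:
     *   page_size = 1 << (NVME_CC_MPS(n->bar.cc) + 12);
     * e.g. MPS=0 -> 4096 bytes, MPS=1 -> 8192 bytes. ASQ/ACQ must be
     * aligned to this size, which the misaligned checks above verify. */
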
@@ -749,16 +878,48 @@ static int nvme_start_ctrl(NvmeCtrl *n)
 static void nvme_write_bar(NvmeCtrl *n, hwaddr offset, uint64_t data,
     unsigned size)
 {
+    if (unlikely(offset & (sizeof(uint32_t) - 1))) {
+        NVME_GUEST_ERR(nvme_ub_mmiowr_misaligned32,
+                       "MMIO write not 32-bit aligned,"
+                       " offset=0x%"PRIx64"", offset);
+        /* should be ignored, fall through for now */
+    }
+
+    if (unlikely(size < sizeof(uint32_t))) {
+        NVME_GUEST_ERR(nvme_ub_mmiowr_toosmall,
+                       "MMIO write smaller than 32-bits,"
+                       " offset=0x%"PRIx64", size=%u",
+                       offset, size);
+        /* should be ignored, fall through for now */
+    }
+
     switch (offset) {
-    case 0xc:
+    case 0xc:   /* INTMS */
+        if (unlikely(msix_enabled(&(n->parent_obj)))) {
+            NVME_GUEST_ERR(nvme_ub_mmiowr_intmask_with_msix,
+                           "undefined access to interrupt mask set"
+                           " when MSI-X is enabled");
+            /* should be ignored, fall through for now */
+        }
         n->bar.intms |= data & 0xffffffff;
         n->bar.intmc = n->bar.intms;
+        trace_nvme_mmio_intm_set(data & 0xffffffff,
+                                 n->bar.intmc);
         break;
-    case 0x10:
+    case 0x10:  /* INTMC */
+        if (unlikely(msix_enabled(&(n->parent_obj)))) {
+            NVME_GUEST_ERR(nvme_ub_mmiowr_intmask_with_msix,
+                           "undefined access to interrupt mask clr"
+                           " when MSI-X is enabled");
+            /* should be ignored, fall through for now */
+        }
         n->bar.intms &= ~(data & 0xffffffff);
         n->bar.intmc = n->bar.intms;
+        trace_nvme_mmio_intm_clr(data & 0xffffffff,
+                                 n->bar.intmc);
         break;
-    case 0x14:
+    case 0x14:  /* CC */
+        trace_nvme_mmio_cfg(data & 0xffffffff);
         /* Windows first sends data, then sends enable bit */
         if (!NVME_CC_EN(data) && !NVME_CC_EN(n->bar.cc) &&
             !NVME_CC_SHN(data) && !NVME_CC_SHN(n->bar.cc))
@@ -768,40 +929,82 @@ static void nvme_write_bar(NvmeCtrl *n, hwaddr offset, uint64_t data,
 
         if (NVME_CC_EN(data) && !NVME_CC_EN(n->bar.cc)) {
             n->bar.cc = data;
-            if (nvme_start_ctrl(n)) {
+            if (unlikely(nvme_start_ctrl(n))) {
+                trace_nvme_err_startfail();
                 n->bar.csts = NVME_CSTS_FAILED;
             } else {
+                trace_nvme_mmio_start_success();
                 n->bar.csts = NVME_CSTS_READY;
             }
         } else if (!NVME_CC_EN(data) && NVME_CC_EN(n->bar.cc)) {
+            trace_nvme_mmio_stopped();
             nvme_clear_ctrl(n);
             n->bar.csts &= ~NVME_CSTS_READY;
         }
         if (NVME_CC_SHN(data) && !(NVME_CC_SHN(n->bar.cc))) {
-                nvme_clear_ctrl(n);
-                n->bar.cc = data;
-                n->bar.csts |= NVME_CSTS_SHST_COMPLETE;
+            trace_nvme_mmio_shutdown_set();
+            nvme_clear_ctrl(n);
+            n->bar.cc = data;
+            n->bar.csts |= NVME_CSTS_SHST_COMPLETE;
         } else if (!NVME_CC_SHN(data) && NVME_CC_SHN(n->bar.cc)) {
-                n->bar.csts &= ~NVME_CSTS_SHST_COMPLETE;
-                n->bar.cc = data;
+            trace_nvme_mmio_shutdown_cleared();
+            n->bar.csts &= ~NVME_CSTS_SHST_COMPLETE;
+            n->bar.cc = data;
+        }
+        break;
+    case 0x1C:  /* CSTS */
+        if (data & (1 << 4)) {
+            NVME_GUEST_ERR(nvme_ub_mmiowr_ssreset_w1c_unsupported,
+                           "attempted to W1C CSTS.NSSRO"
+                           " but CAP.NSSRS is zero (not supported)");
+        } else if (data != 0) {
+            NVME_GUEST_ERR(nvme_ub_mmiowr_ro_csts,
+                           "attempted to set a read only bit"
+                           " of controller status");
+        }
+        break;
+    case 0x20:  /* NSSR */
+        if (data == 0x4E564D65) {
+            trace_nvme_ub_mmiowr_ssreset_unsupported();
+        } else {
+            /* The spec says that writes of other values have no effect */
+            return;
         }
         break;
-    case 0x24:
+    case 0x24:  /* AQA */
         n->bar.aqa = data & 0xffffffff;
+        trace_nvme_mmio_aqattr(data & 0xffffffff);
         break;
-    case 0x28:
+    case 0x28:  /* ASQ */
         n->bar.asq = data;
+        trace_nvme_mmio_asqaddr(data);
         break;
-    case 0x2c:
+    case 0x2c:  /* ASQ hi */
         n->bar.asq |= data << 32;
+        trace_nvme_mmio_asqaddr_hi(data, n->bar.asq);
         break;
-    case 0x30:
+    case 0x30:  /* ACQ */
+        trace_nvme_mmio_acqaddr(data);
         n->bar.acq = data;
         break;
-    case 0x34:
+    case 0x34:  /* ACQ hi */
         n->bar.acq |= data << 32;
+        trace_nvme_mmio_acqaddr_hi(data, n->bar.acq);
         break;
+    case 0x38:  /* CMBLOC */
+        NVME_GUEST_ERR(nvme_ub_mmiowr_cmbloc_reserved,
+                       "invalid write to reserved CMBLOC"
+                       " when CMBSZ is zero, ignored");
+        return;
+    case 0x3C:  /* CMBSZ */
+        NVME_GUEST_ERR(nvme_ub_mmiowr_cmbsz_readonly,
+                       "invalid write to read only CMBSZ, ignored");
+        return;
     default:
+        NVME_GUEST_ERR(nvme_ub_mmiowr_invalid,
+                       "invalid MMIO write,"
+                       " offset=0x%"PRIx64", data=%"PRIx64"",
+                       offset, data);
         break;
     }
 }
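
ASQ and ACQ are 64-bit registers that guests program as two 32-bit dwords, which is why the *_hi trace events log both the dword written and the combined register. A worked example with a hypothetical base address:

    /* to set ASQ = 0x123456789abcd000 the guest performs two writes:
     *   0x28 <- 0x9abcd000   => n->bar.asq  = 0x000000009abcd000
     *   0x2c <- 0x12345678   => n->bar.asq |= (uint64_t)0x12345678 << 32
     *                        => n->bar.asq  = 0x123456789abcd000
     * trace_nvme_mmio_asqaddr_hi(data, n->bar.asq) records both values. */
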
@@ -812,9 +1015,26 @@ static uint64_t nvme_mmio_read(void *opaque, hwaddr addr, unsigned size)
     uint8_t *ptr = (uint8_t *)&n->bar;
     uint64_t val = 0;
 
+    if (unlikely(addr & (sizeof(uint32_t) - 1))) {
+        NVME_GUEST_ERR(nvme_ub_mmiord_misaligned32,
+                       "MMIO read not 32-bit aligned,"
+                       " offset=0x%"PRIx64"", addr);
+        /* should RAZ, fall through for now */
+    } else if (unlikely(size < sizeof(uint32_t))) {
+        NVME_GUEST_ERR(nvme_ub_mmiord_toosmall,
+                       "MMIO read smaller than 32-bits,"
+                       " offset=0x%"PRIx64"", addr);
+        /* should RAZ, fall through for now */
+    }
+
     if (addr < sizeof(n->bar)) {
         memcpy(&val, ptr + addr, size);
+    } else {
+        NVME_GUEST_ERR(nvme_ub_mmiord_invalid_ofs,
+                       "MMIO read beyond last register,"
+                       " offset=0x%"PRIx64", returning 0", addr);
     }
+
     return val;
 }
 
@@ -822,22 +1042,36 @@ static void nvme_process_db(NvmeCtrl *n, hwaddr addr, int val)
 {
     uint32_t qid;
 
-    if (addr & ((1 << 2) - 1)) {
+    if (unlikely(addr & ((1 << 2) - 1))) {
+        NVME_GUEST_ERR(nvme_ub_db_wr_misaligned,
+                       "doorbell write not 32-bit aligned,"
+                       " offset=0x%"PRIx64", ignoring", addr);
         return;
     }
 
     if (((addr - 0x1000) >> 2) & 1) {
+        /* Completion queue doorbell write */
+
         uint16_t new_head = val & 0xffff;
         int start_sqs;
         NvmeCQueue *cq;
 
         qid = (addr - (0x1000 + (1 << 2))) >> 3;
-        if (nvme_check_cqid(n, qid)) {
+        if (unlikely(nvme_check_cqid(n, qid))) {
+            NVME_GUEST_ERR(nvme_ub_db_wr_invalid_cq,
+                           "completion queue doorbell write"
+                           " for nonexistent queue,"
+                           " cqid=%"PRIu32", ignoring", qid);
             return;
         }
 
         cq = n->cq[qid];
-        if (new_head >= cq->size) {
+        if (unlikely(new_head >= cq->size)) {
+            NVME_GUEST_ERR(nvme_ub_db_wr_invalid_cqhead,
+                           "completion queue doorbell write value"
+                           " beyond queue size, cqid=%"PRIu32","
+                           " new_head=%"PRIu16", ignoring",
+                           qid, new_head);
             return;
         }
 
@@ -855,16 +1089,27 @@ static void nvme_process_db(NvmeCtrl *n, hwaddr addr, int val)
             nvme_isr_notify(n, cq);
         }
     } else {
+        /* Submission queue doorbell write */
+
         uint16_t new_tail = val & 0xffff;
         NvmeSQueue *sq;
 
         qid = (addr - 0x1000) >> 3;
-        if (nvme_check_sqid(n, qid)) {
+        if (unlikely(nvme_check_sqid(n, qid))) {
+            NVME_GUEST_ERR(nvme_ub_db_wr_invalid_sq,
+                           "submission queue doorbell write"
+                           " for nonexistent queue,"
+                           " sqid=%"PRIu32", ignoring", qid);
             return;
         }
 
         sq = n->sq[qid];
-        if (new_tail >= sq->size) {
+        if (unlikely(new_tail >= sq->size)) {
+            NVME_GUEST_ERR(nvme_ub_db_wr_invalid_sqtail,
+                           "submission queue doorbell write value"
+                           " beyond queue size, sqid=%"PRIu32","
+                           " new_tail=%"PRIu16", ignoring",
+                           qid, new_tail);
             return;
         }
 
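
Both branches decode the doorbell address according to the NVMe register map with a doorbell stride of 0 (CAP.DSTRD == 0): SQ y's tail doorbell sits at 0x1000 + (2y) * 4 and CQ y's head doorbell at 0x1000 + (2y + 1) * 4. A worked example of the arithmetic used above:

    /* write to addr 0x100c (odd dword index -> completion queue):
     *   ((0x100c - 0x1000) >> 2) & 1 == 1
     *   qid = (0x100c - (0x1000 + (1 << 2))) >> 3 == 1   -> CQ 1 head
     * write to addr 0x1008 (even dword index -> submission queue):
     *   ((0x1008 - 0x1000) >> 2) & 1 == 0
     *   qid = (0x1008 - 0x1000) >> 3 == 1                -> SQ 1 tail */
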
@@ -920,7 +1165,7 @@ static const MemoryRegionOps nvme_cmb_ops = {
     },
 };
 
-static int nvme_init(PCIDevice *pci_dev)
+static void nvme_realize(PCIDevice *pci_dev, Error **errp)
 {
     NvmeCtrl *n = NVME(pci_dev);
     NvmeIdCtrl *id = &n->id_ctrl;
@@ -928,27 +1173,27 @@ static int nvme_init(PCIDevice *pci_dev)
     int i;
     int64_t bs_size;
     uint8_t *pci_conf;
-    Error *local_err = NULL;
 
     if (!n->conf.blk) {
-        return -1;
+        error_setg(errp, "drive property not set");
+        return;
     }
 
     bs_size = blk_getlength(n->conf.blk);
     if (bs_size < 0) {
-        return -1;
+        error_setg(errp, "could not get backing file size");
+        return;
     }
 
     blkconf_serial(&n->conf, &n->serial);
     if (!n->serial) {
-        return -1;
+        error_setg(errp, "serial property not set");
+        return;
     }
     blkconf_blocksizes(&n->conf);
-    blkconf_apply_backend_options(&n->conf, blk_is_read_only(n->conf.blk),
-                                  false, &local_err);
-    if (local_err) {
-        error_report_err(local_err);
-        return -1;
+    if (!blkconf_apply_backend_options(&n->conf, blk_is_read_only(n->conf.blk),
+                                       false, errp)) {
+        return;
     }
 
     pci_conf = pci_dev->config;
@@ -1046,7 +1291,6 @@ static void nvme_realize(PCIDevice *pci_dev, Error **errp)
             cpu_to_le64(n->ns_size >>
                 id_ns->lbaf[NVME_ID_NS_FLBAS_INDEX(ns->id_ns.flbas)].ds);
     }
-    return 0;
 }
 
 static void nvme_exit(PCIDevice *pci_dev)
@@ -1081,7 +1325,7 @@ static void nvme_class_init(ObjectClass *oc, void *data)
    DeviceClass *dc = DEVICE_CLASS(oc);
     PCIDeviceClass *pc = PCI_DEVICE_CLASS(oc);
 
-    pc->init = nvme_init;
+    pc->realize = nvme_realize;
     pc->exit = nvme_exit;
     pc->class_id = PCI_CLASS_STORAGE_EXPRESS;
     pc->vendor_id = PCI_VENDOR_ID_INTEL;
|