#define NVME_SQ_ENTRY_BYTES 64
#define NVME_CQ_ENTRY_BYTES 16
#define NVME_QUEUE_SIZE 128
-#define NVME_BAR_SIZE 8192
+#define NVME_DOORBELL_SIZE 4096
/*
 * We have to leave one slot empty as that is the full queue case where
 * head == tail + 1.
 */
typedef struct BDRVNVMeState BDRVNVMeState;
+/* Same index is used for queues and IRQs */
+#define INDEX_ADMIN 0
+#define INDEX_IO(n) (1 + n)
+
+/* This driver shares a single MSIX IRQ for the admin and I/O queues */
+enum {
+ MSIX_SHARED_IRQ_IDX = 0,
+ MSIX_IRQ_COUNT = 1
+};
+
typedef struct {
int32_t head, tail;
uint8_t *queue;
QEMUBH *completion_bh;
} NVMeQueuePair;
-/* Memory mapped registers */
-typedef volatile struct {
- uint64_t cap;
- uint32_t vs;
- uint32_t intms;
- uint32_t intmc;
- uint32_t cc;
- uint32_t reserved0;
- uint32_t csts;
- uint32_t nssr;
- uint32_t aqa;
- uint64_t asq;
- uint64_t acq;
- uint32_t cmbloc;
- uint32_t cmbsz;
- uint8_t reserved1[0xec0];
- uint8_t cmd_set_specfic[0x100];
- uint32_t doorbells[];
-} NVMeRegs;
-
-QEMU_BUILD_BUG_ON(offsetof(NVMeRegs, doorbells) != 0x1000);
-
-#define INDEX_ADMIN 0
-#define INDEX_IO(n) (1 + n)
-
struct BDRVNVMeState {
AioContext *aio_context;
QEMUVFIOState *vfio;
- NVMeRegs *regs;
+ void *bar0_wo_map;
+ /* Memory mapped registers */
+ volatile struct {
+ uint32_t sq_tail;
+ uint32_t cq_head;
+ } *doorbells;
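+ /* (SQ tail / CQ head doorbell pairs; they sit right after the regular
+ * registers, i.e. at BAR0 offset 0x1000, in the write-only BAR0 mapping.) */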
/* The submission/completion queue pairs.
* [0]: admin queue.
* [1..]: io queues.
*/
NVMeQueuePair **queues;
- int nr_queues;
+ unsigned queue_count;
size_t page_size;
/* How many uint32_t elements does each doorbell entry take. */
size_t doorbell_scale;
bool write_cache_supported;
- EventNotifier irq_notifier;
+ EventNotifier irq_notifier[MSIX_IRQ_COUNT];
uint64_t nsze; /* Namespace size reported by identify command */
int nsid; /* The namespace id to read/write data. */
/* PCI address (required for nvme_refresh_filename()) */
char *device;
+
+ struct {
+ uint64_t completion_errors;
+ uint64_t aligned_accesses;
+ uint64_t unaligned_accesses;
+ } stats;
};
#define NVME_BLOCK_OPT_DEVICE "device"
},
};
-static void nvme_init_queue(BlockDriverState *bs, NVMeQueue *q,
- int nentries, int entry_bytes, Error **errp)
+/* Returns true on success, false on failure. */
+static bool nvme_init_queue(BDRVNVMeState *s, NVMeQueue *q,
+ unsigned nentries, size_t entry_bytes, Error **errp)
{
- BDRVNVMeState *s = bs->opaque;
size_t bytes;
int r;
- bytes = ROUND_UP(nentries * entry_bytes, s->page_size);
+ bytes = ROUND_UP(nentries * entry_bytes, qemu_real_host_page_size);
q->head = q->tail = 0;
- q->queue = qemu_try_blockalign0(bs, bytes);
-
+ q->queue = qemu_try_memalign(qemu_real_host_page_size, bytes);
if (!q->queue) {
error_setg(errp, "Cannot allocate queue");
- return;
+ return false;
}
+ memset(q->queue, 0, bytes);
r = qemu_vfio_dma_map(s->vfio, q->queue, bytes, false, &q->iova);
if (r) {
error_setg(errp, "Cannot map queue");
+ return false;
}
+ return true;
}
static void nvme_free_queue_pair(NVMeQueuePair *q)
{
+ trace_nvme_free_queue_pair(q->index, q);
if (q->completion_bh) {
qemu_bh_delete(q->completion_bh);
}
qemu_mutex_unlock(&q->lock);
}
-static NVMeQueuePair *nvme_create_queue_pair(BlockDriverState *bs,
- int idx, int size,
+static NVMeQueuePair *nvme_create_queue_pair(BDRVNVMeState *s,
+ AioContext *aio_context,
+ unsigned idx, size_t size,
Error **errp)
{
int i, r;
- BDRVNVMeState *s = bs->opaque;
- Error *local_err = NULL;
NVMeQueuePair *q;
uint64_t prp_list_iova;
+ size_t bytes;
q = g_try_new0(NVMeQueuePair, 1);
if (!q) {
return NULL;
}
- q->prp_list_pages = qemu_try_blockalign0(bs,
- s->page_size * NVME_NUM_REQS);
+ trace_nvme_create_queue_pair(idx, q, size, aio_context,
+ event_notifier_get_fd(s->irq_notifier));
+ bytes = QEMU_ALIGN_UP(s->page_size * NVME_NUM_REQS,
+ qemu_real_host_page_size);
+ q->prp_list_pages = qemu_try_memalign(qemu_real_host_page_size, bytes);
if (!q->prp_list_pages) {
goto fail;
}
+ memset(q->prp_list_pages, 0, bytes);
qemu_mutex_init(&q->lock);
q->s = s;
q->index = idx;
qemu_co_queue_init(&q->free_req_queue);
- q->completion_bh = aio_bh_new(bdrv_get_aio_context(bs),
- nvme_process_completion_bh, q);
- r = qemu_vfio_dma_map(s->vfio, q->prp_list_pages,
- s->page_size * NVME_NUM_REQS,
+ q->completion_bh = aio_bh_new(aio_context, nvme_process_completion_bh, q);
+ r = qemu_vfio_dma_map(s->vfio, q->prp_list_pages, bytes,
false, &prp_list_iova);
if (r) {
goto fail;
req->prp_list_iova = prp_list_iova + i * s->page_size;
}
- nvme_init_queue(bs, &q->sq, size, NVME_SQ_ENTRY_BYTES, &local_err);
- if (local_err) {
- error_propagate(errp, local_err);
+ if (!nvme_init_queue(s, &q->sq, size, NVME_SQ_ENTRY_BYTES, errp)) {
goto fail;
}
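+ /* Select this queue pair's doorbells; doorbell_scale expresses the
+ * controller's doorbell stride (CAP.DSTRD) in uint32_t units. */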
- q->sq.doorbell = &s->regs->doorbells[idx * 2 * s->doorbell_scale];
+ q->sq.doorbell = &s->doorbells[idx * s->doorbell_scale].sq_tail;
- nvme_init_queue(bs, &q->cq, size, NVME_CQ_ENTRY_BYTES, &local_err);
- if (local_err) {
- error_propagate(errp, local_err);
+ if (!nvme_init_queue(s, &q->cq, size, NVME_CQ_ENTRY_BYTES, errp)) {
goto fail;
}
- q->cq.doorbell = &s->regs->doorbells[(idx * 2 + 1) * s->doorbell_scale];
+ q->cq.doorbell = &s->doorbells[idx * s->doorbell_scale].cq_head;
return q;
fail:
while (q->free_req_head == -1) {
if (qemu_in_coroutine()) {
- trace_nvme_free_req_queue_wait(q);
+ trace_nvme_free_req_queue_wait(q->s, q->index);
qemu_co_queue_wait(&q->free_req_queue, &q->lock);
} else {
qemu_mutex_unlock(&q->lock);
break;
}
ret = nvme_translate_error(c);
+ if (ret) {
+ s->stats.completion_errors++;
+ }
q->cq.head = (q->cq.head + 1) % NVME_QUEUE_SIZE;
if (!q->cq.head) {
q->cq_phase = !q->cq_phase;
}
cid = le16_to_cpu(c->cid);
if (cid == 0 || cid > NVME_QUEUE_SIZE) {
- fprintf(stderr, "Unexpected CID in completion queue: %" PRIu32 "\n",
- cid);
+ warn_report("NVMe: Unexpected CID in completion queue: %"PRIu32", "
+ "queue size: %u", cid, NVME_QUEUE_SIZE);
continue;
}
trace_nvme_complete_command(s, q->index, cid);
assert(!req->cb);
req->cb = cb;
req->opaque = opaque;
- cmd->cid = cpu_to_le32(req->cid);
+ cmd->cid = cpu_to_le16(req->cid);
trace_nvme_submit_command(q->s, q->index, req->cid);
nvme_trace_command(cmd);
qemu_mutex_unlock(&q->lock);
}
-static void nvme_cmd_sync_cb(void *opaque, int ret)
+static void nvme_admin_cmd_sync_cb(void *opaque, int ret)
{
int *pret = opaque;
*pret = ret;
aio_wait_kick();
}
-static int nvme_cmd_sync(BlockDriverState *bs, NVMeQueuePair *q,
- NvmeCmd *cmd)
+static int nvme_admin_cmd_sync(BlockDriverState *bs, NvmeCmd *cmd)
{
+ BDRVNVMeState *s = bs->opaque;
+ NVMeQueuePair *q = s->queues[INDEX_ADMIN];
+ AioContext *aio_context = bdrv_get_aio_context(bs);
NVMeRequest *req;
int ret = -EINPROGRESS;
req = nvme_get_free_req(q);
if (!req) {
return -EBUSY;
}
- nvme_submit_command(q, req, cmd, nvme_cmd_sync_cb, &ret);
+ nvme_submit_command(q, req, cmd, nvme_admin_cmd_sync_cb, &ret);
- BDRV_POLL_WHILE(bs, ret == -EINPROGRESS);
+ AIO_WAIT_WHILE(aio_context, ret == -EINPROGRESS);
return ret;
}
-static void nvme_identify(BlockDriverState *bs, int namespace, Error **errp)
+/* Returns true on success, false on failure. */
+static bool nvme_identify(BlockDriverState *bs, int namespace, Error **errp)
{
BDRVNVMeState *s = bs->opaque;
- NvmeIdCtrl *idctrl;
- NvmeIdNs *idns;
+ bool ret = false;
+ union {
+ NvmeIdCtrl ctrl;
+ NvmeIdNs ns;
+ } *id;
NvmeLBAF *lbaf;
- uint8_t *resp;
uint16_t oncs;
int r;
uint64_t iova;
.opcode = NVME_ADM_CMD_IDENTIFY,
.cdw10 = cpu_to_le32(0x1),
};
+ size_t id_size = QEMU_ALIGN_UP(sizeof(*id), qemu_real_host_page_size);
- resp = qemu_try_blockalign0(bs, sizeof(NvmeIdCtrl));
- if (!resp) {
+ id = qemu_try_memalign(qemu_real_host_page_size, id_size);
+ if (!id) {
error_setg(errp, "Cannot allocate buffer for identify response");
goto out;
}
- idctrl = (NvmeIdCtrl *)resp;
- idns = (NvmeIdNs *)resp;
- r = qemu_vfio_dma_map(s->vfio, resp, sizeof(NvmeIdCtrl), true, &iova);
+ r = qemu_vfio_dma_map(s->vfio, id, id_size, true, &iova);
if (r) {
error_setg(errp, "Cannot map buffer for DMA");
goto out;
}
- cmd.dptr.prp1 = cpu_to_le64(iova);
- if (nvme_cmd_sync(bs, s->queues[INDEX_ADMIN], &cmd)) {
+ memset(id, 0, id_size);
+ cmd.dptr.prp1 = cpu_to_le64(iova);
+ if (nvme_admin_cmd_sync(bs, &cmd)) {
error_setg(errp, "Failed to identify controller");
goto out;
}
- if (le32_to_cpu(idctrl->nn) < namespace) {
+ if (le32_to_cpu(id->ctrl.nn) < namespace) {
error_setg(errp, "Invalid namespace");
goto out;
}
- s->write_cache_supported = le32_to_cpu(idctrl->vwc) & 0x1;
- s->max_transfer = (idctrl->mdts ? 1 << idctrl->mdts : 0) * s->page_size;
+ s->write_cache_supported = le32_to_cpu(id->ctrl.vwc) & 0x1;
+ s->max_transfer = (id->ctrl.mdts ? 1 << id->ctrl.mdts : 0) * s->page_size;
/* For now the page list buffer per command is one page, to hold at most
* s->page_size / sizeof(uint64_t) entries. */
s->max_transfer = MIN_NON_ZERO(s->max_transfer,
s->page_size / sizeof(uint64_t) * s->page_size);
- oncs = le16_to_cpu(idctrl->oncs);
+ oncs = le16_to_cpu(id->ctrl.oncs);
s->supports_write_zeroes = !!(oncs & NVME_ONCS_WRITE_ZEROES);
s->supports_discard = !!(oncs & NVME_ONCS_DSM);
- memset(resp, 0, 4096);
-
+ memset(id, 0, id_size);
cmd.cdw10 = 0;
cmd.nsid = cpu_to_le32(namespace);
- if (nvme_cmd_sync(bs, s->queues[INDEX_ADMIN], &cmd)) {
+ if (nvme_admin_cmd_sync(bs, &cmd)) {
error_setg(errp, "Failed to identify namespace");
goto out;
}
- s->nsze = le64_to_cpu(idns->nsze);
- lbaf = &idns->lbaf[NVME_ID_NS_FLBAS_INDEX(idns->flbas)];
+ s->nsze = le64_to_cpu(id->ns.nsze);
+ lbaf = &id->ns.lbaf[NVME_ID_NS_FLBAS_INDEX(id->ns.flbas)];
- if (NVME_ID_NS_DLFEAT_WRITE_ZEROES(idns->dlfeat) &&
- NVME_ID_NS_DLFEAT_READ_BEHAVIOR(idns->dlfeat) ==
+ if (NVME_ID_NS_DLFEAT_WRITE_ZEROES(id->ns.dlfeat) &&
+ NVME_ID_NS_DLFEAT_READ_BEHAVIOR(id->ns.dlfeat) ==
NVME_ID_NS_DLFEAT_READ_BEHAVIOR_ZEROES) {
bs->supported_write_flags |= BDRV_REQ_MAY_UNMAP;
}
goto out;
}
+ ret = true;
s->blkshift = lbaf->ds;
out:
- qemu_vfio_dma_unmap(s->vfio, resp);
- qemu_vfree(resp);
+ qemu_vfio_dma_unmap(s->vfio, id);
+ qemu_vfree(id);
+
+ return ret;
+}
+
+static bool nvme_poll_queue(NVMeQueuePair *q)
+{
+ bool progress = false;
+
+ const size_t cqe_offset = q->cq.head * NVME_CQ_ENTRY_BYTES;
+ NvmeCqe *cqe = (NvmeCqe *)&q->cq.queue[cqe_offset];
+
+ trace_nvme_poll_queue(q->s, q->index);
+ /*
+ * Do an early check for completions. q->lock isn't needed because
+ * nvme_process_completion() only runs in the event loop thread and
+ * cannot race with itself.
+ */
+ if ((le16_to_cpu(cqe->status) & 0x1) == q->cq_phase) {
+ return false;
+ }
+
+ qemu_mutex_lock(&q->lock);
+ while (nvme_process_completion(q)) {
+ /* Keep polling */
+ progress = true;
+ }
+ qemu_mutex_unlock(&q->lock);
+
+ return progress;
}
static bool nvme_poll_queues(BDRVNVMeState *s)
bool progress = false;
int i;
- for (i = 0; i < s->nr_queues; i++) {
- NVMeQueuePair *q = s->queues[i];
- const size_t cqe_offset = q->cq.head * NVME_CQ_ENTRY_BYTES;
- NvmeCqe *cqe = (NvmeCqe *)&q->cq.queue[cqe_offset];
-
- /*
- * Do an early check for completions. q->lock isn't needed because
- * nvme_process_completion() only runs in the event loop thread and
- * cannot race with itself.
- */
- if ((le16_to_cpu(cqe->status) & 0x1) == q->cq_phase) {
- continue;
- }
-
- qemu_mutex_lock(&q->lock);
- while (nvme_process_completion(q)) {
- /* Keep polling */
+ for (i = 0; i < s->queue_count; i++) {
+ if (nvme_poll_queue(s->queues[i])) {
progress = true;
}
- qemu_mutex_unlock(&q->lock);
}
return progress;
}
static void nvme_handle_event(EventNotifier *n)
{
- BDRVNVMeState *s = container_of(n, BDRVNVMeState, irq_notifier);
+ BDRVNVMeState *s = container_of(n, BDRVNVMeState,
+ irq_notifier[MSIX_SHARED_IRQ_IDX]);
trace_nvme_handle_event(s);
event_notifier_test_and_clear(n);
static bool nvme_add_io_queue(BlockDriverState *bs, Error **errp)
{
BDRVNVMeState *s = bs->opaque;
- int n = s->nr_queues;
+ unsigned n = s->queue_count;
NVMeQueuePair *q;
NvmeCmd cmd;
- int queue_size = NVME_QUEUE_SIZE;
+ unsigned queue_size = NVME_QUEUE_SIZE;
- q = nvme_create_queue_pair(bs, n, queue_size, errp);
+ assert(n <= UINT16_MAX);
+ q = nvme_create_queue_pair(s, bdrv_get_aio_context(bs),
+ n, queue_size, errp);
if (!q) {
return false;
}
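+ /* Queue sizes in CDW10 are zero's-based, hence queue_size - 1 below */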
cmd = (NvmeCmd) {
.opcode = NVME_ADM_CMD_CREATE_CQ,
.dptr.prp1 = cpu_to_le64(q->cq.iova),
- .cdw10 = cpu_to_le32(((queue_size - 1) << 16) | (n & 0xFFFF)),
- .cdw11 = cpu_to_le32(0x3),
+ .cdw10 = cpu_to_le32(((queue_size - 1) << 16) | n),
+ .cdw11 = cpu_to_le32(NVME_CQ_IEN | NVME_CQ_PC),
};
- if (nvme_cmd_sync(bs, s->queues[INDEX_ADMIN], &cmd)) {
- error_setg(errp, "Failed to create CQ io queue [%d]", n);
- nvme_free_queue_pair(q);
- return false;
+ if (nvme_admin_cmd_sync(bs, &cmd)) {
+ error_setg(errp, "Failed to create CQ io queue [%u]", n);
+ goto out_error;
}
cmd = (NvmeCmd) {
.opcode = NVME_ADM_CMD_CREATE_SQ,
.dptr.prp1 = cpu_to_le64(q->sq.iova),
- .cdw10 = cpu_to_le32(((queue_size - 1) << 16) | (n & 0xFFFF)),
- .cdw11 = cpu_to_le32(0x1 | (n << 16)),
+ .cdw10 = cpu_to_le32(((queue_size - 1) << 16) | n),
+ .cdw11 = cpu_to_le32(NVME_SQ_PC | (n << 16)),
};
- if (nvme_cmd_sync(bs, s->queues[INDEX_ADMIN], &cmd)) {
- error_setg(errp, "Failed to create SQ io queue [%d]", n);
- nvme_free_queue_pair(q);
- return false;
+ if (nvme_admin_cmd_sync(bs, &cmd)) {
+ error_setg(errp, "Failed to create SQ io queue [%u]", n);
+ goto out_error;
}
s->queues = g_renew(NVMeQueuePair *, s->queues, n + 1);
s->queues[n] = q;
- s->nr_queues++;
+ s->queue_count++;
return true;
+out_error:
+ nvme_free_queue_pair(q);
+ return false;
}
static bool nvme_poll_cb(void *opaque)
{
EventNotifier *e = opaque;
- BDRVNVMeState *s = container_of(e, BDRVNVMeState, irq_notifier);
+ BDRVNVMeState *s = container_of(e, BDRVNVMeState,
+ irq_notifier[MSIX_SHARED_IRQ_IDX]);
- trace_nvme_poll_cb(s);
return nvme_poll_queues(s);
}
Error **errp)
{
BDRVNVMeState *s = bs->opaque;
+ NVMeQueuePair *q;
+ AioContext *aio_context = bdrv_get_aio_context(bs);
int ret;
uint64_t cap;
+ uint32_t ver;
uint64_t timeout_ms;
uint64_t deadline, now;
- Error *local_err = NULL;
+ volatile NvmeBar *regs = NULL;
qemu_co_mutex_init(&s->dma_map_lock);
qemu_co_queue_init(&s->dma_flush_queue);
s->device = g_strdup(device);
s->nsid = namespace;
s->aio_context = bdrv_get_aio_context(bs);
- ret = event_notifier_init(&s->irq_notifier, 0);
+ ret = event_notifier_init(&s->irq_notifier[MSIX_SHARED_IRQ_IDX], 0);
if (ret) {
error_setg(errp, "Failed to init event notifier");
return ret;
goto out;
}
- s->regs = qemu_vfio_pci_map_bar(s->vfio, 0, 0, NVME_BAR_SIZE, errp);
- if (!s->regs) {
+ regs = qemu_vfio_pci_map_bar(s->vfio, 0, 0, sizeof(NvmeBar),
+ PROT_READ | PROT_WRITE, errp);
+ if (!regs) {
ret = -EINVAL;
goto out;
}
-
/* Perform initialize sequence as described in NVMe spec "7.6.1
* Initialization". */
- cap = le64_to_cpu(s->regs->cap);
- if (!(cap & (1ULL << 37))) {
+ cap = le64_to_cpu(regs->cap);
+ trace_nvme_controller_capability_raw(cap);
+ trace_nvme_controller_capability("Maximum Queue Entries Supported",
+ 1 + NVME_CAP_MQES(cap));
+ trace_nvme_controller_capability("Contiguous Queues Required",
+ NVME_CAP_CQR(cap));
+ trace_nvme_controller_capability("Doorbell Stride",
+ 1 << (2 + NVME_CAP_DSTRD(cap)));
+ trace_nvme_controller_capability("Subsystem Reset Supported",
+ NVME_CAP_NSSRS(cap));
+ trace_nvme_controller_capability("Memory Page Size Minimum",
+ 1 << (12 + NVME_CAP_MPSMIN(cap)));
+ trace_nvme_controller_capability("Memory Page Size Maximum",
+ 1 << (12 + NVME_CAP_MPSMAX(cap)));
+ if (!NVME_CAP_CSS(cap)) {
error_setg(errp, "Device doesn't support NVMe command set");
ret = -EINVAL;
goto out;
}
- s->page_size = MAX(4096, 1 << (12 + ((cap >> 48) & 0xF)));
- s->doorbell_scale = (4 << (((cap >> 32) & 0xF))) / sizeof(uint32_t);
+ s->page_size = 1u << (12 + NVME_CAP_MPSMIN(cap));
+ s->doorbell_scale = (4 << NVME_CAP_DSTRD(cap)) / sizeof(uint32_t);
bs->bl.opt_mem_alignment = s->page_size;
- timeout_ms = MIN(500 * ((cap >> 24) & 0xFF), 30000);
+ bs->bl.request_alignment = s->page_size;
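+ /* CAP.TO gives the worst-case ready timeout in units of 500 ms */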
+ timeout_ms = MIN(500 * NVME_CAP_TO(cap), 30000);
+
+ ver = le32_to_cpu(regs->vs);
+ trace_nvme_controller_spec_version(extract32(ver, 16, 16),
+ extract32(ver, 8, 8),
+ extract32(ver, 0, 8));
/* Reset device to get a clean state. */
- s->regs->cc = cpu_to_le32(le32_to_cpu(s->regs->cc) & 0xFE);
+ regs->cc = cpu_to_le32(le32_to_cpu(regs->cc) & 0xFE);
/* Wait for CSTS.RDY = 0. */
deadline = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + timeout_ms * SCALE_MS;
- while (le32_to_cpu(s->regs->csts) & 0x1) {
+ while (NVME_CSTS_RDY(le32_to_cpu(regs->csts))) {
if (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) > deadline) {
error_setg(errp, "Timeout while waiting for device to reset (%"
PRId64 " ms)",
}
}
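+ /* The driver never reads the doorbells, so a write-only mapping of
+ * BAR0 (registers + doorbell area) is sufficient here. */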
+ s->bar0_wo_map = qemu_vfio_pci_map_bar(s->vfio, 0, 0,
+ sizeof(NvmeBar) + NVME_DOORBELL_SIZE,
+ PROT_WRITE, errp);
+ if (!s->bar0_wo_map) {
+ ret = -EINVAL;
+ goto out;
+ }
+ s->doorbells = (void *)((uintptr_t)s->bar0_wo_map + sizeof(NvmeBar));
+
/* Set up admin queue. */
s->queues = g_new(NVMeQueuePair *, 1);
- s->queues[INDEX_ADMIN] = nvme_create_queue_pair(bs, 0,
- NVME_QUEUE_SIZE,
- errp);
- if (!s->queues[INDEX_ADMIN]) {
+ q = nvme_create_queue_pair(s, aio_context, 0, NVME_QUEUE_SIZE, errp);
+ if (!q) {
ret = -EINVAL;
goto out;
}
- s->nr_queues = 1;
- QEMU_BUILD_BUG_ON(NVME_QUEUE_SIZE & 0xF000);
- s->regs->aqa = cpu_to_le32((NVME_QUEUE_SIZE << 16) | NVME_QUEUE_SIZE);
- s->regs->asq = cpu_to_le64(s->queues[INDEX_ADMIN]->sq.iova);
- s->regs->acq = cpu_to_le64(s->queues[INDEX_ADMIN]->cq.iova);
+ s->queues[INDEX_ADMIN] = q;
+ s->queue_count = 1;
+ QEMU_BUILD_BUG_ON((NVME_QUEUE_SIZE - 1) & 0xF000);
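+ /* AQA.ASQS and AQA.ACQS hold zero's-based queue sizes, hence the "- 1" */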
+ regs->aqa = cpu_to_le32(((NVME_QUEUE_SIZE - 1) << AQA_ACQS_SHIFT) |
+ ((NVME_QUEUE_SIZE - 1) << AQA_ASQS_SHIFT));
+ regs->asq = cpu_to_le64(q->sq.iova);
+ regs->acq = cpu_to_le64(q->cq.iova);
/* After setting up all control registers we can enable device now. */
- s->regs->cc = cpu_to_le32((ctz32(NVME_CQ_ENTRY_BYTES) << 20) |
- (ctz32(NVME_SQ_ENTRY_BYTES) << 16) |
- 0x1);
+ regs->cc = cpu_to_le32((ctz32(NVME_CQ_ENTRY_BYTES) << CC_IOCQES_SHIFT) |
+ (ctz32(NVME_SQ_ENTRY_BYTES) << CC_IOSQES_SHIFT) |
+ CC_EN_MASK);
/* Wait for CSTS.RDY = 1. */
now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
- deadline = now + timeout_ms * 1000000;
- while (!(le32_to_cpu(s->regs->csts) & 0x1)) {
+ deadline = now + timeout_ms * SCALE_MS;
+ while (!NVME_CSTS_RDY(le32_to_cpu(regs->csts))) {
if (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) > deadline) {
error_setg(errp, "Timeout while waiting for device to start (%"
PRId64 " ms)",
}
}
- ret = qemu_vfio_pci_init_irq(s->vfio, &s->irq_notifier,
+ ret = qemu_vfio_pci_init_irq(s->vfio, s->irq_notifier,
VFIO_PCI_MSIX_IRQ_INDEX, errp);
if (ret) {
goto out;
}
- aio_set_event_notifier(bdrv_get_aio_context(bs), &s->irq_notifier,
+ aio_set_event_notifier(bdrv_get_aio_context(bs),
+ &s->irq_notifier[MSIX_SHARED_IRQ_IDX],
false, nvme_handle_event, nvme_poll_cb);
- nvme_identify(bs, namespace, &local_err);
- if (local_err) {
- error_propagate(errp, local_err);
+ if (!nvme_identify(bs, namespace, errp)) {
ret = -EIO;
goto out;
}
ret = -EIO;
}
out:
+ if (regs) {
+ qemu_vfio_pci_unmap_bar(s->vfio, 0, (void *)regs, 0, sizeof(NvmeBar));
+ }
+
/* Cleaning up is done in nvme_file_open() upon error. */
return ret;
}
.cdw11 = cpu_to_le32(enable ? 0x01 : 0x00),
};
- ret = nvme_cmd_sync(bs, s->queues[INDEX_ADMIN], &cmd);
+ ret = nvme_admin_cmd_sync(bs, &cmd);
if (ret) {
error_setg(errp, "Failed to configure NVMe write cache");
}
static void nvme_close(BlockDriverState *bs)
{
- int i;
BDRVNVMeState *s = bs->opaque;
- for (i = 0; i < s->nr_queues; ++i) {
+ for (unsigned i = 0; i < s->queue_count; ++i) {
nvme_free_queue_pair(s->queues[i]);
}
g_free(s->queues);
- aio_set_event_notifier(bdrv_get_aio_context(bs), &s->irq_notifier,
+ aio_set_event_notifier(bdrv_get_aio_context(bs),
+ &s->irq_notifier[MSIX_SHARED_IRQ_IDX],
false, NULL, NULL);
- event_notifier_cleanup(&s->irq_notifier);
- qemu_vfio_pci_unmap_bar(s->vfio, 0, (void *)s->regs, 0, NVME_BAR_SIZE);
+ event_notifier_cleanup(&s->irq_notifier[MSIX_SHARED_IRQ_IDX]);
+ qemu_vfio_pci_unmap_bar(s->vfio, 0, s->bar0_wo_map,
+ 0, sizeof(NvmeBar) + NVME_DOORBELL_SIZE);
qemu_vfio_close(s->vfio);
g_free(s->device);
for (i = 0; i < qiov->niov; ++i) {
bool retry = true;
uint64_t iova;
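+ /* DMA mappings are created at host-page granularity, so round the
+ * mapped length up to the host page size. */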
+ size_t len = QEMU_ALIGN_UP(qiov->iov[i].iov_len,
+ qemu_real_host_page_size);
try_map:
r = qemu_vfio_dma_map(s->vfio,
qiov->iov[i].iov_base,
- qiov->iov[i].iov_len,
- true, &iova);
+ len, true, &iova);
if (r == -ENOMEM && retry) {
retry = false;
trace_nvme_dma_flush_queue_wait(s);
};
trace_nvme_prw_aligned(s, is_write, offset, bytes, flags, qiov->niov);
- assert(s->nr_queues > 1);
+ assert(s->queue_count > 1);
req = nvme_get_free_req(ioq);
assert(req);
BDRVNVMeState *s = bs->opaque;
for (i = 0; i < qiov->niov; ++i) {
- if (!QEMU_PTR_IS_ALIGNED(qiov->iov[i].iov_base, s->page_size) ||
- !QEMU_IS_ALIGNED(qiov->iov[i].iov_len, s->page_size)) {
+ if (!QEMU_PTR_IS_ALIGNED(qiov->iov[i].iov_base,
+ qemu_real_host_page_size) ||
+ !QEMU_IS_ALIGNED(qiov->iov[i].iov_len, qemu_real_host_page_size)) {
trace_nvme_qiov_unaligned(qiov, i, qiov->iov[i].iov_base,
qiov->iov[i].iov_len, s->page_size);
return false;
int r;
uint8_t *buf = NULL;
QEMUIOVector local_qiov;
-
+ size_t len = QEMU_ALIGN_UP(bytes, qemu_real_host_page_size);
assert(QEMU_IS_ALIGNED(offset, s->page_size));
assert(QEMU_IS_ALIGNED(bytes, s->page_size));
assert(bytes <= s->max_transfer);
if (nvme_qiov_aligned(bs, qiov)) {
+ s->stats.aligned_accesses++;
return nvme_co_prw_aligned(bs, offset, bytes, qiov, is_write, flags);
}
+ s->stats.unaligned_accesses++;
trace_nvme_prw_buffered(s, offset, bytes, qiov->niov, is_write);
- buf = qemu_try_blockalign(bs, bytes);
+ buf = qemu_try_memalign(qemu_real_host_page_size, len);
if (!buf) {
return -ENOMEM;
.ret = -EINPROGRESS,
};
- assert(s->nr_queues > 1);
+ assert(s->queue_count > 1);
req = nvme_get_free_req(ioq);
assert(req);
nvme_submit_command(ioq, req, &cmd, nvme_rw_cb, &data);
cmd.cdw12 = cpu_to_le32(cdw12);
trace_nvme_write_zeroes(s, offset, bytes, flags);
- assert(s->nr_queues > 1);
+ assert(s->queue_count > 1);
req = nvme_get_free_req(ioq);
assert(req);
return -ENOTSUP;
}
- assert(s->nr_queues > 1);
+ assert(s->queue_count > 1);
- buf = qemu_try_blockalign0(bs, s->page_size);
+ buf = qemu_try_memalign(s->page_size, s->page_size);
if (!buf) {
return -ENOMEM;
}
-
+ memset(buf, 0, s->page_size);
buf->nlb = cpu_to_le32(bytes >> s->blkshift);
buf->slba = cpu_to_le64(offset >> s->blkshift);
buf->cattr = 0;
}
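+/*
+ * NVMe devices cannot really be resized; accept only truncate requests
+ * that keep the current length (or shrink it non-exactly) and treat
+ * them as no-ops.
+ */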
+static int coroutine_fn nvme_co_truncate(BlockDriverState *bs, int64_t offset,
+ bool exact, PreallocMode prealloc,
+ BdrvRequestFlags flags, Error **errp)
+{
+ int64_t cur_length;
+
+ if (prealloc != PREALLOC_MODE_OFF) {
+ error_setg(errp, "Unsupported preallocation mode '%s'",
+ PreallocMode_str(prealloc));
+ return -ENOTSUP;
+ }
+
+ cur_length = nvme_getlength(bs);
+ if (offset != cur_length && exact) {
+ error_setg(errp, "Cannot resize NVMe devices");
+ return -ENOTSUP;
+ } else if (offset > cur_length) {
+ error_setg(errp, "Cannot grow NVMe devices");
+ return -EINVAL;
+ }
+
+ return 0;
+}
static int nvme_reopen_prepare(BDRVReopenState *reopen_state,
BlockReopenQueue *queue, Error **errp)
{
BDRVNVMeState *s = bs->opaque;
- for (int i = 0; i < s->nr_queues; i++) {
+ for (unsigned i = 0; i < s->queue_count; i++) {
NVMeQueuePair *q = s->queues[i];
qemu_bh_delete(q->completion_bh);
q->completion_bh = NULL;
}
- aio_set_event_notifier(bdrv_get_aio_context(bs), &s->irq_notifier,
+ aio_set_event_notifier(bdrv_get_aio_context(bs),
+ &s->irq_notifier[MSIX_SHARED_IRQ_IDX],
false, NULL, NULL);
}
BDRVNVMeState *s = bs->opaque;
s->aio_context = new_context;
- aio_set_event_notifier(new_context, &s->irq_notifier,
+ aio_set_event_notifier(new_context, &s->irq_notifier[MSIX_SHARED_IRQ_IDX],
false, nvme_handle_event, nvme_poll_cb);
- for (int i = 0; i < s->nr_queues; i++) {
+ for (unsigned i = 0; i < s->queue_count; i++) {
NVMeQueuePair *q = s->queues[i];
q->completion_bh =
static void nvme_aio_unplug(BlockDriverState *bs)
{
- int i;
BDRVNVMeState *s = bs->opaque;
assert(s->plugged);
s->plugged = false;
- for (i = INDEX_IO(0); i < s->nr_queues; i++) {
+ for (unsigned i = INDEX_IO(0); i < s->queue_count; i++) {
NVMeQueuePair *q = s->queues[i];
qemu_mutex_lock(&q->lock);
nvme_kick(q);
qemu_vfio_dma_unmap(s->vfio, host);
}
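+/* Driver-specific statistics, surfaced via query-blockstats */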
+static BlockStatsSpecific *nvme_get_specific_stats(BlockDriverState *bs)
+{
+ BlockStatsSpecific *stats = g_new(BlockStatsSpecific, 1);
+ BDRVNVMeState *s = bs->opaque;
+
+ stats->driver = BLOCKDEV_DRIVER_NVME;
+ stats->u.nvme = (BlockStatsSpecificNvme) {
+ .completion_errors = s->stats.completion_errors,
+ .aligned_accesses = s->stats.aligned_accesses,
+ .unaligned_accesses = s->stats.unaligned_accesses,
+ };
+
+ return stats;
+}
+
static const char *const nvme_strong_runtime_opts[] = {
NVME_BLOCK_OPT_DEVICE,
NVME_BLOCK_OPT_NAMESPACE,
.bdrv_close = nvme_close,
.bdrv_getlength = nvme_getlength,
.bdrv_probe_blocksizes = nvme_probe_blocksizes,
+ .bdrv_co_truncate = nvme_co_truncate,
.bdrv_co_preadv = nvme_co_preadv,
.bdrv_co_pwritev = nvme_co_pwritev,
.bdrv_refresh_filename = nvme_refresh_filename,
.bdrv_refresh_limits = nvme_refresh_limits,
.strong_runtime_opts = nvme_strong_runtime_opts,
+ .bdrv_get_specific_stats = nvme_get_specific_stats,
.bdrv_detach_aio_context = nvme_detach_aio_context,
.bdrv_attach_aio_context = nvme_attach_aio_context,