/*
 * NVMe block driver based on vfio
 *
 * Copyright 2016 - 2018 Red Hat, Inc.
 *
 * Authors:
 *   Fam Zheng <famz@redhat.com>
 *   Paolo Bonzini <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include <linux/vfio.h>
#include "qapi/error.h"
#include "qapi/qmp/qdict.h"
#include "qapi/qmp/qstring.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/module.h"
#include "qemu/cutils.h"
#include "qemu/option.h"
#include "qemu/vfio-helpers.h"
#include "block/block_int.h"
#include "sysemu/replay.h"
#include "trace.h"

#include "block/nvme.h"
#define NVME_SQ_ENTRY_BYTES 64
#define NVME_CQ_ENTRY_BYTES 16
#define NVME_QUEUE_SIZE 128
#define NVME_BAR_SIZE 8192

/*
 * We have to leave one slot empty as that is the full queue case where
 * head == tail + 1.
 */
#define NVME_NUM_REQS (NVME_QUEUE_SIZE - 1)
    /* Hardware MMIO register */
    volatile uint32_t *doorbell;

    BlockCompletionFunc *cb;

    uint64_t prp_list_iova;
    int free_req_next; /* q->reqs[] index of next free req */

    CoQueue     free_req_queue;

    /* Fields protected by BQL */
    uint8_t     *prp_list_pages;

    /* Fields protected by @lock */
    NVMeRequest reqs[NVME_NUM_REQS];
/* Memory mapped registers */
typedef volatile struct {
    uint8_t  reserved1[0xec0];
    uint8_t  cmd_set_specfic[0x100];

QEMU_BUILD_BUG_ON(offsetof(NVMeRegs, doorbells) != 0x1000);
    AioContext *aio_context;

    /* The submission/completion queue pairs.
     * [0]: admin queue.
     * Others: io queues.
     */
    NVMeQueuePair **queues;

    /* How many uint32_t elements does each doorbell entry take. */
    size_t doorbell_scale;
    bool write_cache_supported;
    EventNotifier irq_notifier;

    uint64_t nsze; /* Namespace size reported by identify command */
    int nsid;      /* The namespace id used to read/write data */

    uint64_t max_transfer;

    bool supports_write_zeroes;
    bool supports_discard;

    CoMutex dma_map_lock;
    CoQueue dma_flush_queue;

    /* Total size of mapped qiov, accessed under dma_map_lock */
    int dma_map_count;

    /* PCI address (required for nvme_refresh_filename()) */
    char *device;
#define NVME_BLOCK_OPT_DEVICE "device"
#define NVME_BLOCK_OPT_NAMESPACE "namespace"
static QemuOptsList runtime_opts = {
    .name = "nvme",
    .head = QTAILQ_HEAD_INITIALIZER(runtime_opts.head),
    .desc = {
        {
            .name = NVME_BLOCK_OPT_DEVICE,
            .type = QEMU_OPT_STRING,
            .help = "NVMe PCI device address",
        },
        {
            .name = NVME_BLOCK_OPT_NAMESPACE,
            .type = QEMU_OPT_NUMBER,
            .help = "NVMe namespace",
        },
        { /* end of list */ }
    },
};
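/* Allocate the ring buffer for one submission or completion queue and map it
 * for DMA through VFIO. */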
static void nvme_init_queue(BlockDriverState *bs, NVMeQueue *q,
                            int nentries, int entry_bytes, Error **errp)
    BDRVNVMeState *s = bs->opaque;

    bytes = ROUND_UP(nentries * entry_bytes, s->page_size);
    q->head = q->tail = 0;
    q->queue = qemu_try_blockalign0(bs, bytes);
        error_setg(errp, "Cannot allocate queue");
    r = qemu_vfio_dma_map(s->vfio, q->queue, bytes, false, &q->iova);
        error_setg(errp, "Cannot map queue");
static void nvme_free_queue_pair(BlockDriverState *bs, NVMeQueuePair *q)
    qemu_vfree(q->prp_list_pages);
    qemu_vfree(q->sq.queue);
    qemu_vfree(q->cq.queue);
    qemu_mutex_destroy(&q->lock);
static void nvme_free_req_queue_cb(void *opaque)
    NVMeQueuePair *q = opaque;

    qemu_mutex_lock(&q->lock);
    while (qemu_co_enter_next(&q->free_req_queue, &q->lock)) {
        /* Retry all pending requests */
    qemu_mutex_unlock(&q->lock);
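/* Allocate a submission/completion queue pair along with one PRP list page
 * per request, and point its doorbells at the right BAR offsets. */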
static NVMeQueuePair *nvme_create_queue_pair(BlockDriverState *bs,
                                             int idx, int size, Error **errp)
    BDRVNVMeState *s = bs->opaque;
    Error *local_err = NULL;
    NVMeQueuePair *q = g_new0(NVMeQueuePair, 1);
    uint64_t prp_list_iova;

    qemu_mutex_init(&q->lock);
    qemu_co_queue_init(&q->free_req_queue);
    q->prp_list_pages = qemu_blockalign0(bs, s->page_size * NVME_NUM_REQS);
    r = qemu_vfio_dma_map(s->vfio, q->prp_list_pages,
                          s->page_size * NVME_NUM_REQS,
                          false, &prp_list_iova);
    q->free_req_head = -1;
    for (i = 0; i < NVME_NUM_REQS; i++) {
        NVMeRequest *req = &q->reqs[i];
        req->free_req_next = q->free_req_head;
        q->free_req_head = i;
        req->prp_list_page = q->prp_list_pages + i * s->page_size;
        req->prp_list_iova = prp_list_iova + i * s->page_size;

    nvme_init_queue(bs, &q->sq, size, NVME_SQ_ENTRY_BYTES, &local_err);
        error_propagate(errp, local_err);
    q->sq.doorbell = &s->regs->doorbells[idx * 2 * s->doorbell_scale];

    nvme_init_queue(bs, &q->cq, size, NVME_CQ_ENTRY_BYTES, &local_err);
        error_propagate(errp, local_err);
    q->cq.doorbell = &s->regs->doorbells[(idx * 2 + 1) * s->doorbell_scale];

    nvme_free_queue_pair(bs, q);
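/* Ring the submission queue doorbell so the device picks up newly queued
 * commands. */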
static void nvme_kick(BDRVNVMeState *s, NVMeQueuePair *q)
    if (s->plugged || !q->need_kick) {
    trace_nvme_kick(s, q->index);
    assert(!(q->sq.tail & 0xFF00));
    /* Fence the write to submission queue entry before notifying the device. */
    *q->sq.doorbell = cpu_to_le32(q->sq.tail);
    q->inflight += q->need_kick;

/* Find a free request element if any, otherwise:
 * a) if in coroutine context, try to wait for one to become available;
 * b) if not in coroutine, return NULL;
 */
static NVMeRequest *nvme_get_free_req(NVMeQueuePair *q)
    qemu_mutex_lock(&q->lock);
    while (q->free_req_head == -1) {
        if (qemu_in_coroutine()) {
            trace_nvme_free_req_queue_wait(q);
            qemu_co_queue_wait(&q->free_req_queue, &q->lock);
            qemu_mutex_unlock(&q->lock);
    req = &q->reqs[q->free_req_head];
    q->free_req_head = req->free_req_next;
    req->free_req_next = -1;
    qemu_mutex_unlock(&q->lock);
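/* Return a request slot to the free list.  Caller must hold q->lock. */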
static void nvme_put_free_req_locked(NVMeQueuePair *q, NVMeRequest *req)
    req->free_req_next = q->free_req_head;
    q->free_req_head = req - q->reqs;
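/* Schedule a bottom half to wake coroutines waiting on the free request
 * queue.  Caller must hold q->lock. */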
static void nvme_wake_free_req_locked(BDRVNVMeState *s, NVMeQueuePair *q)
    if (!qemu_co_queue_empty(&q->free_req_queue)) {
        replay_bh_schedule_oneshot_event(s->aio_context,
                                         nvme_free_req_queue_cb, q);
/* Insert a request in the freelist and wake waiters */
static void nvme_put_free_req_and_wake(BDRVNVMeState *s, NVMeQueuePair *q,
                                       NVMeRequest *req)
    qemu_mutex_lock(&q->lock);
    nvme_put_free_req_locked(q, req);
    nvme_wake_free_req_locked(s, q);
    qemu_mutex_unlock(&q->lock);
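/* Map the status field of a completion queue entry onto a negative errno
 * value (0 on success). */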
static inline int nvme_translate_error(const NvmeCqe *c)
    uint16_t status = (le16_to_cpu(c->status) >> 1) & 0xFF;
        trace_nvme_error(le32_to_cpu(c->result),
                         le16_to_cpu(c->sq_head),
                         le16_to_cpu(c->sq_id),
                         le16_to_cpu(status));
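/* Reap completed commands from the completion queue and invoke their
 * callbacks; returns true if any progress was made.  Called with q->lock
 * held. */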
static bool nvme_process_completion(BDRVNVMeState *s, NVMeQueuePair *q)
    bool progress = false;

    trace_nvme_process_completion(s, q->index, q->inflight);
    if (q->busy || s->plugged) {
        trace_nvme_process_completion_queue_busy(s, q->index);
    assert(q->inflight >= 0);
    while (q->inflight) {
        c = (NvmeCqe *)&q->cq.queue[q->cq.head * NVME_CQ_ENTRY_BYTES];
        if ((le16_to_cpu(c->status) & 0x1) == q->cq_phase) {
        ret = nvme_translate_error(c);
        q->cq.head = (q->cq.head + 1) % NVME_QUEUE_SIZE;
            q->cq_phase = !q->cq_phase;
        cid = le16_to_cpu(c->cid);
        if (cid == 0 || cid > NVME_QUEUE_SIZE) {
            fprintf(stderr, "Unexpected CID in completion queue: %" PRIu32 "\n",
        trace_nvme_complete_command(s, q->index, cid);
        preq = &q->reqs[cid - 1];
        assert(req.cid == cid);
        nvme_put_free_req_locked(q, preq);
        preq->cb = preq->opaque = NULL;
        qemu_mutex_unlock(&q->lock);
        req.cb(req.opaque, ret);
        qemu_mutex_lock(&q->lock);
        /* Notify the device so it can post more completions. */
        *q->cq.doorbell = cpu_to_le32(q->cq.head);
        nvme_wake_free_req_locked(s, q);

static void nvme_trace_command(const NvmeCmd *cmd)
    for (i = 0; i < 8; ++i) {
        uint8_t *cmdp = (uint8_t *)cmd + i * 8;
        trace_nvme_submit_command_raw(cmdp[0], cmdp[1], cmdp[2], cmdp[3],
                                      cmdp[4], cmdp[5], cmdp[6], cmdp[7]);
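/* Copy the command into the submission queue ring, then kick the doorbell
 * and poll for completions. */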
static void nvme_submit_command(BDRVNVMeState *s, NVMeQueuePair *q,
                                NVMeRequest *req,
                                NvmeCmd *cmd, BlockCompletionFunc cb,
                                void *opaque)
    req->opaque = opaque;
    cmd->cid = cpu_to_le32(req->cid);

    trace_nvme_submit_command(s, q->index, req->cid);
    nvme_trace_command(cmd);
    qemu_mutex_lock(&q->lock);
    memcpy((uint8_t *)q->sq.queue +
           q->sq.tail * NVME_SQ_ENTRY_BYTES, cmd, sizeof(*cmd));
    q->sq.tail = (q->sq.tail + 1) % NVME_QUEUE_SIZE;
    nvme_process_completion(s, q);
    qemu_mutex_unlock(&q->lock);
static void nvme_cmd_sync_cb(void *opaque, int ret)
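/* Submit a command and poll until it completes; used for synchronous admin
 * commands during device setup. */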
static int nvme_cmd_sync(BlockDriverState *bs, NVMeQueuePair *q,
                         NvmeCmd *cmd)
    BDRVNVMeState *s = bs->opaque;
    int ret = -EINPROGRESS;
    req = nvme_get_free_req(q);
    nvme_submit_command(s, q, req, cmd, nvme_cmd_sync_cb, &ret);
    BDRV_POLL_WHILE(bs, ret == -EINPROGRESS);
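/* Issue IDENTIFY commands for the controller and the namespace, and cache
 * the limits (max transfer size, block size, optional command support). */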
static void nvme_identify(BlockDriverState *bs, int namespace, Error **errp)
    BDRVNVMeState *s = bs->opaque;
        .opcode = NVME_ADM_CMD_IDENTIFY,
        .cdw10 = cpu_to_le32(0x1),

    resp = qemu_try_blockalign0(bs, sizeof(NvmeIdCtrl));
        error_setg(errp, "Cannot allocate buffer for identify response");
    idctrl = (NvmeIdCtrl *)resp;
    idns = (NvmeIdNs *)resp;
    r = qemu_vfio_dma_map(s->vfio, resp, sizeof(NvmeIdCtrl), true, &iova);
        error_setg(errp, "Cannot map buffer for DMA");
    cmd.prp1 = cpu_to_le64(iova);

    if (nvme_cmd_sync(bs, s->queues[0], &cmd)) {
        error_setg(errp, "Failed to identify controller");

    if (le32_to_cpu(idctrl->nn) < namespace) {
        error_setg(errp, "Invalid namespace");
    s->write_cache_supported = le32_to_cpu(idctrl->vwc) & 0x1;
    s->max_transfer = (idctrl->mdts ? 1 << idctrl->mdts : 0) * s->page_size;
    /* For now the page list buffer per command is one page, to hold at most
     * s->page_size / sizeof(uint64_t) entries. */
    s->max_transfer = MIN_NON_ZERO(s->max_transfer,
                                   s->page_size / sizeof(uint64_t) * s->page_size);

    oncs = le16_to_cpu(idctrl->oncs);
    s->supports_write_zeroes = !!(oncs & NVME_ONCS_WRITE_ZEROS);
    s->supports_discard = !!(oncs & NVME_ONCS_DSM);

    memset(resp, 0, 4096);
    cmd.nsid = cpu_to_le32(namespace);
    if (nvme_cmd_sync(bs, s->queues[0], &cmd)) {
        error_setg(errp, "Failed to identify namespace");

    s->nsze = le64_to_cpu(idns->nsze);
    lbaf = &idns->lbaf[NVME_ID_NS_FLBAS_INDEX(idns->flbas)];

    if (NVME_ID_NS_DLFEAT_WRITE_ZEROES(idns->dlfeat) &&
        NVME_ID_NS_DLFEAT_READ_BEHAVIOR(idns->dlfeat) ==
            NVME_ID_NS_DLFEAT_READ_BEHAVIOR_ZEROES) {
        bs->supported_write_flags |= BDRV_REQ_MAY_UNMAP;

        error_setg(errp, "Namespaces with metadata are not yet supported");

    if (lbaf->ds < BDRV_SECTOR_BITS || lbaf->ds > 12 ||
        (1 << lbaf->ds) > s->page_size)
        error_setg(errp, "Namespace has unsupported block size (2^%d)",

    s->blkshift = lbaf->ds;

    qemu_vfio_dma_unmap(s->vfio, resp);
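/* Poll every queue pair for pending completions without waiting for an
 * interrupt; returns true if any completion was processed. */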
static bool nvme_poll_queues(BDRVNVMeState *s)
    bool progress = false;

    for (i = 0; i < s->nr_queues; i++) {
        NVMeQueuePair *q = s->queues[i];
        const size_t cqe_offset = q->cq.head * NVME_CQ_ENTRY_BYTES;
        NvmeCqe *cqe = (NvmeCqe *)&q->cq.queue[cqe_offset];

        /*
         * Do an early check for completions. q->lock isn't needed because
         * nvme_process_completion() only runs in the event loop thread and
         * cannot race with itself.
         */
        if ((le16_to_cpu(cqe->status) & 0x1) == q->cq_phase) {

        qemu_mutex_lock(&q->lock);
        while (nvme_process_completion(s, q)) {
        qemu_mutex_unlock(&q->lock);
static void nvme_handle_event(EventNotifier *n)
    BDRVNVMeState *s = container_of(n, BDRVNVMeState, irq_notifier);

    trace_nvme_handle_event(s);
    event_notifier_test_and_clear(n);
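/* Create an I/O queue pair and register it with the controller via the
 * CREATE CQ / CREATE SQ admin commands. */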
static bool nvme_add_io_queue(BlockDriverState *bs, Error **errp)
    BDRVNVMeState *s = bs->opaque;
    int n = s->nr_queues;
    int queue_size = NVME_QUEUE_SIZE;

    q = nvme_create_queue_pair(bs, n, queue_size, errp);
        .opcode = NVME_ADM_CMD_CREATE_CQ,
        .prp1 = cpu_to_le64(q->cq.iova),
        .cdw10 = cpu_to_le32(((queue_size - 1) << 16) | (n & 0xFFFF)),
        .cdw11 = cpu_to_le32(0x3),
    if (nvme_cmd_sync(bs, s->queues[0], &cmd)) {
        error_setg(errp, "Failed to create io queue [%d]", n);
        nvme_free_queue_pair(bs, q);
        .opcode = NVME_ADM_CMD_CREATE_SQ,
        .prp1 = cpu_to_le64(q->sq.iova),
        .cdw10 = cpu_to_le32(((queue_size - 1) << 16) | (n & 0xFFFF)),
        .cdw11 = cpu_to_le32(0x1 | (n << 16)),
    if (nvme_cmd_sync(bs, s->queues[0], &cmd)) {
        error_setg(errp, "Failed to create io queue [%d]", n);
        nvme_free_queue_pair(bs, q);
    s->queues = g_renew(NVMeQueuePair *, s->queues, n + 1);

static bool nvme_poll_cb(void *opaque)
    EventNotifier *e = opaque;
    BDRVNVMeState *s = container_of(e, BDRVNVMeState, irq_notifier);

    trace_nvme_poll_cb(s);
    return nvme_poll_queues(s);
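/* Open the PCI device through VFIO, reset and enable the controller, and set
 * up the admin queue and the first I/O queue. */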
static int nvme_init(BlockDriverState *bs, const char *device, int namespace,
                     Error **errp)
    BDRVNVMeState *s = bs->opaque;
    uint64_t deadline, now;
    Error *local_err = NULL;

    qemu_co_mutex_init(&s->dma_map_lock);
    qemu_co_queue_init(&s->dma_flush_queue);
    s->device = g_strdup(device);
    s->aio_context = bdrv_get_aio_context(bs);
    ret = event_notifier_init(&s->irq_notifier, 0);
        error_setg(errp, "Failed to init event notifier");

    s->vfio = qemu_vfio_open_pci(device, errp);
    s->regs = qemu_vfio_pci_map_bar(s->vfio, 0, 0, NVME_BAR_SIZE, errp);

    /* Perform the initialization sequence described in NVMe spec
     * "7.6.1 Initialization". */
    cap = le64_to_cpu(s->regs->cap);
    if (!(cap & (1ULL << 37))) {
        error_setg(errp, "Device doesn't support NVMe command set");

    s->page_size = MAX(4096, 1 << (12 + ((cap >> 48) & 0xF)));
    s->doorbell_scale = (4 << (((cap >> 32) & 0xF))) / sizeof(uint32_t);
    bs->bl.opt_mem_alignment = s->page_size;
    timeout_ms = MIN(500 * ((cap >> 24) & 0xFF), 30000);

    /* Reset device to get a clean state. */
    s->regs->cc = cpu_to_le32(le32_to_cpu(s->regs->cc) & 0xFE);
    /* Wait for CSTS.RDY = 0. */
    deadline = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + timeout_ms * 1000000ULL;
    while (le32_to_cpu(s->regs->csts) & 0x1) {
        if (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) > deadline) {
            error_setg(errp, "Timeout while waiting for device to reset (%"

    /* Set up admin queue. */
    s->queues = g_new(NVMeQueuePair *, 1);
    s->queues[0] = nvme_create_queue_pair(bs, 0, NVME_QUEUE_SIZE, errp);
    QEMU_BUILD_BUG_ON(NVME_QUEUE_SIZE & 0xF000);
    s->regs->aqa = cpu_to_le32((NVME_QUEUE_SIZE << 16) | NVME_QUEUE_SIZE);
    s->regs->asq = cpu_to_le64(s->queues[0]->sq.iova);
    s->regs->acq = cpu_to_le64(s->queues[0]->cq.iova);

    /* After setting up all control registers we can now enable the device. */
    s->regs->cc = cpu_to_le32((ctz32(NVME_CQ_ENTRY_BYTES) << 20) |
                              (ctz32(NVME_SQ_ENTRY_BYTES) << 16) |
    /* Wait for CSTS.RDY = 1. */
    now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    deadline = now + timeout_ms * 1000000;
    while (!(le32_to_cpu(s->regs->csts) & 0x1)) {
        if (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) > deadline) {
            error_setg(errp, "Timeout while waiting for device to start (%"

    ret = qemu_vfio_pci_init_irq(s->vfio, &s->irq_notifier,
                                 VFIO_PCI_MSIX_IRQ_INDEX, errp);
    aio_set_event_notifier(bdrv_get_aio_context(bs), &s->irq_notifier,
                           false, nvme_handle_event, nvme_poll_cb);

    nvme_identify(bs, namespace, &local_err);
        error_propagate(errp, local_err);

    /* Set up command queues. */
    if (!nvme_add_io_queue(bs, errp)) {
    /* Cleaning up is done in nvme_file_open() upon error. */
/* Parse a filename in the format of nvme://XXXX:XX:XX.X/X. Example:
 *
 *     nvme://0000:44:00.0/1
 *
 * where "nvme://" is the fixed protocol prefix, the middle part is the PCI
 * address, and the last part is the namespace number, starting from 1
 * according to the NVMe spec. */
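/*
 * For example (illustrative command line only), such a filename can be
 * passed to QEMU as:
 *
 *     -drive file=nvme://0000:44:00.0/1,if=none,id=nvme0
 */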
static void nvme_parse_filename(const char *filename, QDict *options,
                                Error **errp)
    int pref = strlen("nvme://");

    if (strlen(filename) > pref && !strncmp(filename, "nvme://", pref)) {
        const char *tmp = filename + pref;
        const char *namespace;
        const char *slash = strchr(tmp, '/');
            qdict_put_str(options, NVME_BLOCK_OPT_DEVICE, tmp);
        device = g_strndup(tmp, slash - tmp);
        qdict_put_str(options, NVME_BLOCK_OPT_DEVICE, device);
        namespace = slash + 1;
        if (*namespace && qemu_strtoul(namespace, NULL, 10, &ns)) {
            error_setg(errp, "Invalid namespace '%s', positive number expected",
        qdict_put_str(options, NVME_BLOCK_OPT_NAMESPACE,
                      *namespace ? namespace : "1");
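/* Toggle the volatile write cache with a SET FEATURES admin command
 * (feature id 0x06). */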
static int nvme_enable_disable_write_cache(BlockDriverState *bs, bool enable,
                                           Error **errp)
    BDRVNVMeState *s = bs->opaque;
        .opcode = NVME_ADM_CMD_SET_FEATURES,
        .nsid = cpu_to_le32(s->nsid),
        .cdw10 = cpu_to_le32(0x06),
        .cdw11 = cpu_to_le32(enable ? 0x01 : 0x00),

    ret = nvme_cmd_sync(bs, s->queues[0], &cmd);
        error_setg(errp, "Failed to configure NVMe write cache");
static void nvme_close(BlockDriverState *bs)
    BDRVNVMeState *s = bs->opaque;

    for (i = 0; i < s->nr_queues; ++i) {
        nvme_free_queue_pair(bs, s->queues[i]);
    aio_set_event_notifier(bdrv_get_aio_context(bs), &s->irq_notifier,
    event_notifier_cleanup(&s->irq_notifier);
    qemu_vfio_pci_unmap_bar(s->vfio, 0, (void *)s->regs, 0, NVME_BAR_SIZE);
    qemu_vfio_close(s->vfio);
static int nvme_file_open(BlockDriverState *bs, QDict *options, int flags,
                          Error **errp)
    BDRVNVMeState *s = bs->opaque;

    bs->supported_write_flags = BDRV_REQ_FUA;

    opts = qemu_opts_create(&runtime_opts, NULL, 0, &error_abort);
    qemu_opts_absorb_qdict(opts, options, &error_abort);
    device = qemu_opt_get(opts, NVME_BLOCK_OPT_DEVICE);
        error_setg(errp, "'" NVME_BLOCK_OPT_DEVICE "' option is required");
    namespace = qemu_opt_get_number(opts, NVME_BLOCK_OPT_NAMESPACE, 1);
    ret = nvme_init(bs, device, namespace, errp);
    if (flags & BDRV_O_NOCACHE) {
        if (!s->write_cache_supported) {
                "NVMe controller doesn't support write cache configuration");
        ret = nvme_enable_disable_write_cache(bs, !(flags & BDRV_O_NOCACHE),
static int64_t nvme_getlength(BlockDriverState *bs)
    BDRVNVMeState *s = bs->opaque;
    return s->nsze << s->blkshift;

static uint32_t nvme_get_blocksize(BlockDriverState *bs)
    BDRVNVMeState *s = bs->opaque;
    assert(s->blkshift >= BDRV_SECTOR_BITS && s->blkshift <= 12);
    return UINT32_C(1) << s->blkshift;

static int nvme_probe_blocksizes(BlockDriverState *bs, BlockSizes *bsz)
    uint32_t blocksize = nvme_get_blocksize(bs);
    bsz->phys = blocksize;
    bsz->log = blocksize;
/* Called with s->dma_map_lock */
static coroutine_fn int nvme_cmd_unmap_qiov(BlockDriverState *bs,
                                            QEMUIOVector *qiov)
    BDRVNVMeState *s = bs->opaque;

    s->dma_map_count -= qiov->size;
    if (!s->dma_map_count && !qemu_co_queue_empty(&s->dma_flush_queue)) {
        r = qemu_vfio_dma_reset_temporary(s->vfio);
            qemu_co_queue_restart_all(&s->dma_flush_queue);
/* Called with s->dma_map_lock */
static coroutine_fn int nvme_cmd_map_qiov(BlockDriverState *bs, NvmeCmd *cmd,
                                          NVMeRequest *req, QEMUIOVector *qiov)
    BDRVNVMeState *s = bs->opaque;
    uint64_t *pagelist = req->prp_list_page;

    assert(QEMU_IS_ALIGNED(qiov->size, s->page_size));
    assert(qiov->size / s->page_size <= s->page_size / sizeof(uint64_t));
    for (i = 0; i < qiov->niov; ++i) {
        r = qemu_vfio_dma_map(s->vfio,
                              qiov->iov[i].iov_base,
                              qiov->iov[i].iov_len,
        if (r == -ENOMEM && retry) {
            trace_nvme_dma_flush_queue_wait(s);
            if (s->dma_map_count) {
                trace_nvme_dma_map_flush(s);
                qemu_co_queue_wait(&s->dma_flush_queue, &s->dma_map_lock);
                r = qemu_vfio_dma_reset_temporary(s->vfio);
        for (j = 0; j < qiov->iov[i].iov_len / s->page_size; j++) {
            pagelist[entries++] = cpu_to_le64(iova + j * s->page_size);
        trace_nvme_cmd_map_qiov_iov(s, i, qiov->iov[i].iov_base,
                                    qiov->iov[i].iov_len / s->page_size);
    s->dma_map_count += qiov->size;

    assert(entries <= s->page_size / sizeof(uint64_t));
        cmd->prp1 = pagelist[0];
        cmd->prp1 = pagelist[0];
        cmd->prp2 = pagelist[1];
        cmd->prp1 = pagelist[0];
        cmd->prp2 = cpu_to_le64(req->prp_list_iova + sizeof(uint64_t));
    trace_nvme_cmd_map_qiov(s, cmd, req, qiov, entries);
    for (i = 0; i < entries; ++i) {
        trace_nvme_cmd_map_qiov_pages(s, i, pagelist[i]);

    /* No need to unmap [0 - i) iovs even if we've failed, since we don't
     * increment s->dma_map_count. This is okay for fixed mapping memory areas
     * because they are already mapped before calling this function; for
     * temporary mappings, a later nvme_cmd_(un)map_qiov will reclaim by
     * calling qemu_vfio_dma_reset_temporary when necessary. */

static void nvme_rw_cb_bh(void *opaque)
    NVMeCoData *data = opaque;
    qemu_coroutine_enter(data->co);

static void nvme_rw_cb(void *opaque, int ret)
    NVMeCoData *data = opaque;
    /* The rw coroutine hasn't yielded, don't try to enter. */
    replay_bh_schedule_oneshot_event(data->ctx, nvme_rw_cb_bh, data);
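/* Issue a single READ/WRITE command for a page-aligned request and yield
 * until nvme_rw_cb() reports completion. */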
static coroutine_fn int nvme_co_prw_aligned(BlockDriverState *bs,
                                            uint64_t offset, uint64_t bytes,
                                            QEMUIOVector *qiov, bool is_write,
                                            int flags)
    BDRVNVMeState *s = bs->opaque;
    NVMeQueuePair *ioq = s->queues[1];
    uint32_t cdw12 = (((bytes >> s->blkshift) - 1) & 0xFFFF) |
                     (flags & BDRV_REQ_FUA ? 1 << 30 : 0);
        .opcode = is_write ? NVME_CMD_WRITE : NVME_CMD_READ,
        .nsid = cpu_to_le32(s->nsid),
        .cdw10 = cpu_to_le32((offset >> s->blkshift) & 0xFFFFFFFF),
        .cdw11 = cpu_to_le32(((offset >> s->blkshift) >> 32) & 0xFFFFFFFF),
        .cdw12 = cpu_to_le32(cdw12),
        .ctx = bdrv_get_aio_context(bs),
        .ret = -EINPROGRESS,

    trace_nvme_prw_aligned(s, is_write, offset, bytes, flags, qiov->niov);
    assert(s->nr_queues > 1);
    req = nvme_get_free_req(ioq);

    qemu_co_mutex_lock(&s->dma_map_lock);
    r = nvme_cmd_map_qiov(bs, &cmd, req, qiov);
    qemu_co_mutex_unlock(&s->dma_map_lock);
        nvme_put_free_req_and_wake(s, ioq, req);
    nvme_submit_command(s, ioq, req, &cmd, nvme_rw_cb, &data);

    data.co = qemu_coroutine_self();
    while (data.ret == -EINPROGRESS) {
        qemu_coroutine_yield();

    qemu_co_mutex_lock(&s->dma_map_lock);
    r = nvme_cmd_unmap_qiov(bs, qiov);
    qemu_co_mutex_unlock(&s->dma_map_lock);

    trace_nvme_rw_done(s, is_write, offset, bytes, data.ret);
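/* Check that every iovec element is aligned to the device page size. */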
static inline bool nvme_qiov_aligned(BlockDriverState *bs,
                                     const QEMUIOVector *qiov)
    BDRVNVMeState *s = bs->opaque;

    for (i = 0; i < qiov->niov; ++i) {
        if (!QEMU_PTR_IS_ALIGNED(qiov->iov[i].iov_base, s->page_size) ||
            !QEMU_IS_ALIGNED(qiov->iov[i].iov_len, s->page_size)) {
            trace_nvme_qiov_unaligned(qiov, i, qiov->iov[i].iov_base,
                                      qiov->iov[i].iov_len, s->page_size);
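/* Top-level read/write helper: use the caller's buffers directly when they
 * are page aligned, otherwise bounce through a temporary aligned buffer. */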
static int nvme_co_prw(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
                       QEMUIOVector *qiov, bool is_write, int flags)
    BDRVNVMeState *s = bs->opaque;
    uint8_t *buf = NULL;
    QEMUIOVector local_qiov;

    assert(QEMU_IS_ALIGNED(offset, s->page_size));
    assert(QEMU_IS_ALIGNED(bytes, s->page_size));
    assert(bytes <= s->max_transfer);
    if (nvme_qiov_aligned(bs, qiov)) {
        return nvme_co_prw_aligned(bs, offset, bytes, qiov, is_write, flags);
    trace_nvme_prw_buffered(s, offset, bytes, qiov->niov, is_write);
    buf = qemu_try_blockalign(bs, bytes);

    qemu_iovec_init(&local_qiov, 1);
        qemu_iovec_to_buf(qiov, 0, buf, bytes);
    qemu_iovec_add(&local_qiov, buf, bytes);
    r = nvme_co_prw_aligned(bs, offset, bytes, &local_qiov, is_write, flags);
    qemu_iovec_destroy(&local_qiov);
    if (!r && !is_write) {
        qemu_iovec_from_buf(qiov, 0, buf, bytes);

static coroutine_fn int nvme_co_preadv(BlockDriverState *bs,
                                       uint64_t offset, uint64_t bytes,
                                       QEMUIOVector *qiov, int flags)
    return nvme_co_prw(bs, offset, bytes, qiov, false, flags);

static coroutine_fn int nvme_co_pwritev(BlockDriverState *bs,
                                        uint64_t offset, uint64_t bytes,
                                        QEMUIOVector *qiov, int flags)
    return nvme_co_prw(bs, offset, bytes, qiov, true, flags);

static coroutine_fn int nvme_co_flush(BlockDriverState *bs)
    BDRVNVMeState *s = bs->opaque;
    NVMeQueuePair *ioq = s->queues[1];
        .opcode = NVME_CMD_FLUSH,
        .nsid = cpu_to_le32(s->nsid),
        .ctx = bdrv_get_aio_context(bs),
        .ret = -EINPROGRESS,

    assert(s->nr_queues > 1);
    req = nvme_get_free_req(ioq);
    nvme_submit_command(s, ioq, req, &cmd, nvme_rw_cb, &data);

    data.co = qemu_coroutine_self();
    if (data.ret == -EINPROGRESS) {
        qemu_coroutine_yield();
static coroutine_fn int nvme_co_pwrite_zeroes(BlockDriverState *bs,
                                              int64_t offset, int bytes,
                                              BdrvRequestFlags flags)
    BDRVNVMeState *s = bs->opaque;
    NVMeQueuePair *ioq = s->queues[1];
    uint32_t cdw12 = ((bytes >> s->blkshift) - 1) & 0xFFFF;

    if (!s->supports_write_zeroes) {
        .opcode = NVME_CMD_WRITE_ZEROS,
        .nsid = cpu_to_le32(s->nsid),
        .cdw10 = cpu_to_le32((offset >> s->blkshift) & 0xFFFFFFFF),
        .cdw11 = cpu_to_le32(((offset >> s->blkshift) >> 32) & 0xFFFFFFFF),
        .ctx = bdrv_get_aio_context(bs),
        .ret = -EINPROGRESS,

    if (flags & BDRV_REQ_MAY_UNMAP) {
    if (flags & BDRV_REQ_FUA) {
    cmd.cdw12 = cpu_to_le32(cdw12);

    trace_nvme_write_zeroes(s, offset, bytes, flags);
    assert(s->nr_queues > 1);
    req = nvme_get_free_req(ioq);
    nvme_submit_command(s, ioq, req, &cmd, nvme_rw_cb, &data);

    data.co = qemu_coroutine_self();
    while (data.ret == -EINPROGRESS) {
        qemu_coroutine_yield();

    trace_nvme_rw_done(s, true, offset, bytes, data.ret);

static int coroutine_fn nvme_co_pdiscard(BlockDriverState *bs,
                                         int64_t offset, int bytes)
    BDRVNVMeState *s = bs->opaque;
    NVMeQueuePair *ioq = s->queues[1];
    QEMUIOVector local_qiov;
        .opcode = NVME_CMD_DSM,
        .nsid = cpu_to_le32(s->nsid),
        .cdw10 = cpu_to_le32(0),      /* number of ranges - 0 based */
        .cdw11 = cpu_to_le32(1 << 2), /* deallocate bit */
        .ctx = bdrv_get_aio_context(bs),
        .ret = -EINPROGRESS,

    if (!s->supports_discard) {
    assert(s->nr_queues > 1);

    buf = qemu_try_blockalign0(bs, s->page_size);
    buf->nlb = cpu_to_le32(bytes >> s->blkshift);
    buf->slba = cpu_to_le64(offset >> s->blkshift);

    qemu_iovec_init(&local_qiov, 1);
    qemu_iovec_add(&local_qiov, buf, 4096);

    req = nvme_get_free_req(ioq);
    qemu_co_mutex_lock(&s->dma_map_lock);
    ret = nvme_cmd_map_qiov(bs, &cmd, req, &local_qiov);
    qemu_co_mutex_unlock(&s->dma_map_lock);
        nvme_put_free_req_and_wake(s, ioq, req);

    trace_nvme_dsm(s, offset, bytes);
    nvme_submit_command(s, ioq, req, &cmd, nvme_rw_cb, &data);

    data.co = qemu_coroutine_self();
    while (data.ret == -EINPROGRESS) {
        qemu_coroutine_yield();

    qemu_co_mutex_lock(&s->dma_map_lock);
    ret = nvme_cmd_unmap_qiov(bs, &local_qiov);
    qemu_co_mutex_unlock(&s->dma_map_lock);

    trace_nvme_dsm_done(s, offset, bytes, ret);
    qemu_iovec_destroy(&local_qiov);
static int nvme_reopen_prepare(BDRVReopenState *reopen_state,
                               BlockReopenQueue *queue, Error **errp)

static void nvme_refresh_filename(BlockDriverState *bs)
    BDRVNVMeState *s = bs->opaque;

    snprintf(bs->exact_filename, sizeof(bs->exact_filename), "nvme://%s/%i",
             s->device, s->nsid);

static void nvme_refresh_limits(BlockDriverState *bs, Error **errp)
    BDRVNVMeState *s = bs->opaque;

    bs->bl.opt_mem_alignment = s->page_size;
    bs->bl.request_alignment = s->page_size;
    bs->bl.max_transfer = s->max_transfer;

static void nvme_detach_aio_context(BlockDriverState *bs)
    BDRVNVMeState *s = bs->opaque;

    aio_set_event_notifier(bdrv_get_aio_context(bs), &s->irq_notifier,

static void nvme_attach_aio_context(BlockDriverState *bs,
                                    AioContext *new_context)
    BDRVNVMeState *s = bs->opaque;

    s->aio_context = new_context;
    aio_set_event_notifier(new_context, &s->irq_notifier,
                           false, nvme_handle_event, nvme_poll_cb);

static void nvme_aio_plug(BlockDriverState *bs)
    BDRVNVMeState *s = bs->opaque;
    assert(!s->plugged);

static void nvme_aio_unplug(BlockDriverState *bs)
    BDRVNVMeState *s = bs->opaque;
    for (i = 1; i < s->nr_queues; i++) {
        NVMeQueuePair *q = s->queues[i];
        qemu_mutex_lock(&q->lock);
        nvme_process_completion(s, q);
        qemu_mutex_unlock(&q->lock);

static void nvme_register_buf(BlockDriverState *bs, void *host, size_t size)
    BDRVNVMeState *s = bs->opaque;

    ret = qemu_vfio_dma_map(s->vfio, host, size, false, NULL);
        /* FIXME: we may run out of IOVA addresses after repeated
         * bdrv_register_buf/bdrv_unregister_buf, because nvme_vfio_dma_unmap
         * doesn't reclaim addresses for fixed mappings. */
        error_report("nvme_register_buf failed: %s", strerror(-ret));

static void nvme_unregister_buf(BlockDriverState *bs, void *host)
    BDRVNVMeState *s = bs->opaque;

    qemu_vfio_dma_unmap(s->vfio, host);
static const char *const nvme_strong_runtime_opts[] = {
    NVME_BLOCK_OPT_DEVICE,
    NVME_BLOCK_OPT_NAMESPACE,

    NULL
};

static BlockDriver bdrv_nvme = {
    .format_name              = "nvme",
    .protocol_name            = "nvme",
    .instance_size            = sizeof(BDRVNVMeState),

    .bdrv_co_create_opts      = bdrv_co_create_opts_simple,
    .create_opts              = &bdrv_create_opts_simple,

    .bdrv_parse_filename      = nvme_parse_filename,
    .bdrv_file_open           = nvme_file_open,
    .bdrv_close               = nvme_close,
    .bdrv_getlength           = nvme_getlength,
    .bdrv_probe_blocksizes    = nvme_probe_blocksizes,

    .bdrv_co_preadv           = nvme_co_preadv,
    .bdrv_co_pwritev          = nvme_co_pwritev,

    .bdrv_co_pwrite_zeroes    = nvme_co_pwrite_zeroes,
    .bdrv_co_pdiscard         = nvme_co_pdiscard,

    .bdrv_co_flush_to_disk    = nvme_co_flush,
    .bdrv_reopen_prepare      = nvme_reopen_prepare,

    .bdrv_refresh_filename    = nvme_refresh_filename,
    .bdrv_refresh_limits      = nvme_refresh_limits,
    .strong_runtime_opts      = nvme_strong_runtime_opts,

    .bdrv_detach_aio_context  = nvme_detach_aio_context,
    .bdrv_attach_aio_context  = nvme_attach_aio_context,

    .bdrv_io_plug             = nvme_aio_plug,
    .bdrv_io_unplug           = nvme_aio_unplug,

    .bdrv_register_buf        = nvme_register_buf,
    .bdrv_unregister_buf      = nvme_unregister_buf,
};

static void bdrv_nvme_init(void)
{
    bdrv_register(&bdrv_nvme);
}

block_init(bdrv_nvme_init);