/*
 * NVMe block driver based on vfio
 *
 * Copyright 2016 - 2018 Red Hat, Inc.
 *
 * Authors:
 *   Fam Zheng <famz@redhat.com>
 *   Paolo Bonzini <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include <linux/vfio.h>
#include "qapi/error.h"
#include "qapi/qmp/qdict.h"
#include "qapi/qmp/qstring.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/module.h"
#include "qemu/cutils.h"
#include "qemu/option.h"
#include "qemu/vfio-helpers.h"
#include "block/block_int.h"
#include "sysemu/replay.h"
#include "trace.h"

#include "block/nvme.h"

#define NVME_SQ_ENTRY_BYTES 64
#define NVME_CQ_ENTRY_BYTES 16
#define NVME_QUEUE_SIZE 128
#define NVME_BAR_SIZE 8192

/*
 * We have to leave one slot empty as that is the full queue case where
 * head == tail + 1.
 */
#define NVME_NUM_REQS (NVME_QUEUE_SIZE - 1)
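
/* With NVME_QUEUE_SIZE of 128 this allows at most 127 requests in flight per
 * queue pair. */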

typedef struct BDRVNVMeState BDRVNVMeState;

typedef struct {
    int32_t  head, tail;
    uint8_t  *queue;
    uint64_t iova;
    /* Hardware MMIO register */
    volatile uint32_t *doorbell;
} NVMeQueue;

typedef struct {
    BlockCompletionFunc *cb;
    void *opaque;
    int cid;
    void *prp_list_page;
    uint64_t prp_list_iova;
    int free_req_next; /* q->reqs[] index of next free req */
} NVMeRequest;

typedef struct {
    QemuMutex   lock;

    /* Read from I/O code path, initialized under BQL */
    BDRVNVMeState   *s;
    int             index;

    /* Fields protected by BQL */
    uint8_t     *prp_list_pages;

    /* Fields protected by @lock */
    CoQueue     free_req_queue;
    NVMeQueue   sq, cq;
    int         cq_phase;
    int         free_req_head;
    NVMeRequest reqs[NVME_NUM_REQS];
    int         need_kick;
    int         inflight;

    /* Thread-safe, no lock necessary */
    QEMUBH      *completion_bh;
} NVMeQueuePair;
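
/*
 * Note: q->reqs[] doubles as a free list. free_req_head and each request's
 * free_req_next chain the free slots together by array index, and a request's
 * command identifier (cid) is its array index plus one, so a completion with
 * identifier cid maps back to reqs[cid - 1].
 */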

/* Memory mapped registers */
typedef volatile struct {
    uint64_t cap;
    uint32_t vs;
    uint32_t intms;
    uint32_t intmc;
    uint32_t cc;
    uint32_t reserved0;
    uint32_t csts;
    uint32_t nssr;
    uint32_t aqa;
    uint64_t asq;
    uint64_t acq;
    uint32_t cmbloc;
    uint32_t cmbsz;
    uint8_t  reserved1[0xec0];
    uint8_t  cmd_set_specfic[0x100];
    uint32_t doorbells[];
} NVMeRegs;

QEMU_BUILD_BUG_ON(offsetof(NVMeRegs, doorbells) != 0x1000);

#define INDEX_ADMIN     0
#define INDEX_IO(n)     (1 + n)
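
/*
 * Doorbell registers start at BAR offset 0x1000, which the build-time check
 * above enforces. For the queue pair with index i, the submission queue tail
 * doorbell is doorbells[i * 2 * doorbell_scale] and the completion queue head
 * doorbell is doorbells[(i * 2 + 1) * doorbell_scale]; doorbell_scale is
 * derived from the controller's doorbell stride (CAP.DSTRD).
 */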

struct BDRVNVMeState {
    AioContext *aio_context;
    QEMUVFIOState *vfio;
    NVMeRegs *regs;
    /* The submission/completion queue pairs.
     * [0]: admin queue.
     * [1..]: io queues.
     */
    NVMeQueuePair **queues;
    int nr_queues;
    size_t page_size;
    /* How many uint32_t elements does each doorbell entry take. */
    size_t doorbell_scale;
    bool write_cache_supported;
    EventNotifier irq_notifier;

    uint64_t nsze; /* Namespace size reported by identify command */
    int nsid;      /* The namespace id to read/write data. */
    int blkshift;

    uint64_t max_transfer;
    bool plugged;

    bool supports_write_zeroes;
    bool supports_discard;

    CoMutex dma_map_lock;
    CoQueue dma_flush_queue;

    /* Total size of mapped qiov, accessed under dma_map_lock */
    int dma_map_count;

    /* PCI address (required for nvme_refresh_filename()) */
    char *device;
};

#define NVME_BLOCK_OPT_DEVICE "device"
#define NVME_BLOCK_OPT_NAMESPACE "namespace"

static void nvme_process_completion_bh(void *opaque);

static QemuOptsList runtime_opts = {
    .name = "nvme",
    .head = QTAILQ_HEAD_INITIALIZER(runtime_opts.head),
    .desc = {
        {
            .name = NVME_BLOCK_OPT_DEVICE,
            .type = QEMU_OPT_STRING,
            .help = "NVMe PCI device address",
        },
        {
            .name = NVME_BLOCK_OPT_NAMESPACE,
            .type = QEMU_OPT_NUMBER,
            .help = "NVMe namespace",
        },
        { /* end of list */ }
    },
};

static void nvme_init_queue(BlockDriverState *bs, NVMeQueue *q,
                            int nentries, int entry_bytes, Error **errp)
{
    BDRVNVMeState *s = bs->opaque;
    size_t bytes;
    int r;

    bytes = ROUND_UP(nentries * entry_bytes, s->page_size);
    q->head = q->tail = 0;
    q->queue = qemu_try_memalign(s->page_size, bytes);
    if (!q->queue) {
        error_setg(errp, "Cannot allocate queue");
        return;
    }
    memset(q->queue, 0, bytes);
    r = qemu_vfio_dma_map(s->vfio, q->queue, bytes, false, &q->iova);
    if (r) {
        error_setg(errp, "Cannot map queue");
    }
}

static void nvme_free_queue_pair(NVMeQueuePair *q)
{
    if (q->completion_bh) {
        qemu_bh_delete(q->completion_bh);
    }
    qemu_vfree(q->prp_list_pages);
    qemu_vfree(q->sq.queue);
    qemu_vfree(q->cq.queue);
    qemu_mutex_destroy(&q->lock);
    g_free(q);
}

static void nvme_free_req_queue_cb(void *opaque)
{
    NVMeQueuePair *q = opaque;

    qemu_mutex_lock(&q->lock);
    while (qemu_co_enter_next(&q->free_req_queue, &q->lock)) {
        /* Retry all pending requests */
    }
    qemu_mutex_unlock(&q->lock);
}

static NVMeQueuePair *nvme_create_queue_pair(BlockDriverState *bs,
                                             int idx, int size,
                                             Error **errp)
{
    int i, r;
    BDRVNVMeState *s = bs->opaque;
    Error *local_err = NULL;
    NVMeQueuePair *q;
    uint64_t prp_list_iova;

    q = g_try_new0(NVMeQueuePair, 1);
    if (!q) {
        return NULL;
    }
    q->prp_list_pages = qemu_try_memalign(s->page_size,
                                          s->page_size * NVME_NUM_REQS);
    if (!q->prp_list_pages) {
        goto fail;
    }
    memset(q->prp_list_pages, 0, s->page_size * NVME_NUM_REQS);
    qemu_mutex_init(&q->lock);
    q->s = s;
    q->index = idx;
    qemu_co_queue_init(&q->free_req_queue);
    q->completion_bh = aio_bh_new(bdrv_get_aio_context(bs),
                                  nvme_process_completion_bh, q);
    r = qemu_vfio_dma_map(s->vfio, q->prp_list_pages,
                          s->page_size * NVME_NUM_REQS,
                          false, &prp_list_iova);
    if (r) {
        goto fail;
    }
    q->free_req_head = -1;
    for (i = 0; i < NVME_NUM_REQS; i++) {
        NVMeRequest *req = &q->reqs[i];
        req->cid = i + 1;
        req->free_req_next = q->free_req_head;
        q->free_req_head = i;
        req->prp_list_page = q->prp_list_pages + i * s->page_size;
        req->prp_list_iova = prp_list_iova + i * s->page_size;
    }

    nvme_init_queue(bs, &q->sq, size, NVME_SQ_ENTRY_BYTES, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto fail;
    }
    q->sq.doorbell = &s->regs->doorbells[idx * 2 * s->doorbell_scale];

    nvme_init_queue(bs, &q->cq, size, NVME_CQ_ENTRY_BYTES, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto fail;
    }
    q->cq.doorbell = &s->regs->doorbells[(idx * 2 + 1) * s->doorbell_scale];

    return q;
fail:
    nvme_free_queue_pair(q);
    return NULL;
}

/* With q->lock */
static void nvme_kick(NVMeQueuePair *q)
{
    BDRVNVMeState *s = q->s;

    if (s->plugged || !q->need_kick) {
        return;
    }
    trace_nvme_kick(s, q->index);
    assert(!(q->sq.tail & 0xFF00));
    /* Fence the write to submission queue entry before notifying the device. */
    smp_wmb();
    *q->sq.doorbell = cpu_to_le32(q->sq.tail);
    q->inflight += q->need_kick;
    q->need_kick = 0;
}

/* Find a free request element if any, otherwise:
 * a) if in coroutine context, try to wait for one to become available;
 * b) if not in coroutine, return NULL;
 */
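/*
 * A request returned here is owned by the caller until it is either submitted
 * with nvme_submit_command() or handed back via nvme_put_free_req_and_wake().
 */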
static NVMeRequest *nvme_get_free_req(NVMeQueuePair *q)
{
    NVMeRequest *req;

    qemu_mutex_lock(&q->lock);

    while (q->free_req_head == -1) {
        if (qemu_in_coroutine()) {
            trace_nvme_free_req_queue_wait(q);
            qemu_co_queue_wait(&q->free_req_queue, &q->lock);
        } else {
            qemu_mutex_unlock(&q->lock);
            return NULL;
        }
    }

    req = &q->reqs[q->free_req_head];
    q->free_req_head = req->free_req_next;
    req->free_req_next = -1;

    qemu_mutex_unlock(&q->lock);
    return req;
}

/* With q->lock */
static void nvme_put_free_req_locked(NVMeQueuePair *q, NVMeRequest *req)
{
    req->free_req_next = q->free_req_head;
    q->free_req_head = req - q->reqs;
}

/* With q->lock */
static void nvme_wake_free_req_locked(NVMeQueuePair *q)
{
    if (!qemu_co_queue_empty(&q->free_req_queue)) {
        replay_bh_schedule_oneshot_event(q->s->aio_context,
                                         nvme_free_req_queue_cb, q);
    }
}

/* Insert a request in the freelist and wake waiters */
static void nvme_put_free_req_and_wake(NVMeQueuePair *q, NVMeRequest *req)
{
    qemu_mutex_lock(&q->lock);
    nvme_put_free_req_locked(q, req);
    nvme_wake_free_req_locked(q);
    qemu_mutex_unlock(&q->lock);
}

static inline int nvme_translate_error(const NvmeCqe *c)
{
    uint16_t status = (le16_to_cpu(c->status) >> 1) & 0xFF;
    if (status) {
        trace_nvme_error(le32_to_cpu(c->result),
                         le16_to_cpu(c->sq_head),
                         le16_to_cpu(c->sq_id),
                         le16_to_cpu(c->cid),
                         le16_to_cpu(status));
    }
    switch (status) {
    case 0:
        return 0;
    case 1:
        return -ENOSYS;
    case 2:
        return -EINVAL;
    default:
        return -EIO;
    }
}
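
/*
 * The low bit of each completion entry's status field is the phase tag. The
 * controller toggles it on every pass through the completion queue, so an
 * entry whose phase bit still equals q->cq_phase has not been posted yet;
 * q->cq_phase is flipped whenever the head index wraps around.
 */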
static bool nvme_process_completion(NVMeQueuePair *q)
{
    BDRVNVMeState *s = q->s;
    bool progress = false;
    NVMeRequest *preq;
    NVMeRequest req;
    NvmeCqe *c;

    trace_nvme_process_completion(s, q->index, q->inflight);
    if (s->plugged) {
        trace_nvme_process_completion_queue_plugged(s, q->index);
        return false;
    }

    /*
     * Support re-entrancy when a request cb() function invokes aio_poll().
     * Pending completions must be visible to aio_poll() so that a cb()
     * function can wait for the completion of another request.
     *
     * The aio_poll() loop will execute our BH and we'll resume completion
     * processing there.
     */
    qemu_bh_schedule(q->completion_bh);

    assert(q->inflight >= 0);
    while (q->inflight) {
        int ret;
        uint32_t cid;

        c = (NvmeCqe *)&q->cq.queue[q->cq.head * NVME_CQ_ENTRY_BYTES];
        if ((le16_to_cpu(c->status) & 0x1) == q->cq_phase) {
            break;
        }
        ret = nvme_translate_error(c);
        q->cq.head = (q->cq.head + 1) % NVME_QUEUE_SIZE;
        if (!q->cq.head) {
            q->cq_phase = !q->cq_phase;
        }
        cid = le16_to_cpu(c->cid);
        if (cid == 0 || cid > NVME_QUEUE_SIZE) {
            fprintf(stderr, "Unexpected CID in completion queue: %" PRIu32 "\n",
                    cid);
            continue;
        }
        trace_nvme_complete_command(s, q->index, cid);
        preq = &q->reqs[cid - 1];
        req = *preq;
        assert(req.cid == cid);
        assert(req.cb);
        nvme_put_free_req_locked(q, preq);
        preq->cb = preq->opaque = NULL;
        q->inflight--;
        qemu_mutex_unlock(&q->lock);
        req.cb(req.opaque, ret);
        qemu_mutex_lock(&q->lock);
        progress = true;
    }
    if (progress) {
        /* Notify the device so it can post more completions. */
        smp_mb_release();
        *q->cq.doorbell = cpu_to_le32(q->cq.head);
        nvme_wake_free_req_locked(q);
    }

    qemu_bh_cancel(q->completion_bh);

    return progress;
}

static void nvme_process_completion_bh(void *opaque)
{
    NVMeQueuePair *q = opaque;

    /*
     * We're being invoked because a nvme_process_completion() cb() function
     * called aio_poll(). The callback may be waiting for further completions
     * so notify the device that it has space to fill in more completions now.
     */
    smp_mb_release();
    *q->cq.doorbell = cpu_to_le32(q->cq.head);
    nvme_wake_free_req_locked(q);

    nvme_process_completion(q);
}

static void nvme_trace_command(const NvmeCmd *cmd)
{
    int i;

    if (!trace_event_get_state_backends(TRACE_NVME_SUBMIT_COMMAND_RAW)) {
        return;
    }
    for (i = 0; i < 8; ++i) {
        uint8_t *cmdp = (uint8_t *)cmd + i * 8;
        trace_nvme_submit_command_raw(cmdp[0], cmdp[1], cmdp[2], cmdp[3],
                                      cmdp[4], cmdp[5], cmdp[6], cmdp[7]);
    }
}

static void nvme_submit_command(NVMeQueuePair *q, NVMeRequest *req,
                                NvmeCmd *cmd, BlockCompletionFunc cb,
                                void *opaque)
{
    assert(!req->cb);
    req->cb = cb;
    req->opaque = opaque;
    cmd->cid = cpu_to_le32(req->cid);

    trace_nvme_submit_command(q->s, q->index, req->cid);
    nvme_trace_command(cmd);
    qemu_mutex_lock(&q->lock);
    memcpy((uint8_t *)q->sq.queue +
           q->sq.tail * NVME_SQ_ENTRY_BYTES, cmd, sizeof(*cmd));
    q->sq.tail = (q->sq.tail + 1) % NVME_QUEUE_SIZE;
    q->need_kick++;
    nvme_kick(q);
    nvme_process_completion(q);
    qemu_mutex_unlock(&q->lock);
}

static void nvme_cmd_sync_cb(void *opaque, int ret)
{
    int *pret = opaque;
    *pret = ret;
    aio_wait_kick();
}

static int nvme_cmd_sync(BlockDriverState *bs, NVMeQueuePair *q,
                         NvmeCmd *cmd)
{
    NVMeRequest *req;
    int ret = -EINPROGRESS;

    req = nvme_get_free_req(q);
    if (!req) {
        return -EBUSY;
    }
    nvme_submit_command(q, req, cmd, nvme_cmd_sync_cb, &ret);

    BDRV_POLL_WHILE(bs, ret == -EINPROGRESS);
    return ret;
}

static void nvme_identify(BlockDriverState *bs, int namespace, Error **errp)
{
    BDRVNVMeState *s = bs->opaque;
    union {
        NvmeIdCtrl ctrl;
        NvmeIdNs ns;
    } *id;
    NvmeLBAF *lbaf;
    uint16_t oncs;
    int r;
    uint64_t iova;
    NvmeCmd cmd = {
        .opcode = NVME_ADM_CMD_IDENTIFY,
        .cdw10 = cpu_to_le32(0x1),
    };

    id = qemu_try_memalign(s->page_size, sizeof(*id));
    if (!id) {
        error_setg(errp, "Cannot allocate buffer for identify response");
        goto out;
    }
    r = qemu_vfio_dma_map(s->vfio, id, sizeof(*id), true, &iova);
    if (r) {
        error_setg(errp, "Cannot map buffer for DMA");
        goto out;
    }

    memset(id, 0, sizeof(*id));
    cmd.dptr.prp1 = cpu_to_le64(iova);
    if (nvme_cmd_sync(bs, s->queues[INDEX_ADMIN], &cmd)) {
        error_setg(errp, "Failed to identify controller");
        goto out;
    }

    if (le32_to_cpu(id->ctrl.nn) < namespace) {
        error_setg(errp, "Invalid namespace");
        goto out;
    }
    s->write_cache_supported = le32_to_cpu(id->ctrl.vwc) & 0x1;
    s->max_transfer = (id->ctrl.mdts ? 1 << id->ctrl.mdts : 0) * s->page_size;
    /* For now the page list buffer per command is one page, to hold at most
     * s->page_size / sizeof(uint64_t) entries. */
    s->max_transfer = MIN_NON_ZERO(s->max_transfer,
                          s->page_size / sizeof(uint64_t) * s->page_size);

    oncs = le16_to_cpu(id->ctrl.oncs);
    s->supports_write_zeroes = !!(oncs & NVME_ONCS_WRITE_ZEROES);
    s->supports_discard = !!(oncs & NVME_ONCS_DSM);

    memset(id, 0, sizeof(*id));
    cmd.cdw10 = cpu_to_le32(0x0);
    cmd.nsid = cpu_to_le32(namespace);
    if (nvme_cmd_sync(bs, s->queues[INDEX_ADMIN], &cmd)) {
        error_setg(errp, "Failed to identify namespace");
        goto out;
    }

    s->nsze = le64_to_cpu(id->ns.nsze);
    lbaf = &id->ns.lbaf[NVME_ID_NS_FLBAS_INDEX(id->ns.flbas)];

    if (NVME_ID_NS_DLFEAT_WRITE_ZEROES(id->ns.dlfeat) &&
        NVME_ID_NS_DLFEAT_READ_BEHAVIOR(id->ns.dlfeat) ==
            NVME_ID_NS_DLFEAT_READ_BEHAVIOR_ZEROES) {
        bs->supported_write_flags |= BDRV_REQ_MAY_UNMAP;
    }

    if (lbaf->ms) {
        error_setg(errp, "Namespaces with metadata are not yet supported");
        goto out;
    }

    if (lbaf->ds < BDRV_SECTOR_BITS || lbaf->ds > 12 ||
        (1 << lbaf->ds) > s->page_size)
    {
        error_setg(errp, "Namespace has unsupported block size (2^%d)",
                   lbaf->ds);
        goto out;
    }

    s->blkshift = lbaf->ds;
out:
    qemu_vfio_dma_unmap(s->vfio, id);
    qemu_vfree(id);
}

static bool nvme_poll_queues(BDRVNVMeState *s)
{
    bool progress = false;
    int i;

    for (i = 0; i < s->nr_queues; i++) {
        NVMeQueuePair *q = s->queues[i];
        const size_t cqe_offset = q->cq.head * NVME_CQ_ENTRY_BYTES;
        NvmeCqe *cqe = (NvmeCqe *)&q->cq.queue[cqe_offset];

        /*
         * Do an early check for completions. q->lock isn't needed because
         * nvme_process_completion() only runs in the event loop thread and
         * cannot race with itself.
         */
        if ((le16_to_cpu(cqe->status) & 0x1) == q->cq_phase) {
            continue;
        }

        qemu_mutex_lock(&q->lock);
        while (nvme_process_completion(q)) {
            /* Keep polling */
            progress = true;
        }
        qemu_mutex_unlock(&q->lock);
    }
    return progress;
}

static void nvme_handle_event(EventNotifier *n)
{
    BDRVNVMeState *s = container_of(n, BDRVNVMeState, irq_notifier);

    trace_nvme_handle_event(s);
    event_notifier_test_and_clear(n);
    nvme_poll_queues(s);
}
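
/*
 * An I/O queue pair is created with two admin commands, CREATE_CQ and
 * CREATE_SQ. In both, cdw10 carries the 0-based queue size in its upper 16
 * bits and the queue identifier in its lower 16 bits. For the CQ, cdw11 = 0x3
 * requests a physically contiguous queue with interrupts enabled; for the SQ,
 * cdw11 sets the contiguous flag and names the completion queue to attach to
 * in its upper 16 bits.
 */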
static bool nvme_add_io_queue(BlockDriverState *bs, Error **errp)
{
    BDRVNVMeState *s = bs->opaque;
    int n = s->nr_queues;
    NVMeQueuePair *q;
    NvmeCmd cmd;
    int queue_size = NVME_QUEUE_SIZE;

    q = nvme_create_queue_pair(bs, n, queue_size, errp);
    if (!q) {
        return false;
    }
    cmd = (NvmeCmd) {
        .opcode = NVME_ADM_CMD_CREATE_CQ,
        .dptr.prp1 = cpu_to_le64(q->cq.iova),
        .cdw10 = cpu_to_le32(((queue_size - 1) << 16) | (n & 0xFFFF)),
        .cdw11 = cpu_to_le32(0x3),
    };
    if (nvme_cmd_sync(bs, s->queues[INDEX_ADMIN], &cmd)) {
        error_setg(errp, "Failed to create CQ io queue [%d]", n);
        goto out_error;
    }
    cmd = (NvmeCmd) {
        .opcode = NVME_ADM_CMD_CREATE_SQ,
        .dptr.prp1 = cpu_to_le64(q->sq.iova),
        .cdw10 = cpu_to_le32(((queue_size - 1) << 16) | (n & 0xFFFF)),
        .cdw11 = cpu_to_le32(0x1 | (n << 16)),
    };
    if (nvme_cmd_sync(bs, s->queues[INDEX_ADMIN], &cmd)) {
        error_setg(errp, "Failed to create SQ io queue [%d]", n);
        goto out_error;
    }
    s->queues = g_renew(NVMeQueuePair *, s->queues, n + 1);
    s->queues[n] = q;
    s->nr_queues++;
    return true;
out_error:
    nvme_free_queue_pair(q);
    return false;
}

static bool nvme_poll_cb(void *opaque)
{
    EventNotifier *e = opaque;
    BDRVNVMeState *s = container_of(e, BDRVNVMeState, irq_notifier);

    trace_nvme_poll_cb(s);
    return nvme_poll_queues(s);
}

static int nvme_init(BlockDriverState *bs, const char *device, int namespace,
                     Error **errp)
{
    BDRVNVMeState *s = bs->opaque;
    int ret;
    uint64_t cap;
    uint64_t timeout_ms;
    uint64_t deadline, now;
    Error *local_err = NULL;

    qemu_co_mutex_init(&s->dma_map_lock);
    qemu_co_queue_init(&s->dma_flush_queue);
    s->device = g_strdup(device);
    s->nsid = namespace;
    s->aio_context = bdrv_get_aio_context(bs);
    ret = event_notifier_init(&s->irq_notifier, 0);
    if (ret) {
        error_setg(errp, "Failed to init event notifier");
        return ret;
    }

    s->vfio = qemu_vfio_open_pci(device, errp);
    if (!s->vfio) {
        ret = -EINVAL;
        goto out;
    }

    s->regs = qemu_vfio_pci_map_bar(s->vfio, 0, 0, NVME_BAR_SIZE, errp);
    if (!s->regs) {
        ret = -EINVAL;
        goto out;
    }

    /* Perform initialize sequence as described in NVMe spec "7.6.1
     * Initialization". */

    cap = le64_to_cpu(s->regs->cap);
    if (!(cap & (1ULL << 37))) {
        error_setg(errp, "Device doesn't support NVMe command set");
        ret = -EINVAL;
        goto out;
    }

    s->page_size = MAX(4096, 1 << (12 + ((cap >> 48) & 0xF)));
    s->doorbell_scale = (4 << (((cap >> 32) & 0xF))) / sizeof(uint32_t);
    bs->bl.opt_mem_alignment = s->page_size;
    timeout_ms = MIN(500 * ((cap >> 24) & 0xFF), 30000);
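
    /*
     * The CAP fields decoded above: bit 37 is the NVM command set support bit
     * (CSS), bits 51:48 are MPSMIN (the minimum memory page size as a power
     * of two above 4KiB), bits 35:32 are DSTRD (the doorbell stride), and
     * bits 31:24 are TO, the worst-case enable/disable timeout in 500 ms
     * units.
     */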

    /* Reset device to get a clean state. */
    s->regs->cc = cpu_to_le32(le32_to_cpu(s->regs->cc) & 0xFE);
    /* Wait for CSTS.RDY = 0. */
    deadline = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + timeout_ms * SCALE_MS;
    while (le32_to_cpu(s->regs->csts) & 0x1) {
        if (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) > deadline) {
            error_setg(errp, "Timeout while waiting for device to reset (%"
                             PRId64 " ms)",
                       timeout_ms);
            ret = -ETIMEDOUT;
            goto out;
        }
    }

    /* Set up admin queue. */
    s->queues = g_new(NVMeQueuePair *, 1);
    s->queues[INDEX_ADMIN] = nvme_create_queue_pair(bs, 0,
                                                    NVME_QUEUE_SIZE,
                                                    errp);
    if (!s->queues[INDEX_ADMIN]) {
        ret = -EINVAL;
        goto out;
    }
    s->nr_queues = 1;
    QEMU_BUILD_BUG_ON(NVME_QUEUE_SIZE & 0xF000);
    s->regs->aqa = cpu_to_le32((NVME_QUEUE_SIZE << 16) | NVME_QUEUE_SIZE);
    s->regs->asq = cpu_to_le64(s->queues[INDEX_ADMIN]->sq.iova);
    s->regs->acq = cpu_to_le64(s->queues[INDEX_ADMIN]->cq.iova);

    /* After setting up all control registers we can enable device now. */
    s->regs->cc = cpu_to_le32((ctz32(NVME_CQ_ENTRY_BYTES) << 20) |
                              (ctz32(NVME_SQ_ENTRY_BYTES) << 16) |
                              0x1);
    /* Wait for CSTS.RDY = 1. */
    now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    deadline = now + timeout_ms * 1000000;
    while (!(le32_to_cpu(s->regs->csts) & 0x1)) {
        if (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) > deadline) {
            error_setg(errp, "Timeout while waiting for device to start (%"
                             PRId64 " ms)",
                       timeout_ms);
            ret = -ETIMEDOUT;
            goto out;
        }
    }

    ret = qemu_vfio_pci_init_irq(s->vfio, &s->irq_notifier,
                                 VFIO_PCI_MSIX_IRQ_INDEX, errp);
    if (ret) {
        goto out;
    }
    aio_set_event_notifier(bdrv_get_aio_context(bs), &s->irq_notifier,
                           false, nvme_handle_event, nvme_poll_cb);

    nvme_identify(bs, namespace, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        ret = -EIO;
        goto out;
    }

    /* Set up command queues. */
    if (!nvme_add_io_queue(bs, errp)) {
        ret = -EIO;
    }
out:
    /* Cleaning up is done in nvme_file_open() upon error. */
    return ret;
}

/* Parse a filename in the format of nvme://XXXX:XX:XX.X/X. Example:
 *
 *     nvme://0000:44:00.0/1
 *
 * where the "nvme://" is a fixed form of the protocol prefix, the middle part
 * is the PCI address, and the last part is the namespace number starting from
 * 1 according to the NVMe spec. */
static void nvme_parse_filename(const char *filename, QDict *options,
                                Error **errp)
{
    int pref = strlen("nvme://");

    if (strlen(filename) > pref && !strncmp(filename, "nvme://", pref)) {
        const char *tmp = filename + pref;
        char *device;
        const char *namespace;
        unsigned long ns;
        const char *slash = strchr(tmp, '/');
        if (!slash) {
            qdict_put_str(options, NVME_BLOCK_OPT_DEVICE, tmp);
            return;
        }
        device = g_strndup(tmp, slash - tmp);
        qdict_put_str(options, NVME_BLOCK_OPT_DEVICE, device);
        g_free(device);
        namespace = slash + 1;
        if (*namespace && qemu_strtoul(namespace, NULL, 10, &ns)) {
            error_setg(errp, "Invalid namespace '%s', positive number expected",
                       namespace);
            return;
        }
        qdict_put_str(options, NVME_BLOCK_OPT_NAMESPACE,
                      *namespace ? namespace : "1");
    }
}

static int nvme_enable_disable_write_cache(BlockDriverState *bs, bool enable,
                                           Error **errp)
{
    int ret;
    BDRVNVMeState *s = bs->opaque;
    NvmeCmd cmd = {
        .opcode = NVME_ADM_CMD_SET_FEATURES,
        .nsid = cpu_to_le32(s->nsid),
        .cdw10 = cpu_to_le32(0x06),
        .cdw11 = cpu_to_le32(enable ? 0x01 : 0x00),
    };

    ret = nvme_cmd_sync(bs, s->queues[INDEX_ADMIN], &cmd);
    if (ret) {
        error_setg(errp, "Failed to configure NVMe write cache");
    }
    return ret;
}

static void nvme_close(BlockDriverState *bs)
{
    int i;
    BDRVNVMeState *s = bs->opaque;

    for (i = 0; i < s->nr_queues; ++i) {
        nvme_free_queue_pair(s->queues[i]);
    }
    g_free(s->queues);
    aio_set_event_notifier(bdrv_get_aio_context(bs), &s->irq_notifier,
                           false, NULL, NULL);
    event_notifier_cleanup(&s->irq_notifier);
    qemu_vfio_pci_unmap_bar(s->vfio, 0, (void *)s->regs, 0, NVME_BAR_SIZE);
    qemu_vfio_close(s->vfio);

    g_free(s->device);
}

static int nvme_file_open(BlockDriverState *bs, QDict *options, int flags,
                          Error **errp)
{
    const char *device;
    QemuOpts *opts;
    int namespace;
    int ret;
    BDRVNVMeState *s = bs->opaque;

    bs->supported_write_flags = BDRV_REQ_FUA;

    opts = qemu_opts_create(&runtime_opts, NULL, 0, &error_abort);
    qemu_opts_absorb_qdict(opts, options, &error_abort);
    device = qemu_opt_get(opts, NVME_BLOCK_OPT_DEVICE);
    if (!device) {
        error_setg(errp, "'" NVME_BLOCK_OPT_DEVICE "' option is required");
        qemu_opts_del(opts);
        return -EINVAL;
    }

    namespace = qemu_opt_get_number(opts, NVME_BLOCK_OPT_NAMESPACE, 1);
    ret = nvme_init(bs, device, namespace, errp);
    qemu_opts_del(opts);
    if (ret) {
        goto fail;
    }
    if (flags & BDRV_O_NOCACHE) {
        if (!s->write_cache_supported) {
            error_setg(errp,
                       "NVMe controller doesn't support write cache configuration");
            ret = -EINVAL;
            goto fail;
        }
        ret = nvme_enable_disable_write_cache(bs, !(flags & BDRV_O_NOCACHE),
                                              errp);
        if (ret) {
            goto fail;
        }
    }
    return 0;
fail:
    nvme_close(bs);
    return ret;
}

static int64_t nvme_getlength(BlockDriverState *bs)
{
    BDRVNVMeState *s = bs->opaque;
    return s->nsze << s->blkshift;
}

static uint32_t nvme_get_blocksize(BlockDriverState *bs)
{
    BDRVNVMeState *s = bs->opaque;
    assert(s->blkshift >= BDRV_SECTOR_BITS && s->blkshift <= 12);
    return UINT32_C(1) << s->blkshift;
}

static int nvme_probe_blocksizes(BlockDriverState *bs, BlockSizes *bsz)
{
    uint32_t blocksize = nvme_get_blocksize(bs);
    bsz->phys = blocksize;
    bsz->log = blocksize;
    return 0;
}

/* Called with s->dma_map_lock */
static coroutine_fn int nvme_cmd_unmap_qiov(BlockDriverState *bs,
                                            QEMUIOVector *qiov)
{
    int r = 0;
    BDRVNVMeState *s = bs->opaque;

    s->dma_map_count -= qiov->size;
    if (!s->dma_map_count && !qemu_co_queue_empty(&s->dma_flush_queue)) {
        r = qemu_vfio_dma_reset_temporary(s->vfio);
        if (!r) {
            qemu_co_queue_restart_all(&s->dma_flush_queue);
        }
    }
    return r;
}
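
/*
 * PRP layout built by nvme_cmd_map_qiov() below: with a single page PRP1
 * points at it directly, with exactly two pages PRP2 holds the second page
 * address, and with more than two pages PRP2 points into the per-request PRP
 * list (req->prp_list_iova, offset past the first entry) that carries the
 * remaining page addresses.
 */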
/* Called with s->dma_map_lock */
static coroutine_fn int nvme_cmd_map_qiov(BlockDriverState *bs, NvmeCmd *cmd,
                                          NVMeRequest *req, QEMUIOVector *qiov)
{
    int r = -EINVAL;
    BDRVNVMeState *s = bs->opaque;
    uint64_t *pagelist = req->prp_list_page;
    int i, j, entries = 0;

    assert(qiov->size);
    assert(QEMU_IS_ALIGNED(qiov->size, s->page_size));
    assert(qiov->size / s->page_size <= s->page_size / sizeof(uint64_t));
    for (i = 0; i < qiov->niov; ++i) {
        bool retry = true;
        uint64_t iova;
try_map:
        r = qemu_vfio_dma_map(s->vfio,
                              qiov->iov[i].iov_base,
                              qiov->iov[i].iov_len,
                              true, &iova);
        if (r == -ENOMEM && retry) {
            retry = false;
            trace_nvme_dma_flush_queue_wait(s);
            if (s->dma_map_count) {
                trace_nvme_dma_map_flush(s);
                qemu_co_queue_wait(&s->dma_flush_queue, &s->dma_map_lock);
            } else {
                r = qemu_vfio_dma_reset_temporary(s->vfio);
                if (r) {
                    goto fail;
                }
            }
            goto try_map;
        }
        if (r) {
            goto fail;
        }

        for (j = 0; j < qiov->iov[i].iov_len / s->page_size; j++) {
            pagelist[entries++] = cpu_to_le64(iova + j * s->page_size);
        }
        trace_nvme_cmd_map_qiov_iov(s, i, qiov->iov[i].iov_base,
                                    qiov->iov[i].iov_len / s->page_size);
    }

    s->dma_map_count += qiov->size;

    assert(entries <= s->page_size / sizeof(uint64_t));
    switch (entries) {
    case 0:
        abort();
    case 1:
        cmd->dptr.prp1 = pagelist[0];
        cmd->dptr.prp2 = 0;
        break;
    case 2:
        cmd->dptr.prp1 = pagelist[0];
        cmd->dptr.prp2 = pagelist[1];
        break;
    default:
        cmd->dptr.prp1 = pagelist[0];
        cmd->dptr.prp2 = cpu_to_le64(req->prp_list_iova + sizeof(uint64_t));
        break;
    }
    trace_nvme_cmd_map_qiov(s, cmd, req, qiov, entries);
    for (i = 0; i < entries; ++i) {
        trace_nvme_cmd_map_qiov_pages(s, i, pagelist[i]);
    }
    return 0;
fail:
    /* No need to unmap [0 - i) iovs even if we've failed, since we don't
     * increment s->dma_map_count. This is okay for fixed mapping memory areas
     * because they are already mapped before calling this function; for
     * temporary mappings, a later nvme_cmd_(un)map_qiov will reclaim by
     * calling qemu_vfio_dma_reset_temporary when necessary. */
    return r;
}

typedef struct {
    Coroutine *co;
    int ret;
    AioContext *ctx;
} NVMeCoData;

static void nvme_rw_cb_bh(void *opaque)
{
    NVMeCoData *data = opaque;
    qemu_coroutine_enter(data->co);
}

static void nvme_rw_cb(void *opaque, int ret)
{
    NVMeCoData *data = opaque;

    data->ret = ret;
    if (!data->co) {
        /* The rw coroutine hasn't yielded, don't try to enter. */
        return;
    }
    replay_bh_schedule_oneshot_event(data->ctx, nvme_rw_cb_bh, data);
}

static coroutine_fn int nvme_co_prw_aligned(BlockDriverState *bs,
                                            uint64_t offset, uint64_t bytes,
                                            QEMUIOVector *qiov, bool is_write,
                                            int flags)
{
    int r;
    BDRVNVMeState *s = bs->opaque;
    NVMeQueuePair *ioq = s->queues[INDEX_IO(0)];
    NVMeRequest *req;

    uint32_t cdw12 = (((bytes >> s->blkshift) - 1) & 0xFFFF) |
                     (flags & BDRV_REQ_FUA ? 1 << 30 : 0);
    NvmeCmd cmd = {
        .opcode = is_write ? NVME_CMD_WRITE : NVME_CMD_READ,
        .nsid = cpu_to_le32(s->nsid),
        .cdw10 = cpu_to_le32((offset >> s->blkshift) & 0xFFFFFFFF),
        .cdw11 = cpu_to_le32(((offset >> s->blkshift) >> 32) & 0xFFFFFFFF),
        .cdw12 = cpu_to_le32(cdw12),
    };
    NVMeCoData data = {
        .ctx = bdrv_get_aio_context(bs),
        .ret = -EINPROGRESS,
    };

    trace_nvme_prw_aligned(s, is_write, offset, bytes, flags, qiov->niov);
    assert(s->nr_queues > 1);
    req = nvme_get_free_req(ioq);
    assert(req);

    qemu_co_mutex_lock(&s->dma_map_lock);
    r = nvme_cmd_map_qiov(bs, &cmd, req, qiov);
    qemu_co_mutex_unlock(&s->dma_map_lock);
    if (r) {
        nvme_put_free_req_and_wake(ioq, req);
        return r;
    }
    nvme_submit_command(ioq, req, &cmd, nvme_rw_cb, &data);

    data.co = qemu_coroutine_self();
    while (data.ret == -EINPROGRESS) {
        qemu_coroutine_yield();
    }

    qemu_co_mutex_lock(&s->dma_map_lock);
    r = nvme_cmd_unmap_qiov(bs, qiov);
    qemu_co_mutex_unlock(&s->dma_map_lock);
    if (r) {
        return r;
    }

    trace_nvme_rw_done(s, is_write, offset, bytes, data.ret);
    return data.ret;
}

static inline bool nvme_qiov_aligned(BlockDriverState *bs,
                                     const QEMUIOVector *qiov)
{
    int i;
    BDRVNVMeState *s = bs->opaque;

    for (i = 0; i < qiov->niov; ++i) {
        if (!QEMU_PTR_IS_ALIGNED(qiov->iov[i].iov_base, s->page_size) ||
            !QEMU_IS_ALIGNED(qiov->iov[i].iov_len, s->page_size)) {
            trace_nvme_qiov_unaligned(qiov, i, qiov->iov[i].iov_base,
                                      qiov->iov[i].iov_len, s->page_size);
            return false;
        }
    }
    return true;
}

static int nvme_co_prw(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
                       QEMUIOVector *qiov, bool is_write, int flags)
{
    BDRVNVMeState *s = bs->opaque;
    int r;
    uint8_t *buf = NULL;
    QEMUIOVector local_qiov;

    assert(QEMU_IS_ALIGNED(offset, s->page_size));
    assert(QEMU_IS_ALIGNED(bytes, s->page_size));
    assert(bytes <= s->max_transfer);
    if (nvme_qiov_aligned(bs, qiov)) {
        return nvme_co_prw_aligned(bs, offset, bytes, qiov, is_write, flags);
    }
    trace_nvme_prw_buffered(s, offset, bytes, qiov->niov, is_write);
    buf = qemu_try_memalign(s->page_size, bytes);
    if (!buf) {
        return -ENOMEM;
    }
    qemu_iovec_init(&local_qiov, 1);
    if (is_write) {
        qemu_iovec_to_buf(qiov, 0, buf, bytes);
    }
    qemu_iovec_add(&local_qiov, buf, bytes);
    r = nvme_co_prw_aligned(bs, offset, bytes, &local_qiov, is_write, flags);
    qemu_iovec_destroy(&local_qiov);
    if (!r && !is_write) {
        qemu_iovec_from_buf(qiov, 0, buf, bytes);
    }
    qemu_vfree(buf);
    return r;
}

static coroutine_fn int nvme_co_preadv(BlockDriverState *bs,
                                       uint64_t offset, uint64_t bytes,
                                       QEMUIOVector *qiov, int flags)
{
    return nvme_co_prw(bs, offset, bytes, qiov, false, flags);
}

static coroutine_fn int nvme_co_pwritev(BlockDriverState *bs,
                                        uint64_t offset, uint64_t bytes,
                                        QEMUIOVector *qiov, int flags)
{
    return nvme_co_prw(bs, offset, bytes, qiov, true, flags);
}

static coroutine_fn int nvme_co_flush(BlockDriverState *bs)
{
    BDRVNVMeState *s = bs->opaque;
    NVMeQueuePair *ioq = s->queues[INDEX_IO(0)];
    NVMeRequest *req;
    NvmeCmd cmd = {
        .opcode = NVME_CMD_FLUSH,
        .nsid = cpu_to_le32(s->nsid),
    };
    NVMeCoData data = {
        .ctx = bdrv_get_aio_context(bs),
        .ret = -EINPROGRESS,
    };

    assert(s->nr_queues > 1);
    req = nvme_get_free_req(ioq);
    assert(req);
    nvme_submit_command(ioq, req, &cmd, nvme_rw_cb, &data);

    data.co = qemu_coroutine_self();
    if (data.ret == -EINPROGRESS) {
        qemu_coroutine_yield();
    }

    return data.ret;
}

static coroutine_fn int nvme_co_pwrite_zeroes(BlockDriverState *bs,
                                              int64_t offset,
                                              int bytes,
                                              BdrvRequestFlags flags)
{
    BDRVNVMeState *s = bs->opaque;
    NVMeQueuePair *ioq = s->queues[INDEX_IO(0)];
    NVMeRequest *req;

    uint32_t cdw12 = ((bytes >> s->blkshift) - 1) & 0xFFFF;

    if (!s->supports_write_zeroes) {
        return -ENOTSUP;
    }

    NvmeCmd cmd = {
        .opcode = NVME_CMD_WRITE_ZEROES,
        .nsid = cpu_to_le32(s->nsid),
        .cdw10 = cpu_to_le32((offset >> s->blkshift) & 0xFFFFFFFF),
        .cdw11 = cpu_to_le32(((offset >> s->blkshift) >> 32) & 0xFFFFFFFF),
    };

    NVMeCoData data = {
        .ctx = bdrv_get_aio_context(bs),
        .ret = -EINPROGRESS,
    };

    if (flags & BDRV_REQ_MAY_UNMAP) {
        cdw12 |= (1 << 25);
    }

    if (flags & BDRV_REQ_FUA) {
        cdw12 |= (1 << 30);
    }

    cmd.cdw12 = cpu_to_le32(cdw12);

    trace_nvme_write_zeroes(s, offset, bytes, flags);
    assert(s->nr_queues > 1);
    req = nvme_get_free_req(ioq);
    assert(req);

    nvme_submit_command(ioq, req, &cmd, nvme_rw_cb, &data);

    data.co = qemu_coroutine_self();
    while (data.ret == -EINPROGRESS) {
        qemu_coroutine_yield();
    }

    trace_nvme_rw_done(s, true, offset, bytes, data.ret);
    return data.ret;
}
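
/*
 * Discard is issued as a Dataset Management command with a single 16-byte
 * range descriptor (context attributes, number of logical blocks, starting
 * LBA) written into a page-aligned buffer that is DMA-mapped like ordinary
 * I/O data.
 */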
static int coroutine_fn nvme_co_pdiscard(BlockDriverState *bs,
                                         int64_t offset,
                                         int bytes)
{
    BDRVNVMeState *s = bs->opaque;
    NVMeQueuePair *ioq = s->queues[INDEX_IO(0)];
    NVMeRequest *req;
    NvmeDsmRange *buf;
    QEMUIOVector local_qiov;
    int ret;

    NvmeCmd cmd = {
        .opcode = NVME_CMD_DSM,
        .nsid = cpu_to_le32(s->nsid),
        .cdw10 = cpu_to_le32(0), /*number of ranges - 0 based*/
        .cdw11 = cpu_to_le32(1 << 2), /*deallocate bit*/
    };

    NVMeCoData data = {
        .ctx = bdrv_get_aio_context(bs),
        .ret = -EINPROGRESS,
    };

    if (!s->supports_discard) {
        return -ENOTSUP;
    }

    assert(s->nr_queues > 1);

    buf = qemu_try_memalign(s->page_size, s->page_size);
    if (!buf) {
        return -ENOMEM;
    }
    memset(buf, 0, s->page_size);
    buf->nlb = cpu_to_le32(bytes >> s->blkshift);
    buf->slba = cpu_to_le64(offset >> s->blkshift);
    buf->cattr = 0;

    qemu_iovec_init(&local_qiov, 1);
    qemu_iovec_add(&local_qiov, buf, 4096);

    req = nvme_get_free_req(ioq);
    assert(req);

    qemu_co_mutex_lock(&s->dma_map_lock);
    ret = nvme_cmd_map_qiov(bs, &cmd, req, &local_qiov);
    qemu_co_mutex_unlock(&s->dma_map_lock);

    if (ret) {
        nvme_put_free_req_and_wake(ioq, req);
        goto out;
    }

    trace_nvme_dsm(s, offset, bytes);

    nvme_submit_command(ioq, req, &cmd, nvme_rw_cb, &data);

    data.co = qemu_coroutine_self();
    while (data.ret == -EINPROGRESS) {
        qemu_coroutine_yield();
    }

    qemu_co_mutex_lock(&s->dma_map_lock);
    ret = nvme_cmd_unmap_qiov(bs, &local_qiov);
    qemu_co_mutex_unlock(&s->dma_map_lock);

    if (ret) {
        goto out;
    }

    ret = data.ret;
    trace_nvme_dsm_done(s, offset, bytes, ret);
out:
    qemu_iovec_destroy(&local_qiov);
    qemu_vfree(buf);
    return ret;
}

static int nvme_reopen_prepare(BDRVReopenState *reopen_state,
                               BlockReopenQueue *queue, Error **errp)
{
    return 0;
}

static void nvme_refresh_filename(BlockDriverState *bs)
{
    BDRVNVMeState *s = bs->opaque;

    snprintf(bs->exact_filename, sizeof(bs->exact_filename), "nvme://%s/%i",
             s->device, s->nsid);
}

static void nvme_refresh_limits(BlockDriverState *bs, Error **errp)
{
    BDRVNVMeState *s = bs->opaque;

    bs->bl.opt_mem_alignment = s->page_size;
    bs->bl.request_alignment = s->page_size;
    bs->bl.max_transfer = s->max_transfer;
}

static void nvme_detach_aio_context(BlockDriverState *bs)
{
    BDRVNVMeState *s = bs->opaque;

    for (int i = 0; i < s->nr_queues; i++) {
        NVMeQueuePair *q = s->queues[i];

        qemu_bh_delete(q->completion_bh);
        q->completion_bh = NULL;
    }

    aio_set_event_notifier(bdrv_get_aio_context(bs), &s->irq_notifier,
                           false, NULL, NULL);
}

static void nvme_attach_aio_context(BlockDriverState *bs,
                                    AioContext *new_context)
{
    BDRVNVMeState *s = bs->opaque;

    s->aio_context = new_context;
    aio_set_event_notifier(new_context, &s->irq_notifier,
                           false, nvme_handle_event, nvme_poll_cb);

    for (int i = 0; i < s->nr_queues; i++) {
        NVMeQueuePair *q = s->queues[i];

        q->completion_bh =
            aio_bh_new(new_context, nvme_process_completion_bh, q);
    }
}

static void nvme_aio_plug(BlockDriverState *bs)
{
    BDRVNVMeState *s = bs->opaque;
    assert(!s->plugged);
    s->plugged = true;
}

static void nvme_aio_unplug(BlockDriverState *bs)
{
    int i;
    BDRVNVMeState *s = bs->opaque;
    assert(s->plugged);
    s->plugged = false;
    for (i = INDEX_IO(0); i < s->nr_queues; i++) {
        NVMeQueuePair *q = s->queues[i];
        qemu_mutex_lock(&q->lock);
        nvme_kick(q);
        nvme_process_completion(q);
        qemu_mutex_unlock(&q->lock);
    }
}

static void nvme_register_buf(BlockDriverState *bs, void *host, size_t size)
{
    int ret;
    BDRVNVMeState *s = bs->opaque;

    ret = qemu_vfio_dma_map(s->vfio, host, size, false, NULL);
    if (ret) {
        /* FIXME: we may run out of IOVA addresses after repeated
         * bdrv_register_buf/bdrv_unregister_buf, because nvme_vfio_dma_unmap
         * doesn't reclaim addresses for fixed mappings. */
        error_report("nvme_register_buf failed: %s", strerror(-ret));
    }
}

static void nvme_unregister_buf(BlockDriverState *bs, void *host)
{
    BDRVNVMeState *s = bs->opaque;

    qemu_vfio_dma_unmap(s->vfio, host);
}

static const char *const nvme_strong_runtime_opts[] = {
    NVME_BLOCK_OPT_DEVICE,
    NVME_BLOCK_OPT_NAMESPACE,

    NULL
};

static BlockDriver bdrv_nvme = {
    .format_name              = "nvme",
    .protocol_name            = "nvme",
    .instance_size            = sizeof(BDRVNVMeState),

    .bdrv_co_create_opts      = bdrv_co_create_opts_simple,
    .create_opts              = &bdrv_create_opts_simple,

    .bdrv_parse_filename      = nvme_parse_filename,
    .bdrv_file_open           = nvme_file_open,
    .bdrv_close               = nvme_close,
    .bdrv_getlength           = nvme_getlength,
    .bdrv_probe_blocksizes    = nvme_probe_blocksizes,

    .bdrv_co_preadv           = nvme_co_preadv,
    .bdrv_co_pwritev          = nvme_co_pwritev,

    .bdrv_co_pwrite_zeroes    = nvme_co_pwrite_zeroes,
    .bdrv_co_pdiscard         = nvme_co_pdiscard,

    .bdrv_co_flush_to_disk    = nvme_co_flush,
    .bdrv_reopen_prepare      = nvme_reopen_prepare,

    .bdrv_refresh_filename    = nvme_refresh_filename,
    .bdrv_refresh_limits      = nvme_refresh_limits,
    .strong_runtime_opts      = nvme_strong_runtime_opts,

    .bdrv_detach_aio_context  = nvme_detach_aio_context,
    .bdrv_attach_aio_context  = nvme_attach_aio_context,

    .bdrv_io_plug             = nvme_aio_plug,
    .bdrv_io_unplug           = nvme_aio_unplug,

    .bdrv_register_buf        = nvme_register_buf,
    .bdrv_unregister_buf      = nvme_unregister_buf,
};

static void bdrv_nvme_init(void)
{
    bdrv_register(&bdrv_nvme);
}

block_init(bdrv_nvme_init);