/*
 * NVMe block driver based on vfio
 *
 * Copyright 2016 - 2018 Red Hat, Inc.
 *
 * Authors:
 *   Fam Zheng <famz@redhat.com>
 *   Paolo Bonzini <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include <linux/vfio.h>
#include "qapi/error.h"
#include "qapi/qmp/qdict.h"
#include "qapi/qmp/qstring.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/module.h"
#include "qemu/cutils.h"
#include "qemu/option.h"
#include "qemu/vfio-helpers.h"
#include "block/block_int.h"
#include "sysemu/replay.h"
#include "trace.h"

#include "block/nvme.h"
#define NVME_SQ_ENTRY_BYTES 64
#define NVME_CQ_ENTRY_BYTES 16
#define NVME_QUEUE_SIZE 128
#define NVME_DOORBELL_SIZE 4096

/*
 * We have to leave one slot empty as that is the full queue case where
 * head == tail + 1.
 */
#define NVME_NUM_REQS (NVME_QUEUE_SIZE - 1)
typedef struct BDRVNVMeState BDRVNVMeState;

/* Same index is used for queues and IRQs */
#define INDEX_ADMIN     0
#define INDEX_IO(n)     (1 + n)

/* This driver shares a single MSIX IRQ for the admin and I/O queues */
enum {
    MSIX_SHARED_IRQ_IDX = 0,
    MSIX_IRQ_COUNT = 1
};
typedef struct {
    int32_t  head, tail;
    uint8_t  *queue;
    uint64_t iova;
    /* Hardware MMIO register */
    volatile uint32_t *doorbell;
} NVMeQueue;

typedef struct {
    BlockCompletionFunc *cb;
    void *opaque;
    int cid;
    void *prp_list_page;
    uint64_t prp_list_iova;
    int free_req_next; /* q->reqs[] index of next free req */
} NVMeRequest;

typedef struct {
    QemuMutex   lock;

    /* Read from I/O code path, initialized under BQL */
    BDRVNVMeState   *s;
    int             index;

    /* Fields protected by BQL */
    uint8_t     *prp_list_pages;

    /* Fields protected by @lock */
    CoQueue     free_req_queue;
    NVMeQueue   sq, cq;
    int         cq_phase;
    int         free_req_head;
    NVMeRequest reqs[NVME_NUM_REQS];
    int         need_kick;
    int         inflight;

    /* Thread-safe, no lock necessary */
    QEMUBH      *completion_bh;
} NVMeQueuePair;
struct BDRVNVMeState {
    AioContext *aio_context;
    QEMUVFIOState *vfio;
    void *bar0_wo_map;
    /* Memory mapped registers */
    volatile struct {
        uint32_t sq_tail;
        uint32_t cq_head;
    } *doorbells;
    /* The submission/completion queue pairs.
     * [0]: admin queue.
     * [1..]: io queues.
     */
    NVMeQueuePair **queues;
    unsigned queue_count;
    size_t page_size;
    /* How many uint32_t elements does each doorbell entry take. */
    size_t doorbell_scale;
    bool write_cache_supported;
    EventNotifier irq_notifier[MSIX_IRQ_COUNT];

    uint64_t nsze; /* Namespace size reported by identify command */
    int nsid;      /* The namespace id to read/write data. */
    int blkshift;

    uint64_t max_transfer;
    bool plugged;

    bool supports_write_zeroes;
    bool supports_discard;

    CoMutex dma_map_lock;
    CoQueue dma_flush_queue;

    /* Total size of mapped qiov, accessed under dma_map_lock */
    int dma_map_count;

    /* PCI address (required for nvme_refresh_filename()) */
    char *device;

    struct {
        uint64_t completion_errors;
        uint64_t aligned_accesses;
        uint64_t unaligned_accesses;
    } stats;
};
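/*
 * Doorbell layout sketch: the doorbell registers follow the controller's
 * MMIO register block, one {sq_tail, cq_head} pair per queue pair.
 * doorbell_scale converts the stride advertised in CAP.DSTRD into a count
 * of uint32_t slots between consecutive pairs; e.g. with the minimum
 * 4-byte stride doorbell_scale is 1 and queue n simply uses
 * s->doorbells[n].
 */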
#define NVME_BLOCK_OPT_DEVICE "device"
#define NVME_BLOCK_OPT_NAMESPACE "namespace"
static void nvme_process_completion_bh(void *opaque);
static QemuOptsList runtime_opts = {
    .name = "nvme",
    .head = QTAILQ_HEAD_INITIALIZER(runtime_opts.head),
    .desc = {
        {
            .name = NVME_BLOCK_OPT_DEVICE,
            .type = QEMU_OPT_STRING,
            .help = "NVMe PCI device address",
        },
        {
            .name = NVME_BLOCK_OPT_NAMESPACE,
            .type = QEMU_OPT_NUMBER,
            .help = "NVMe namespace",
        },
        { /* end of list */ }
    },
};
/* Returns true on success, false on failure. */
static bool nvme_init_queue(BDRVNVMeState *s, NVMeQueue *q,
                            unsigned nentries, size_t entry_bytes, Error **errp)
{
    size_t bytes;
    int r;

    bytes = ROUND_UP(nentries * entry_bytes, qemu_real_host_page_size);
    q->head = q->tail = 0;
    q->queue = qemu_try_memalign(qemu_real_host_page_size, bytes);
    if (!q->queue) {
        error_setg(errp, "Cannot allocate queue");
        return false;
    }
    memset(q->queue, 0, bytes);
    r = qemu_vfio_dma_map(s->vfio, q->queue, bytes, false, &q->iova, errp);
    if (r) {
        error_prepend(errp, "Cannot map queue: ");
    }
    return r == 0;
}
static void nvme_free_queue_pair(NVMeQueuePair *q)
{
    trace_nvme_free_queue_pair(q->index, q, &q->cq, &q->sq);
    if (q->completion_bh) {
        qemu_bh_delete(q->completion_bh);
    }
    qemu_vfree(q->prp_list_pages);
    qemu_vfree(q->sq.queue);
    qemu_vfree(q->cq.queue);
    qemu_mutex_destroy(&q->lock);
    g_free(q);
}
static void nvme_free_req_queue_cb(void *opaque)
{
    NVMeQueuePair *q = opaque;

    qemu_mutex_lock(&q->lock);
    while (qemu_co_enter_next(&q->free_req_queue, &q->lock)) {
        /* Retry all pending requests */
    }
    qemu_mutex_unlock(&q->lock);
}
static NVMeQueuePair *nvme_create_queue_pair(BDRVNVMeState *s,
                                             AioContext *aio_context,
                                             unsigned idx, size_t size,
                                             Error **errp)
{
    int i, r;
    NVMeQueuePair *q;
    uint64_t prp_list_iova;
    size_t bytes;

    q = g_try_new0(NVMeQueuePair, 1);
    if (!q) {
        error_setg(errp, "Cannot allocate queue pair");
        return NULL;
    }
    trace_nvme_create_queue_pair(idx, q, size, aio_context,
                                 event_notifier_get_fd(s->irq_notifier));
    bytes = QEMU_ALIGN_UP(s->page_size * NVME_NUM_REQS,
                          qemu_real_host_page_size);
    q->prp_list_pages = qemu_try_memalign(qemu_real_host_page_size, bytes);
    if (!q->prp_list_pages) {
        error_setg(errp, "Cannot allocate PRP page list");
        goto fail;
    }
    memset(q->prp_list_pages, 0, bytes);
    qemu_mutex_init(&q->lock);
    q->s = s;
    q->index = idx;
    qemu_co_queue_init(&q->free_req_queue);
    q->completion_bh = aio_bh_new(aio_context, nvme_process_completion_bh, q);
    r = qemu_vfio_dma_map(s->vfio, q->prp_list_pages, bytes,
                          false, &prp_list_iova, errp);
    if (r) {
        error_prepend(errp, "Cannot map buffer for DMA: ");
        goto fail;
    }
    q->free_req_head = -1;
    for (i = 0; i < NVME_NUM_REQS; i++) {
        NVMeRequest *req = &q->reqs[i];
        req->cid = i + 1;
        req->free_req_next = q->free_req_head;
        q->free_req_head = i;
        req->prp_list_page = q->prp_list_pages + i * s->page_size;
        req->prp_list_iova = prp_list_iova + i * s->page_size;
    }

    if (!nvme_init_queue(s, &q->sq, size, NVME_SQ_ENTRY_BYTES, errp)) {
        goto fail;
    }
    q->sq.doorbell = &s->doorbells[idx * s->doorbell_scale].sq_tail;

    if (!nvme_init_queue(s, &q->cq, size, NVME_CQ_ENTRY_BYTES, errp)) {
        goto fail;
    }
    q->cq.doorbell = &s->doorbells[idx * s->doorbell_scale].cq_head;

    return q;
fail:
    nvme_free_queue_pair(q);
    return NULL;
}
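/*
 * The prp_list_pages buffer is carved into one page per request: request i
 * owns prp_list_pages + i * page_size (and the matching IOVA), so a command
 * that needs a PRP list never has to allocate memory on the I/O path.  The
 * freelist is threaded through free_req_next, with free_req_head pointing at
 * the most recently freed slot.
 */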
static void nvme_kick(NVMeQueuePair *q)
{
    BDRVNVMeState *s = q->s;

    if (s->plugged || !q->need_kick) {
        return;
    }
    trace_nvme_kick(s, q->index);
    assert(!(q->sq.tail & 0xFF00));
    /* Fence the write to submission queue entry before notifying the device. */
    smp_wmb();
    *q->sq.doorbell = cpu_to_le32(q->sq.tail);
    q->inflight += q->need_kick;
    q->need_kick = 0;
}
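/*
 * need_kick counts submissions queued since the last doorbell write, so
 * several commands submitted while s->plugged is set (bdrv_io_plug) can be
 * announced to the device with a single tail-doorbell update once the queue
 * is unplugged.
 */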
/* Find a free request element if any, otherwise:
 * a) if in coroutine context, try to wait for one to become available;
 * b) if not in coroutine, return NULL;
 */
static NVMeRequest *nvme_get_free_req(NVMeQueuePair *q)
{
    NVMeRequest *req;

    qemu_mutex_lock(&q->lock);

    while (q->free_req_head == -1) {
        if (qemu_in_coroutine()) {
            trace_nvme_free_req_queue_wait(q->s, q->index);
            qemu_co_queue_wait(&q->free_req_queue, &q->lock);
        } else {
            qemu_mutex_unlock(&q->lock);
            return NULL;
        }
    }

    req = &q->reqs[q->free_req_head];
    q->free_req_head = req->free_req_next;
    req->free_req_next = -1;

    qemu_mutex_unlock(&q->lock);
    return req;
}
/* With q->lock */
static void nvme_put_free_req_locked(NVMeQueuePair *q, NVMeRequest *req)
{
    req->free_req_next = q->free_req_head;
    q->free_req_head = req - q->reqs;
}
/* With q->lock */
static void nvme_wake_free_req_locked(NVMeQueuePair *q)
{
    if (!qemu_co_queue_empty(&q->free_req_queue)) {
        replay_bh_schedule_oneshot_event(q->s->aio_context,
                                         nvme_free_req_queue_cb, q);
    }
}
/* Insert a request in the freelist and wake waiters */
static void nvme_put_free_req_and_wake(NVMeQueuePair *q, NVMeRequest *req)
{
    qemu_mutex_lock(&q->lock);
    nvme_put_free_req_locked(q, req);
    nvme_wake_free_req_locked(q);
    qemu_mutex_unlock(&q->lock);
}
static inline int nvme_translate_error(const NvmeCqe *c)
{
    uint16_t status = (le16_to_cpu(c->status) >> 1) & 0xFF;

    if (status) {
        trace_nvme_error(le32_to_cpu(c->result),
                         le16_to_cpu(c->sq_head),
                         le16_to_cpu(c->sq_id),
                         le16_to_cpu(c->cid),
                         le16_to_cpu(status));
    }
    switch (status) {
    case 0:
        return 0;
    case 1:
        return -ENOSYS;
    case 2:
        return -EINVAL;
    default:
        return -EIO;
    }
}
/* With q->lock */
static bool nvme_process_completion(NVMeQueuePair *q)
{
    BDRVNVMeState *s = q->s;
    bool progress = false;
    NVMeRequest *preq;
    NVMeRequest req;
    NvmeCqe *c;

    trace_nvme_process_completion(s, q->index, q->inflight);
    if (s->plugged) {
        trace_nvme_process_completion_queue_plugged(s, q->index);
        return false;
    }

    /*
     * Support re-entrancy when a request cb() function invokes aio_poll().
     * Pending completions must be visible to aio_poll() so that a cb()
     * function can wait for the completion of another request.
     *
     * The aio_poll() loop will execute our BH and we'll resume completion
     * processing there.
     */
    qemu_bh_schedule(q->completion_bh);

    assert(q->inflight >= 0);
    while (q->inflight) {
        int ret;
        int16_t cid;

        c = (NvmeCqe *)&q->cq.queue[q->cq.head * NVME_CQ_ENTRY_BYTES];
        if ((le16_to_cpu(c->status) & 0x1) == q->cq_phase) {
            break;
        }
        ret = nvme_translate_error(c);
        if (ret) {
            s->stats.completion_errors++;
        }
        q->cq.head = (q->cq.head + 1) % NVME_QUEUE_SIZE;
        if (!q->cq.head) {
            q->cq_phase = !q->cq_phase;
        }
        cid = le16_to_cpu(c->cid);
        if (cid == 0 || cid > NVME_QUEUE_SIZE) {
            warn_report("NVMe: Unexpected CID in completion queue: %"PRIu32", "
                        "queue size: %u", cid, NVME_QUEUE_SIZE);
            continue;
        }
        trace_nvme_complete_command(s, q->index, cid);
        preq = &q->reqs[cid - 1];
        req = *preq;
        assert(req.cid == cid);
        assert(req.cb);
        nvme_put_free_req_locked(q, preq);
        preq->cb = preq->opaque = NULL;
        q->inflight--;
        qemu_mutex_unlock(&q->lock);
        req.cb(req.opaque, ret);
        qemu_mutex_lock(&q->lock);
        progress = true;
    }
    if (progress) {
        /* Notify the device so it can post more completions. */
        smp_mb_release();
        *q->cq.doorbell = cpu_to_le32(q->cq.head);
        nvme_wake_free_req_locked(q);
    }

    qemu_bh_cancel(q->completion_bh);

    return progress;
}
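/*
 * Completion entries carry a phase tag in bit 0 of the status field.  An
 * entry is new when its phase differs from q->cq_phase; q->cq_phase is
 * flipped every time cq.head wraps, which is how entries left over from the
 * previous pass around the ring are told apart from fresh ones.
 */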
static void nvme_process_completion_bh(void *opaque)
{
    NVMeQueuePair *q = opaque;

    /*
     * We're being invoked because a nvme_process_completion() cb() function
     * called aio_poll(). The callback may be waiting for further completions
     * so notify the device that it has space to fill in more completions now.
     */
    smp_mb_release();
    *q->cq.doorbell = cpu_to_le32(q->cq.head);
    nvme_wake_free_req_locked(q);

    nvme_process_completion(q);
}
static void nvme_trace_command(const NvmeCmd *cmd)
{
    int i;

    if (!trace_event_get_state_backends(TRACE_NVME_SUBMIT_COMMAND_RAW)) {
        return;
    }
    for (i = 0; i < 8; ++i) {
        uint8_t *cmdp = (uint8_t *)cmd + i * 8;
        trace_nvme_submit_command_raw(cmdp[0], cmdp[1], cmdp[2], cmdp[3],
                                      cmdp[4], cmdp[5], cmdp[6], cmdp[7]);
    }
}
static void nvme_submit_command(NVMeQueuePair *q, NVMeRequest *req,
                                NvmeCmd *cmd, BlockCompletionFunc cb,
                                void *opaque)
{
    assert(!req->cb);
    req->cb = cb;
    req->opaque = opaque;
    cmd->cid = cpu_to_le16(req->cid);

    trace_nvme_submit_command(q->s, q->index, req->cid);
    nvme_trace_command(cmd);
    qemu_mutex_lock(&q->lock);
    memcpy((uint8_t *)q->sq.queue +
           q->sq.tail * NVME_SQ_ENTRY_BYTES, cmd, sizeof(*cmd));
    q->sq.tail = (q->sq.tail + 1) % NVME_QUEUE_SIZE;
    q->need_kick++;
    nvme_kick(q);
    nvme_process_completion(q);
    qemu_mutex_unlock(&q->lock);
}
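/*
 * Command identifiers are 1-based indices into q->reqs[] (assigned in
 * nvme_create_queue_pair), which lets nvme_process_completion() map a
 * completion back to its request with &q->reqs[cid - 1] and no extra lookup
 * table.
 */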
static void nvme_admin_cmd_sync_cb(void *opaque, int ret)
{
    int *pret = opaque;
    *pret = ret;
    aio_wait_kick();
}
static int nvme_admin_cmd_sync(BlockDriverState *bs, NvmeCmd *cmd)
{
    BDRVNVMeState *s = bs->opaque;
    NVMeQueuePair *q = s->queues[INDEX_ADMIN];
    AioContext *aio_context = bdrv_get_aio_context(bs);
    NVMeRequest *req;
    int ret = -EINPROGRESS;

    req = nvme_get_free_req(q);
    if (!req) {
        return -EBUSY;
    }
    nvme_submit_command(q, req, cmd, nvme_admin_cmd_sync_cb, &ret);

    AIO_WAIT_WHILE(aio_context, ret == -EINPROGRESS);
    return ret;
}
/* Returns true on success, false on failure. */
static bool nvme_identify(BlockDriverState *bs, int namespace, Error **errp)
{
    BDRVNVMeState *s = bs->opaque;
    bool ret = false;
    QEMU_AUTO_VFREE union {
        NvmeIdCtrl ctrl;
        NvmeIdNs ns;
    } *id = NULL;
    NvmeLBAF *lbaf;
    uint16_t oncs;
    int r;
    uint64_t iova;
    NvmeCmd cmd = {
        .opcode = NVME_ADM_CMD_IDENTIFY,
        .cdw10 = cpu_to_le32(0x1),
    };
    size_t id_size = QEMU_ALIGN_UP(sizeof(*id), qemu_real_host_page_size);

    id = qemu_try_memalign(qemu_real_host_page_size, id_size);
    if (!id) {
        error_setg(errp, "Cannot allocate buffer for identify response");
        goto out;
    }
    r = qemu_vfio_dma_map(s->vfio, id, id_size, true, &iova, errp);
    if (r) {
        error_prepend(errp, "Cannot map buffer for DMA: ");
        goto out;
    }

    memset(id, 0, id_size);
    cmd.dptr.prp1 = cpu_to_le64(iova);
    if (nvme_admin_cmd_sync(bs, &cmd)) {
        error_setg(errp, "Failed to identify controller");
        goto out;
    }

    if (le32_to_cpu(id->ctrl.nn) < namespace) {
        error_setg(errp, "Invalid namespace");
        goto out;
    }
    s->write_cache_supported = le32_to_cpu(id->ctrl.vwc) & 0x1;
    s->max_transfer = (id->ctrl.mdts ? 1 << id->ctrl.mdts : 0) * s->page_size;
    /* For now the page list buffer per command is one page, to hold at most
     * s->page_size / sizeof(uint64_t) entries. */
    s->max_transfer = MIN_NON_ZERO(s->max_transfer,
                          s->page_size / sizeof(uint64_t) * s->page_size);

    oncs = le16_to_cpu(id->ctrl.oncs);
    s->supports_write_zeroes = !!(oncs & NVME_ONCS_WRITE_ZEROES);
    s->supports_discard = !!(oncs & NVME_ONCS_DSM);

    memset(id, 0, id_size);
    cmd.cdw10 = 0;
    cmd.nsid = cpu_to_le32(namespace);
    if (nvme_admin_cmd_sync(bs, &cmd)) {
        error_setg(errp, "Failed to identify namespace");
        goto out;
    }

    s->nsze = le64_to_cpu(id->ns.nsze);
    lbaf = &id->ns.lbaf[NVME_ID_NS_FLBAS_INDEX(id->ns.flbas)];

    if (NVME_ID_NS_DLFEAT_WRITE_ZEROES(id->ns.dlfeat) &&
            NVME_ID_NS_DLFEAT_READ_BEHAVIOR(id->ns.dlfeat) ==
                    NVME_ID_NS_DLFEAT_READ_BEHAVIOR_ZEROES) {
        bs->supported_write_flags |= BDRV_REQ_MAY_UNMAP;
    }

    if (lbaf->ms) {
        error_setg(errp, "Namespaces with metadata are not yet supported");
        goto out;
    }

    if (lbaf->ds < BDRV_SECTOR_BITS || lbaf->ds > 12 ||
        (1 << lbaf->ds) > s->page_size)
    {
        error_setg(errp, "Namespace has unsupported block size (2^%d)",
                   lbaf->ds);
        goto out;
    }

    ret = true;
    s->blkshift = lbaf->ds;
out:
    qemu_vfio_dma_unmap(s->vfio, id);

    return ret;
}
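/*
 * Worked example for the max_transfer clamp above: with a 4 KiB device page,
 * one PRP list page holds 4096 / 8 = 512 entries, so requests are capped at
 * 512 * 4 KiB = 2 MiB even when the controller's MDTS would allow more.
 */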
static bool nvme_poll_queue(NVMeQueuePair *q)
{
    bool progress = false;

    const size_t cqe_offset = q->cq.head * NVME_CQ_ENTRY_BYTES;
    NvmeCqe *cqe = (NvmeCqe *)&q->cq.queue[cqe_offset];

    trace_nvme_poll_queue(q->s, q->index);
    /*
     * Do an early check for completions. q->lock isn't needed because
     * nvme_process_completion() only runs in the event loop thread and
     * cannot race with itself.
     */
    if ((le16_to_cpu(cqe->status) & 0x1) == q->cq_phase) {
        return false;
    }

    qemu_mutex_lock(&q->lock);
    while (nvme_process_completion(q)) {
        /* Keep processing until no progress */
        progress = true;
    }
    qemu_mutex_unlock(&q->lock);

    return progress;
}
static bool nvme_poll_queues(BDRVNVMeState *s)
{
    bool progress = false;
    int i;

    for (i = 0; i < s->queue_count; i++) {
        if (nvme_poll_queue(s->queues[i])) {
            progress = true;
        }
    }
    return progress;
}
static void nvme_handle_event(EventNotifier *n)
{
    BDRVNVMeState *s = container_of(n, BDRVNVMeState,
                                    irq_notifier[MSIX_SHARED_IRQ_IDX]);

    trace_nvme_handle_event(s);
    event_notifier_test_and_clear(n);
    nvme_poll_queues(s);
}
static bool nvme_add_io_queue(BlockDriverState *bs, Error **errp)
{
    BDRVNVMeState *s = bs->opaque;
    unsigned n = s->queue_count;
    NVMeQueuePair *q;
    NvmeCmd cmd;
    unsigned queue_size = NVME_QUEUE_SIZE;

    assert(n <= UINT16_MAX);
    q = nvme_create_queue_pair(s, bdrv_get_aio_context(bs),
                               n, queue_size, errp);
    if (!q) {
        return false;
    }
    cmd = (NvmeCmd) {
        .opcode = NVME_ADM_CMD_CREATE_CQ,
        .dptr.prp1 = cpu_to_le64(q->cq.iova),
        .cdw10 = cpu_to_le32(((queue_size - 1) << 16) | n),
        .cdw11 = cpu_to_le32(NVME_CQ_IEN | NVME_CQ_PC),
    };
    if (nvme_admin_cmd_sync(bs, &cmd)) {
        error_setg(errp, "Failed to create CQ io queue [%u]", n);
        goto out_error;
    }
    cmd = (NvmeCmd) {
        .opcode = NVME_ADM_CMD_CREATE_SQ,
        .dptr.prp1 = cpu_to_le64(q->sq.iova),
        .cdw10 = cpu_to_le32(((queue_size - 1) << 16) | n),
        .cdw11 = cpu_to_le32(NVME_SQ_PC | (n << 16)),
    };
    if (nvme_admin_cmd_sync(bs, &cmd)) {
        error_setg(errp, "Failed to create SQ io queue [%u]", n);
        goto out_error;
    }
    s->queues = g_renew(NVMeQueuePair *, s->queues, n + 1);
    s->queues[n] = q;
    s->queue_count++;
    return true;
out_error:
    nvme_free_queue_pair(q);
    return false;
}
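/*
 * For both queue-creation commands, cdw10 packs the queue id in its low
 * 16 bits and the zero-based queue size in the high 16 bits.  The CQ is
 * created with interrupts enabled on physically contiguous memory
 * (NVME_CQ_IEN | NVME_CQ_PC); the SQ is bound to its CQ by putting the CQ
 * id in the high 16 bits of cdw11.
 */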
static bool nvme_poll_cb(void *opaque)
{
    EventNotifier *e = opaque;
    BDRVNVMeState *s = container_of(e, BDRVNVMeState,
                                    irq_notifier[MSIX_SHARED_IRQ_IDX]);

    return nvme_poll_queues(s);
}
static int nvme_init(BlockDriverState *bs, const char *device, int namespace,
                     Error **errp)
{
    BDRVNVMeState *s = bs->opaque;
    NVMeQueuePair *q;
    AioContext *aio_context = bdrv_get_aio_context(bs);
    int ret;
    uint64_t cap;
    uint32_t ver;
    uint64_t timeout_ms;
    uint64_t deadline, now;
    volatile NvmeBar *regs = NULL;

    qemu_co_mutex_init(&s->dma_map_lock);
    qemu_co_queue_init(&s->dma_flush_queue);
    s->device = g_strdup(device);
    s->nsid = namespace;
    s->aio_context = bdrv_get_aio_context(bs);
    ret = event_notifier_init(&s->irq_notifier[MSIX_SHARED_IRQ_IDX], 0);
    if (ret) {
        error_setg(errp, "Failed to init event notifier");
        return ret;
    }

    s->vfio = qemu_vfio_open_pci(device, errp);
    if (!s->vfio) {
        ret = -EINVAL;
        goto out;
    }

    regs = qemu_vfio_pci_map_bar(s->vfio, 0, 0, sizeof(NvmeBar),
                                 PROT_READ | PROT_WRITE, errp);
    if (!regs) {
        ret = -EINVAL;
        goto out;
    }
    /* Perform initialize sequence as described in NVMe spec "7.6.1
     * Initialization". */

    cap = le64_to_cpu(regs->cap);
    trace_nvme_controller_capability_raw(cap);
    trace_nvme_controller_capability("Maximum Queue Entries Supported",
                                     1 + NVME_CAP_MQES(cap));
    trace_nvme_controller_capability("Contiguous Queues Required",
                                     NVME_CAP_CQR(cap));
    trace_nvme_controller_capability("Doorbell Stride",
                                     1 << (2 + NVME_CAP_DSTRD(cap)));
    trace_nvme_controller_capability("Subsystem Reset Supported",
                                     NVME_CAP_NSSRS(cap));
    trace_nvme_controller_capability("Memory Page Size Minimum",
                                     1 << (12 + NVME_CAP_MPSMIN(cap)));
    trace_nvme_controller_capability("Memory Page Size Maximum",
                                     1 << (12 + NVME_CAP_MPSMAX(cap)));
    if (!NVME_CAP_CSS(cap)) {
        error_setg(errp, "Device doesn't support NVMe command set");
        ret = -EINVAL;
        goto out;
    }

    s->page_size = 1u << (12 + NVME_CAP_MPSMIN(cap));
    s->doorbell_scale = (4 << NVME_CAP_DSTRD(cap)) / sizeof(uint32_t);
    bs->bl.opt_mem_alignment = s->page_size;
    bs->bl.request_alignment = s->page_size;
    timeout_ms = MIN(500 * NVME_CAP_TO(cap), 30000);

    ver = le32_to_cpu(regs->vs);
    trace_nvme_controller_spec_version(extract32(ver, 16, 16),
                                       extract32(ver, 8, 8),
                                       extract32(ver, 0, 8));

    /* Reset device to get a clean state. */
    regs->cc = cpu_to_le32(le32_to_cpu(regs->cc) & 0xFE);
    /* Wait for CSTS.RDY = 0. */
    deadline = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + timeout_ms * SCALE_MS;
    while (NVME_CSTS_RDY(le32_to_cpu(regs->csts))) {
        if (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) > deadline) {
            error_setg(errp, "Timeout while waiting for device to reset (%"
                             PRId64 " ms)",
                       timeout_ms);
            ret = -ETIMEDOUT;
            goto out;
        }
    }

    s->bar0_wo_map = qemu_vfio_pci_map_bar(s->vfio, 0, 0,
                                           sizeof(NvmeBar) + NVME_DOORBELL_SIZE,
                                           PROT_WRITE, errp);
    s->doorbells = (void *)((uintptr_t)s->bar0_wo_map + sizeof(NvmeBar));
    if (!s->doorbells) {
        ret = -EINVAL;
        goto out;
    }

    /* Set up admin queue. */
    s->queues = g_new(NVMeQueuePair *, 1);
    q = nvme_create_queue_pair(s, aio_context, 0, NVME_QUEUE_SIZE, errp);
    if (!q) {
        ret = -EINVAL;
        goto out;
    }
    s->queues[INDEX_ADMIN] = q;
    s->queue_count = 1;
    QEMU_BUILD_BUG_ON((NVME_QUEUE_SIZE - 1) & 0xF000);
    regs->aqa = cpu_to_le32(((NVME_QUEUE_SIZE - 1) << AQA_ACQS_SHIFT) |
                            ((NVME_QUEUE_SIZE - 1) << AQA_ASQS_SHIFT));
    regs->asq = cpu_to_le64(q->sq.iova);
    regs->acq = cpu_to_le64(q->cq.iova);

    /* After setting up all control registers we can enable device now. */
    regs->cc = cpu_to_le32((ctz32(NVME_CQ_ENTRY_BYTES) << CC_IOCQES_SHIFT) |
                           (ctz32(NVME_SQ_ENTRY_BYTES) << CC_IOSQES_SHIFT) |
                           CC_EN_MASK);
    /* Wait for CSTS.RDY = 1. */
    now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    deadline = now + timeout_ms * SCALE_MS;
    while (!NVME_CSTS_RDY(le32_to_cpu(regs->csts))) {
        if (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) > deadline) {
            error_setg(errp, "Timeout while waiting for device to start (%"
                             PRId64 " ms)",
                       timeout_ms);
            ret = -ETIMEDOUT;
            goto out;
        }
    }

    ret = qemu_vfio_pci_init_irq(s->vfio, s->irq_notifier,
                                 VFIO_PCI_MSIX_IRQ_INDEX, errp);
    if (ret) {
        goto out;
    }
    aio_set_event_notifier(bdrv_get_aio_context(bs),
                           &s->irq_notifier[MSIX_SHARED_IRQ_IDX],
                           false, nvme_handle_event, nvme_poll_cb);

    if (!nvme_identify(bs, namespace, errp)) {
        ret = -EIO;
        goto out;
    }

    /* Set up command queues. */
    if (!nvme_add_io_queue(bs, errp)) {
        ret = -EIO;
    }
out:
    if (regs) {
        qemu_vfio_pci_unmap_bar(s->vfio, 0, (void *)regs, 0, sizeof(NvmeBar));
    }

    /* Cleaning up is done in nvme_file_open() upon error. */
    return ret;
}
/* Parse a filename in the format of nvme://XXXX:XX:XX.X/X. Example:
 *
 *     nvme://0000:44:00.0/1
 *
 * where the "nvme://" is a fixed form of the protocol prefix, the middle part
 * is the PCI address, and the last part is the namespace number starting from
 * 1 according to the NVMe spec. */
static void nvme_parse_filename(const char *filename, QDict *options,
                                Error **errp)
{
    int pref = strlen("nvme://");

    if (strlen(filename) > pref && !strncmp(filename, "nvme://", pref)) {
        const char *tmp = filename + pref;
        char *device;
        const char *namespace;
        unsigned long ns;
        const char *slash = strchr(tmp, '/');
        if (!slash) {
            qdict_put_str(options, NVME_BLOCK_OPT_DEVICE, tmp);
            return;
        }
        device = g_strndup(tmp, slash - tmp);
        qdict_put_str(options, NVME_BLOCK_OPT_DEVICE, device);
        g_free(device);
        namespace = slash + 1;
        if (*namespace && qemu_strtoul(namespace, NULL, 10, &ns)) {
            error_setg(errp, "Invalid namespace '%s', positive number expected",
                       namespace);
            return;
        }
        qdict_put_str(options, NVME_BLOCK_OPT_NAMESPACE,
                      *namespace ? namespace : "1");
    }
}
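/*
 * For example, opening the first namespace of the controller at PCI address
 * 0000:44:00.0 can be done either with the filename form
 * "nvme://0000:44:00.0/1" or, roughly equivalently, with explicit options
 * such as
 *     -blockdev driver=nvme,node-name=drv0,device=0000:44:00.0,namespace=1
 * (node-name chosen arbitrarily for illustration).
 */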
static int nvme_enable_disable_write_cache(BlockDriverState *bs, bool enable,
                                           Error **errp)
{
    int ret;
    BDRVNVMeState *s = bs->opaque;
    NvmeCmd cmd = {
        .opcode = NVME_ADM_CMD_SET_FEATURES,
        .nsid = cpu_to_le32(s->nsid),
        .cdw10 = cpu_to_le32(0x06),
        .cdw11 = cpu_to_le32(enable ? 0x01 : 0x00),
    };

    ret = nvme_admin_cmd_sync(bs, &cmd);
    if (ret) {
        error_setg(errp, "Failed to configure NVMe write cache");
    }
    return ret;
}
static void nvme_close(BlockDriverState *bs)
{
    BDRVNVMeState *s = bs->opaque;

    for (unsigned i = 0; i < s->queue_count; ++i) {
        nvme_free_queue_pair(s->queues[i]);
    }
    g_free(s->queues);
    aio_set_event_notifier(bdrv_get_aio_context(bs),
                           &s->irq_notifier[MSIX_SHARED_IRQ_IDX],
                           false, NULL, NULL);
    event_notifier_cleanup(&s->irq_notifier[MSIX_SHARED_IRQ_IDX]);
    qemu_vfio_pci_unmap_bar(s->vfio, 0, s->bar0_wo_map,
                            0, sizeof(NvmeBar) + NVME_DOORBELL_SIZE);
    qemu_vfio_close(s->vfio);

    g_free(s->device);
}
static int nvme_file_open(BlockDriverState *bs, QDict *options, int flags,
                          Error **errp)
{
    const char *device;
    QemuOpts *opts;
    int namespace;
    int ret;
    BDRVNVMeState *s = bs->opaque;

    bs->supported_write_flags = BDRV_REQ_FUA;

    opts = qemu_opts_create(&runtime_opts, NULL, 0, &error_abort);
    qemu_opts_absorb_qdict(opts, options, &error_abort);
    device = qemu_opt_get(opts, NVME_BLOCK_OPT_DEVICE);
    if (!device) {
        error_setg(errp, "'" NVME_BLOCK_OPT_DEVICE "' option is required");
        qemu_opts_del(opts);
        return -EINVAL;
    }

    namespace = qemu_opt_get_number(opts, NVME_BLOCK_OPT_NAMESPACE, 1);
    ret = nvme_init(bs, device, namespace, errp);
    qemu_opts_del(opts);
    if (ret) {
        goto fail;
    }
    if (flags & BDRV_O_NOCACHE) {
        if (!s->write_cache_supported) {
            error_setg(errp,
                       "NVMe controller doesn't support write cache configuration");
            ret = -EINVAL;
            goto fail;
        }
        ret = nvme_enable_disable_write_cache(bs, !(flags & BDRV_O_NOCACHE),
                                              errp);
        if (ret) {
            goto fail;
        }
    }
    return 0;
fail:
    nvme_close(bs);
    return ret;
}
*bs
)
977 BDRVNVMeState
*s
= bs
->opaque
;
978 return s
->nsze
<< s
->blkshift
;
static uint32_t nvme_get_blocksize(BlockDriverState *bs)
{
    BDRVNVMeState *s = bs->opaque;
    assert(s->blkshift >= BDRV_SECTOR_BITS && s->blkshift <= 12);
    return UINT32_C(1) << s->blkshift;
}
static int nvme_probe_blocksizes(BlockDriverState *bs, BlockSizes *bsz)
{
    uint32_t blocksize = nvme_get_blocksize(bs);
    bsz->phys = blocksize;
    bsz->log = blocksize;
    return 0;
}
/* Called with s->dma_map_lock */
static coroutine_fn int nvme_cmd_unmap_qiov(BlockDriverState *bs,
                                            QEMUIOVector *qiov)
{
    int r = 0;
    BDRVNVMeState *s = bs->opaque;

    s->dma_map_count -= qiov->size;
    if (!s->dma_map_count && !qemu_co_queue_empty(&s->dma_flush_queue)) {
        r = qemu_vfio_dma_reset_temporary(s->vfio);
        if (!r) {
            qemu_co_queue_restart_all(&s->dma_flush_queue);
        }
    }
    return r;
}
/* Called with s->dma_map_lock */
static coroutine_fn int nvme_cmd_map_qiov(BlockDriverState *bs, NvmeCmd *cmd,
                                          NVMeRequest *req, QEMUIOVector *qiov)
{
    BDRVNVMeState *s = bs->opaque;
    uint64_t *pagelist = req->prp_list_page;
    int i, j, r;
    int entries = 0;
    Error *local_err = NULL, **errp = NULL;

    assert(qiov->size);
    assert(QEMU_IS_ALIGNED(qiov->size, s->page_size));
    assert(qiov->size / s->page_size <= s->page_size / sizeof(uint64_t));
    for (i = 0; i < qiov->niov; ++i) {
        bool retry = true;
        uint64_t iova;
        size_t len = QEMU_ALIGN_UP(qiov->iov[i].iov_len,
                                   qemu_real_host_page_size);
try_map:
        r = qemu_vfio_dma_map(s->vfio,
                              qiov->iov[i].iov_base,
                              len, true, &iova, errp);
        if (r == -ENOSPC) {
            /*
             * In addition to the -ENOMEM error, the VFIO_IOMMU_MAP_DMA
             * ioctl returns -ENOSPC to signal the user exhausted the DMA
             * mappings available for a container since Linux kernel commit
             * 492855939bdb ("vfio/type1: Limit DMA mappings per container",
             * April 2019, see CVE-2019-3882).
             *
             * This block driver already handles this error path by checking
             * for the -ENOMEM error, so we directly replace -ENOSPC by
             * -ENOMEM. Beside, -ENOSPC has a specific meaning for blockdev
             * coroutines: it triggers BLOCKDEV_ON_ERROR_ENOSPC and
             * BLOCK_ERROR_ACTION_STOP which stops the VM, asking the operator
             * to add more storage to the blockdev. Not something we can do
             * easily with an IOMMU :)
             */
            r = -ENOMEM;
        }
        if (r == -ENOMEM && retry) {
            /*
             * We exhausted the DMA mappings available for our container:
             * recycle the volatile IOVA mappings.
             */
            retry = false;
            trace_nvme_dma_flush_queue_wait(s);
            if (s->dma_map_count) {
                trace_nvme_dma_map_flush(s);
                qemu_co_queue_wait(&s->dma_flush_queue, &s->dma_map_lock);
            } else {
                r = qemu_vfio_dma_reset_temporary(s->vfio);
                if (r) {
                    goto fail;
                }
            }
            errp = &local_err;

            goto try_map;
        }
        if (r) {
            goto fail;
        }

        for (j = 0; j < qiov->iov[i].iov_len / s->page_size; j++) {
            pagelist[entries++] = cpu_to_le64(iova + j * s->page_size);
        }
        trace_nvme_cmd_map_qiov_iov(s, i, qiov->iov[i].iov_base,
                                    qiov->iov[i].iov_len / s->page_size);
    }

    s->dma_map_count += qiov->size;

    assert(entries <= s->page_size / sizeof(uint64_t));
    switch (entries) {
    case 0:
        abort();
    case 1:
        cmd->dptr.prp1 = pagelist[0];
        cmd->dptr.prp2 = 0;
        break;
    case 2:
        cmd->dptr.prp1 = pagelist[0];
        cmd->dptr.prp2 = pagelist[1];
        break;
    default:
        cmd->dptr.prp1 = pagelist[0];
        cmd->dptr.prp2 = cpu_to_le64(req->prp_list_iova + sizeof(uint64_t));
        break;
    }
    trace_nvme_cmd_map_qiov(s, cmd, req, qiov, entries);
    for (i = 0; i < entries; ++i) {
        trace_nvme_cmd_map_qiov_pages(s, i, pagelist[i]);
    }
    return 0;
fail:
    /* No need to unmap [0 - i) iovs even if we've failed, since we don't
     * increment s->dma_map_count. This is okay for fixed mapping memory areas
     * because they are already mapped before calling this function; for
     * temporary mappings, a later nvme_cmd_(un)map_qiov will reclaim by
     * calling qemu_vfio_dma_reset_temporary when necessary. */
    if (local_err) {
        error_reportf_err(local_err, "Cannot map buffer for DMA: ");
    }
    return -ENOMEM;
}
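/*
 * PRP selection above follows the usual NVMe rules: a single page goes into
 * PRP1; exactly two pages use PRP1 plus PRP2 directly; anything larger keeps
 * the first page in PRP1 and points PRP2 at the per-request PRP list,
 * skipping the list's first entry (hence the "+ sizeof(uint64_t)") since
 * that page is already described by PRP1.
 */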
typedef struct {
    Coroutine *co;
    int ret;
    AioContext *ctx;
} NVMeCoData;

static void nvme_rw_cb_bh(void *opaque)
{
    NVMeCoData *data = opaque;
    qemu_coroutine_enter(data->co);
}
static void nvme_rw_cb(void *opaque, int ret)
{
    NVMeCoData *data = opaque;

    data->ret = ret;
    if (!data->co) {
        /* The rw coroutine hasn't yielded, don't try to enter. */
        return;
    }
    replay_bh_schedule_oneshot_event(data->ctx, nvme_rw_cb_bh, data);
}
static coroutine_fn int nvme_co_prw_aligned(BlockDriverState *bs,
                                            uint64_t offset, uint64_t bytes,
                                            QEMUIOVector *qiov, bool is_write,
                                            int flags)
{
    int r;
    BDRVNVMeState *s = bs->opaque;
    NVMeQueuePair *ioq = s->queues[INDEX_IO(0)];
    NVMeRequest *req;

    uint32_t cdw12 = (((bytes >> s->blkshift) - 1) & 0xFFFF) |
                       (flags & BDRV_REQ_FUA ? 1 << 30 : 0);
    NvmeCmd cmd = {
        .opcode = is_write ? NVME_CMD_WRITE : NVME_CMD_READ,
        .nsid = cpu_to_le32(s->nsid),
        .cdw10 = cpu_to_le32((offset >> s->blkshift) & 0xFFFFFFFF),
        .cdw11 = cpu_to_le32(((offset >> s->blkshift) >> 32) & 0xFFFFFFFF),
        .cdw12 = cpu_to_le32(cdw12),
    };
    NVMeCoData data = {
        .ctx = bdrv_get_aio_context(bs),
        .ret = -EINPROGRESS,
    };

    trace_nvme_prw_aligned(s, is_write, offset, bytes, flags, qiov->niov);
    assert(s->queue_count > 1);
    req = nvme_get_free_req(ioq);
    assert(req);

    qemu_co_mutex_lock(&s->dma_map_lock);
    r = nvme_cmd_map_qiov(bs, &cmd, req, qiov);
    qemu_co_mutex_unlock(&s->dma_map_lock);
    if (r) {
        nvme_put_free_req_and_wake(ioq, req);
        return r;
    }
    nvme_submit_command(ioq, req, &cmd, nvme_rw_cb, &data);

    data.co = qemu_coroutine_self();
    while (data.ret == -EINPROGRESS) {
        qemu_coroutine_yield();
    }

    qemu_co_mutex_lock(&s->dma_map_lock);
    r = nvme_cmd_unmap_qiov(bs, qiov);
    qemu_co_mutex_unlock(&s->dma_map_lock);
    if (r) {
        return r;
    }

    trace_nvme_rw_done(s, is_write, offset, bytes, data.ret);
    return data.ret;
}
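/*
 * Command layout used above: cdw10/cdw11 hold the 64-bit starting LBA
 * (offset >> blkshift) split into low and high words, and cdw12 holds the
 * zero-based block count in its low 16 bits, with bit 30 set for FUA writes
 * when BDRV_REQ_FUA is requested.
 */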
static inline bool nvme_qiov_aligned(BlockDriverState *bs,
                                     const QEMUIOVector *qiov)
{
    int i;
    BDRVNVMeState *s = bs->opaque;

    for (i = 0; i < qiov->niov; ++i) {
        if (!QEMU_PTR_IS_ALIGNED(qiov->iov[i].iov_base,
                                 qemu_real_host_page_size) ||
            !QEMU_IS_ALIGNED(qiov->iov[i].iov_len, qemu_real_host_page_size)) {
            trace_nvme_qiov_unaligned(qiov, i, qiov->iov[i].iov_base,
                                      qiov->iov[i].iov_len, s->page_size);
            return false;
        }
    }
    return true;
}
static int nvme_co_prw(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
                       QEMUIOVector *qiov, bool is_write, int flags)
{
    BDRVNVMeState *s = bs->opaque;
    int r;
    QEMU_AUTO_VFREE uint8_t *buf = NULL;
    QEMUIOVector local_qiov;
    size_t len = QEMU_ALIGN_UP(bytes, qemu_real_host_page_size);
    assert(QEMU_IS_ALIGNED(offset, s->page_size));
    assert(QEMU_IS_ALIGNED(bytes, s->page_size));
    assert(bytes <= s->max_transfer);
    if (nvme_qiov_aligned(bs, qiov)) {
        s->stats.aligned_accesses++;
        return nvme_co_prw_aligned(bs, offset, bytes, qiov, is_write, flags);
    }
    s->stats.unaligned_accesses++;
    trace_nvme_prw_buffered(s, offset, bytes, qiov->niov, is_write);
    buf = qemu_try_memalign(qemu_real_host_page_size, len);

    if (!buf) {
        return -ENOMEM;
    }
    qemu_iovec_init(&local_qiov, 1);
    if (is_write) {
        qemu_iovec_to_buf(qiov, 0, buf, bytes);
    }
    qemu_iovec_add(&local_qiov, buf, bytes);
    r = nvme_co_prw_aligned(bs, offset, bytes, &local_qiov, is_write, flags);
    qemu_iovec_destroy(&local_qiov);
    if (!r && !is_write) {
        qemu_iovec_from_buf(qiov, 0, buf, bytes);
    }
    return r;
}
int nvme_co_preadv(BlockDriverState
*bs
,
1252 int64_t offset
, int64_t bytes
,
1254 BdrvRequestFlags flags
)
1256 return nvme_co_prw(bs
, offset
, bytes
, qiov
, false, flags
);
static coroutine_fn int nvme_co_pwritev(BlockDriverState *bs,
                                        int64_t offset, int64_t bytes,
                                        QEMUIOVector *qiov,
                                        BdrvRequestFlags flags)
{
    return nvme_co_prw(bs, offset, bytes, qiov, true, flags);
}
static coroutine_fn int nvme_co_flush(BlockDriverState *bs)
{
    BDRVNVMeState *s = bs->opaque;
    NVMeQueuePair *ioq = s->queues[INDEX_IO(0)];
    NVMeRequest *req;
    NvmeCmd cmd = {
        .opcode = NVME_CMD_FLUSH,
        .nsid = cpu_to_le32(s->nsid),
    };
    NVMeCoData data = {
        .ctx = bdrv_get_aio_context(bs),
        .ret = -EINPROGRESS,
    };

    assert(s->queue_count > 1);
    req = nvme_get_free_req(ioq);
    assert(req);
    nvme_submit_command(ioq, req, &cmd, nvme_rw_cb, &data);

    data.co = qemu_coroutine_self();
    if (data.ret == -EINPROGRESS) {
        qemu_coroutine_yield();
    }

    return data.ret;
}
static coroutine_fn int nvme_co_pwrite_zeroes(BlockDriverState *bs,
                                              int64_t offset,
                                              int64_t bytes,
                                              BdrvRequestFlags flags)
{
    BDRVNVMeState *s = bs->opaque;
    NVMeQueuePair *ioq = s->queues[INDEX_IO(0)];
    NVMeRequest *req;
    uint32_t cdw12;

    if (!s->supports_write_zeroes) {
        return -ENOTSUP;
    }

    cdw12 = ((bytes >> s->blkshift) - 1) & 0xFFFF;
    /*
     * We should not lose information. pwrite_zeroes_alignment and
     * max_pwrite_zeroes guarantees it.
     */
    assert(((cdw12 + 1) << s->blkshift) == bytes);

    NvmeCmd cmd = {
        .opcode = NVME_CMD_WRITE_ZEROES,
        .nsid = cpu_to_le32(s->nsid),
        .cdw10 = cpu_to_le32((offset >> s->blkshift) & 0xFFFFFFFF),
        .cdw11 = cpu_to_le32(((offset >> s->blkshift) >> 32) & 0xFFFFFFFF),
    };

    NVMeCoData data = {
        .ctx = bdrv_get_aio_context(bs),
        .ret = -EINPROGRESS,
    };

    if (flags & BDRV_REQ_MAY_UNMAP) {
        cdw12 |= (1 << 25);
    }

    if (flags & BDRV_REQ_FUA) {
        cdw12 |= (1 << 30);
    }

    cmd.cdw12 = cpu_to_le32(cdw12);

    trace_nvme_write_zeroes(s, offset, bytes, flags);
    assert(s->queue_count > 1);
    req = nvme_get_free_req(ioq);
    assert(req);

    nvme_submit_command(ioq, req, &cmd, nvme_rw_cb, &data);

    data.co = qemu_coroutine_self();
    while (data.ret == -EINPROGRESS) {
        qemu_coroutine_yield();
    }

    trace_nvme_rw_done(s, true, offset, bytes, data.ret);
    return data.ret;
}
static int coroutine_fn nvme_co_pdiscard(BlockDriverState *bs,
                                         int64_t offset,
                                         int64_t bytes)
{
    BDRVNVMeState *s = bs->opaque;
    NVMeQueuePair *ioq = s->queues[INDEX_IO(0)];
    NVMeRequest *req;
    int ret;
    QEMU_AUTO_VFREE NvmeDsmRange *buf = NULL;
    QEMUIOVector local_qiov;

    NvmeCmd cmd = {
        .opcode = NVME_CMD_DSM,
        .nsid = cpu_to_le32(s->nsid),
        .cdw10 = cpu_to_le32(0), /*number of ranges - 0 based*/
        .cdw11 = cpu_to_le32(1 << 2), /*deallocate bit*/
    };

    NVMeCoData data = {
        .ctx = bdrv_get_aio_context(bs),
        .ret = -EINPROGRESS,
    };

    if (!s->supports_discard) {
        return -ENOTSUP;
    }

    assert(s->queue_count > 1);

    /*
     * Filling the @buf requires @offset and @bytes to satisfy restrictions
     * defined in nvme_refresh_limits().
     */
    assert(QEMU_IS_ALIGNED(bytes, 1UL << s->blkshift));
    assert(QEMU_IS_ALIGNED(offset, 1UL << s->blkshift));
    assert((bytes >> s->blkshift) <= UINT32_MAX);

    buf = qemu_try_memalign(s->page_size, s->page_size);
    if (!buf) {
        return -ENOMEM;
    }

    memset(buf, 0, s->page_size);
    buf->nlb = cpu_to_le32(bytes >> s->blkshift);
    buf->slba = cpu_to_le64(offset >> s->blkshift);

    qemu_iovec_init(&local_qiov, 1);
    qemu_iovec_add(&local_qiov, buf, 4096);

    req = nvme_get_free_req(ioq);
    assert(req);

    qemu_co_mutex_lock(&s->dma_map_lock);
    ret = nvme_cmd_map_qiov(bs, &cmd, req, &local_qiov);
    qemu_co_mutex_unlock(&s->dma_map_lock);

    if (ret) {
        nvme_put_free_req_and_wake(ioq, req);
        goto out;
    }

    trace_nvme_dsm(s, offset, bytes);

    nvme_submit_command(ioq, req, &cmd, nvme_rw_cb, &data);

    data.co = qemu_coroutine_self();
    while (data.ret == -EINPROGRESS) {
        qemu_coroutine_yield();
    }

    qemu_co_mutex_lock(&s->dma_map_lock);
    ret = nvme_cmd_unmap_qiov(bs, &local_qiov);
    qemu_co_mutex_unlock(&s->dma_map_lock);

    if (ret) {
        goto out;
    }

    ret = data.ret;
    trace_nvme_dsm_done(s, offset, bytes, ret);
out:
    qemu_iovec_destroy(&local_qiov);
    return ret;
}
static int coroutine_fn nvme_co_truncate(BlockDriverState *bs, int64_t offset,
                                         bool exact, PreallocMode prealloc,
                                         BdrvRequestFlags flags, Error **errp)
{
    int64_t cur_length;

    if (prealloc != PREALLOC_MODE_OFF) {
        error_setg(errp, "Unsupported preallocation mode '%s'",
                   PreallocMode_str(prealloc));
        return -ENOTSUP;
    }

    cur_length = nvme_getlength(bs);
    if (offset != cur_length && exact) {
        error_setg(errp, "Cannot resize NVMe devices");
        return -ENOTSUP;
    } else if (offset > cur_length) {
        error_setg(errp, "Cannot grow NVMe devices");
        return -EINVAL;
    }

    return 0;
}
static int nvme_reopen_prepare(BDRVReopenState *reopen_state,
                               BlockReopenQueue *queue, Error **errp)
{
    return 0;
}
static void nvme_refresh_filename(BlockDriverState *bs)
{
    BDRVNVMeState *s = bs->opaque;

    snprintf(bs->exact_filename, sizeof(bs->exact_filename), "nvme://%s/%i",
             s->device, s->nsid);
}
static void nvme_refresh_limits(BlockDriverState *bs, Error **errp)
{
    BDRVNVMeState *s = bs->opaque;

    bs->bl.opt_mem_alignment = s->page_size;
    bs->bl.request_alignment = s->page_size;
    bs->bl.max_transfer = s->max_transfer;

    /*
     * Look at nvme_co_pwrite_zeroes: after shift and decrement we should get
     * at most 0xFFFF
     */
    bs->bl.max_pwrite_zeroes = 1ULL << (s->blkshift + 16);
    bs->bl.pwrite_zeroes_alignment = MAX(bs->bl.request_alignment,
                                         1UL << s->blkshift);

    bs->bl.max_pdiscard = (uint64_t)UINT32_MAX << s->blkshift;
    bs->bl.pdiscard_alignment = MAX(bs->bl.request_alignment,
                                    1UL << s->blkshift);
}
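/*
 * Worked example: the 16-bit, zero-based NLB field limits a single
 * WRITE ZEROES command to 65536 blocks, i.e. 1 << (blkshift + 16) bytes
 * (32 MiB with 512-byte blocks), while DSM ranges allow up to UINT32_MAX
 * blocks per discard.
 */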
static void nvme_detach_aio_context(BlockDriverState *bs)
{
    BDRVNVMeState *s = bs->opaque;

    for (unsigned i = 0; i < s->queue_count; i++) {
        NVMeQueuePair *q = s->queues[i];

        qemu_bh_delete(q->completion_bh);
        q->completion_bh = NULL;
    }

    aio_set_event_notifier(bdrv_get_aio_context(bs),
                           &s->irq_notifier[MSIX_SHARED_IRQ_IDX],
                           false, NULL, NULL);
}
static void nvme_attach_aio_context(BlockDriverState *bs,
                                    AioContext *new_context)
{
    BDRVNVMeState *s = bs->opaque;

    s->aio_context = new_context;
    aio_set_event_notifier(new_context, &s->irq_notifier[MSIX_SHARED_IRQ_IDX],
                           false, nvme_handle_event, nvme_poll_cb);

    for (unsigned i = 0; i < s->queue_count; i++) {
        NVMeQueuePair *q = s->queues[i];

        q->completion_bh =
            aio_bh_new(new_context, nvme_process_completion_bh, q);
    }
}
static void nvme_aio_plug(BlockDriverState *bs)
{
    BDRVNVMeState *s = bs->opaque;
    assert(!s->plugged);
    s->plugged = true;
}
static void nvme_aio_unplug(BlockDriverState *bs)
{
    BDRVNVMeState *s = bs->opaque;
    assert(s->plugged);
    s->plugged = false;
    for (unsigned i = INDEX_IO(0); i < s->queue_count; i++) {
        NVMeQueuePair *q = s->queues[i];
        qemu_mutex_lock(&q->lock);
        nvme_kick(q);
        nvme_process_completion(q);
        qemu_mutex_unlock(&q->lock);
    }
}
static void nvme_register_buf(BlockDriverState *bs, void *host, size_t size)
{
    int ret;
    Error *local_err = NULL;
    BDRVNVMeState *s = bs->opaque;

    ret = qemu_vfio_dma_map(s->vfio, host, size, false, NULL, &local_err);
    if (ret) {
        /* FIXME: we may run out of IOVA addresses after repeated
         * bdrv_register_buf/bdrv_unregister_buf, because nvme_vfio_dma_unmap
         * doesn't reclaim addresses for fixed mappings. */
        error_reportf_err(local_err, "nvme_register_buf failed: ");
    }
}
static void nvme_unregister_buf(BlockDriverState *bs, void *host)
{
    BDRVNVMeState *s = bs->opaque;

    qemu_vfio_dma_unmap(s->vfio, host);
}
static BlockStatsSpecific *nvme_get_specific_stats(BlockDriverState *bs)
{
    BlockStatsSpecific *stats = g_new(BlockStatsSpecific, 1);
    BDRVNVMeState *s = bs->opaque;

    stats->driver = BLOCKDEV_DRIVER_NVME;
    stats->u.nvme = (BlockStatsSpecificNvme) {
        .completion_errors = s->stats.completion_errors,
        .aligned_accesses = s->stats.aligned_accesses,
        .unaligned_accesses = s->stats.unaligned_accesses,
    };

    return stats;
}
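/*
 * These per-driver counters are reported alongside the generic block
 * statistics (e.g. via the query-blockstats QMP command), which is handy for
 * spotting guests that keep hitting the bounce-buffer path through
 * unaligned_accesses.
 */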
static const char *const nvme_strong_runtime_opts[] = {
    NVME_BLOCK_OPT_DEVICE,
    NVME_BLOCK_OPT_NAMESPACE,

    NULL
};
static BlockDriver bdrv_nvme = {
    .format_name              = "nvme",
    .protocol_name            = "nvme",
    .instance_size            = sizeof(BDRVNVMeState),

    .bdrv_co_create_opts      = bdrv_co_create_opts_simple,
    .create_opts              = &bdrv_create_opts_simple,

    .bdrv_parse_filename      = nvme_parse_filename,
    .bdrv_file_open           = nvme_file_open,
    .bdrv_close               = nvme_close,
    .bdrv_getlength           = nvme_getlength,
    .bdrv_probe_blocksizes    = nvme_probe_blocksizes,
    .bdrv_co_truncate         = nvme_co_truncate,

    .bdrv_co_preadv           = nvme_co_preadv,
    .bdrv_co_pwritev          = nvme_co_pwritev,

    .bdrv_co_pwrite_zeroes    = nvme_co_pwrite_zeroes,
    .bdrv_co_pdiscard         = nvme_co_pdiscard,

    .bdrv_co_flush_to_disk    = nvme_co_flush,
    .bdrv_reopen_prepare      = nvme_reopen_prepare,

    .bdrv_refresh_filename    = nvme_refresh_filename,
    .bdrv_refresh_limits      = nvme_refresh_limits,
    .strong_runtime_opts      = nvme_strong_runtime_opts,
    .bdrv_get_specific_stats  = nvme_get_specific_stats,

    .bdrv_detach_aio_context  = nvme_detach_aio_context,
    .bdrv_attach_aio_context  = nvme_attach_aio_context,

    .bdrv_io_plug             = nvme_aio_plug,
    .bdrv_io_unplug           = nvme_aio_unplug,

    .bdrv_register_buf        = nvme_register_buf,
    .bdrv_unregister_buf      = nvme_unregister_buf,
};
static void bdrv_nvme_init(void)
{
    bdrv_register(&bdrv_nvme);
}

block_init(bdrv_nvme_init);