/*
 * NVMe block driver based on vfio
 *
 * Copyright 2016 - 2018 Red Hat, Inc.
 *
 * Authors:
 *   Fam Zheng <famz@redhat.com>
 *   Paolo Bonzini <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include <linux/vfio.h>
#include "qapi/error.h"
#include "qapi/qmp/qdict.h"
#include "qapi/qmp/qstring.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/module.h"
#include "qemu/cutils.h"
#include "qemu/option.h"
#include "qemu/vfio-helpers.h"
#include "block/block_int.h"
#include "sysemu/replay.h"
#include "trace.h"

#include "block/nvme.h"
#define NVME_SQ_ENTRY_BYTES 64
#define NVME_CQ_ENTRY_BYTES 16
#define NVME_QUEUE_SIZE 128
#define NVME_DOORBELL_SIZE 4096

/*
 * We have to leave one slot empty as that is the full queue case where
 * head == tail + 1.
 */
#define NVME_NUM_REQS (NVME_QUEUE_SIZE - 1)
typedef struct BDRVNVMeState BDRVNVMeState;

/* Same index is used for queues and IRQs */
#define INDEX_ADMIN     0
#define INDEX_IO(n)     (1 + n)

/* This driver shares a single MSIX IRQ for the admin and I/O queues */
enum {
    MSIX_SHARED_IRQ_IDX = 0,
    MSIX_IRQ_COUNT = 1
};
typedef struct {
    int32_t  head, tail;
    uint8_t  *queue;
    uint64_t iova;
    /* Hardware MMIO register */
    volatile uint32_t *doorbell;
} NVMeQueue;

typedef struct {
    BlockCompletionFunc *cb;
    void *opaque;
    int cid;
    void *prp_list_page;
    uint64_t prp_list_iova;
    int free_req_next; /* q->reqs[] index of next free req */
} NVMeRequest;
typedef struct {
    QemuMutex   lock;

    /* Read from I/O code path, initialized under BQL */
    BDRVNVMeState   *s;
    int             index;

    /* Fields protected by BQL */
    uint8_t     *prp_list_pages;

    /* Fields protected by @lock */
    CoQueue     free_req_queue;
    NVMeQueue   sq, cq;
    int         cq_phase;
    int         free_req_head;
    NVMeRequest reqs[NVME_NUM_REQS];
    int         need_kick;
    int         inflight;

    /* Thread-safe, no lock necessary */
    QEMUBH      *completion_bh;
} NVMeQueuePair;
struct BDRVNVMeState {
    AioContext *aio_context;
    QEMUVFIOState *vfio;
    void *bar0_wo_map;
    /* Memory mapped registers */
    volatile struct {
        uint32_t sq_tail;
        uint32_t cq_head;
    } *doorbells;
    /* The submission/completion queue pairs.
     * [0]: admin queue.
     * [1..]: io queues.
     */
    NVMeQueuePair **queues;
    unsigned queue_count;
    size_t page_size;
    /* How many uint32_t elements does each doorbell entry take. */
    size_t doorbell_scale;
    bool write_cache_supported;
    EventNotifier irq_notifier[MSIX_IRQ_COUNT];

    uint64_t nsze; /* Namespace size reported by identify command */
    int nsid;      /* The namespace id to read/write data. */
    int blkshift;

    uint64_t max_transfer;
    bool plugged;

    bool supports_write_zeroes;
    bool supports_discard;

    CoMutex dma_map_lock;
    CoQueue dma_flush_queue;

    /* Total size of mapped qiov, accessed under dma_map_lock */
    int dma_map_count;

    /* PCI address (required for nvme_refresh_filename()) */
    char *device;

    struct {
        uint64_t completion_errors;
        uint64_t aligned_accesses;
        uint64_t unaligned_accesses;
    } stats;
};
#define NVME_BLOCK_OPT_DEVICE "device"
#define NVME_BLOCK_OPT_NAMESPACE "namespace"

static void nvme_process_completion_bh(void *opaque);
static QemuOptsList runtime_opts = {
    .name = "nvme",
    .head = QTAILQ_HEAD_INITIALIZER(runtime_opts.head),
    .desc = {
        {
            .name = NVME_BLOCK_OPT_DEVICE,
            .type = QEMU_OPT_STRING,
            .help = "NVMe PCI device address",
        },
        {
            .name = NVME_BLOCK_OPT_NAMESPACE,
            .type = QEMU_OPT_NUMBER,
            .help = "NVMe namespace",
        },
        { /* end of list */ }
    },
};
/* Returns true on success, false on failure. */
static bool nvme_init_queue(BDRVNVMeState *s, NVMeQueue *q,
                            unsigned nentries, size_t entry_bytes, Error **errp)
{
    size_t bytes;
    int r;

    bytes = ROUND_UP(nentries * entry_bytes, qemu_real_host_page_size);
    q->head = q->tail = 0;
    q->queue = qemu_try_memalign(qemu_real_host_page_size, bytes);
    if (!q->queue) {
        error_setg(errp, "Cannot allocate queue");
        return false;
    }
    memset(q->queue, 0, bytes);
    r = qemu_vfio_dma_map(s->vfio, q->queue, bytes, false, &q->iova, errp);
    if (r) {
        error_prepend(errp, "Cannot map queue: ");
    }
    return r == 0;
}
static void nvme_free_queue(NVMeQueue *q)
{
    qemu_vfree(q->queue);
}
static void nvme_free_queue_pair(NVMeQueuePair *q)
{
    trace_nvme_free_queue_pair(q->index, q, &q->cq, &q->sq);
    if (q->completion_bh) {
        qemu_bh_delete(q->completion_bh);
    }
    nvme_free_queue(&q->sq);
    nvme_free_queue(&q->cq);
    qemu_vfree(q->prp_list_pages);
    qemu_mutex_destroy(&q->lock);
    g_free(q);
}
static void nvme_free_req_queue_cb(void *opaque)
{
    NVMeQueuePair *q = opaque;

    qemu_mutex_lock(&q->lock);
    while (qemu_co_enter_next(&q->free_req_queue, &q->lock)) {
        /* Retry all pending requests */
    }
    qemu_mutex_unlock(&q->lock);
}
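
/*
 * A queue pair owns one host page of PRP list storage per request: the pages
 * are carved out of a single DMA-mapped allocation below, and each request is
 * threaded onto a singly linked freelist through free_req_next, so the I/O
 * path never has to allocate memory.
 */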
static NVMeQueuePair *nvme_create_queue_pair(BDRVNVMeState *s,
                                             AioContext *aio_context,
                                             unsigned idx, size_t size,
                                             Error **errp)
{
    int i, r;
    NVMeQueuePair *q;
    uint64_t prp_list_iova;
    size_t bytes;

    q = g_try_new0(NVMeQueuePair, 1);
    if (!q) {
        error_setg(errp, "Cannot allocate queue pair");
        return NULL;
    }
    trace_nvme_create_queue_pair(idx, q, size, aio_context,
                                 event_notifier_get_fd(s->irq_notifier));
    bytes = QEMU_ALIGN_UP(s->page_size * NVME_NUM_REQS,
                          qemu_real_host_page_size);
    q->prp_list_pages = qemu_try_memalign(qemu_real_host_page_size, bytes);
    if (!q->prp_list_pages) {
        error_setg(errp, "Cannot allocate PRP page list");
        goto fail;
    }
    memset(q->prp_list_pages, 0, bytes);
    qemu_mutex_init(&q->lock);
    q->s = s;
    q->index = idx;
    qemu_co_queue_init(&q->free_req_queue);
    q->completion_bh = aio_bh_new(aio_context, nvme_process_completion_bh, q);
    r = qemu_vfio_dma_map(s->vfio, q->prp_list_pages, bytes,
                          false, &prp_list_iova, errp);
    if (r) {
        error_prepend(errp, "Cannot map buffer for DMA: ");
        goto fail;
    }
    q->free_req_head = -1;
    for (i = 0; i < NVME_NUM_REQS; i++) {
        NVMeRequest *req = &q->reqs[i];
        req->cid = i + 1;
        req->free_req_next = q->free_req_head;
        q->free_req_head = i;
        req->prp_list_page = q->prp_list_pages + i * s->page_size;
        req->prp_list_iova = prp_list_iova + i * s->page_size;
    }

    if (!nvme_init_queue(s, &q->sq, size, NVME_SQ_ENTRY_BYTES, errp)) {
        goto fail;
    }
    q->sq.doorbell = &s->doorbells[idx * s->doorbell_scale].sq_tail;

    if (!nvme_init_queue(s, &q->cq, size, NVME_CQ_ENTRY_BYTES, errp)) {
        goto fail;
    }
    q->cq.doorbell = &s->doorbells[idx * s->doorbell_scale].cq_head;

    return q;
fail:
    nvme_free_queue_pair(q);
    return NULL;
}
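
/*
 * Ring the submission queue doorbell. Per the NVMe spec, writing the new
 * tail index hands every SQ entry between the old and new tail over to the
 * device, which is why the entry stores must be fenced before the doorbell
 * write below.
 */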
static void nvme_kick(NVMeQueuePair *q)
{
    BDRVNVMeState *s = q->s;

    if (s->plugged || !q->need_kick) {
        return;
    }
    trace_nvme_kick(s, q->index);
    assert(!(q->sq.tail & 0xFF00));
    /* Fence the write to submission queue entry before notifying the device. */
    smp_wmb();
    *q->sq.doorbell = cpu_to_le32(q->sq.tail);
    q->inflight += q->need_kick;
    q->need_kick = 0;
}
/* Find a free request element if any, otherwise:
 *   a) if in coroutine context, try to wait for one to become available;
 *   b) if not in coroutine, return NULL;
 */
static NVMeRequest *nvme_get_free_req(NVMeQueuePair *q)
{
    NVMeRequest *req;

    qemu_mutex_lock(&q->lock);

    while (q->free_req_head == -1) {
        if (qemu_in_coroutine()) {
            trace_nvme_free_req_queue_wait(q->s, q->index);
            qemu_co_queue_wait(&q->free_req_queue, &q->lock);
        } else {
            qemu_mutex_unlock(&q->lock);
            return NULL;
        }
    }

    req = &q->reqs[q->free_req_head];
    q->free_req_head = req->free_req_next;
    req->free_req_next = -1;

    qemu_mutex_unlock(&q->lock);
    return req;
}
static void nvme_put_free_req_locked(NVMeQueuePair *q, NVMeRequest *req)
{
    req->free_req_next = q->free_req_head;
    q->free_req_head = req - q->reqs;
}
static void nvme_wake_free_req_locked(NVMeQueuePair *q)
{
    if (!qemu_co_queue_empty(&q->free_req_queue)) {
        replay_bh_schedule_oneshot_event(q->s->aio_context,
                                         nvme_free_req_queue_cb, q);
    }
}
/* Insert a request in the freelist and wake waiters */
static void nvme_put_free_req_and_wake(NVMeQueuePair *q, NVMeRequest *req)
{
    qemu_mutex_lock(&q->lock);
    nvme_put_free_req_locked(q, req);
    nvme_wake_free_req_locked(q);
    qemu_mutex_unlock(&q->lock);
}
static inline int nvme_translate_error(const NvmeCqe *c)
{
    uint16_t status = (le16_to_cpu(c->status) >> 1) & 0xFF;
    if (status) {
        trace_nvme_error(le32_to_cpu(c->result),
                         le16_to_cpu(c->sq_head),
                         le16_to_cpu(c->sq_id),
                         le16_to_cpu(c->cid),
                         status);
    }
    switch (status) {
    case 0:
        return 0;
    case 1:
        return -ENOSYS;
    case 2:
        return -EINVAL;
    default:
        return -EIO;
    }
}
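
/*
 * Completions are detected with the NVMe phase tag protocol: the controller
 * inverts the phase bit in each CQ entry it posts on every pass around the
 * ring, so an entry whose phase bit still matches q->cq_phase has not been
 * written yet; q->cq_phase flips whenever the head index wraps to zero.
 */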
static bool nvme_process_completion(NVMeQueuePair *q)
{
    BDRVNVMeState *s = q->s;
    bool progress = false;
    NVMeRequest *preq;
    NVMeRequest req;
    NvmeCqe *c;

    trace_nvme_process_completion(s, q->index, q->inflight);
    if (s->plugged) {
        trace_nvme_process_completion_queue_plugged(s, q->index);
        return false;
    }

    /*
     * Support re-entrancy when a request cb() function invokes aio_poll().
     * Pending completions must be visible to aio_poll() so that a cb()
     * function can wait for the completion of another request.
     *
     * The aio_poll() loop will execute our BH and we'll resume completion
     * processing there.
     */
    qemu_bh_schedule(q->completion_bh);

    assert(q->inflight >= 0);
    while (q->inflight) {
        int ret;
        uint32_t cid;

        c = (NvmeCqe *)&q->cq.queue[q->cq.head * NVME_CQ_ENTRY_BYTES];
        if ((le16_to_cpu(c->status) & 0x1) == q->cq_phase) {
            break;
        }
        ret = nvme_translate_error(c);
        if (ret) {
            s->stats.completion_errors++;
        }
        q->cq.head = (q->cq.head + 1) % NVME_QUEUE_SIZE;
        if (!q->cq.head) {
            q->cq_phase = !q->cq_phase;
        }
        cid = le16_to_cpu(c->cid);
        if (cid == 0 || cid > NVME_QUEUE_SIZE) {
            warn_report("NVMe: Unexpected CID in completion queue: %"PRIu32", "
                        "queue size: %u", cid, NVME_QUEUE_SIZE);
            continue;
        }
        trace_nvme_complete_command(s, q->index, cid);
        preq = &q->reqs[cid - 1];
        req = *preq;
        assert(req.cid == cid);
        assert(req.cb);
        nvme_put_free_req_locked(q, preq);
        preq->cb = preq->opaque = NULL;
        q->inflight--;
        qemu_mutex_unlock(&q->lock);
        req.cb(req.opaque, ret);
        qemu_mutex_lock(&q->lock);
        progress = true;
    }
    if (progress) {
        /* Notify the device so it can post more completions. */
        smp_mb_release();
        *q->cq.doorbell = cpu_to_le32(q->cq.head);
        nvme_wake_free_req_locked(q);
    }

    qemu_bh_cancel(q->completion_bh);

    return progress;
}
static void nvme_process_completion_bh(void *opaque)
{
    NVMeQueuePair *q = opaque;

    /*
     * We're being invoked because a nvme_process_completion() cb() function
     * called aio_poll(). The callback may be waiting for further completions
     * so notify the device that it has space to fill in more completions now.
     */
    smp_mb_release();
    *q->cq.doorbell = cpu_to_le32(q->cq.head);
    nvme_wake_free_req_locked(q);

    nvme_process_completion(q);
}
static void nvme_trace_command(const NvmeCmd *cmd)
{
    int i;

    if (!trace_event_get_state_backends(TRACE_NVME_SUBMIT_COMMAND_RAW)) {
        return;
    }
    for (i = 0; i < 8; ++i) {
        const uint8_t *cmdp = (const uint8_t *)cmd + i * 8;
        trace_nvme_submit_command_raw(cmdp[0], cmdp[1], cmdp[2], cmdp[3],
                                      cmdp[4], cmdp[5], cmdp[6], cmdp[7]);
    }
}
static void nvme_submit_command(NVMeQueuePair *q, NVMeRequest *req,
                                NvmeCmd *cmd, BlockCompletionFunc cb,
                                void *opaque)
{
    assert(!req->cb);
    req->cb = cb;
    req->opaque = opaque;
    cmd->cid = cpu_to_le16(req->cid);

    trace_nvme_submit_command(q->s, q->index, req->cid);
    nvme_trace_command(cmd);
    qemu_mutex_lock(&q->lock);
    memcpy((uint8_t *)q->sq.queue +
           q->sq.tail * NVME_SQ_ENTRY_BYTES, cmd, sizeof(*cmd));
    q->sq.tail = (q->sq.tail + 1) % NVME_QUEUE_SIZE;
    q->need_kick++;
    nvme_kick(q);
    nvme_process_completion(q);
    qemu_mutex_unlock(&q->lock);
}
static void nvme_admin_cmd_sync_cb(void *opaque, int ret)
{
    int *pret = opaque;
    *pret = ret;
    aio_wait_kick();
}
static int nvme_admin_cmd_sync(BlockDriverState *bs, NvmeCmd *cmd)
{
    BDRVNVMeState *s = bs->opaque;
    NVMeQueuePair *q = s->queues[INDEX_ADMIN];
    AioContext *aio_context = bdrv_get_aio_context(bs);
    NVMeRequest *req;
    int ret = -EINPROGRESS;
    req = nvme_get_free_req(q);
    if (!req) {
        return -EBUSY;
    }
    nvme_submit_command(q, req, cmd, nvme_admin_cmd_sync_cb, &ret);

    AIO_WAIT_WHILE(aio_context, ret == -EINPROGRESS);
    return ret;
}
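
/*
 * Probe the controller and namespace with two IDENTIFY admin commands:
 * first CNS 0x1 (controller data structure) for MDTS, VWC and ONCS, then
 * CNS 0x0 with cmd.nsid set (namespace data structure) for the namespace
 * size and LBA format.
 */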
/* Returns true on success, false on failure. */
static bool nvme_identify(BlockDriverState *bs, int namespace, Error **errp)
{
    BDRVNVMeState *s = bs->opaque;
    bool ret = false;
    QEMU_AUTO_VFREE union {
        NvmeIdCtrl ctrl;
        NvmeIdNs ns;
    } *id = NULL;
    NvmeLBAF *lbaf;
    uint16_t oncs;
    int r;
    uint64_t iova;
    NvmeCmd cmd = {
        .opcode = NVME_ADM_CMD_IDENTIFY,
        .cdw10 = cpu_to_le32(0x1),
    };
    size_t id_size = QEMU_ALIGN_UP(sizeof(*id), qemu_real_host_page_size);

    id = qemu_try_memalign(qemu_real_host_page_size, id_size);
    if (!id) {
        error_setg(errp, "Cannot allocate buffer for identify response");
        goto out;
    }
    r = qemu_vfio_dma_map(s->vfio, id, id_size, true, &iova, errp);
    if (r) {
        error_prepend(errp, "Cannot map buffer for DMA: ");
        goto out;
    }

    memset(id, 0, id_size);
    cmd.dptr.prp1 = cpu_to_le64(iova);
    if (nvme_admin_cmd_sync(bs, &cmd)) {
        error_setg(errp, "Failed to identify controller");
        goto out;
    }

    if (le32_to_cpu(id->ctrl.nn) < namespace) {
        error_setg(errp, "Invalid namespace");
        goto out;
    }
    s->write_cache_supported = le32_to_cpu(id->ctrl.vwc) & 0x1;
    s->max_transfer = (id->ctrl.mdts ? 1 << id->ctrl.mdts : 0) * s->page_size;
    /* For now the page list buffer per command is one page, to hold at most
     * s->page_size / sizeof(uint64_t) entries. */
    s->max_transfer = MIN_NON_ZERO(s->max_transfer,
                          s->page_size / sizeof(uint64_t) * s->page_size);

    oncs = le16_to_cpu(id->ctrl.oncs);
    s->supports_write_zeroes = !!(oncs & NVME_ONCS_WRITE_ZEROES);
    s->supports_discard = !!(oncs & NVME_ONCS_DSM);

    memset(id, 0, id_size);
    cmd.cdw10 = 0;
    cmd.nsid = cpu_to_le32(namespace);
    if (nvme_admin_cmd_sync(bs, &cmd)) {
        error_setg(errp, "Failed to identify namespace");
        goto out;
    }

    s->nsze = le64_to_cpu(id->ns.nsze);
    lbaf = &id->ns.lbaf[NVME_ID_NS_FLBAS_INDEX(id->ns.flbas)];

    if (NVME_ID_NS_DLFEAT_WRITE_ZEROES(id->ns.dlfeat) &&
            NVME_ID_NS_DLFEAT_READ_BEHAVIOR(id->ns.dlfeat) ==
                    NVME_ID_NS_DLFEAT_READ_BEHAVIOR_ZEROES) {
        bs->supported_write_flags |= BDRV_REQ_MAY_UNMAP;
    }

    if (lbaf->ms) {
        error_setg(errp, "Namespaces with metadata are not yet supported");
        goto out;
    }

    if (lbaf->ds < BDRV_SECTOR_BITS || lbaf->ds > 12 ||
        (1 << lbaf->ds) > s->page_size) {
        error_setg(errp, "Namespace has unsupported block size (2^%d)",
                   lbaf->ds);
        goto out;
    }

    ret = true;
    s->blkshift = lbaf->ds;
out:
    qemu_vfio_dma_unmap(s->vfio, id);
    return ret;
}
static bool nvme_poll_queue(NVMeQueuePair *q)
{
    bool progress = false;

    const size_t cqe_offset = q->cq.head * NVME_CQ_ENTRY_BYTES;
    NvmeCqe *cqe = (NvmeCqe *)&q->cq.queue[cqe_offset];

    trace_nvme_poll_queue(q->s, q->index);
    /*
     * Do an early check for completions. q->lock isn't needed because
     * nvme_process_completion() only runs in the event loop thread and
     * cannot race with itself.
     */
    if ((le16_to_cpu(cqe->status) & 0x1) == q->cq_phase) {
        return false;
    }

    qemu_mutex_lock(&q->lock);
    while (nvme_process_completion(q)) {
        /* Keep processing */
        progress = true;
    }
    qemu_mutex_unlock(&q->lock);

    return progress;
}
static bool nvme_poll_queues(BDRVNVMeState *s)
{
    bool progress = false;
    int i;

    for (i = 0; i < s->queue_count; i++) {
        if (nvme_poll_queue(s->queues[i])) {
            progress = true;
        }
    }
    return progress;
}
static void nvme_handle_event(EventNotifier *n)
{
    BDRVNVMeState *s = container_of(n, BDRVNVMeState,
                                    irq_notifier[MSIX_SHARED_IRQ_IDX]);

    trace_nvme_handle_event(s);
    event_notifier_test_and_clear(n);
    nvme_poll_queues(s);
}
static bool nvme_add_io_queue(BlockDriverState *bs, Error **errp)
{
    BDRVNVMeState *s = bs->opaque;
    unsigned n = s->queue_count;
    NVMeQueuePair *q;
    NvmeCmd cmd;
    unsigned queue_size = NVME_QUEUE_SIZE;

    assert(n <= UINT16_MAX);
    q = nvme_create_queue_pair(s, bdrv_get_aio_context(bs),
                               n, queue_size, errp);
    if (!q) {
        return false;
    }
    cmd = (NvmeCmd) {
        .opcode = NVME_ADM_CMD_CREATE_CQ,
        .dptr.prp1 = cpu_to_le64(q->cq.iova),
        .cdw10 = cpu_to_le32(((queue_size - 1) << 16) | n),
        .cdw11 = cpu_to_le32(NVME_CQ_IEN | NVME_CQ_PC),
    };
    if (nvme_admin_cmd_sync(bs, &cmd)) {
        error_setg(errp, "Failed to create CQ io queue [%u]", n);
        goto out_error;
    }
    cmd = (NvmeCmd) {
        .opcode = NVME_ADM_CMD_CREATE_SQ,
        .dptr.prp1 = cpu_to_le64(q->sq.iova),
        .cdw10 = cpu_to_le32(((queue_size - 1) << 16) | n),
        .cdw11 = cpu_to_le32(NVME_SQ_PC | (n << 16)),
    };
    if (nvme_admin_cmd_sync(bs, &cmd)) {
        error_setg(errp, "Failed to create SQ io queue [%u]", n);
        goto out_error;
    }
    s->queues = g_renew(NVMeQueuePair *, s->queues, n + 1);
    s->queues[n] = q;
    s->queue_count++;
    return true;
out_error:
    nvme_free_queue_pair(q);
    return false;
}
static bool nvme_poll_cb(void *opaque)
{
    EventNotifier *e = opaque;
    BDRVNVMeState *s = container_of(e, BDRVNVMeState,
                                    irq_notifier[MSIX_SHARED_IRQ_IDX]);

    return nvme_poll_queues(s);
}
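
/*
 * Controller bring-up follows the initialization sequence in the NVMe spec:
 * clear CC.EN and wait for CSTS.RDY = 0, program AQA/ASQ/ACQ with the admin
 * queue, set CC.EN = 1 and wait for CSTS.RDY = 1, then create the I/O queue
 * pair with admin commands.
 */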
static int nvme_init(BlockDriverState *bs, const char *device, int namespace,
                     Error **errp)
{
    BDRVNVMeState *s = bs->opaque;
    NVMeQueuePair *q;
    AioContext *aio_context = bdrv_get_aio_context(bs);
    int ret;
    uint64_t cap;
    uint32_t ver;
    uint64_t timeout_ms;
    uint64_t deadline, now;
    volatile NvmeBar *regs = NULL;

    qemu_co_mutex_init(&s->dma_map_lock);
    qemu_co_queue_init(&s->dma_flush_queue);
    s->device = g_strdup(device);
    s->nsid = namespace;
    s->aio_context = bdrv_get_aio_context(bs);
    ret = event_notifier_init(&s->irq_notifier[MSIX_SHARED_IRQ_IDX], 0);
    if (ret) {
        error_setg(errp, "Failed to init event notifier");
        return ret;
    }

    s->vfio = qemu_vfio_open_pci(device, errp);
    if (!s->vfio) {
        ret = -EINVAL;
        goto out;
    }

    regs = qemu_vfio_pci_map_bar(s->vfio, 0, 0, sizeof(NvmeBar),
                                 PROT_READ | PROT_WRITE, errp);
    if (!regs) {
        ret = -EINVAL;
        goto out;
    }
    /* Perform initialize sequence as described in NVMe spec "7.6.1
     * Initialization". */

    cap = le64_to_cpu(regs->cap);
    trace_nvme_controller_capability_raw(cap);
    trace_nvme_controller_capability("Maximum Queue Entries Supported",
                                     1 + NVME_CAP_MQES(cap));
    trace_nvme_controller_capability("Contiguous Queues Required",
                                     NVME_CAP_CQR(cap));
    trace_nvme_controller_capability("Doorbell Stride",
                                     1 << (2 + NVME_CAP_DSTRD(cap)));
    trace_nvme_controller_capability("Subsystem Reset Supported",
                                     NVME_CAP_NSSRS(cap));
    trace_nvme_controller_capability("Memory Page Size Minimum",
                                     1 << (12 + NVME_CAP_MPSMIN(cap)));
    trace_nvme_controller_capability("Memory Page Size Maximum",
                                     1 << (12 + NVME_CAP_MPSMAX(cap)));
    if (!NVME_CAP_CSS(cap)) {
        error_setg(errp, "Device doesn't support NVMe command set");
        ret = -EINVAL;
        goto out;
    }

    s->page_size = 1u << (12 + NVME_CAP_MPSMIN(cap));
    s->doorbell_scale = (4 << NVME_CAP_DSTRD(cap)) / sizeof(uint32_t);
    bs->bl.opt_mem_alignment = s->page_size;
    bs->bl.request_alignment = s->page_size;
    timeout_ms = MIN(500 * NVME_CAP_TO(cap), 30000);

    ver = le32_to_cpu(regs->vs);
    trace_nvme_controller_spec_version(extract32(ver, 16, 16),
                                       extract32(ver, 8, 8),
                                       extract32(ver, 0, 8));

    /* Reset device to get a clean state. */
    regs->cc = cpu_to_le32(le32_to_cpu(regs->cc) & 0xFE);
    /* Wait for CSTS.RDY = 0. */
    deadline = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + timeout_ms * SCALE_MS;
    while (NVME_CSTS_RDY(le32_to_cpu(regs->csts))) {
        if (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) > deadline) {
            error_setg(errp, "Timeout while waiting for device to reset (%"
                             PRId64 " ms)", timeout_ms);
            ret = -ETIMEDOUT;
            goto out;
        }
    }

    s->bar0_wo_map = qemu_vfio_pci_map_bar(s->vfio, 0, 0,
                                           sizeof(NvmeBar) + NVME_DOORBELL_SIZE,
                                           PROT_WRITE, errp);
    s->doorbells = (void *)((uintptr_t)s->bar0_wo_map + sizeof(NvmeBar));
    if (!s->doorbells) {
        ret = -EINVAL;
        goto out;
    }

    /* Set up admin queue. */
    s->queues = g_new(NVMeQueuePair *, 1);
    q = nvme_create_queue_pair(s, aio_context, 0, NVME_QUEUE_SIZE, errp);
    if (!q) {
        ret = -EINVAL;
        goto out;
    }
    s->queues[INDEX_ADMIN] = q;
    s->queue_count = 1;
    QEMU_BUILD_BUG_ON((NVME_QUEUE_SIZE - 1) & 0xF000);
    regs->aqa = cpu_to_le32(((NVME_QUEUE_SIZE - 1) << AQA_ACQS_SHIFT) |
                            ((NVME_QUEUE_SIZE - 1) << AQA_ASQS_SHIFT));
    regs->asq = cpu_to_le64(q->sq.iova);
    regs->acq = cpu_to_le64(q->cq.iova);

    /* After setting up all control registers we can enable device now. */
    regs->cc = cpu_to_le32((ctz32(NVME_CQ_ENTRY_BYTES) << CC_IOCQES_SHIFT) |
                           (ctz32(NVME_SQ_ENTRY_BYTES) << CC_IOSQES_SHIFT) |
                           CC_EN_MASK);
    /* Wait for CSTS.RDY = 1. */
    now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    deadline = now + timeout_ms * SCALE_MS;
    while (!NVME_CSTS_RDY(le32_to_cpu(regs->csts))) {
        if (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) > deadline) {
            error_setg(errp, "Timeout while waiting for device to start (%"
                             PRId64 " ms)", timeout_ms);
            ret = -ETIMEDOUT;
            goto out;
        }
    }

    ret = qemu_vfio_pci_init_irq(s->vfio, s->irq_notifier,
                                 VFIO_PCI_MSIX_IRQ_INDEX, errp);
    if (ret) {
        goto out;
    }
    aio_set_event_notifier(bdrv_get_aio_context(bs),
                           &s->irq_notifier[MSIX_SHARED_IRQ_IDX],
                           false, nvme_handle_event, nvme_poll_cb);

    if (!nvme_identify(bs, namespace, errp)) {
        ret = -EIO;
        goto out;
    }

    /* Set up command queues. */
    if (!nvme_add_io_queue(bs, errp)) {
        ret = -EIO;
    }
out:
    if (regs) {
        qemu_vfio_pci_unmap_bar(s->vfio, 0, (void *)regs, 0, sizeof(NvmeBar));
    }

    /* Cleaning up is done in nvme_file_open() upon error. */
    return ret;
}
/* Parse a filename in the format of nvme://XXXX:XX:XX.X/X. Example:
 *
 *     nvme://0000:44:00.0/1
 *
 * where the "nvme://" is a fixed form of the protocol prefix, the middle part
 * is the PCI address, and the last part is the namespace number starting from
 * 1 according to the NVMe spec. */
static void nvme_parse_filename(const char *filename, QDict *options,
                                Error **errp)
{
    int pref = strlen("nvme://");

    if (strlen(filename) > pref && !strncmp(filename, "nvme://", pref)) {
        const char *tmp = filename + pref;
        char *device;
        const char *namespace;
        unsigned long ns;
        const char *slash = strchr(tmp, '/');
        if (!slash) {
            qdict_put_str(options, NVME_BLOCK_OPT_DEVICE, tmp);
            return;
        }
        device = g_strndup(tmp, slash - tmp);
        qdict_put_str(options, NVME_BLOCK_OPT_DEVICE, device);
        g_free(device);
        namespace = slash + 1;
        if (*namespace && qemu_strtoul(namespace, NULL, 10, &ns)) {
            error_setg(errp, "Invalid namespace '%s', positive number expected",
                       namespace);
            return;
        }
        qdict_put_str(options, NVME_BLOCK_OPT_NAMESPACE,
                      *namespace ? namespace : "1");
    }
}
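
/*
 * The equivalent -blockdev syntax (PCI address and namespace here are just
 * illustrative values) would be:
 *
 *     -blockdev driver=nvme,node-name=nvme0,device=0000:44:00.0,namespace=1
 */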
static int nvme_enable_disable_write_cache(BlockDriverState *bs, bool enable,
                                           Error **errp)
{
    int ret;
    BDRVNVMeState *s = bs->opaque;
    NvmeCmd cmd = {
        .opcode = NVME_ADM_CMD_SET_FEATURES,
        .nsid = cpu_to_le32(s->nsid),
        .cdw10 = cpu_to_le32(0x06),
        .cdw11 = cpu_to_le32(enable ? 0x01 : 0x00),
    };

    ret = nvme_admin_cmd_sync(bs, &cmd);
    if (ret) {
        error_setg(errp, "Failed to configure NVMe write cache");
    }
    return ret;
}
static void nvme_close(BlockDriverState *bs)
{
    BDRVNVMeState *s = bs->opaque;

    for (unsigned i = 0; i < s->queue_count; ++i) {
        nvme_free_queue_pair(s->queues[i]);
    }
    g_free(s->queues);
    aio_set_event_notifier(bdrv_get_aio_context(bs),
                           &s->irq_notifier[MSIX_SHARED_IRQ_IDX],
                           false, NULL, NULL);
    event_notifier_cleanup(&s->irq_notifier[MSIX_SHARED_IRQ_IDX]);
    qemu_vfio_pci_unmap_bar(s->vfio, 0, s->bar0_wo_map,
                            0, sizeof(NvmeBar) + NVME_DOORBELL_SIZE);
    qemu_vfio_close(s->vfio);

    g_free(s->device);
}
static int nvme_file_open(BlockDriverState *bs, QDict *options, int flags,
                          Error **errp)
{
    const char *device;
    QemuOpts *opts;
    int namespace;
    int ret;
    BDRVNVMeState *s = bs->opaque;

    bs->supported_write_flags = BDRV_REQ_FUA;

    opts = qemu_opts_create(&runtime_opts, NULL, 0, &error_abort);
    qemu_opts_absorb_qdict(opts, options, &error_abort);
    device = qemu_opt_get(opts, NVME_BLOCK_OPT_DEVICE);
    if (!device) {
        error_setg(errp, "'" NVME_BLOCK_OPT_DEVICE "' option is required");
        qemu_opts_del(opts);
        return -EINVAL;
    }

    namespace = qemu_opt_get_number(opts, NVME_BLOCK_OPT_NAMESPACE, 1);
    ret = nvme_init(bs, device, namespace, errp);
    qemu_opts_del(opts);
    if (ret) {
        goto fail;
    }
    if (flags & BDRV_O_NOCACHE) {
        if (!s->write_cache_supported) {
            error_setg(errp,
                       "NVMe controller doesn't support write cache configuration");
            ret = -EINVAL;
            goto fail;
        }
        ret = nvme_enable_disable_write_cache(bs, !(flags & BDRV_O_NOCACHE),
                                              errp);
        if (ret) {
            goto fail;
        }
    }
    return 0;
fail:
    nvme_close(bs);
    return ret;
}
static int64_t nvme_getlength(BlockDriverState *bs)
{
    BDRVNVMeState *s = bs->opaque;
    return s->nsze << s->blkshift;
}
static uint32_t nvme_get_blocksize(BlockDriverState *bs)
{
    BDRVNVMeState *s = bs->opaque;
    assert(s->blkshift >= BDRV_SECTOR_BITS && s->blkshift <= 12);
    return UINT32_C(1) << s->blkshift;
}
static int nvme_probe_blocksizes(BlockDriverState *bs, BlockSizes *bsz)
{
    uint32_t blocksize = nvme_get_blocksize(bs);
    bsz->phys = blocksize;
    bsz->log = blocksize;
    return 0;
}
/* Called with s->dma_map_lock */
static coroutine_fn int nvme_cmd_unmap_qiov(BlockDriverState *bs,
                                            QEMUIOVector *qiov)
{
    int r = 0;
    BDRVNVMeState *s = bs->opaque;

    s->dma_map_count -= qiov->size;
    if (!s->dma_map_count && !qemu_co_queue_empty(&s->dma_flush_queue)) {
        r = qemu_vfio_dma_reset_temporary(s->vfio);
        if (!r) {
            qemu_co_queue_restart_all(&s->dma_flush_queue);
        }
    }
    return r;
}
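
/*
 * Per the NVMe spec, PRP1 holds the first data page; PRP2 holds the second
 * page when the transfer fits in exactly two entries, and otherwise points
 * at a PRP list. The list lives in the request's preallocated prp_list_page,
 * whose first entry is already consumed by PRP1, hence the
 * "+ sizeof(uint64_t)" when pointing PRP2 at it.
 */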
/* Called with s->dma_map_lock */
static coroutine_fn int nvme_cmd_map_qiov(BlockDriverState *bs, NvmeCmd *cmd,
                                          NVMeRequest *req, QEMUIOVector *qiov)
{
    BDRVNVMeState *s = bs->opaque;
    uint64_t *pagelist = req->prp_list_page;
    int i, j, r;
    int entries = 0;
    Error *local_err = NULL, **errp = NULL;

    assert(QEMU_IS_ALIGNED(qiov->size, s->page_size));
    assert(qiov->size / s->page_size <= s->page_size / sizeof(uint64_t));
    for (i = 0; i < qiov->niov; ++i) {
        bool retry = true;
        uint64_t iova;
        size_t len = QEMU_ALIGN_UP(qiov->iov[i].iov_len,
                                   qemu_real_host_page_size);
try_map:
        r = qemu_vfio_dma_map(s->vfio,
                              qiov->iov[i].iov_base,
                              len, true, &iova, errp);
        if (r == -ENOSPC) {
            /*
             * In addition to the -ENOMEM error, the VFIO_IOMMU_MAP_DMA
             * ioctl returns -ENOSPC to signal the user exhausted the DMA
             * mappings available for a container since Linux kernel commit
             * 492855939bdb ("vfio/type1: Limit DMA mappings per container",
             * April 2019, see CVE-2019-3882).
             *
             * This block driver already handles this error path by checking
             * for the -ENOMEM error, so we directly replace -ENOSPC by
             * -ENOMEM. Beside, -ENOSPC has a specific meaning for blockdev
             * coroutines: it triggers BLOCKDEV_ON_ERROR_ENOSPC and
             * BLOCK_ERROR_ACTION_STOP which stops the VM, asking the operator
             * to add more storage to the blockdev. Not something we can do
             * easily with an IOMMU :)
             */
            r = -ENOMEM;
        }
        if (r == -ENOMEM && retry) {
            /*
             * We exhausted the DMA mappings available for our container:
             * recycle the volatile IOVA mappings.
             */
            retry = false;
            trace_nvme_dma_flush_queue_wait(s);
            if (s->dma_map_count) {
                trace_nvme_dma_map_flush(s);
                qemu_co_queue_wait(&s->dma_flush_queue, &s->dma_map_lock);
            } else {
                r = qemu_vfio_dma_reset_temporary(s->vfio);
                if (r) {
                    goto fail;
                }
            }
            errp = &local_err;

            goto try_map;
        }
        if (r) {
            goto fail;
        }

        for (j = 0; j < qiov->iov[i].iov_len / s->page_size; j++) {
            pagelist[entries++] = cpu_to_le64(iova + j * s->page_size);
        }
        trace_nvme_cmd_map_qiov_iov(s, i, qiov->iov[i].iov_base,
                                    qiov->iov[i].iov_len / s->page_size);
    }

    s->dma_map_count += qiov->size;

    assert(entries <= s->page_size / sizeof(uint64_t));
    switch (entries) {
    case 0:
        abort();
    case 1:
        cmd->dptr.prp1 = pagelist[0];
        cmd->dptr.prp2 = 0;
        break;
    case 2:
        cmd->dptr.prp1 = pagelist[0];
        cmd->dptr.prp2 = pagelist[1];
        break;
    default:
        cmd->dptr.prp1 = pagelist[0];
        cmd->dptr.prp2 = cpu_to_le64(req->prp_list_iova + sizeof(uint64_t));
        break;
    }
    trace_nvme_cmd_map_qiov(s, cmd, req, qiov, entries);
    for (i = 0; i < entries; ++i) {
        trace_nvme_cmd_map_qiov_pages(s, i, pagelist[i]);
    }
    return 0;
fail:
    /* No need to unmap [0 - i) iovs even if we've failed, since we don't
     * increment s->dma_map_count. This is okay for fixed mapping memory areas
     * because they are already mapped before calling this function; for
     * temporary mappings, a later nvme_cmd_(un)map_qiov will reclaim by
     * calling qemu_vfio_dma_reset_temporary when necessary. */
    if (local_err) {
        error_reportf_err(local_err, "Cannot map buffer for DMA: ");
    }
    return r;
}
typedef struct {
    Coroutine *co;
    int ret;
    AioContext *ctx;
} NVMeCoData;

static void nvme_rw_cb_bh(void *opaque)
{
    NVMeCoData *data = opaque;
    qemu_coroutine_enter(data->co);
}

static void nvme_rw_cb(void *opaque, int ret)
{
    NVMeCoData *data = opaque;
    data->ret = ret;
    if (!data->co) {
        /* The rw coroutine hasn't yielded, don't try to enter. */
        return;
    }
    replay_bh_schedule_oneshot_event(data->ctx, nvme_rw_cb_bh, data);
}
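
/*
 * Read/write commands address the namespace in logical blocks: the 64-bit
 * starting LBA (offset >> blkshift) is split across CDW10 (low) and CDW11
 * (high), and CDW12 carries the 0-based block count in its low 16 bits plus
 * the FUA flag in bit 30.
 */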
static coroutine_fn int nvme_co_prw_aligned(BlockDriverState *bs,
                                            uint64_t offset, uint64_t bytes,
                                            QEMUIOVector *qiov, bool is_write,
                                            int flags)
{
    int r;
    BDRVNVMeState *s = bs->opaque;
    NVMeQueuePair *ioq = s->queues[INDEX_IO(0)];
    NVMeRequest *req;

    uint32_t cdw12 = (((bytes >> s->blkshift) - 1) & 0xFFFF) |
                       (flags & BDRV_REQ_FUA ? 1 << 30 : 0);
    NvmeCmd cmd = {
        .opcode = is_write ? NVME_CMD_WRITE : NVME_CMD_READ,
        .nsid = cpu_to_le32(s->nsid),
        .cdw10 = cpu_to_le32((offset >> s->blkshift) & 0xFFFFFFFF),
        .cdw11 = cpu_to_le32(((offset >> s->blkshift) >> 32) & 0xFFFFFFFF),
        .cdw12 = cpu_to_le32(cdw12),
    };
    NVMeCoData data = {
        .ctx = bdrv_get_aio_context(bs),
        .ret = -EINPROGRESS,
    };

    trace_nvme_prw_aligned(s, is_write, offset, bytes, flags, qiov->niov);
    assert(s->queue_count > 1);
    req = nvme_get_free_req(ioq);
    assert(req);

    qemu_co_mutex_lock(&s->dma_map_lock);
    r = nvme_cmd_map_qiov(bs, &cmd, req, qiov);
    qemu_co_mutex_unlock(&s->dma_map_lock);
    if (r) {
        nvme_put_free_req_and_wake(ioq, req);
        return r;
    }
    nvme_submit_command(ioq, req, &cmd, nvme_rw_cb, &data);

    data.co = qemu_coroutine_self();
    while (data.ret == -EINPROGRESS) {
        qemu_coroutine_yield();
    }

    qemu_co_mutex_lock(&s->dma_map_lock);
    r = nvme_cmd_unmap_qiov(bs, qiov);
    qemu_co_mutex_unlock(&s->dma_map_lock);
    if (r) {
        return r;
    }

    trace_nvme_rw_done(s, is_write, offset, bytes, data.ret);
    return data.ret;
}
static inline bool nvme_qiov_aligned(BlockDriverState *bs,
                                     const QEMUIOVector *qiov)
{
    int i;
    BDRVNVMeState *s = bs->opaque;

    for (i = 0; i < qiov->niov; ++i) {
        if (!QEMU_PTR_IS_ALIGNED(qiov->iov[i].iov_base,
                                 qemu_real_host_page_size) ||
            !QEMU_IS_ALIGNED(qiov->iov[i].iov_len, qemu_real_host_page_size)) {
            trace_nvme_qiov_unaligned(qiov, i, qiov->iov[i].iov_base,
                                      qiov->iov[i].iov_len, s->page_size);
            return false;
        }
    }
    return true;
}
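
/*
 * Buffers that are not aligned to the host page size cannot be DMA-mapped
 * directly, so such requests are bounced through a temporary aligned buffer:
 * writes copy into it before submission, reads copy out of it after
 * completion.
 */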
static int nvme_co_prw(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
                       QEMUIOVector *qiov, bool is_write, int flags)
{
    BDRVNVMeState *s = bs->opaque;
    int r;
    QEMU_AUTO_VFREE uint8_t *buf = NULL;
    QEMUIOVector local_qiov;
    size_t len = QEMU_ALIGN_UP(bytes, qemu_real_host_page_size);
    assert(QEMU_IS_ALIGNED(offset, s->page_size));
    assert(QEMU_IS_ALIGNED(bytes, s->page_size));
    assert(bytes <= s->max_transfer);
    if (nvme_qiov_aligned(bs, qiov)) {
        s->stats.aligned_accesses++;
        return nvme_co_prw_aligned(bs, offset, bytes, qiov, is_write, flags);
    }
    s->stats.unaligned_accesses++;
    trace_nvme_prw_buffered(s, offset, bytes, qiov->niov, is_write);
    buf = qemu_try_memalign(qemu_real_host_page_size, len);
    if (!buf) {
        return -ENOMEM;
    }
    qemu_iovec_init(&local_qiov, 1);
    if (is_write) {
        qemu_iovec_to_buf(qiov, 0, buf, bytes);
    }
    qemu_iovec_add(&local_qiov, buf, bytes);
    r = nvme_co_prw_aligned(bs, offset, bytes, &local_qiov, is_write, flags);
    qemu_iovec_destroy(&local_qiov);
    if (!r && !is_write) {
        qemu_iovec_from_buf(qiov, 0, buf, bytes);
    }
    return r;
}
static coroutine_fn int nvme_co_preadv(BlockDriverState *bs,
                                       int64_t offset, int64_t bytes,
                                       QEMUIOVector *qiov,
                                       BdrvRequestFlags flags)
{
    return nvme_co_prw(bs, offset, bytes, qiov, false, flags);
}

static coroutine_fn int nvme_co_pwritev(BlockDriverState *bs,
                                        int64_t offset, int64_t bytes,
                                        QEMUIOVector *qiov,
                                        BdrvRequestFlags flags)
{
    return nvme_co_prw(bs, offset, bytes, qiov, true, flags);
}
static coroutine_fn int nvme_co_flush(BlockDriverState *bs)
{
    BDRVNVMeState *s = bs->opaque;
    NVMeQueuePair *ioq = s->queues[INDEX_IO(0)];
    NVMeRequest *req;
    NvmeCmd cmd = {
        .opcode = NVME_CMD_FLUSH,
        .nsid = cpu_to_le32(s->nsid),
    };
    NVMeCoData data = {
        .ctx = bdrv_get_aio_context(bs),
        .ret = -EINPROGRESS,
    };

    assert(s->queue_count > 1);
    req = nvme_get_free_req(ioq);
    assert(req);
    nvme_submit_command(ioq, req, &cmd, nvme_rw_cb, &data);

    data.co = qemu_coroutine_self();
    if (data.ret == -EINPROGRESS) {
        qemu_coroutine_yield();
    }

    return data.ret;
}
static coroutine_fn int nvme_co_pwrite_zeroes(BlockDriverState *bs,
                                              int64_t offset, int64_t bytes,
                                              BdrvRequestFlags flags)
{
    BDRVNVMeState *s = bs->opaque;
    NVMeQueuePair *ioq = s->queues[INDEX_IO(0)];
    NVMeRequest *req;
    uint32_t cdw12;

    if (!s->supports_write_zeroes) {
        return -ENOTSUP;
    }

    cdw12 = ((bytes >> s->blkshift) - 1) & 0xFFFF;
    /*
     * We should not lose information. pwrite_zeroes_alignment and
     * max_pwrite_zeroes guarantees it.
     */
    assert(((cdw12 + 1) << s->blkshift) == bytes);

    NvmeCmd cmd = {
        .opcode = NVME_CMD_WRITE_ZEROES,
        .nsid = cpu_to_le32(s->nsid),
        .cdw10 = cpu_to_le32((offset >> s->blkshift) & 0xFFFFFFFF),
        .cdw11 = cpu_to_le32(((offset >> s->blkshift) >> 32) & 0xFFFFFFFF),
    };

    NVMeCoData data = {
        .ctx = bdrv_get_aio_context(bs),
        .ret = -EINPROGRESS,
    };

    if (flags & BDRV_REQ_MAY_UNMAP) {
        cdw12 |= (1 << 25);
    }

    if (flags & BDRV_REQ_FUA) {
        cdw12 |= (1 << 30);
    }

    cmd.cdw12 = cpu_to_le32(cdw12);

    trace_nvme_write_zeroes(s, offset, bytes, flags);
    assert(s->queue_count > 1);
    req = nvme_get_free_req(ioq);
    assert(req);

    nvme_submit_command(ioq, req, &cmd, nvme_rw_cb, &data);

    data.co = qemu_coroutine_self();
    while (data.ret == -EINPROGRESS) {
        qemu_coroutine_yield();
    }

    trace_nvme_rw_done(s, true, offset, bytes, data.ret);
    return data.ret;
}
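
/*
 * Discard maps to a Dataset Management command carrying a single deallocate
 * range; the range descriptor itself is DMA-mapped through the same PRP
 * machinery as ordinary guest data.
 */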
static int coroutine_fn nvme_co_pdiscard(BlockDriverState *bs,
                                         int64_t offset,
                                         int64_t bytes)
{
    BDRVNVMeState *s = bs->opaque;
    NVMeQueuePair *ioq = s->queues[INDEX_IO(0)];
    NVMeRequest *req;
    int ret;
    QEMU_AUTO_VFREE NvmeDsmRange *buf = NULL;
    QEMUIOVector local_qiov;

    NvmeCmd cmd = {
        .opcode = NVME_CMD_DSM,
        .nsid = cpu_to_le32(s->nsid),
        .cdw10 = cpu_to_le32(0), /* number of ranges - 0 based */
        .cdw11 = cpu_to_le32(1 << 2), /* deallocate bit */
    };

    NVMeCoData data = {
        .ctx = bdrv_get_aio_context(bs),
        .ret = -EINPROGRESS,
    };

    if (!s->supports_discard) {
        return -ENOTSUP;
    }

    assert(s->queue_count > 1);

    /*
     * Filling the @buf requires @offset and @bytes to satisfy restrictions
     * defined in nvme_refresh_limits().
     */
    assert(QEMU_IS_ALIGNED(bytes, 1UL << s->blkshift));
    assert(QEMU_IS_ALIGNED(offset, 1UL << s->blkshift));
    assert((bytes >> s->blkshift) <= UINT32_MAX);

    buf = qemu_try_memalign(s->page_size, s->page_size);
    if (!buf) {
        return -ENOMEM;
    }
    memset(buf, 0, s->page_size);
    buf->nlb = cpu_to_le32(bytes >> s->blkshift);
    buf->slba = cpu_to_le64(offset >> s->blkshift);
    buf->cattr = 0;

    qemu_iovec_init(&local_qiov, 1);
    qemu_iovec_add(&local_qiov, buf, 4096);

    req = nvme_get_free_req(ioq);
    assert(req);

    qemu_co_mutex_lock(&s->dma_map_lock);
    ret = nvme_cmd_map_qiov(bs, &cmd, req, &local_qiov);
    qemu_co_mutex_unlock(&s->dma_map_lock);

    if (ret) {
        nvme_put_free_req_and_wake(ioq, req);
        goto out;
    }

    trace_nvme_dsm(s, offset, bytes);

    nvme_submit_command(ioq, req, &cmd, nvme_rw_cb, &data);

    data.co = qemu_coroutine_self();
    while (data.ret == -EINPROGRESS) {
        qemu_coroutine_yield();
    }

    qemu_co_mutex_lock(&s->dma_map_lock);
    ret = nvme_cmd_unmap_qiov(bs, &local_qiov);
    qemu_co_mutex_unlock(&s->dma_map_lock);

    if (ret) {
        goto out;
    }

    ret = data.ret;
    trace_nvme_dsm_done(s, offset, bytes, ret);
out:
    qemu_iovec_destroy(&local_qiov);
    return ret;
}
static int coroutine_fn nvme_co_truncate(BlockDriverState *bs, int64_t offset,
                                         bool exact, PreallocMode prealloc,
                                         BdrvRequestFlags flags, Error **errp)
{
    int64_t cur_length;

    if (prealloc != PREALLOC_MODE_OFF) {
        error_setg(errp, "Unsupported preallocation mode '%s'",
                   PreallocMode_str(prealloc));
        return -ENOTSUP;
    }

    cur_length = nvme_getlength(bs);
    if (offset != cur_length && exact) {
        error_setg(errp, "Cannot resize NVMe devices");
        return -ENOTSUP;
    } else if (offset > cur_length) {
        error_setg(errp, "Cannot grow NVMe devices");
        return -EINVAL;
    }

    return 0;
}
static int nvme_reopen_prepare(BDRVReopenState *reopen_state,
                               BlockReopenQueue *queue, Error **errp)
{
    return 0;
}
static void nvme_refresh_filename(BlockDriverState *bs)
{
    BDRVNVMeState *s = bs->opaque;

    snprintf(bs->exact_filename, sizeof(bs->exact_filename), "nvme://%s/%i",
             s->device, s->nsid);
}
static void nvme_refresh_limits(BlockDriverState *bs, Error **errp)
{
    BDRVNVMeState *s = bs->opaque;

    bs->bl.opt_mem_alignment = s->page_size;
    bs->bl.request_alignment = s->page_size;
    bs->bl.max_transfer = s->max_transfer;

    /*
     * Look at nvme_co_pwrite_zeroes: after shift and decrement we should get
     * at most 0xFFFF
     */
    bs->bl.max_pwrite_zeroes = 1ULL << (s->blkshift + 16);
    bs->bl.pwrite_zeroes_alignment = MAX(bs->bl.request_alignment,
                                         1UL << s->blkshift);

    bs->bl.max_pdiscard = (uint64_t)UINT32_MAX << s->blkshift;
    bs->bl.pdiscard_alignment = MAX(bs->bl.request_alignment,
                                    1UL << s->blkshift);
}
static void nvme_detach_aio_context(BlockDriverState *bs)
{
    BDRVNVMeState *s = bs->opaque;

    for (unsigned i = 0; i < s->queue_count; i++) {
        NVMeQueuePair *q = s->queues[i];

        qemu_bh_delete(q->completion_bh);
        q->completion_bh = NULL;
    }

    aio_set_event_notifier(bdrv_get_aio_context(bs),
                           &s->irq_notifier[MSIX_SHARED_IRQ_IDX],
                           false, NULL, NULL);
}
static void nvme_attach_aio_context(BlockDriverState *bs,
                                    AioContext *new_context)
{
    BDRVNVMeState *s = bs->opaque;

    s->aio_context = new_context;
    aio_set_event_notifier(new_context, &s->irq_notifier[MSIX_SHARED_IRQ_IDX],
                           false, nvme_handle_event, nvme_poll_cb);

    for (unsigned i = 0; i < s->queue_count; i++) {
        NVMeQueuePair *q = s->queues[i];

        q->completion_bh =
            aio_bh_new(new_context, nvme_process_completion_bh, q);
    }
}
static void nvme_aio_plug(BlockDriverState *bs)
{
    BDRVNVMeState *s = bs->opaque;
    assert(!s->plugged);
    s->plugged = true;
}
static void nvme_aio_unplug(BlockDriverState *bs)
{
    BDRVNVMeState *s = bs->opaque;
    assert(s->plugged);
    s->plugged = false;
    for (unsigned i = INDEX_IO(0); i < s->queue_count; i++) {
        NVMeQueuePair *q = s->queues[i];
        qemu_mutex_lock(&q->lock);
        nvme_kick(q);
        nvme_process_completion(q);
        qemu_mutex_unlock(&q->lock);
    }
}
static void nvme_register_buf(BlockDriverState *bs, void *host, size_t size)
{
    int ret;
    Error *local_err = NULL;
    BDRVNVMeState *s = bs->opaque;

    ret = qemu_vfio_dma_map(s->vfio, host, size, false, NULL, &local_err);
    if (ret) {
        /* FIXME: we may run out of IOVA addresses after repeated
         * bdrv_register_buf/bdrv_unregister_buf, because nvme_vfio_dma_unmap
         * doesn't reclaim addresses for fixed mappings. */
        error_reportf_err(local_err, "nvme_register_buf failed: ");
    }
}
static void nvme_unregister_buf(BlockDriverState *bs, void *host)
{
    BDRVNVMeState *s = bs->opaque;

    qemu_vfio_dma_unmap(s->vfio, host);
}
static BlockStatsSpecific *nvme_get_specific_stats(BlockDriverState *bs)
{
    BlockStatsSpecific *stats = g_new(BlockStatsSpecific, 1);
    BDRVNVMeState *s = bs->opaque;

    stats->driver = BLOCKDEV_DRIVER_NVME;
    stats->u.nvme = (BlockStatsSpecificNvme) {
        .completion_errors = s->stats.completion_errors,
        .aligned_accesses = s->stats.aligned_accesses,
        .unaligned_accesses = s->stats.unaligned_accesses,
    };

    return stats;
}
static const char *const nvme_strong_runtime_opts[] = {
    NVME_BLOCK_OPT_DEVICE,
    NVME_BLOCK_OPT_NAMESPACE,

    NULL
};
static BlockDriver bdrv_nvme = {
    .format_name              = "nvme",
    .protocol_name            = "nvme",
    .instance_size            = sizeof(BDRVNVMeState),

    .bdrv_co_create_opts      = bdrv_co_create_opts_simple,
    .create_opts              = &bdrv_create_opts_simple,

    .bdrv_parse_filename      = nvme_parse_filename,
    .bdrv_file_open           = nvme_file_open,
    .bdrv_close               = nvme_close,
    .bdrv_getlength           = nvme_getlength,
    .bdrv_probe_blocksizes    = nvme_probe_blocksizes,
    .bdrv_co_truncate         = nvme_co_truncate,

    .bdrv_co_preadv           = nvme_co_preadv,
    .bdrv_co_pwritev          = nvme_co_pwritev,

    .bdrv_co_pwrite_zeroes    = nvme_co_pwrite_zeroes,
    .bdrv_co_pdiscard         = nvme_co_pdiscard,

    .bdrv_co_flush_to_disk    = nvme_co_flush,
    .bdrv_reopen_prepare      = nvme_reopen_prepare,

    .bdrv_refresh_filename    = nvme_refresh_filename,
    .bdrv_refresh_limits      = nvme_refresh_limits,
    .strong_runtime_opts      = nvme_strong_runtime_opts,
    .bdrv_get_specific_stats  = nvme_get_specific_stats,

    .bdrv_detach_aio_context  = nvme_detach_aio_context,
    .bdrv_attach_aio_context  = nvme_attach_aio_context,

    .bdrv_io_plug             = nvme_aio_plug,
    .bdrv_io_unplug           = nvme_aio_unplug,

    .bdrv_register_buf        = nvme_register_buf,
    .bdrv_unregister_buf      = nvme_unregister_buf,
};
static void bdrv_nvme_init(void)
{
    bdrv_register(&bdrv_nvme);
}

block_init(bdrv_nvme_init);