/*
 * NVMe block driver based on vfio
 *
 * Copyright 2016 - 2018 Red Hat, Inc.
 *
 * Authors:
 *   Fam Zheng <famz@redhat.com>
 *   Paolo Bonzini <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include <linux/vfio.h>
#include "qapi/error.h"
#include "qapi/qmp/qdict.h"
#include "qapi/qmp/qstring.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/module.h"
#include "qemu/cutils.h"
#include "qemu/option.h"
#include "qemu/vfio-helpers.h"
#include "block/block_int.h"
#include "sysemu/replay.h"
#include "trace.h"

#include "block/nvme.h"

#define NVME_SQ_ENTRY_BYTES 64
#define NVME_CQ_ENTRY_BYTES 16
#define NVME_QUEUE_SIZE 128
#define NVME_DOORBELL_SIZE 4096

/*
 * We have to leave one slot empty as that is the full queue case where
 * head == tail + 1.
 */
#define NVME_NUM_REQS (NVME_QUEUE_SIZE - 1)

typedef struct BDRVNVMeState BDRVNVMeState;

/* Same index is used for queues and IRQs */
#define INDEX_ADMIN     0
#define INDEX_IO(n)     (1 + n)

/* This driver shares a single MSIX IRQ for the admin and I/O queues */
enum {
    MSIX_SHARED_IRQ_IDX = 0,
    MSIX_IRQ_COUNT = 1
};

typedef struct {
    int32_t  head, tail;
    uint8_t  *queue;
    uint64_t iova;
    /* Hardware MMIO register */
    volatile uint32_t *doorbell;
} NVMeQueue;

typedef struct {
    BlockCompletionFunc *cb;
    void *opaque;
    int cid;
    void *prp_list_page;
    uint64_t prp_list_iova;
    int free_req_next; /* q->reqs[] index of next free req */
} NVMeRequest;

typedef struct {
    QemuMutex   lock;

    /* Read from I/O code path, initialized under BQL */
    BDRVNVMeState   *s;
    int             index;

    /* Fields protected by BQL */
    uint8_t     *prp_list_pages;

    /* Fields protected by @lock */
    CoQueue     free_req_queue;
    NVMeQueue   sq, cq;
    int         cq_phase;
    int         free_req_head;
    NVMeRequest reqs[NVME_NUM_REQS];
    int         need_kick;
    int         inflight;

    /* Thread-safe, no lock necessary */
    QEMUBH      *completion_bh;
} NVMeQueuePair;

struct BDRVNVMeState {
    AioContext *aio_context;
    QEMUVFIOState *vfio;
    /* Memory mapped registers */
    volatile struct {
        uint32_t sq_tail;
        uint32_t cq_head;
    } *doorbells;
    /* The submission/completion queue pairs.
     * [0]: admin queue.
     * [1..]: io queues.
     */
    NVMeQueuePair **queues;
    unsigned queue_count;
    size_t page_size;
    /* How many uint32_t elements does each doorbell entry take. */
    size_t doorbell_scale;
    bool write_cache_supported;
    EventNotifier irq_notifier[MSIX_IRQ_COUNT];

    uint64_t nsze; /* Namespace size reported by identify command */
    int nsid;      /* The namespace id to read/write data. */
    int blkshift;

    uint64_t max_transfer;
    bool plugged;

    bool supports_write_zeroes;
    bool supports_discard;

    CoMutex dma_map_lock;
    CoQueue dma_flush_queue;

    /* Total size of mapped qiov, accessed under dma_map_lock */
    int dma_map_count;

    /* PCI address (required for nvme_refresh_filename()) */
    char *device;

    struct {
        uint64_t completion_errors;
        uint64_t aligned_accesses;
        uint64_t unaligned_accesses;
    } stats;
};

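/*
 * Note on the doorbell layout (see nvme_create_queue_pair() and nvme_init()
 * below): the doorbell area is a separate write-only BAR mapping starting at
 * offset sizeof(NvmeBar), and queue pair idx uses the {sq_tail, cq_head} slot
 * at s->doorbells[idx * s->doorbell_scale]; doorbell_scale is derived from
 * the controller's CAP.DSTRD field.
 */
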
#define NVME_BLOCK_OPT_DEVICE "device"
#define NVME_BLOCK_OPT_NAMESPACE "namespace"

static void nvme_process_completion_bh(void *opaque);

static QemuOptsList runtime_opts = {
    .name = "nvme",
    .head = QTAILQ_HEAD_INITIALIZER(runtime_opts.head),
    .desc = {
        {
            .name = NVME_BLOCK_OPT_DEVICE,
            .type = QEMU_OPT_STRING,
            .help = "NVMe PCI device address",
        },
        {
            .name = NVME_BLOCK_OPT_NAMESPACE,
            .type = QEMU_OPT_NUMBER,
            .help = "NVMe namespace",
        },
        { /* end of list */ }
    },
};

/* Returns true on success, false on failure. */
static bool nvme_init_queue(BDRVNVMeState *s, NVMeQueue *q,
                            unsigned nentries, size_t entry_bytes, Error **errp)
{
    size_t bytes;
    int r;

    bytes = ROUND_UP(nentries * entry_bytes, s->page_size);
    q->head = q->tail = 0;
    q->queue = qemu_try_memalign(s->page_size, bytes);
    if (!q->queue) {
        error_setg(errp, "Cannot allocate queue");
        return false;
    }
    memset(q->queue, 0, bytes);
    r = qemu_vfio_dma_map(s->vfio, q->queue, bytes, false, &q->iova);
    if (r) {
        error_setg(errp, "Cannot map queue");
        return false;
    }
    return true;
}

static void nvme_free_queue_pair(NVMeQueuePair *q)
{
    trace_nvme_free_queue_pair(q->index, q);
    if (q->completion_bh) {
        qemu_bh_delete(q->completion_bh);
    }
    qemu_vfree(q->prp_list_pages);
    qemu_vfree(q->sq.queue);
    qemu_vfree(q->cq.queue);
    qemu_mutex_destroy(&q->lock);
    g_free(q);
}

static void nvme_free_req_queue_cb(void *opaque)
{
    NVMeQueuePair *q = opaque;

    qemu_mutex_lock(&q->lock);
    while (qemu_co_enter_next(&q->free_req_queue, &q->lock)) {
        /* Retry all pending requests */
    }
    qemu_mutex_unlock(&q->lock);
}

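/*
 * The request freelist is threaded through NVMeRequest.free_req_next:
 * q->free_req_head holds the q->reqs[] index of the first free slot (-1 when
 * the queue pair is exhausted), and each free element stores the index of
 * the next one, so allocation and release are O(1) under q->lock.
 */
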
static NVMeQueuePair *nvme_create_queue_pair(BDRVNVMeState *s,
                                             AioContext *aio_context,
                                             unsigned idx, size_t size,
                                             Error **errp)
{
    int i, r;
    NVMeQueuePair *q;
    uint64_t prp_list_iova;

    q = g_try_new0(NVMeQueuePair, 1);
    if (!q) {
        error_setg(errp, "Cannot allocate queue pair");
        return NULL;
    }
    trace_nvme_create_queue_pair(idx, q, size, aio_context,
                                 event_notifier_get_fd(s->irq_notifier));
    q->prp_list_pages = qemu_try_memalign(s->page_size,
                                          s->page_size * NVME_NUM_REQS);
    if (!q->prp_list_pages) {
        error_setg(errp, "Cannot allocate PRP page list");
        goto fail;
    }
    memset(q->prp_list_pages, 0, s->page_size * NVME_NUM_REQS);
    qemu_mutex_init(&q->lock);
    q->s = s;
    q->index = idx;
    qemu_co_queue_init(&q->free_req_queue);
    q->completion_bh = aio_bh_new(aio_context, nvme_process_completion_bh, q);
    r = qemu_vfio_dma_map(s->vfio, q->prp_list_pages,
                          s->page_size * NVME_NUM_REQS,
                          false, &prp_list_iova);
    if (r) {
        error_setg_errno(errp, -r, "Cannot map buffer for DMA");
        goto fail;
    }
    q->free_req_head = -1;
    for (i = 0; i < NVME_NUM_REQS; i++) {
        NVMeRequest *req = &q->reqs[i];
        req->cid = i + 1;
        req->free_req_next = q->free_req_head;
        q->free_req_head = i;
        req->prp_list_page = q->prp_list_pages + i * s->page_size;
        req->prp_list_iova = prp_list_iova + i * s->page_size;
    }

    if (!nvme_init_queue(s, &q->sq, size, NVME_SQ_ENTRY_BYTES, errp)) {
        goto fail;
    }
    q->sq.doorbell = &s->doorbells[idx * s->doorbell_scale].sq_tail;

    if (!nvme_init_queue(s, &q->cq, size, NVME_CQ_ENTRY_BYTES, errp)) {
        goto fail;
    }
    q->cq.doorbell = &s->doorbells[idx * s->doorbell_scale].cq_head;

    return q;
fail:
    nvme_free_queue_pair(q);
    return NULL;
}

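/*
 * Note: command identifiers are assigned as req->cid = i + 1 above, so CID 0
 * is never used on the wire and a completion's CID maps back to its request
 * slot as q->reqs[cid - 1] (see nvme_process_completion()).
 */
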
static void nvme_kick(NVMeQueuePair *q)
{
    BDRVNVMeState *s = q->s;

    if (s->plugged || !q->need_kick) {
        return;
    }
    trace_nvme_kick(s, q->index);
    assert(!(q->sq.tail & 0xFF00));
    /* Fence the write to submission queue entry before notifying the device. */
    smp_wmb();
    *q->sq.doorbell = cpu_to_le32(q->sq.tail);
    q->inflight += q->need_kick;
    q->need_kick = 0;
}

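/*
 * While s->plugged is set (see nvme_aio_plug()/nvme_aio_unplug() below),
 * nvme_kick() returns early and q->need_kick keeps accumulating, so several
 * submissions are batched into a single SQ tail doorbell write at unplug
 * time.
 */
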
/* Find a free request element if any, otherwise:
 * a) if in coroutine context, try to wait for one to become available;
 * b) if not in coroutine, return NULL;
 */
static NVMeRequest *nvme_get_free_req(NVMeQueuePair *q)
{
    NVMeRequest *req;

    qemu_mutex_lock(&q->lock);

    while (q->free_req_head == -1) {
        if (qemu_in_coroutine()) {
            trace_nvme_free_req_queue_wait(q->s, q->index);
            qemu_co_queue_wait(&q->free_req_queue, &q->lock);
        } else {
            qemu_mutex_unlock(&q->lock);
            return NULL;
        }
    }

    req = &q->reqs[q->free_req_head];
    q->free_req_head = req->free_req_next;
    req->free_req_next = -1;

    qemu_mutex_unlock(&q->lock);
    return req;
}

static void nvme_put_free_req_locked(NVMeQueuePair *q, NVMeRequest *req)
{
    req->free_req_next = q->free_req_head;
    q->free_req_head = req - q->reqs;
}

static void nvme_wake_free_req_locked(NVMeQueuePair *q)
{
    if (!qemu_co_queue_empty(&q->free_req_queue)) {
        replay_bh_schedule_oneshot_event(q->s->aio_context,
                                         nvme_free_req_queue_cb, q);
    }
}

/* Insert a request in the freelist and wake waiters */
static void nvme_put_free_req_and_wake(NVMeQueuePair *q, NVMeRequest *req)
{
    qemu_mutex_lock(&q->lock);
    nvme_put_free_req_locked(q, req);
    nvme_wake_free_req_locked(q);
    qemu_mutex_unlock(&q->lock);
}

static inline int nvme_translate_error(const NvmeCqe *c)
{
    uint16_t status = (le16_to_cpu(c->status) >> 1) & 0xFF;
    if (status) {
        trace_nvme_error(le32_to_cpu(c->result),
                         le16_to_cpu(c->sq_head),
                         le16_to_cpu(c->sq_id),
                         le16_to_cpu(c->cid),
                         le16_to_cpu(status));
    }
    switch (status) {
    case 0:
        return 0;
    case 1:
        return -ENOSYS;
    case 2:
        return -EINVAL;
    default:
        return -EIO;
    }
}

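/*
 * The 16-bit completion status field carries the phase tag in bit 0 and the
 * status code in bits 8:1, hence the "(>> 1) & 0xFF" above. For example, a
 * raw status of 0x0005 decodes to phase = 1 and status code 0x02 (Invalid
 * Field in Command), which is reported here as -EINVAL.
 */
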
static bool nvme_process_completion(NVMeQueuePair *q)
{
    BDRVNVMeState *s = q->s;
    bool progress = false;
    NVMeRequest *preq;
    NVMeRequest req;
    NvmeCqe *c;

    trace_nvme_process_completion(s, q->index, q->inflight);
    if (s->plugged) {
        trace_nvme_process_completion_queue_plugged(s, q->index);
        return false;
    }

    /*
     * Support re-entrancy when a request cb() function invokes aio_poll().
     * Pending completions must be visible to aio_poll() so that a cb()
     * function can wait for the completion of another request.
     *
     * The aio_poll() loop will execute our BH and we'll resume completion
     * processing there.
     */
    qemu_bh_schedule(q->completion_bh);

    assert(q->inflight >= 0);
    while (q->inflight) {
        int ret;
        uint32_t cid;

        c = (NvmeCqe *)&q->cq.queue[q->cq.head * NVME_CQ_ENTRY_BYTES];
        if ((le16_to_cpu(c->status) & 0x1) == q->cq_phase) {
            break;
        }
        ret = nvme_translate_error(c);
        if (ret) {
            s->stats.completion_errors++;
        }
        q->cq.head = (q->cq.head + 1) % NVME_QUEUE_SIZE;
        if (!q->cq.head) {
            q->cq_phase = !q->cq_phase;
        }
        cid = le16_to_cpu(c->cid);
        if (cid == 0 || cid > NVME_QUEUE_SIZE) {
            warn_report("NVMe: Unexpected CID in completion queue: %"PRIu32", "
                        "queue size: %u", cid, NVME_QUEUE_SIZE);
            continue;
        }
        trace_nvme_complete_command(s, q->index, cid);
        preq = &q->reqs[cid - 1];
        req = *preq;
        assert(req.cid == cid);
        assert(req.cb);
        nvme_put_free_req_locked(q, preq);
        preq->cb = preq->opaque = NULL;
        q->inflight--;
        qemu_mutex_unlock(&q->lock);
        req.cb(req.opaque, ret);
        qemu_mutex_lock(&q->lock);
        progress = true;
    }
    if (progress) {
        /* Notify the device so it can post more completions. */
        smp_mb_release();
        *q->cq.doorbell = cpu_to_le32(q->cq.head);
        nvme_wake_free_req_locked(q);
    }

    qemu_bh_cancel(q->completion_bh);

    return progress;
}

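/*
 * Completion detection relies on the phase tag: the controller inverts the
 * phase bit it writes on each pass through the CQ ring, so an entry whose
 * phase bit still equals q->cq_phase has not been posted yet. q->cq_phase is
 * flipped above whenever cq.head wraps back to slot 0.
 */
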
static void nvme_process_completion_bh(void *opaque)
{
    NVMeQueuePair *q = opaque;

    /*
     * We're being invoked because a nvme_process_completion() cb() function
     * called aio_poll(). The callback may be waiting for further completions
     * so notify the device that it has space to fill in more completions now.
     */
    smp_mb_release();
    *q->cq.doorbell = cpu_to_le32(q->cq.head);
    nvme_wake_free_req_locked(q);

    nvme_process_completion(q);
}

static void nvme_trace_command(const NvmeCmd *cmd)
{
    int i;

    if (!trace_event_get_state_backends(TRACE_NVME_SUBMIT_COMMAND_RAW)) {
        return;
    }
    for (i = 0; i < 8; ++i) {
        uint8_t *cmdp = (uint8_t *)cmd + i * 8;
        trace_nvme_submit_command_raw(cmdp[0], cmdp[1], cmdp[2], cmdp[3],
                                      cmdp[4], cmdp[5], cmdp[6], cmdp[7]);
    }
}

static void nvme_submit_command(NVMeQueuePair *q, NVMeRequest *req,
                                NvmeCmd *cmd, BlockCompletionFunc cb,
                                void *opaque)
{
    assert(!req->cb);
    req->cb = cb;
    req->opaque = opaque;
    cmd->cid = cpu_to_le32(req->cid);

    trace_nvme_submit_command(q->s, q->index, req->cid);
    nvme_trace_command(cmd);
    qemu_mutex_lock(&q->lock);
    memcpy((uint8_t *)q->sq.queue +
           q->sq.tail * NVME_SQ_ENTRY_BYTES, cmd, sizeof(*cmd));
    q->sq.tail = (q->sq.tail + 1) % NVME_QUEUE_SIZE;
    q->need_kick++;
    nvme_kick(q);
    nvme_process_completion(q);
    qemu_mutex_unlock(&q->lock);
}

static void nvme_cmd_sync_cb(void *opaque, int ret)
{
    int *pret = opaque;
    *pret = ret;
    aio_wait_kick();
}

static int nvme_cmd_sync(BlockDriverState *bs, NVMeQueuePair *q,
                         NvmeCmd *cmd)
{
    AioContext *aio_context = bdrv_get_aio_context(bs);
    NVMeRequest *req;
    int ret = -EINPROGRESS;
    req = nvme_get_free_req(q);
    if (!req) {
        return -EBUSY;
    }
    nvme_submit_command(q, req, cmd, nvme_cmd_sync_cb, &ret);

    AIO_WAIT_WHILE(aio_context, ret == -EINPROGRESS);
    return ret;
}

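/*
 * nvme_cmd_sync() is only used for admin commands on the open/setup path: it
 * spins the AioContext with AIO_WAIT_WHILE() until nvme_cmd_sync_cb()
 * overwrites the -EINPROGRESS sentinel with the command's result.
 */
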
/* Returns true on success, false on failure. */
static bool nvme_identify(BlockDriverState *bs, int namespace, Error **errp)
{
    BDRVNVMeState *s = bs->opaque;
    bool ret = false;
    union {
        NvmeIdCtrl ctrl;
        NvmeIdNs ns;
    } *id;
    NvmeLBAF *lbaf;
    uint16_t oncs;
    int r;
    uint64_t iova;
    NvmeCmd cmd = {
        .opcode = NVME_ADM_CMD_IDENTIFY,
        .cdw10 = cpu_to_le32(0x1),
    };

    id = qemu_try_memalign(s->page_size, sizeof(*id));
    if (!id) {
        error_setg(errp, "Cannot allocate buffer for identify response");
        goto out;
    }
    r = qemu_vfio_dma_map(s->vfio, id, sizeof(*id), true, &iova);
    if (r) {
        error_setg(errp, "Cannot map buffer for DMA");
        goto out;
    }

    memset(id, 0, sizeof(*id));
    cmd.dptr.prp1 = cpu_to_le64(iova);
    if (nvme_cmd_sync(bs, s->queues[INDEX_ADMIN], &cmd)) {
        error_setg(errp, "Failed to identify controller");
        goto out;
    }

    if (le32_to_cpu(id->ctrl.nn) < namespace) {
        error_setg(errp, "Invalid namespace");
        goto out;
    }
    s->write_cache_supported = le32_to_cpu(id->ctrl.vwc) & 0x1;
    s->max_transfer = (id->ctrl.mdts ? 1 << id->ctrl.mdts : 0) * s->page_size;
    /* For now the page list buffer per command is one page, to hold at most
     * s->page_size / sizeof(uint64_t) entries. */
    s->max_transfer = MIN_NON_ZERO(s->max_transfer,
                          s->page_size / sizeof(uint64_t) * s->page_size);

    oncs = le16_to_cpu(id->ctrl.oncs);
    s->supports_write_zeroes = !!(oncs & NVME_ONCS_WRITE_ZEROES);
    s->supports_discard = !!(oncs & NVME_ONCS_DSM);

    memset(id, 0, sizeof(*id));
    cmd.cdw10 = 0;
    cmd.nsid = cpu_to_le32(namespace);
    if (nvme_cmd_sync(bs, s->queues[INDEX_ADMIN], &cmd)) {
        error_setg(errp, "Failed to identify namespace");
        goto out;
    }

    s->nsze = le64_to_cpu(id->ns.nsze);
    lbaf = &id->ns.lbaf[NVME_ID_NS_FLBAS_INDEX(id->ns.flbas)];

    if (NVME_ID_NS_DLFEAT_WRITE_ZEROES(id->ns.dlfeat) &&
            NVME_ID_NS_DLFEAT_READ_BEHAVIOR(id->ns.dlfeat) ==
                    NVME_ID_NS_DLFEAT_READ_BEHAVIOR_ZEROES) {
        bs->supported_write_flags |= BDRV_REQ_MAY_UNMAP;
    }

    if (lbaf->ms) {
        error_setg(errp, "Namespaces with metadata are not yet supported");
        goto out;
    }

    if (lbaf->ds < BDRV_SECTOR_BITS || lbaf->ds > 12 ||
        (1 << lbaf->ds) > s->page_size) {
        error_setg(errp, "Namespace has unsupported block size (2^%d)",
                   lbaf->ds);
        goto out;
    }

    ret = true;
    s->blkshift = lbaf->ds;
out:
    qemu_vfio_dma_unmap(s->vfio, id);
    qemu_vfree(id);

    return ret;
}

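/*
 * Worked example for the max_transfer clamp above: with a 4096-byte page,
 * one PRP list page holds 4096 / 8 = 512 entries, each naming one data page,
 * so a single command can transfer at most 512 * 4096 bytes = 2 MiB even if
 * the controller's MDTS advertises more.
 */
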
static bool nvme_poll_queue(NVMeQueuePair *q)
{
    bool progress = false;

    const size_t cqe_offset = q->cq.head * NVME_CQ_ENTRY_BYTES;
    NvmeCqe *cqe = (NvmeCqe *)&q->cq.queue[cqe_offset];

    trace_nvme_poll_queue(q->s, q->index);
    /*
     * Do an early check for completions. q->lock isn't needed because
     * nvme_process_completion() only runs in the event loop thread and
     * cannot race with itself.
     */
    if ((le16_to_cpu(cqe->status) & 0x1) == q->cq_phase) {
        return false;
    }

    qemu_mutex_lock(&q->lock);
    while (nvme_process_completion(q)) {
        /* Keep processing while any completions are pending */
        progress = true;
    }
    qemu_mutex_unlock(&q->lock);

    return progress;
}

static bool nvme_poll_queues(BDRVNVMeState *s)
{
    bool progress = false;
    int i;

    for (i = 0; i < s->queue_count; i++) {
        if (nvme_poll_queue(s->queues[i])) {
            progress = true;
        }
    }
    return progress;
}

static void nvme_handle_event(EventNotifier *n)
{
    BDRVNVMeState *s = container_of(n, BDRVNVMeState,
                                    irq_notifier[MSIX_SHARED_IRQ_IDX]);

    trace_nvme_handle_event(s);
    event_notifier_test_and_clear(n);
    nvme_poll_queues(s);
}

static bool nvme_add_io_queue(BlockDriverState *bs, Error **errp)
{
    BDRVNVMeState *s = bs->opaque;
    unsigned n = s->queue_count;
    NVMeQueuePair *q;
    NvmeCmd cmd;
    unsigned queue_size = NVME_QUEUE_SIZE;

    assert(n <= UINT16_MAX);
    q = nvme_create_queue_pair(s, bdrv_get_aio_context(bs),
                               n, queue_size, errp);
    if (!q) {
        return false;
    }
    cmd = (NvmeCmd) {
        .opcode = NVME_ADM_CMD_CREATE_CQ,
        .dptr.prp1 = cpu_to_le64(q->cq.iova),
        .cdw10 = cpu_to_le32(((queue_size - 1) << 16) | n),
        .cdw11 = cpu_to_le32(NVME_CQ_IEN | NVME_CQ_PC),
    };
    if (nvme_cmd_sync(bs, s->queues[INDEX_ADMIN], &cmd)) {
        error_setg(errp, "Failed to create CQ io queue [%u]", n);
        goto out_error;
    }
    cmd = (NvmeCmd) {
        .opcode = NVME_ADM_CMD_CREATE_SQ,
        .dptr.prp1 = cpu_to_le64(q->sq.iova),
        .cdw10 = cpu_to_le32(((queue_size - 1) << 16) | n),
        .cdw11 = cpu_to_le32(NVME_SQ_PC | (n << 16)),
    };
    if (nvme_cmd_sync(bs, s->queues[INDEX_ADMIN], &cmd)) {
        error_setg(errp, "Failed to create SQ io queue [%u]", n);
        goto out_error;
    }
    s->queues = g_renew(NVMeQueuePair *, s->queues, n + 1);
    s->queues[n] = q;
    s->queue_count++;
    return true;
out_error:
    nvme_free_queue_pair(q);
    return false;
}

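/*
 * Per the NVMe spec, cdw10 for Create I/O CQ/SQ encodes the 0's-based queue
 * size in bits 31:16 and the queue ID in bits 15:0; e.g. queue_size = 128
 * and n = 1 yields cdw10 = 0x007F0001. cdw11 requests physically contiguous
 * queues (PC), enables interrupts (IEN) for the CQ, and names the paired CQ
 * ID in bits 31:16 for the SQ.
 */
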
static bool nvme_poll_cb(void *opaque)
{
    EventNotifier *e = opaque;
    BDRVNVMeState *s = container_of(e, BDRVNVMeState,
                                    irq_notifier[MSIX_SHARED_IRQ_IDX]);

    return nvme_poll_queues(s);
}

static int nvme_init(BlockDriverState *bs, const char *device, int namespace,
                     Error **errp)
{
    BDRVNVMeState *s = bs->opaque;
    AioContext *aio_context = bdrv_get_aio_context(bs);
    int ret;
    uint64_t cap;
    uint64_t timeout_ms;
    uint64_t deadline, now;
    volatile NvmeBar *regs = NULL;

    qemu_co_mutex_init(&s->dma_map_lock);
    qemu_co_queue_init(&s->dma_flush_queue);
    s->device = g_strdup(device);
    s->nsid = namespace;
    s->aio_context = bdrv_get_aio_context(bs);
    ret = event_notifier_init(&s->irq_notifier[MSIX_SHARED_IRQ_IDX], 0);
    if (ret) {
        error_setg(errp, "Failed to init event notifier");
        return ret;
    }

    s->vfio = qemu_vfio_open_pci(device, errp);
    if (!s->vfio) {
        ret = -EINVAL;
        goto out;
    }

    regs = qemu_vfio_pci_map_bar(s->vfio, 0, 0, sizeof(NvmeBar),
                                 PROT_READ | PROT_WRITE, errp);
    if (!regs) {
        ret = -EINVAL;
        goto out;
    }
    /* Perform initialize sequence as described in NVMe spec "7.6.1
     * Initialization". */

    cap = le64_to_cpu(regs->cap);
    trace_nvme_controller_capability_raw(cap);
    trace_nvme_controller_capability("Maximum Queue Entries Supported",
                                     1 + NVME_CAP_MQES(cap));
    trace_nvme_controller_capability("Contiguous Queues Required",
                                     NVME_CAP_CQR(cap));
    trace_nvme_controller_capability("Doorbell Stride",
                                     2 << (2 + NVME_CAP_DSTRD(cap)));
    trace_nvme_controller_capability("Subsystem Reset Supported",
                                     NVME_CAP_NSSRS(cap));
    trace_nvme_controller_capability("Memory Page Size Minimum",
                                     1 << (12 + NVME_CAP_MPSMIN(cap)));
    trace_nvme_controller_capability("Memory Page Size Maximum",
                                     1 << (12 + NVME_CAP_MPSMAX(cap)));
    if (!NVME_CAP_CSS(cap)) {
        error_setg(errp, "Device doesn't support NVMe command set");
        ret = -EINVAL;
        goto out;
    }

    s->page_size = MAX(4096, 1 << NVME_CAP_MPSMIN(cap));
    s->doorbell_scale = (4 << NVME_CAP_DSTRD(cap)) / sizeof(uint32_t);
    bs->bl.opt_mem_alignment = s->page_size;
    timeout_ms = MIN(500 * NVME_CAP_TO(cap), 30000);

    /* Reset device to get a clean state. */
    regs->cc = cpu_to_le32(le32_to_cpu(regs->cc) & 0xFE);
    /* Wait for CSTS.RDY = 0. */
    deadline = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + timeout_ms * SCALE_MS;
    while (NVME_CSTS_RDY(le32_to_cpu(regs->csts))) {
        if (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) > deadline) {
            error_setg(errp, "Timeout while waiting for device to reset (%"
                             PRId64 " ms)",
                       timeout_ms);
            ret = -ETIMEDOUT;
            goto out;
        }
    }

    s->doorbells = qemu_vfio_pci_map_bar(s->vfio, 0, sizeof(NvmeBar),
                                         NVME_DOORBELL_SIZE, PROT_WRITE, errp);
    if (!s->doorbells) {
        ret = -EINVAL;
        goto out;
    }

    /* Set up admin queue. */
    s->queues = g_new(NVMeQueuePair *, 1);
    s->queues[INDEX_ADMIN] = nvme_create_queue_pair(s, aio_context, 0,
                                                    NVME_QUEUE_SIZE,
                                                    errp);
    if (!s->queues[INDEX_ADMIN]) {
        ret = -EINVAL;
        goto out;
    }
    s->queue_count = 1;
    QEMU_BUILD_BUG_ON(NVME_QUEUE_SIZE & 0xF000);
    regs->aqa = cpu_to_le32((NVME_QUEUE_SIZE << AQA_ACQS_SHIFT) |
                            (NVME_QUEUE_SIZE << AQA_ASQS_SHIFT));
    regs->asq = cpu_to_le64(s->queues[INDEX_ADMIN]->sq.iova);
    regs->acq = cpu_to_le64(s->queues[INDEX_ADMIN]->cq.iova);

    /* After setting up all control registers we can enable device now. */
    regs->cc = cpu_to_le32((ctz32(NVME_CQ_ENTRY_BYTES) << CC_IOCQES_SHIFT) |
                           (ctz32(NVME_SQ_ENTRY_BYTES) << CC_IOSQES_SHIFT) |
                           CC_EN_MASK);
    /* Wait for CSTS.RDY = 1. */
    now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    deadline = now + timeout_ms * SCALE_MS;
    while (!NVME_CSTS_RDY(le32_to_cpu(regs->csts))) {
        if (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) > deadline) {
            error_setg(errp, "Timeout while waiting for device to start (%"
                             PRId64 " ms)",
                       timeout_ms);
            ret = -ETIMEDOUT;
            goto out;
        }
    }

    ret = qemu_vfio_pci_init_irq(s->vfio, s->irq_notifier,
                                 VFIO_PCI_MSIX_IRQ_INDEX, errp);
    if (ret) {
        goto out;
    }
    aio_set_event_notifier(bdrv_get_aio_context(bs),
                           &s->irq_notifier[MSIX_SHARED_IRQ_IDX],
                           false, nvme_handle_event, nvme_poll_cb);

    if (!nvme_identify(bs, namespace, errp)) {
        ret = -EIO;
        goto out;
    }

    /* Set up command queues. */
    if (!nvme_add_io_queue(bs, errp)) {
        ret = -EIO;
    }
out:
    if (regs) {
        qemu_vfio_pci_unmap_bar(s->vfio, 0, (void *)regs, 0, sizeof(NvmeBar));
    }

    /* Cleaning up is done in nvme_file_open() upon error. */
    return ret;
}

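/*
 * The sequence above follows NVMe spec section 7.6.1: clear CC.EN and wait
 * for CSTS.RDY to drop, program AQA/ASQ/ACQ with the admin queue geometry
 * and addresses, then set the queue entry sizes and CC.EN and wait for
 * CSTS.RDY to rise before issuing any admin command.
 */
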
/* Parse a filename in the format of nvme://XXXX:XX:XX.X/X. Example:
 *
 *     nvme://0000:44:00.0/1
 *
 * where the "nvme://" is a fixed form of the protocol prefix, the middle part
 * is the PCI address, and the last part is the namespace number starting from
 * 1 according to the NVMe spec. */
static void nvme_parse_filename(const char *filename, QDict *options,
                                Error **errp)
{
    int pref = strlen("nvme://");

    if (strlen(filename) > pref && !strncmp(filename, "nvme://", pref)) {
        const char *tmp = filename + pref;
        char *device;
        const char *namespace;
        unsigned long ns;
        const char *slash = strchr(tmp, '/');
        if (!slash) {
            qdict_put_str(options, NVME_BLOCK_OPT_DEVICE, tmp);
            return;
        }
        device = g_strndup(tmp, slash - tmp);
        qdict_put_str(options, NVME_BLOCK_OPT_DEVICE, device);
        g_free(device);
        namespace = slash + 1;
        if (*namespace && qemu_strtoul(namespace, NULL, 10, &ns)) {
            error_setg(errp, "Invalid namespace '%s', positive number expected",
                       namespace);
            return;
        }
        qdict_put_str(options, NVME_BLOCK_OPT_NAMESPACE,
                      *namespace ? namespace : "1");
    }
}

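/*
 * For example (illustrative), "nvme://0000:44:00.0/2" is parsed into the
 * options {"device": "0000:44:00.0", "namespace": "2"}, while a bare
 * "nvme://0000:44:00.0" only sets the device and lets the namespace default
 * to 1 in nvme_file_open().
 */
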
static int nvme_enable_disable_write_cache(BlockDriverState *bs, bool enable,
                                           Error **errp)
{
    int ret;
    BDRVNVMeState *s = bs->opaque;
    NvmeCmd cmd = {
        .opcode = NVME_ADM_CMD_SET_FEATURES,
        .nsid = cpu_to_le32(s->nsid),
        .cdw10 = cpu_to_le32(0x06),
        .cdw11 = cpu_to_le32(enable ? 0x01 : 0x00),
    };

    ret = nvme_cmd_sync(bs, s->queues[INDEX_ADMIN], &cmd);
    if (ret) {
        error_setg(errp, "Failed to configure NVMe write cache");
    }
    return ret;
}

static void nvme_close(BlockDriverState *bs)
{
    BDRVNVMeState *s = bs->opaque;

    for (unsigned i = 0; i < s->queue_count; ++i) {
        nvme_free_queue_pair(s->queues[i]);
    }
    g_free(s->queues);
    aio_set_event_notifier(bdrv_get_aio_context(bs),
                           &s->irq_notifier[MSIX_SHARED_IRQ_IDX],
                           false, NULL, NULL);
    event_notifier_cleanup(&s->irq_notifier[MSIX_SHARED_IRQ_IDX]);
    qemu_vfio_pci_unmap_bar(s->vfio, 0, (void *)s->doorbells,
                            sizeof(NvmeBar), NVME_DOORBELL_SIZE);
    qemu_vfio_close(s->vfio);

    g_free(s->device);
}

*bs
, QDict
*options
, int flags
,
923 BDRVNVMeState
*s
= bs
->opaque
;
925 bs
->supported_write_flags
= BDRV_REQ_FUA
;
927 opts
= qemu_opts_create(&runtime_opts
, NULL
, 0, &error_abort
);
928 qemu_opts_absorb_qdict(opts
, options
, &error_abort
);
929 device
= qemu_opt_get(opts
, NVME_BLOCK_OPT_DEVICE
);
931 error_setg(errp
, "'" NVME_BLOCK_OPT_DEVICE
"' option is required");
936 namespace = qemu_opt_get_number(opts
, NVME_BLOCK_OPT_NAMESPACE
, 1);
937 ret
= nvme_init(bs
, device
, namespace, errp
);
942 if (flags
& BDRV_O_NOCACHE
) {
943 if (!s
->write_cache_supported
) {
945 "NVMe controller doesn't support write cache configuration");
948 ret
= nvme_enable_disable_write_cache(bs
, !(flags
& BDRV_O_NOCACHE
),
961 static int64_t nvme_getlength(BlockDriverState
*bs
)
963 BDRVNVMeState
*s
= bs
->opaque
;
964 return s
->nsze
<< s
->blkshift
;
967 static uint32_t nvme_get_blocksize(BlockDriverState
*bs
)
969 BDRVNVMeState
*s
= bs
->opaque
;
970 assert(s
->blkshift
>= BDRV_SECTOR_BITS
&& s
->blkshift
<= 12);
971 return UINT32_C(1) << s
->blkshift
;
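/*
 * s->nsze is the namespace size in logical blocks, so the byte length is
 * nsze << blkshift; e.g. a namespace of 0x1000 512-byte blocks
 * (blkshift = 9) reports 0x200000 bytes.
 */
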
static int nvme_probe_blocksizes(BlockDriverState *bs, BlockSizes *bsz)
{
    uint32_t blocksize = nvme_get_blocksize(bs);
    bsz->phys = blocksize;
    bsz->log = blocksize;
    return 0;
}

/* Called with s->dma_map_lock */
static coroutine_fn int nvme_cmd_unmap_qiov(BlockDriverState *bs,
                                            QEMUIOVector *qiov)
{
    int r = 0;
    BDRVNVMeState *s = bs->opaque;

    s->dma_map_count -= qiov->size;
    if (!s->dma_map_count && !qemu_co_queue_empty(&s->dma_flush_queue)) {
        r = qemu_vfio_dma_reset_temporary(s->vfio);
        if (!r) {
            qemu_co_queue_restart_all(&s->dma_flush_queue);
        }
    }
    return r;
}

/* Called with s->dma_map_lock */
static coroutine_fn int nvme_cmd_map_qiov(BlockDriverState *bs, NvmeCmd *cmd,
                                          NVMeRequest *req, QEMUIOVector *qiov)
{
    BDRVNVMeState *s = bs->opaque;
    uint64_t *pagelist = req->prp_list_page;
    int i, j, r;
    int entries = 0;

    assert(qiov->size);
    assert(QEMU_IS_ALIGNED(qiov->size, s->page_size));
    assert(qiov->size / s->page_size <= s->page_size / sizeof(uint64_t));
    for (i = 0; i < qiov->niov; ++i) {
        bool retry = true;
        uint64_t iova;
try_map:
        r = qemu_vfio_dma_map(s->vfio,
                              qiov->iov[i].iov_base,
                              qiov->iov[i].iov_len,
                              true, &iova);
        if (r == -ENOMEM && retry) {
            retry = false;
            trace_nvme_dma_flush_queue_wait(s);
            if (s->dma_map_count) {
                trace_nvme_dma_map_flush(s);
                qemu_co_queue_wait(&s->dma_flush_queue, &s->dma_map_lock);
            } else {
                r = qemu_vfio_dma_reset_temporary(s->vfio);
                if (r) {
                    goto fail;
                }
            }
            goto try_map;
        }
        if (r) {
            goto fail;
        }

        for (j = 0; j < qiov->iov[i].iov_len / s->page_size; j++) {
            pagelist[entries++] = cpu_to_le64(iova + j * s->page_size);
        }
        trace_nvme_cmd_map_qiov_iov(s, i, qiov->iov[i].iov_base,
                                    qiov->iov[i].iov_len / s->page_size);
    }

    s->dma_map_count += qiov->size;

    assert(entries <= s->page_size / sizeof(uint64_t));
    switch (entries) {
    case 0:
        abort();
    case 1:
        cmd->dptr.prp1 = pagelist[0];
        cmd->dptr.prp2 = 0;
        break;
    case 2:
        cmd->dptr.prp1 = pagelist[0];
        cmd->dptr.prp2 = pagelist[1];
        break;
    default:
        cmd->dptr.prp1 = pagelist[0];
        cmd->dptr.prp2 = cpu_to_le64(req->prp_list_iova + sizeof(uint64_t));
        break;
    }
    trace_nvme_cmd_map_qiov(s, cmd, req, qiov, entries);
    for (i = 0; i < entries; ++i) {
        trace_nvme_cmd_map_qiov_pages(s, i, pagelist[i]);
    }
    return 0;
fail:
    /* No need to unmap [0 - i) iovs even if we've failed, since we don't
     * increment s->dma_map_count. This is okay for fixed mapping memory areas
     * because they are already mapped before calling this function; for
     * temporary mappings, a later nvme_cmd_(un)map_qiov will reclaim by
     * calling qemu_vfio_dma_reset_temporary when necessary. */
    return r;
}

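/*
 * PRP assembly above follows the spec's three cases: a single data page goes
 * entirely in prp1; exactly two pages use prp1 plus prp2 directly; anything
 * longer keeps the first page in prp1 and points prp2 at the PRP list, whose
 * first entry is skipped (hence the "+ sizeof(uint64_t)") because it already
 * sits in prp1.
 */
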
typedef struct {
    Coroutine *co;
    int ret;
    AioContext *ctx;
} NVMeCoData;

static void nvme_rw_cb_bh(void *opaque)
{
    NVMeCoData *data = opaque;
    qemu_coroutine_enter(data->co);
}

static void nvme_rw_cb(void *opaque, int ret)
{
    NVMeCoData *data = opaque;
    data->ret = ret;
    if (!data->co) {
        /* The rw coroutine hasn't yielded, don't try to enter. */
        return;
    }
    replay_bh_schedule_oneshot_event(data->ctx, nvme_rw_cb_bh, data);
}

static coroutine_fn int nvme_co_prw_aligned(BlockDriverState *bs,
                                            uint64_t offset, uint64_t bytes,
                                            QEMUIOVector *qiov, bool is_write,
                                            int flags)
{
    int r;
    BDRVNVMeState *s = bs->opaque;
    NVMeQueuePair *ioq = s->queues[INDEX_IO(0)];
    NVMeRequest *req;

    uint32_t cdw12 = (((bytes >> s->blkshift) - 1) & 0xFFFF) |
                     (flags & BDRV_REQ_FUA ? 1 << 30 : 0);
    NvmeCmd cmd = {
        .opcode = is_write ? NVME_CMD_WRITE : NVME_CMD_READ,
        .nsid = cpu_to_le32(s->nsid),
        .cdw10 = cpu_to_le32((offset >> s->blkshift) & 0xFFFFFFFF),
        .cdw11 = cpu_to_le32(((offset >> s->blkshift) >> 32) & 0xFFFFFFFF),
        .cdw12 = cpu_to_le32(cdw12),
    };
    NVMeCoData data = {
        .ctx = bdrv_get_aio_context(bs),
        .ret = -EINPROGRESS,
    };

    trace_nvme_prw_aligned(s, is_write, offset, bytes, flags, qiov->niov);
    assert(s->queue_count > 1);
    req = nvme_get_free_req(ioq);
    assert(req);

    qemu_co_mutex_lock(&s->dma_map_lock);
    r = nvme_cmd_map_qiov(bs, &cmd, req, qiov);
    qemu_co_mutex_unlock(&s->dma_map_lock);
    if (r) {
        nvme_put_free_req_and_wake(ioq, req);
        return r;
    }
    nvme_submit_command(ioq, req, &cmd, nvme_rw_cb, &data);

    data.co = qemu_coroutine_self();
    while (data.ret == -EINPROGRESS) {
        qemu_coroutine_yield();
    }

    qemu_co_mutex_lock(&s->dma_map_lock);
    r = nvme_cmd_unmap_qiov(bs, qiov);
    qemu_co_mutex_unlock(&s->dma_map_lock);
    if (r) {
        return r;
    }

    trace_nvme_rw_done(s, is_write, offset, bytes, data.ret);
    return data.ret;
}

static inline bool nvme_qiov_aligned(BlockDriverState *bs,
                                     const QEMUIOVector *qiov)
{
    int i;
    BDRVNVMeState *s = bs->opaque;

    for (i = 0; i < qiov->niov; ++i) {
        if (!QEMU_PTR_IS_ALIGNED(qiov->iov[i].iov_base, s->page_size) ||
            !QEMU_IS_ALIGNED(qiov->iov[i].iov_len, s->page_size)) {
            trace_nvme_qiov_unaligned(qiov, i, qiov->iov[i].iov_base,
                                      qiov->iov[i].iov_len, s->page_size);
            return false;
        }
    }
    return true;
}

static int nvme_co_prw(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
                       QEMUIOVector *qiov, bool is_write, int flags)
{
    BDRVNVMeState *s = bs->opaque;
    int r;
    uint8_t *buf = NULL;
    QEMUIOVector local_qiov;

    assert(QEMU_IS_ALIGNED(offset, s->page_size));
    assert(QEMU_IS_ALIGNED(bytes, s->page_size));
    assert(bytes <= s->max_transfer);
    if (nvme_qiov_aligned(bs, qiov)) {
        s->stats.aligned_accesses++;
        return nvme_co_prw_aligned(bs, offset, bytes, qiov, is_write, flags);
    }
    s->stats.unaligned_accesses++;
    trace_nvme_prw_buffered(s, offset, bytes, qiov->niov, is_write);
    buf = qemu_try_memalign(s->page_size, bytes);

    if (!buf) {
        return -ENOMEM;
    }
    qemu_iovec_init(&local_qiov, 1);
    if (is_write) {
        qemu_iovec_to_buf(qiov, 0, buf, bytes);
    }
    qemu_iovec_add(&local_qiov, buf, bytes);
    r = nvme_co_prw_aligned(bs, offset, bytes, &local_qiov, is_write, flags);
    qemu_iovec_destroy(&local_qiov);
    if (!r && !is_write) {
        qemu_iovec_from_buf(qiov, 0, buf, bytes);
    }
    qemu_vfree(buf);
    return r;
}

static coroutine_fn int nvme_co_preadv(BlockDriverState *bs,
                                       uint64_t offset, uint64_t bytes,
                                       QEMUIOVector *qiov, int flags)
{
    return nvme_co_prw(bs, offset, bytes, qiov, false, flags);
}

static coroutine_fn int nvme_co_pwritev(BlockDriverState *bs,
                                        uint64_t offset, uint64_t bytes,
                                        QEMUIOVector *qiov, int flags)
{
    return nvme_co_prw(bs, offset, bytes, qiov, true, flags);
}

static coroutine_fn int nvme_co_flush(BlockDriverState *bs)
{
    BDRVNVMeState *s = bs->opaque;
    NVMeQueuePair *ioq = s->queues[INDEX_IO(0)];
    NVMeRequest *req;
    NvmeCmd cmd = {
        .opcode = NVME_CMD_FLUSH,
        .nsid = cpu_to_le32(s->nsid),
    };
    NVMeCoData data = {
        .ctx = bdrv_get_aio_context(bs),
        .ret = -EINPROGRESS,
    };

    assert(s->queue_count > 1);
    req = nvme_get_free_req(ioq);
    assert(req);
    nvme_submit_command(ioq, req, &cmd, nvme_rw_cb, &data);

    data.co = qemu_coroutine_self();
    if (data.ret == -EINPROGRESS) {
        qemu_coroutine_yield();
    }

    return data.ret;
}

static coroutine_fn int nvme_co_pwrite_zeroes(BlockDriverState *bs,
                                              int64_t offset,
                                              int bytes,
                                              BdrvRequestFlags flags)
{
    BDRVNVMeState *s = bs->opaque;
    NVMeQueuePair *ioq = s->queues[INDEX_IO(0)];
    NVMeRequest *req;

    uint32_t cdw12 = ((bytes >> s->blkshift) - 1) & 0xFFFF;

    if (!s->supports_write_zeroes) {
        return -ENOTSUP;
    }

    NvmeCmd cmd = {
        .opcode = NVME_CMD_WRITE_ZEROES,
        .nsid = cpu_to_le32(s->nsid),
        .cdw10 = cpu_to_le32((offset >> s->blkshift) & 0xFFFFFFFF),
        .cdw11 = cpu_to_le32(((offset >> s->blkshift) >> 32) & 0xFFFFFFFF),
    };

    NVMeCoData data = {
        .ctx = bdrv_get_aio_context(bs),
        .ret = -EINPROGRESS,
    };

    if (flags & BDRV_REQ_MAY_UNMAP) {
        cdw12 |= (1 << 25);
    }

    if (flags & BDRV_REQ_FUA) {
        cdw12 |= (1 << 30);
    }

    cmd.cdw12 = cpu_to_le32(cdw12);

    trace_nvme_write_zeroes(s, offset, bytes, flags);
    assert(s->queue_count > 1);
    req = nvme_get_free_req(ioq);
    assert(req);

    nvme_submit_command(ioq, req, &cmd, nvme_rw_cb, &data);

    data.co = qemu_coroutine_self();
    while (data.ret == -EINPROGRESS) {
        qemu_coroutine_yield();
    }

    trace_nvme_rw_done(s, true, offset, bytes, data.ret);
    return data.ret;
}

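/*
 * In cdw12 above, bits 15:0 hold the 0's-based number of logical blocks to
 * zero, bit 25 is the Deallocate hint (mapped from BDRV_REQ_MAY_UNMAP) and
 * bit 30 is Force Unit Access (mapped from BDRV_REQ_FUA).
 */
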
static int coroutine_fn nvme_co_pdiscard(BlockDriverState *bs,
                                         int64_t offset,
                                         int bytes)
{
    BDRVNVMeState *s = bs->opaque;
    NVMeQueuePair *ioq = s->queues[INDEX_IO(0)];
    NVMeRequest *req;
    NvmeDsmRange *buf;
    QEMUIOVector local_qiov;
    int ret;

    NvmeCmd cmd = {
        .opcode = NVME_CMD_DSM,
        .nsid = cpu_to_le32(s->nsid),
        .cdw10 = cpu_to_le32(0), /*number of ranges - 0 based*/
        .cdw11 = cpu_to_le32(1 << 2), /*deallocate bit*/
    };

    NVMeCoData data = {
        .ctx = bdrv_get_aio_context(bs),
        .ret = -EINPROGRESS,
    };

    if (!s->supports_discard) {
        return -ENOTSUP;
    }

    assert(s->queue_count > 1);

    buf = qemu_try_memalign(s->page_size, s->page_size);
    if (!buf) {
        return -ENOMEM;
    }
    memset(buf, 0, s->page_size);
    buf->nlb = cpu_to_le32(bytes >> s->blkshift);
    buf->slba = cpu_to_le64(offset >> s->blkshift);
    buf->cattr = 0;

    qemu_iovec_init(&local_qiov, 1);
    qemu_iovec_add(&local_qiov, buf, 4096);

    req = nvme_get_free_req(ioq);
    assert(req);

    qemu_co_mutex_lock(&s->dma_map_lock);
    ret = nvme_cmd_map_qiov(bs, &cmd, req, &local_qiov);
    qemu_co_mutex_unlock(&s->dma_map_lock);

    if (ret) {
        nvme_put_free_req_and_wake(ioq, req);
        goto out;
    }

    trace_nvme_dsm(s, offset, bytes);

    nvme_submit_command(ioq, req, &cmd, nvme_rw_cb, &data);

    data.co = qemu_coroutine_self();
    while (data.ret == -EINPROGRESS) {
        qemu_coroutine_yield();
    }

    qemu_co_mutex_lock(&s->dma_map_lock);
    ret = nvme_cmd_unmap_qiov(bs, &local_qiov);
    qemu_co_mutex_unlock(&s->dma_map_lock);

    if (ret) {
        goto out;
    }

    ret = data.ret;
    trace_nvme_dsm_done(s, offset, bytes, ret);
out:
    qemu_iovec_destroy(&local_qiov);
    qemu_vfree(buf);
    return ret;
}

static int nvme_reopen_prepare(BDRVReopenState *reopen_state,
                               BlockReopenQueue *queue, Error **errp)
{
    return 0;
}

static void nvme_refresh_filename(BlockDriverState *bs)
{
    BDRVNVMeState *s = bs->opaque;

    snprintf(bs->exact_filename, sizeof(bs->exact_filename), "nvme://%s/%i",
             s->device, s->nsid);
}

static void nvme_refresh_limits(BlockDriverState *bs, Error **errp)
{
    BDRVNVMeState *s = bs->opaque;

    bs->bl.opt_mem_alignment = s->page_size;
    bs->bl.request_alignment = s->page_size;
    bs->bl.max_transfer = s->max_transfer;
}

static void nvme_detach_aio_context(BlockDriverState *bs)
{
    BDRVNVMeState *s = bs->opaque;

    for (unsigned i = 0; i < s->queue_count; i++) {
        NVMeQueuePair *q = s->queues[i];

        qemu_bh_delete(q->completion_bh);
        q->completion_bh = NULL;
    }

    aio_set_event_notifier(bdrv_get_aio_context(bs),
                           &s->irq_notifier[MSIX_SHARED_IRQ_IDX],
                           false, NULL, NULL);
}

static void nvme_attach_aio_context(BlockDriverState *bs,
                                    AioContext *new_context)
{
    BDRVNVMeState *s = bs->opaque;

    s->aio_context = new_context;
    aio_set_event_notifier(new_context, &s->irq_notifier[MSIX_SHARED_IRQ_IDX],
                           false, nvme_handle_event, nvme_poll_cb);

    for (unsigned i = 0; i < s->queue_count; i++) {
        NVMeQueuePair *q = s->queues[i];

        q->completion_bh =
            aio_bh_new(new_context, nvme_process_completion_bh, q);
    }
}

static void nvme_aio_plug(BlockDriverState *bs)
{
    BDRVNVMeState *s = bs->opaque;
    assert(!s->plugged);
    s->plugged = true;
}

static void nvme_aio_unplug(BlockDriverState *bs)
{
    BDRVNVMeState *s = bs->opaque;
    assert(s->plugged);
    s->plugged = false;
    for (unsigned i = INDEX_IO(0); i < s->queue_count; i++) {
        NVMeQueuePair *q = s->queues[i];
        qemu_mutex_lock(&q->lock);
        nvme_kick(q);
        nvme_process_completion(q);
        qemu_mutex_unlock(&q->lock);
    }
}

static void nvme_register_buf(BlockDriverState *bs, void *host, size_t size)
{
    int ret;
    BDRVNVMeState *s = bs->opaque;

    ret = qemu_vfio_dma_map(s->vfio, host, size, false, NULL);
    if (ret) {
        /* FIXME: we may run out of IOVA addresses after repeated
         * bdrv_register_buf/bdrv_unregister_buf, because nvme_vfio_dma_unmap
         * doesn't reclaim addresses for fixed mappings. */
        error_report("nvme_register_buf failed: %s", strerror(-ret));
    }
}

static void nvme_unregister_buf(BlockDriverState *bs, void *host)
{
    BDRVNVMeState *s = bs->opaque;

    qemu_vfio_dma_unmap(s->vfio, host);
}

static BlockStatsSpecific *nvme_get_specific_stats(BlockDriverState *bs)
{
    BlockStatsSpecific *stats = g_new(BlockStatsSpecific, 1);
    BDRVNVMeState *s = bs->opaque;

    stats->driver = BLOCKDEV_DRIVER_NVME;
    stats->u.nvme = (BlockStatsSpecificNvme) {
        .completion_errors = s->stats.completion_errors,
        .aligned_accesses = s->stats.aligned_accesses,
        .unaligned_accesses = s->stats.unaligned_accesses,
    };

    return stats;
}

static const char *const nvme_strong_runtime_opts[] = {
    NVME_BLOCK_OPT_DEVICE,
    NVME_BLOCK_OPT_NAMESPACE,

    NULL
};

static BlockDriver bdrv_nvme = {
    .format_name              = "nvme",
    .protocol_name            = "nvme",
    .instance_size            = sizeof(BDRVNVMeState),

    .bdrv_co_create_opts      = bdrv_co_create_opts_simple,
    .create_opts              = &bdrv_create_opts_simple,

    .bdrv_parse_filename      = nvme_parse_filename,
    .bdrv_file_open           = nvme_file_open,
    .bdrv_close               = nvme_close,
    .bdrv_getlength           = nvme_getlength,
    .bdrv_probe_blocksizes    = nvme_probe_blocksizes,

    .bdrv_co_preadv           = nvme_co_preadv,
    .bdrv_co_pwritev          = nvme_co_pwritev,

    .bdrv_co_pwrite_zeroes    = nvme_co_pwrite_zeroes,
    .bdrv_co_pdiscard         = nvme_co_pdiscard,

    .bdrv_co_flush_to_disk    = nvme_co_flush,
    .bdrv_reopen_prepare      = nvme_reopen_prepare,

    .bdrv_refresh_filename    = nvme_refresh_filename,
    .bdrv_refresh_limits      = nvme_refresh_limits,
    .strong_runtime_opts      = nvme_strong_runtime_opts,
    .bdrv_get_specific_stats  = nvme_get_specific_stats,

    .bdrv_detach_aio_context  = nvme_detach_aio_context,
    .bdrv_attach_aio_context  = nvme_attach_aio_context,

    .bdrv_io_plug             = nvme_aio_plug,
    .bdrv_io_unplug           = nvme_aio_unplug,

    .bdrv_register_buf        = nvme_register_buf,
    .bdrv_unregister_buf      = nvme_unregister_buf,
};

static void bdrv_nvme_init(void)
{
    bdrv_register(&bdrv_nvme);
}

block_init(bdrv_nvme_init);