/*
 * QEMU NVM Express Controller
 *
 * Copyright (c) 2012, Intel Corporation
 *
 * Written by Keith Busch <keith.busch@intel.com>
 *
 * This code is licensed under the GNU GPL v2 or later.
 *
 * Reference Specs: http://www.nvmexpress.org, 1.2, 1.1, 1.0e
 *
 *  https://nvmexpress.org/developers/nvme-specification/
 *
 * Usage: add options:
 *      -drive file=<file>,if=none,id=<drive_id>
 *      -device nvme,drive=<drive_id>,serial=<serial>,id=<id[optional]>, \
 *              cmb_size_mb=<cmb_size_mb[optional]>, \
 *              [pmrdev=<mem_backend_file_id>,] \
 *              max_ioqpairs=<N[optional]>, \
 *              aerl=<N[optional]>, aer_max_queued=<N[optional]>, \
 *              mdts=<N[optional]>
 *
 * Note cmb_size_mb denotes size of CMB in MB. CMB is assumed to be at
 * offset 0 in BAR2 and supports only WDS, RDS and SQS for now.
 *
 * cmb_size_mb= and pmrdev= options are mutually exclusive due to limitation
 * in available BARs. cmb_size_mb= will take precedence over pmrdev= when
 * both are provided.
 * Enabling pmr emulation can be achieved by pointing to memory-backend-file.
 * For example:
 * -object memory-backend-file,id=<mem_id>,share=on,mem-path=<file_path>, \
 *  size=<size> .... -device nvme,...,pmrdev=<mem_id>
 *
 *
 * nvme device parameters
 * ~~~~~~~~~~~~~~~~~~~~~~
 * - `aerl`
 *   The Asynchronous Event Request Limit (AERL). Indicates the maximum number
 *   of concurrently outstanding Asynchronous Event Request commands supported
 *   by the controller. This is a 0's based value.
 *
 * - `aer_max_queued`
 *   This is the maximum number of events that the device will enqueue for
 *   completion when there are no outstanding AERs. When the maximum number of
 *   enqueued events are reached, subsequent events will be dropped.
 */
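
/*
 * Example invocation (illustrative only; the image path, serial number and
 * queue/AER counts below are placeholders, not defaults):
 *
 *     -drive file=nvme.img,if=none,id=nvme0 \
 *     -device nvme,drive=nvme0,serial=deadbeef,max_ioqpairs=8,aerl=3
 */
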
#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu/error-report.h"
#include "hw/block/block.h"
#include "hw/pci/msix.h"
#include "hw/pci/pci.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"
#include "sysemu/sysemu.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "sysemu/hostmem.h"
#include "sysemu/block-backend.h"
#include "exec/memory.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qemu/cutils.h"
#include "trace.h"
#include "nvme.h"

#define NVME_MAX_IOQPAIRS 0xffff
#define NVME_DB_SIZE  4
#define NVME_SPEC_VER 0x00010300
#define NVME_CMB_BIR 2
#define NVME_PMR_BIR 2
#define NVME_TEMPERATURE 0x143
#define NVME_TEMPERATURE_WARNING 0x157
#define NVME_TEMPERATURE_CRITICAL 0x175
#define NVME_NUM_FW_SLOTS 1

#define NVME_GUEST_ERR(trace, fmt, ...) \
    do { \
        (trace_##trace)(__VA_ARGS__); \
        qemu_log_mask(LOG_GUEST_ERROR, #trace \
            " in %s: " fmt "\n", __func__, ## __VA_ARGS__); \
    } while (0)

static const bool nvme_feature_support[NVME_FID_MAX] = {
    [NVME_ARBITRATION]              = true,
    [NVME_POWER_MANAGEMENT]         = true,
    [NVME_TEMPERATURE_THRESHOLD]    = true,
    [NVME_ERROR_RECOVERY]           = true,
    [NVME_VOLATILE_WRITE_CACHE]     = true,
    [NVME_NUMBER_OF_QUEUES]         = true,
    [NVME_INTERRUPT_COALESCING]     = true,
    [NVME_INTERRUPT_VECTOR_CONF]    = true,
    [NVME_WRITE_ATOMICITY]          = true,
    [NVME_ASYNCHRONOUS_EVENT_CONF]  = true,
    [NVME_TIMESTAMP]                = true,
};

static const uint32_t nvme_feature_cap[NVME_FID_MAX] = {
    [NVME_TEMPERATURE_THRESHOLD]    = NVME_FEAT_CAP_CHANGE,
    [NVME_VOLATILE_WRITE_CACHE]     = NVME_FEAT_CAP_CHANGE,
    [NVME_NUMBER_OF_QUEUES]         = NVME_FEAT_CAP_CHANGE,
    [NVME_ASYNCHRONOUS_EVENT_CONF]  = NVME_FEAT_CAP_CHANGE,
    [NVME_TIMESTAMP]                = NVME_FEAT_CAP_CHANGE,
};

static void nvme_process_sq(void *opaque);

static uint16_t nvme_cid(NvmeRequest *req)
{
    return le16_to_cpu(req->cqe.cid);
}

static uint16_t nvme_sqid(NvmeRequest *req)
{
    return le16_to_cpu(req->sq->sqid);
}

static bool nvme_addr_is_cmb(NvmeCtrl *n, hwaddr addr)
{
    hwaddr low = n->ctrl_mem.addr;
    hwaddr hi  = n->ctrl_mem.addr + int128_get64(n->ctrl_mem.size);

    return addr >= low && addr < hi;
}

static inline void *nvme_addr_to_cmb(NvmeCtrl *n, hwaddr addr)
{
    assert(nvme_addr_is_cmb(n, addr));

    return &n->cmbuf[addr - n->ctrl_mem.addr];
}

static void nvme_addr_read(NvmeCtrl *n, hwaddr addr, void *buf, int size)
{
    if (n->bar.cmbsz && nvme_addr_is_cmb(n, addr)) {
        memcpy(buf, nvme_addr_to_cmb(n, addr), size);
        return;
    }

    pci_dma_read(&n->parent_obj, addr, buf, size);
}

static int nvme_check_sqid(NvmeCtrl *n, uint16_t sqid)
{
    return sqid < n->params.max_ioqpairs + 1 && n->sq[sqid] != NULL ? 0 : -1;
}

static int nvme_check_cqid(NvmeCtrl *n, uint16_t cqid)
{
    return cqid < n->params.max_ioqpairs + 1 && n->cq[cqid] != NULL ? 0 : -1;
}

static void nvme_inc_cq_tail(NvmeCQueue *cq)
{
    cq->tail++;
    if (cq->tail >= cq->size) {
        cq->tail = 0;
        cq->phase = !cq->phase;
    }
}

static void nvme_inc_sq_head(NvmeSQueue *sq)
{
    sq->head = (sq->head + 1) % sq->size;
}

static uint8_t nvme_cq_full(NvmeCQueue *cq)
{
    return (cq->tail + 1) % cq->size == cq->head;
}

static uint8_t nvme_sq_empty(NvmeSQueue *sq)
{
    return sq->head == sq->tail;
}

static void nvme_irq_check(NvmeCtrl *n)
{
    if (msix_enabled(&(n->parent_obj))) {
        return;
    }
    if (~n->bar.intms & n->irq_status) {
        pci_irq_assert(&n->parent_obj);
    } else {
        pci_irq_deassert(&n->parent_obj);
    }
}

static void nvme_irq_assert(NvmeCtrl *n, NvmeCQueue *cq)
{
    if (cq->irq_enabled) {
        if (msix_enabled(&(n->parent_obj))) {
            trace_pci_nvme_irq_msix(cq->vector);
            msix_notify(&(n->parent_obj), cq->vector);
        } else {
            trace_pci_nvme_irq_pin();
            assert(cq->vector < 32);
            n->irq_status |= 1 << cq->vector;
            nvme_irq_check(n);
        }
    } else {
        trace_pci_nvme_irq_masked();
    }
}

static void nvme_irq_deassert(NvmeCtrl *n, NvmeCQueue *cq)
{
    if (cq->irq_enabled) {
        if (msix_enabled(&(n->parent_obj))) {
            return;
        }
        assert(cq->vector < 32);
        n->irq_status &= ~(1 << cq->vector);
        nvme_irq_check(n);
    }
}

static void nvme_req_clear(NvmeRequest *req)
{
    req->ns = NULL;
    memset(&req->cqe, 0x0, sizeof(req->cqe));
}

static void nvme_req_exit(NvmeRequest *req)
{
    if (req->qsg.sg) {
        qemu_sglist_destroy(&req->qsg);
    }

    if (req->iov.iov) {
        qemu_iovec_destroy(&req->iov);
    }
}

static uint16_t nvme_map_addr_cmb(NvmeCtrl *n, QEMUIOVector *iov, hwaddr addr,
                                  size_t len)
{
    if (!len) {
        return NVME_SUCCESS;
    }

    trace_pci_nvme_map_addr_cmb(addr, len);

    if (!nvme_addr_is_cmb(n, addr) || !nvme_addr_is_cmb(n, addr + len - 1)) {
        return NVME_DATA_TRAS_ERROR;
    }

    qemu_iovec_add(iov, nvme_addr_to_cmb(n, addr), len);

    return NVME_SUCCESS;
}

static uint16_t nvme_map_addr(NvmeCtrl *n, QEMUSGList *qsg, QEMUIOVector *iov,
                              hwaddr addr, size_t len)
{
    if (!len) {
        return NVME_SUCCESS;
    }

    trace_pci_nvme_map_addr(addr, len);

    if (nvme_addr_is_cmb(n, addr)) {
        if (qsg && qsg->sg) {
            return NVME_INVALID_USE_OF_CMB | NVME_DNR;
        }

        assert(iov);

        if (!iov->iov) {
            qemu_iovec_init(iov, 1);
        }

        return nvme_map_addr_cmb(n, iov, addr, len);
    }

    if (iov && iov->iov) {
        return NVME_INVALID_USE_OF_CMB | NVME_DNR;
    }

    assert(qsg);

    if (!qsg->sg) {
        pci_dma_sglist_init(qsg, &n->parent_obj, 1);
    }

    qemu_sglist_add(qsg, addr, len);

    return NVME_SUCCESS;
}

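/*
 * Map a PRP1/PRP2 pair as given in a command's data pointer. PRP1 (and, for
 * transfers of up to two pages, PRP2) point directly at data pages; for
 * larger transfers PRP2 points at a (possibly chained) PRP list that is read
 * via nvme_addr_read(). Entries must not mix CMB and host memory.
 */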
static uint16_t nvme_map_prp(NvmeCtrl *n, uint64_t prp1, uint64_t prp2,
                             uint32_t len, NvmeRequest *req)
{
    hwaddr trans_len = n->page_size - (prp1 % n->page_size);
    trans_len = MIN(len, trans_len);
    int num_prps = (len >> n->page_bits) + 1;
    uint16_t status;
    bool prp_list_in_cmb = false;

    QEMUSGList *qsg = &req->qsg;
    QEMUIOVector *iov = &req->iov;

    trace_pci_nvme_map_prp(trans_len, len, prp1, prp2, num_prps);

    if (unlikely(!prp1)) {
        trace_pci_nvme_err_invalid_prp();
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    if (nvme_addr_is_cmb(n, prp1)) {
        qemu_iovec_init(iov, num_prps);
    } else {
        pci_dma_sglist_init(qsg, &n->parent_obj, num_prps);
    }

    status = nvme_map_addr(n, qsg, iov, prp1, trans_len);
    if (status) {
        return status;
    }

    len -= trans_len;
    if (len) {
        if (unlikely(!prp2)) {
            trace_pci_nvme_err_invalid_prp2_missing();
            return NVME_INVALID_FIELD | NVME_DNR;
        }

        if (len > n->page_size) {
            uint64_t prp_list[n->max_prp_ents];
            uint32_t nents, prp_trans;
            int i = 0;

            if (nvme_addr_is_cmb(n, prp2)) {
                prp_list_in_cmb = true;
            }

            nents = (len + n->page_size - 1) >> n->page_bits;
            prp_trans = MIN(n->max_prp_ents, nents) * sizeof(uint64_t);
            nvme_addr_read(n, prp2, (void *)prp_list, prp_trans);
            while (len != 0) {
                uint64_t prp_ent = le64_to_cpu(prp_list[i]);

                if (i == n->max_prp_ents - 1 && len > n->page_size) {
                    if (unlikely(!prp_ent || prp_ent & (n->page_size - 1))) {
                        trace_pci_nvme_err_invalid_prplist_ent(prp_ent);
                        return NVME_INVALID_FIELD | NVME_DNR;
                    }

                    if (prp_list_in_cmb != nvme_addr_is_cmb(n, prp_ent)) {
                        return NVME_INVALID_USE_OF_CMB | NVME_DNR;
                    }

                    i = 0;
                    nents = (len + n->page_size - 1) >> n->page_bits;
                    prp_trans = MIN(n->max_prp_ents, nents) * sizeof(uint64_t);
                    nvme_addr_read(n, prp_ent, (void *)prp_list,
                                   prp_trans);
                    prp_ent = le64_to_cpu(prp_list[i]);
                }

                if (unlikely(!prp_ent || prp_ent & (n->page_size - 1))) {
                    trace_pci_nvme_err_invalid_prplist_ent(prp_ent);
                    return NVME_INVALID_FIELD | NVME_DNR;
                }

                trans_len = MIN(len, n->page_size);
                status = nvme_map_addr(n, qsg, iov, prp_ent, trans_len);
                if (status) {
                    return status;
                }

                len -= trans_len;
                i++;
            }
        } else {
            if (unlikely(prp2 & (n->page_size - 1))) {
                trace_pci_nvme_err_invalid_prp2_align(prp2);
                return NVME_INVALID_FIELD | NVME_DNR;
            }
            status = nvme_map_addr(n, qsg, iov, prp2, len);
            if (status) {
                return status;
            }
        }
    }

    return NVME_SUCCESS;
}

static uint16_t nvme_dma_prp(NvmeCtrl *n, uint8_t *ptr, uint32_t len,
                             uint64_t prp1, uint64_t prp2, DMADirection dir,
                             NvmeRequest *req)
{
    uint16_t status = NVME_SUCCESS;

    status = nvme_map_prp(n, prp1, prp2, len, req);
    if (status) {
        return status;
    }

    /* assert that only one of qsg and iov carries data */
    assert((req->qsg.nsg > 0) != (req->iov.niov > 0));

    if (req->qsg.nsg > 0) {
        uint64_t residual;

        if (dir == DMA_DIRECTION_TO_DEVICE) {
            residual = dma_buf_write(ptr, len, &req->qsg);
        } else {
            residual = dma_buf_read(ptr, len, &req->qsg);
        }

        if (unlikely(residual)) {
            trace_pci_nvme_err_invalid_dma();
            status = NVME_INVALID_FIELD | NVME_DNR;
        }
    } else {
        size_t bytes;

        if (dir == DMA_DIRECTION_TO_DEVICE) {
            bytes = qemu_iovec_to_buf(&req->iov, 0, ptr, len);
        } else {
            bytes = qemu_iovec_from_buf(&req->iov, 0, ptr, len);
        }

        if (unlikely(bytes != len)) {
            trace_pci_nvme_err_invalid_dma();
            status = NVME_INVALID_FIELD | NVME_DNR;
        }
    }

    return status;
}

static uint16_t nvme_map_dptr(NvmeCtrl *n, size_t len, NvmeRequest *req)
{
    NvmeCmd *cmd = &req->cmd;
    uint64_t prp1 = le64_to_cpu(cmd->dptr.prp1);
    uint64_t prp2 = le64_to_cpu(cmd->dptr.prp2);

    return nvme_map_prp(n, prp1, prp2, len, req);
}

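/*
 * Post completion queue entries for finished requests. Each CQE carries the
 * current phase tag so the host can detect new entries without reading a
 * register; nvme_inc_cq_tail() flips the phase on wrap-around.
 */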
static void nvme_post_cqes(void *opaque)
{
    NvmeCQueue *cq = opaque;
    NvmeCtrl *n = cq->ctrl;
    NvmeRequest *req, *next;

    QTAILQ_FOREACH_SAFE(req, &cq->req_list, entry, next) {
        NvmeSQueue *sq;
        hwaddr addr;

        if (nvme_cq_full(cq)) {
            break;
        }

        QTAILQ_REMOVE(&cq->req_list, req, entry);
        sq = req->sq;
        req->cqe.status = cpu_to_le16((req->status << 1) | cq->phase);
        req->cqe.sq_id = cpu_to_le16(sq->sqid);
        req->cqe.sq_head = cpu_to_le16(sq->head);
        addr = cq->dma_addr + cq->tail * n->cqe_size;
        nvme_inc_cq_tail(cq);
        pci_dma_write(&n->parent_obj, addr, (void *)&req->cqe,
            sizeof(req->cqe));
        nvme_req_exit(req);
        QTAILQ_INSERT_TAIL(&sq->req_list, req, entry);
    }
    if (cq->tail != cq->head) {
        nvme_irq_assert(n, cq);
    }
}

static void nvme_enqueue_req_completion(NvmeCQueue *cq, NvmeRequest *req)
{
    assert(cq->cqid == req->sq->cqid);
    trace_pci_nvme_enqueue_req_completion(nvme_cid(req), cq->cqid,
                                          req->status);
    QTAILQ_REMOVE(&req->sq->out_req_list, req, entry);
    QTAILQ_INSERT_TAIL(&cq->req_list, req, entry);
    timer_mod(cq->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 500);
}

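/*
 * Match queued asynchronous events against outstanding AER commands. An event
 * type is masked once a CQE for it has been posted and stays masked until the
 * host clears it by reading the associated log page.
 */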
static void nvme_process_aers(void *opaque)
{
    NvmeCtrl *n = opaque;
    NvmeAsyncEvent *event, *next;

    trace_pci_nvme_process_aers(n->aer_queued);

    QTAILQ_FOREACH_SAFE(event, &n->aer_queue, entry, next) {
        NvmeRequest *req;
        NvmeAerResult *result;

        /* can't post cqe if there is nothing to complete */
        if (!n->outstanding_aers) {
            trace_pci_nvme_no_outstanding_aers();
            break;
        }

        /* ignore if masked (cqe posted, but event not cleared) */
        if (n->aer_mask & (1 << event->result.event_type)) {
            trace_pci_nvme_aer_masked(event->result.event_type, n->aer_mask);
            continue;
        }

        QTAILQ_REMOVE(&n->aer_queue, event, entry);
        n->aer_queued--;

        n->aer_mask |= 1 << event->result.event_type;
        n->outstanding_aers--;

        req = n->aer_reqs[n->outstanding_aers];

        result = (NvmeAerResult *) &req->cqe.result;
        result->event_type = event->result.event_type;
        result->event_info = event->result.event_info;
        result->log_page = event->result.log_page;
        g_free(event);

        req->status = NVME_SUCCESS;

        trace_pci_nvme_aer_post_cqe(result->event_type, result->event_info,
                                    result->log_page);

        nvme_enqueue_req_completion(&n->admin_cq, req);
    }
}

static void nvme_enqueue_event(NvmeCtrl *n, uint8_t event_type,
                               uint8_t event_info, uint8_t log_page)
{
    NvmeAsyncEvent *event;

    trace_pci_nvme_enqueue_event(event_type, event_info, log_page);

    if (n->aer_queued == n->params.aer_max_queued) {
        trace_pci_nvme_enqueue_event_noqueue(n->aer_queued);
        return;
    }

    event = g_new(NvmeAsyncEvent, 1);
    event->result = (NvmeAerResult) {
        .event_type = event_type,
        .event_info = event_info,
        .log_page   = log_page,
    };

    QTAILQ_INSERT_TAIL(&n->aer_queue, event, entry);
    n->aer_queued++;

    nvme_process_aers(n);
}

static void nvme_clear_events(NvmeCtrl *n, uint8_t event_type)
{
    n->aer_mask &= ~(1 << event_type);
    if (!QTAILQ_EMPTY(&n->aer_queue)) {
        nvme_process_aers(n);
    }
}

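/*
 * MDTS is expressed as a power of two in units of the minimum memory page
 * size; a value of zero means no limit. For example, with a 4 KiB page size,
 * mdts=7 caps transfers at 512 KiB.
 */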
static inline uint16_t nvme_check_mdts(NvmeCtrl *n, size_t len)
{
    uint8_t mdts = n->params.mdts;

    if (mdts && len > n->page_size << mdts) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    return NVME_SUCCESS;
}

static inline uint16_t nvme_check_bounds(NvmeCtrl *n, NvmeNamespace *ns,
                                         uint64_t slba, uint32_t nlb)
{
    uint64_t nsze = le64_to_cpu(ns->id_ns.nsze);

    if (unlikely(UINT64_MAX - slba < nlb || slba + nlb > nsze)) {
        return NVME_LBA_RANGE | NVME_DNR;
    }

    return NVME_SUCCESS;
}

static void nvme_rw_cb(void *opaque, int ret)
{
    NvmeRequest *req = opaque;
    NvmeSQueue *sq = req->sq;
    NvmeCtrl *n = sq->ctrl;
    NvmeCQueue *cq = n->cq[sq->cqid];

    trace_pci_nvme_rw_cb(nvme_cid(req));

    if (!ret) {
        block_acct_done(blk_get_stats(n->conf.blk), &req->acct);
        req->status = NVME_SUCCESS;
    } else {
        block_acct_failed(blk_get_stats(n->conf.blk), &req->acct);
        req->status = NVME_INTERNAL_DEV_ERROR;
    }

    nvme_enqueue_req_completion(cq, req);
}

static uint16_t nvme_flush(NvmeCtrl *n, NvmeRequest *req)
{
    block_acct_start(blk_get_stats(n->conf.blk), &req->acct, 0,
                     BLOCK_ACCT_FLUSH);
    req->aiocb = blk_aio_flush(n->conf.blk, nvme_rw_cb, req);

    return NVME_NO_COMPLETE;
}

static uint16_t nvme_write_zeroes(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd;
    NvmeNamespace *ns = req->ns;
    const uint8_t lba_index = NVME_ID_NS_FLBAS_INDEX(ns->id_ns.flbas);
    const uint8_t data_shift = ns->id_ns.lbaf[lba_index].ds;
    uint64_t slba = le64_to_cpu(rw->slba);
    uint32_t nlb  = le16_to_cpu(rw->nlb) + 1;
    uint64_t offset = slba << data_shift;
    uint32_t count = nlb << data_shift;
    uint16_t status;

    trace_pci_nvme_write_zeroes(nvme_cid(req), slba, nlb);

    status = nvme_check_bounds(n, ns, slba, nlb);
    if (status) {
        trace_pci_nvme_err_invalid_lba_range(slba, nlb, ns->id_ns.nsze);
        return status;
    }

    block_acct_start(blk_get_stats(n->conf.blk), &req->acct, 0,
                     BLOCK_ACCT_WRITE);
    req->aiocb = blk_aio_pwrite_zeroes(n->conf.blk, offset, count,
                                       BDRV_REQ_MAY_UNMAP, nvme_rw_cb, req);
    return NVME_NO_COMPLETE;
}

*n
, NvmeRequest
*req
)
656 NvmeRwCmd
*rw
= (NvmeRwCmd
*)&req
->cmd
;
657 NvmeNamespace
*ns
= req
->ns
;
658 uint32_t nlb
= le32_to_cpu(rw
->nlb
) + 1;
659 uint64_t slba
= le64_to_cpu(rw
->slba
);
661 uint8_t lba_index
= NVME_ID_NS_FLBAS_INDEX(ns
->id_ns
.flbas
);
662 uint8_t data_shift
= ns
->id_ns
.lbaf
[lba_index
].ds
;
663 uint64_t data_size
= (uint64_t)nlb
<< data_shift
;
664 uint64_t data_offset
= slba
<< data_shift
;
665 int is_write
= rw
->opcode
== NVME_CMD_WRITE
? 1 : 0;
666 enum BlockAcctType acct
= is_write
? BLOCK_ACCT_WRITE
: BLOCK_ACCT_READ
;
669 trace_pci_nvme_rw(is_write
? "write" : "read", nlb
, data_size
, slba
);
671 status
= nvme_check_mdts(n
, data_size
);
673 trace_pci_nvme_err_mdts(nvme_cid(req
), data_size
);
674 block_acct_invalid(blk_get_stats(n
->conf
.blk
), acct
);
678 status
= nvme_check_bounds(n
, ns
, slba
, nlb
);
680 trace_pci_nvme_err_invalid_lba_range(slba
, nlb
, ns
->id_ns
.nsze
);
681 block_acct_invalid(blk_get_stats(n
->conf
.blk
), acct
);
685 if (nvme_map_dptr(n
, data_size
, req
)) {
686 block_acct_invalid(blk_get_stats(n
->conf
.blk
), acct
);
687 return NVME_INVALID_FIELD
| NVME_DNR
;
690 if (req
->qsg
.nsg
> 0) {
691 block_acct_start(blk_get_stats(n
->conf
.blk
), &req
->acct
, req
->qsg
.size
,
693 req
->aiocb
= is_write
?
694 dma_blk_write(n
->conf
.blk
, &req
->qsg
, data_offset
, BDRV_SECTOR_SIZE
,
696 dma_blk_read(n
->conf
.blk
, &req
->qsg
, data_offset
, BDRV_SECTOR_SIZE
,
699 block_acct_start(blk_get_stats(n
->conf
.blk
), &req
->acct
, req
->iov
.size
,
701 req
->aiocb
= is_write
?
702 blk_aio_pwritev(n
->conf
.blk
, data_offset
, &req
->iov
, 0, nvme_rw_cb
,
704 blk_aio_preadv(n
->conf
.blk
, data_offset
, &req
->iov
, 0, nvme_rw_cb
,
708 return NVME_NO_COMPLETE
;
static uint16_t nvme_io_cmd(NvmeCtrl *n, NvmeRequest *req)
{
    uint32_t nsid = le32_to_cpu(req->cmd.nsid);

    trace_pci_nvme_io_cmd(nvme_cid(req), nsid, nvme_sqid(req),
                          req->cmd.opcode);

    if (unlikely(nsid == 0 || nsid > n->num_namespaces)) {
        trace_pci_nvme_err_invalid_ns(nsid, n->num_namespaces);
        return NVME_INVALID_NSID | NVME_DNR;
    }

    req->ns = &n->namespaces[nsid - 1];
    switch (req->cmd.opcode) {
    case NVME_CMD_FLUSH:
        return nvme_flush(n, req);
    case NVME_CMD_WRITE_ZEROES:
        return nvme_write_zeroes(n, req);
    case NVME_CMD_WRITE:
    case NVME_CMD_READ:
        return nvme_rw(n, req);
    default:
        trace_pci_nvme_err_invalid_opc(req->cmd.opcode);
        return NVME_INVALID_OPCODE | NVME_DNR;
    }
}

static void nvme_free_sq(NvmeSQueue *sq, NvmeCtrl *n)
{
    n->sq[sq->sqid] = NULL;
    timer_del(sq->timer);
    timer_free(sq->timer);
    g_free(sq->io_req);
    if (sq->sqid) {
        g_free(sq);
    }
}

static uint16_t nvme_del_sq(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeDeleteQ *c = (NvmeDeleteQ *)&req->cmd;
    NvmeRequest *r, *next;
    NvmeSQueue *sq;
    NvmeCQueue *cq;
    uint16_t qid = le16_to_cpu(c->qid);

    if (unlikely(!qid || nvme_check_sqid(n, qid))) {
        trace_pci_nvme_err_invalid_del_sq(qid);
        return NVME_INVALID_QID | NVME_DNR;
    }

    trace_pci_nvme_del_sq(qid);

    sq = n->sq[qid];
    while (!QTAILQ_EMPTY(&sq->out_req_list)) {
        r = QTAILQ_FIRST(&sq->out_req_list);
        assert(r->aiocb);
        blk_aio_cancel(r->aiocb);
    }
    if (!nvme_check_cqid(n, sq->cqid)) {
        cq = n->cq[sq->cqid];
        QTAILQ_REMOVE(&cq->sq_list, sq, entry);

        nvme_post_cqes(cq);
        QTAILQ_FOREACH_SAFE(r, &cq->req_list, entry, next) {
            if (r->sq == sq) {
                QTAILQ_REMOVE(&cq->req_list, r, entry);
                QTAILQ_INSERT_TAIL(&sq->req_list, r, entry);
            }
        }
    }

    nvme_free_sq(sq, n);
    return NVME_SUCCESS;
}

static void nvme_init_sq(NvmeSQueue *sq, NvmeCtrl *n, uint64_t dma_addr,
                         uint16_t sqid, uint16_t cqid, uint16_t size)
{
    int i;
    NvmeCQueue *cq;

    sq->ctrl = n;
    sq->dma_addr = dma_addr;
    sq->sqid = sqid;
    sq->size = size;
    sq->cqid = cqid;
    sq->head = sq->tail = 0;
    sq->io_req = g_new0(NvmeRequest, sq->size);

    QTAILQ_INIT(&sq->req_list);
    QTAILQ_INIT(&sq->out_req_list);
    for (i = 0; i < sq->size; i++) {
        sq->io_req[i].sq = sq;
        QTAILQ_INSERT_TAIL(&(sq->req_list), &sq->io_req[i], entry);
    }
    sq->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, nvme_process_sq, sq);

    assert(n->cq[cqid]);
    cq = n->cq[cqid];
    QTAILQ_INSERT_TAIL(&(cq->sq_list), sq, entry);
    n->sq[sqid] = sq;
}

static uint16_t nvme_create_sq(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeSQueue *sq;
    NvmeCreateSq *c = (NvmeCreateSq *)&req->cmd;

    uint16_t cqid = le16_to_cpu(c->cqid);
    uint16_t sqid = le16_to_cpu(c->sqid);
    uint16_t qsize = le16_to_cpu(c->qsize);
    uint16_t qflags = le16_to_cpu(c->sq_flags);
    uint64_t prp1 = le64_to_cpu(c->prp1);

    trace_pci_nvme_create_sq(prp1, sqid, cqid, qsize, qflags);

    if (unlikely(!cqid || nvme_check_cqid(n, cqid))) {
        trace_pci_nvme_err_invalid_create_sq_cqid(cqid);
        return NVME_INVALID_CQID | NVME_DNR;
    }
    if (unlikely(!sqid || !nvme_check_sqid(n, sqid))) {
        trace_pci_nvme_err_invalid_create_sq_sqid(sqid);
        return NVME_INVALID_QID | NVME_DNR;
    }
    if (unlikely(!qsize || qsize > NVME_CAP_MQES(n->bar.cap))) {
        trace_pci_nvme_err_invalid_create_sq_size(qsize);
        return NVME_MAX_QSIZE_EXCEEDED | NVME_DNR;
    }
    if (unlikely(!prp1 || prp1 & (n->page_size - 1))) {
        trace_pci_nvme_err_invalid_create_sq_addr(prp1);
        return NVME_INVALID_FIELD | NVME_DNR;
    }
    if (unlikely(!(NVME_SQ_FLAGS_PC(qflags)))) {
        trace_pci_nvme_err_invalid_create_sq_qflags(NVME_SQ_FLAGS_PC(qflags));
        return NVME_INVALID_FIELD | NVME_DNR;
    }
    sq = g_malloc0(sizeof(*sq));
    nvme_init_sq(sq, n, prp1, sqid, cqid, qsize + 1);
    return NVME_SUCCESS;
}

static uint16_t nvme_smart_info(NvmeCtrl *n, uint8_t rae, uint32_t buf_len,
                                uint64_t off, NvmeRequest *req)
{
    NvmeCmd *cmd = &req->cmd;
    uint64_t prp1 = le64_to_cpu(cmd->dptr.prp1);
    uint64_t prp2 = le64_to_cpu(cmd->dptr.prp2);
    uint32_t nsid = le32_to_cpu(cmd->nsid);

    uint32_t trans_len;
    time_t current_ms;
    uint64_t units_read = 0, units_written = 0;
    uint64_t read_commands = 0, write_commands = 0;
    NvmeSmartLog smart;
    BlockAcctStats *s;

    if (nsid && nsid != 0xffffffff) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    s = blk_get_stats(n->conf.blk);

    units_read = s->nr_bytes[BLOCK_ACCT_READ] >> BDRV_SECTOR_BITS;
    units_written = s->nr_bytes[BLOCK_ACCT_WRITE] >> BDRV_SECTOR_BITS;
    read_commands = s->nr_ops[BLOCK_ACCT_READ];
    write_commands = s->nr_ops[BLOCK_ACCT_WRITE];

    if (off > sizeof(smart)) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    trans_len = MIN(sizeof(smart) - off, buf_len);

    memset(&smart, 0x0, sizeof(smart));

    smart.data_units_read[0] = cpu_to_le64(DIV_ROUND_UP(units_read, 1000));
    smart.data_units_written[0] = cpu_to_le64(DIV_ROUND_UP(units_written,
                                                           1000));
    smart.host_read_commands[0] = cpu_to_le64(read_commands);
    smart.host_write_commands[0] = cpu_to_le64(write_commands);

    smart.temperature = cpu_to_le16(n->temperature);

    if ((n->temperature >= n->features.temp_thresh_hi) ||
        (n->temperature <= n->features.temp_thresh_low)) {
        smart.critical_warning |= NVME_SMART_TEMPERATURE;
    }

    current_ms = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
    smart.power_on_hours[0] =
        cpu_to_le64((((current_ms - n->starttime_ms) / 1000) / 60) / 60);

    if (!rae) {
        nvme_clear_events(n, NVME_AER_TYPE_SMART);
    }

    return nvme_dma_prp(n, (uint8_t *) &smart + off, trans_len, prp1, prp2,
                        DMA_DIRECTION_FROM_DEVICE, req);
}

static uint16_t nvme_fw_log_info(NvmeCtrl *n, uint32_t buf_len, uint64_t off,
                                 NvmeRequest *req)
{
    uint32_t trans_len;
    NvmeCmd *cmd = &req->cmd;
    uint64_t prp1 = le64_to_cpu(cmd->dptr.prp1);
    uint64_t prp2 = le64_to_cpu(cmd->dptr.prp2);
    NvmeFwSlotInfoLog fw_log = {
        .afi = 0x1,
    };

    strpadcpy((char *)&fw_log.frs1, sizeof(fw_log.frs1), "1.0", ' ');

    if (off > sizeof(fw_log)) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    trans_len = MIN(sizeof(fw_log) - off, buf_len);

    return nvme_dma_prp(n, (uint8_t *) &fw_log + off, trans_len, prp1, prp2,
                        DMA_DIRECTION_FROM_DEVICE, req);
}

static uint16_t nvme_error_info(NvmeCtrl *n, uint8_t rae, uint32_t buf_len,
                                uint64_t off, NvmeRequest *req)
{
    uint32_t trans_len;
    NvmeCmd *cmd = &req->cmd;
    uint64_t prp1 = le64_to_cpu(cmd->dptr.prp1);
    uint64_t prp2 = le64_to_cpu(cmd->dptr.prp2);
    NvmeErrorLog errlog;

    if (!rae) {
        nvme_clear_events(n, NVME_AER_TYPE_ERROR);
    }

    if (off > sizeof(errlog)) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    memset(&errlog, 0x0, sizeof(errlog));

    trans_len = MIN(sizeof(errlog) - off, buf_len);

    return nvme_dma_prp(n, (uint8_t *)&errlog, trans_len, prp1, prp2,
                        DMA_DIRECTION_FROM_DEVICE, req);
}

static uint16_t nvme_get_log(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeCmd *cmd = &req->cmd;

    uint32_t dw10 = le32_to_cpu(cmd->cdw10);
    uint32_t dw11 = le32_to_cpu(cmd->cdw11);
    uint32_t dw12 = le32_to_cpu(cmd->cdw12);
    uint32_t dw13 = le32_to_cpu(cmd->cdw13);
    uint8_t  lid = dw10 & 0xff;
    uint8_t  lsp = (dw10 >> 8) & 0xf;
    uint8_t  rae = (dw10 >> 15) & 0x1;
    uint32_t numdl, numdu;
    uint64_t off, lpol, lpou;
    size_t   len;
    uint16_t status;

    numdl = (dw10 >> 16);
    numdu = (dw11 & 0xffff);
    lpol = dw12;
    lpou = dw13;

    len = (((numdu << 16) | numdl) + 1) << 2;
    off = (lpou << 32ULL) | lpol;

    if (off & 0x3) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    trace_pci_nvme_get_log(nvme_cid(req), lid, lsp, rae, len, off);

    status = nvme_check_mdts(n, len);
    if (status) {
        trace_pci_nvme_err_mdts(nvme_cid(req), len);
        return status;
    }

    switch (lid) {
    case NVME_LOG_ERROR_INFO:
        return nvme_error_info(n, rae, len, off, req);
    case NVME_LOG_SMART_INFO:
        return nvme_smart_info(n, rae, len, off, req);
    case NVME_LOG_FW_SLOT_INFO:
        return nvme_fw_log_info(n, len, off, req);
    default:
        trace_pci_nvme_err_invalid_log_page(nvme_cid(req), lid);
        return NVME_INVALID_FIELD | NVME_DNR;
    }
}

static void nvme_free_cq(NvmeCQueue *cq, NvmeCtrl *n)
{
    n->cq[cq->cqid] = NULL;
    timer_del(cq->timer);
    timer_free(cq->timer);
    msix_vector_unuse(&n->parent_obj, cq->vector);
    if (cq->cqid) {
        g_free(cq);
    }
}

static uint16_t nvme_del_cq(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeDeleteQ *c = (NvmeDeleteQ *)&req->cmd;
    NvmeCQueue *cq;
    uint16_t qid = le16_to_cpu(c->qid);

    if (unlikely(!qid || nvme_check_cqid(n, qid))) {
        trace_pci_nvme_err_invalid_del_cq_cqid(qid);
        return NVME_INVALID_CQID | NVME_DNR;
    }

    cq = n->cq[qid];
    if (unlikely(!QTAILQ_EMPTY(&cq->sq_list))) {
        trace_pci_nvme_err_invalid_del_cq_notempty(qid);
        return NVME_INVALID_QUEUE_DEL;
    }
    nvme_irq_deassert(n, cq);
    trace_pci_nvme_del_cq(qid);
    nvme_free_cq(cq, n);
    return NVME_SUCCESS;
}

static void nvme_init_cq(NvmeCQueue *cq, NvmeCtrl *n, uint64_t dma_addr,
                         uint16_t cqid, uint16_t vector, uint16_t size,
                         uint16_t irq_enabled)
{
    int ret;

    ret = msix_vector_use(&n->parent_obj, vector);
    assert(ret == 0);
    cq->ctrl = n;
    cq->cqid = cqid;
    cq->size = size;
    cq->dma_addr = dma_addr;
    cq->phase = 1;
    cq->irq_enabled = irq_enabled;
    cq->vector = vector;
    cq->head = cq->tail = 0;
    QTAILQ_INIT(&cq->req_list);
    QTAILQ_INIT(&cq->sq_list);
    n->cq[cqid] = cq;
    cq->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, nvme_post_cqes, cq);
}

static uint16_t nvme_create_cq(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeCQueue *cq;
    NvmeCreateCq *c = (NvmeCreateCq *)&req->cmd;
    uint16_t cqid = le16_to_cpu(c->cqid);
    uint16_t vector = le16_to_cpu(c->irq_vector);
    uint16_t qsize = le16_to_cpu(c->qsize);
    uint16_t qflags = le16_to_cpu(c->cq_flags);
    uint64_t prp1 = le64_to_cpu(c->prp1);

    trace_pci_nvme_create_cq(prp1, cqid, vector, qsize, qflags,
                             NVME_CQ_FLAGS_IEN(qflags) != 0);

    if (unlikely(!cqid || !nvme_check_cqid(n, cqid))) {
        trace_pci_nvme_err_invalid_create_cq_cqid(cqid);
        return NVME_INVALID_CQID | NVME_DNR;
    }
    if (unlikely(!qsize || qsize > NVME_CAP_MQES(n->bar.cap))) {
        trace_pci_nvme_err_invalid_create_cq_size(qsize);
        return NVME_MAX_QSIZE_EXCEEDED | NVME_DNR;
    }
    if (unlikely(!prp1)) {
        trace_pci_nvme_err_invalid_create_cq_addr(prp1);
        return NVME_INVALID_FIELD | NVME_DNR;
    }
    if (unlikely(!msix_enabled(&n->parent_obj) && vector)) {
        trace_pci_nvme_err_invalid_create_cq_vector(vector);
        return NVME_INVALID_IRQ_VECTOR | NVME_DNR;
    }
    if (unlikely(vector >= n->params.msix_qsize)) {
        trace_pci_nvme_err_invalid_create_cq_vector(vector);
        return NVME_INVALID_IRQ_VECTOR | NVME_DNR;
    }
    if (unlikely(!(NVME_CQ_FLAGS_PC(qflags)))) {
        trace_pci_nvme_err_invalid_create_cq_qflags(NVME_CQ_FLAGS_PC(qflags));
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    cq = g_malloc0(sizeof(*cq));
    nvme_init_cq(cq, n, prp1, cqid, vector, qsize + 1,
                 NVME_CQ_FLAGS_IEN(qflags));

    /*
     * It is only required to set qs_created when creating a completion queue;
     * creating a submission queue without a matching completion queue will
     * fail.
     */
    n->qs_created = true;
    return NVME_SUCCESS;
}

static uint16_t nvme_identify_ctrl(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
    uint64_t prp1 = le64_to_cpu(c->prp1);
    uint64_t prp2 = le64_to_cpu(c->prp2);

    trace_pci_nvme_identify_ctrl();

    return nvme_dma_prp(n, (uint8_t *)&n->id_ctrl, sizeof(n->id_ctrl), prp1,
                        prp2, DMA_DIRECTION_FROM_DEVICE, req);
}

static uint16_t nvme_identify_ns(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeNamespace *ns;
    NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
    uint32_t nsid = le32_to_cpu(c->nsid);
    uint64_t prp1 = le64_to_cpu(c->prp1);
    uint64_t prp2 = le64_to_cpu(c->prp2);

    trace_pci_nvme_identify_ns(nsid);

    if (unlikely(nsid == 0 || nsid > n->num_namespaces)) {
        trace_pci_nvme_err_invalid_ns(nsid, n->num_namespaces);
        return NVME_INVALID_NSID | NVME_DNR;
    }

    ns = &n->namespaces[nsid - 1];

    return nvme_dma_prp(n, (uint8_t *)&ns->id_ns, sizeof(ns->id_ns), prp1,
                        prp2, DMA_DIRECTION_FROM_DEVICE, req);
}

static uint16_t nvme_identify_nslist(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
    static const int data_len = NVME_IDENTIFY_DATA_SIZE;
    uint32_t min_nsid = le32_to_cpu(c->nsid);
    uint64_t prp1 = le64_to_cpu(c->prp1);
    uint64_t prp2 = le64_to_cpu(c->prp2);
    uint32_t *list;
    uint16_t ret;
    int i, j = 0;

    trace_pci_nvme_identify_nslist(min_nsid);

    /*
     * Both 0xffffffff (NVME_NSID_BROADCAST) and 0xfffffffe are invalid values
     * since the Active Namespace ID List should return namespaces with ids
     * *higher* than the NSID specified in the command. This is also specified
     * in the spec (NVM Express v1.3d, Section 5.15.4).
     */
    if (min_nsid >= NVME_NSID_BROADCAST - 1) {
        return NVME_INVALID_NSID | NVME_DNR;
    }

    list = g_malloc0(data_len);
    for (i = 0; i < n->num_namespaces; i++) {
        if (i < min_nsid) {
            continue;
        }
        list[j++] = cpu_to_le32(i + 1);
        if (j == data_len / sizeof(uint32_t)) {
            break;
        }
    }
    ret = nvme_dma_prp(n, (uint8_t *)list, data_len, prp1, prp2,
                       DMA_DIRECTION_FROM_DEVICE, req);
    g_free(list);
    return ret;
}

*n
, NvmeRequest
*req
)
1188 NvmeIdentify
*c
= (NvmeIdentify
*)&req
->cmd
;
1189 uint32_t nsid
= le32_to_cpu(c
->nsid
);
1190 uint64_t prp1
= le64_to_cpu(c
->prp1
);
1191 uint64_t prp2
= le64_to_cpu(c
->prp2
);
1193 uint8_t list
[NVME_IDENTIFY_DATA_SIZE
];
1202 struct data
*ns_descrs
= (struct data
*)list
;
1204 trace_pci_nvme_identify_ns_descr_list(nsid
);
1206 if (unlikely(nsid
== 0 || nsid
> n
->num_namespaces
)) {
1207 trace_pci_nvme_err_invalid_ns(nsid
, n
->num_namespaces
);
1208 return NVME_INVALID_NSID
| NVME_DNR
;
1211 memset(list
, 0x0, sizeof(list
));
1214 * Because the NGUID and EUI64 fields are 0 in the Identify Namespace data
1215 * structure, a Namespace UUID (nidt = 0x3) must be reported in the
1216 * Namespace Identification Descriptor. Add a very basic Namespace UUID
1219 ns_descrs
->uuid
.hdr
.nidt
= NVME_NIDT_UUID
;
1220 ns_descrs
->uuid
.hdr
.nidl
= NVME_NIDT_UUID_LEN
;
1221 stl_be_p(&ns_descrs
->uuid
.v
, nsid
);
1223 return nvme_dma_prp(n
, list
, NVME_IDENTIFY_DATA_SIZE
, prp1
, prp2
,
1224 DMA_DIRECTION_FROM_DEVICE
, req
);
static uint16_t nvme_identify(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeIdentify *c = (NvmeIdentify *)&req->cmd;

    switch (le32_to_cpu(c->cns)) {
    case NVME_ID_CNS_NS:
        return nvme_identify_ns(n, req);
    case NVME_ID_CNS_CTRL:
        return nvme_identify_ctrl(n, req);
    case NVME_ID_CNS_NS_ACTIVE_LIST:
        return nvme_identify_nslist(n, req);
    case NVME_ID_CNS_NS_DESCR_LIST:
        return nvme_identify_ns_descr_list(n, req);
    default:
        trace_pci_nvme_err_invalid_identify_cns(le32_to_cpu(c->cns));
        return NVME_INVALID_FIELD | NVME_DNR;
    }
}

static uint16_t nvme_abort(NvmeCtrl *n, NvmeRequest *req)
{
    uint16_t sqid = le32_to_cpu(req->cmd.cdw10) & 0xffff;

    req->cqe.result = 1;
    if (nvme_check_sqid(n, sqid)) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    return NVME_SUCCESS;
}

static inline void nvme_set_timestamp(NvmeCtrl *n, uint64_t ts)
{
    trace_pci_nvme_setfeat_timestamp(ts);

    n->host_timestamp = le64_to_cpu(ts);
    n->timestamp_set_qemu_clock_ms = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
}

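/*
 * Reconstruct the Timestamp feature value: the host-set value plus the
 * milliseconds elapsed since it was set, truncated to 48 bits as required by
 * the spec.
 */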
static inline uint64_t nvme_get_timestamp(const NvmeCtrl *n)
{
    uint64_t current_time = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
    uint64_t elapsed_time = current_time - n->timestamp_set_qemu_clock_ms;

    union nvme_timestamp {
        struct {
            uint64_t timestamp:48;
            uint64_t sync:1;
            uint64_t origin:3;
            uint64_t rsvd1:12;
        };
        uint64_t all;
    };

    union nvme_timestamp ts;
    ts.all = 0;

    /*
     * If the sum of the Timestamp value set by the host and the elapsed
     * time exceeds 2^48, the value returned should be reduced modulo 2^48.
     */
    ts.timestamp = (n->host_timestamp + elapsed_time) & 0xffffffffffff;

    /* If the host timestamp is non-zero, set the timestamp origin */
    ts.origin = n->host_timestamp ? 0x01 : 0x00;

    trace_pci_nvme_getfeat_timestamp(ts.all);

    return cpu_to_le64(ts.all);
}

static uint16_t nvme_get_feature_timestamp(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeCmd *cmd = &req->cmd;
    uint64_t prp1 = le64_to_cpu(cmd->dptr.prp1);
    uint64_t prp2 = le64_to_cpu(cmd->dptr.prp2);

    uint64_t timestamp = nvme_get_timestamp(n);

    return nvme_dma_prp(n, (uint8_t *)&timestamp, sizeof(timestamp), prp1,
                        prp2, DMA_DIRECTION_FROM_DEVICE, req);
}

static uint16_t nvme_get_feature(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeCmd *cmd = &req->cmd;
    uint32_t dw10 = le32_to_cpu(cmd->cdw10);
    uint32_t dw11 = le32_to_cpu(cmd->cdw11);
    uint32_t nsid = le32_to_cpu(cmd->nsid);
    uint32_t result;
    uint8_t fid = NVME_GETSETFEAT_FID(dw10);
    NvmeGetFeatureSelect sel = NVME_GETFEAT_SELECT(dw10);
    uint16_t iv;

    static const uint32_t nvme_feature_default[NVME_FID_MAX] = {
        [NVME_ARBITRATION] = NVME_ARB_AB_NOLIMIT,
    };

    trace_pci_nvme_getfeat(nvme_cid(req), fid, sel, dw11);

    if (!nvme_feature_support[fid]) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    if (nvme_feature_cap[fid] & NVME_FEAT_CAP_NS) {
        if (!nsid || nsid > n->num_namespaces) {
            /*
             * The Reservation Notification Mask and Reservation Persistence
             * features require a status code of Invalid Field in Command when
             * NSID is 0xFFFFFFFF. Since the device does not support those
             * features we can always return Invalid Namespace or Format as we
             * should do for all other features.
             */
            return NVME_INVALID_NSID | NVME_DNR;
        }
    }

    switch (sel) {
    case NVME_GETFEAT_SELECT_CURRENT:
        break;
    case NVME_GETFEAT_SELECT_SAVED:
        /* no features are saveable by the controller; fallthrough */
    case NVME_GETFEAT_SELECT_DEFAULT:
        goto defaults;
    case NVME_GETFEAT_SELECT_CAP:
        result = nvme_feature_cap[fid];
        goto out;
    }

    switch (fid) {
    case NVME_TEMPERATURE_THRESHOLD:
        result = 0;

        /*
         * The controller only implements the Composite Temperature sensor, so
         * return 0 for all other sensors.
         */
        if (NVME_TEMP_TMPSEL(dw11) != NVME_TEMP_TMPSEL_COMPOSITE) {
            goto out;
        }

        switch (NVME_TEMP_THSEL(dw11)) {
        case NVME_TEMP_THSEL_OVER:
            result = n->features.temp_thresh_hi;
            goto out;
        case NVME_TEMP_THSEL_UNDER:
            result = n->features.temp_thresh_low;
            goto out;
        }

        return NVME_INVALID_FIELD | NVME_DNR;
    case NVME_VOLATILE_WRITE_CACHE:
        result = blk_enable_write_cache(n->conf.blk);
        trace_pci_nvme_getfeat_vwcache(result ? "enabled" : "disabled");
        goto out;
    case NVME_ASYNCHRONOUS_EVENT_CONF:
        result = n->features.async_config;
        goto out;
    case NVME_TIMESTAMP:
        return nvme_get_feature_timestamp(n, req);
    default:
        break;
    }

defaults:
    switch (fid) {
    case NVME_TEMPERATURE_THRESHOLD:
        result = 0;

        if (NVME_TEMP_TMPSEL(dw11) != NVME_TEMP_TMPSEL_COMPOSITE) {
            break;
        }

        if (NVME_TEMP_THSEL(dw11) == NVME_TEMP_THSEL_OVER) {
            result = NVME_TEMPERATURE_WARNING;
        }

        break;
    case NVME_NUMBER_OF_QUEUES:
        result = (n->params.max_ioqpairs - 1) |
            ((n->params.max_ioqpairs - 1) << 16);
        trace_pci_nvme_getfeat_numq(result);
        break;
    case NVME_INTERRUPT_VECTOR_CONF:
        iv = dw11 & 0xffff;
        if (iv >= n->params.max_ioqpairs + 1) {
            return NVME_INVALID_FIELD | NVME_DNR;
        }

        result = iv;
        if (iv == n->admin_cq.vector) {
            result |= NVME_INTVC_NOCOALESCING;
        }

        break;
    default:
        result = nvme_feature_default[fid];
        break;
    }

out:
    req->cqe.result = cpu_to_le32(result);
    return NVME_SUCCESS;
}

static uint16_t nvme_set_feature_timestamp(NvmeCtrl *n, NvmeRequest *req)
{
    uint16_t ret;
    uint64_t timestamp;
    NvmeCmd *cmd = &req->cmd;
    uint64_t prp1 = le64_to_cpu(cmd->dptr.prp1);
    uint64_t prp2 = le64_to_cpu(cmd->dptr.prp2);

    ret = nvme_dma_prp(n, (uint8_t *)&timestamp, sizeof(timestamp), prp1,
                       prp2, DMA_DIRECTION_TO_DEVICE, req);
    if (ret != NVME_SUCCESS) {
        return ret;
    }

    nvme_set_timestamp(n, timestamp);

    return NVME_SUCCESS;
}

static uint16_t nvme_set_feature(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeCmd *cmd = &req->cmd;
    uint32_t dw10 = le32_to_cpu(cmd->cdw10);
    uint32_t dw11 = le32_to_cpu(cmd->cdw11);
    uint32_t nsid = le32_to_cpu(cmd->nsid);
    uint8_t fid = NVME_GETSETFEAT_FID(dw10);
    uint8_t save = NVME_SETFEAT_SAVE(dw10);

    trace_pci_nvme_setfeat(nvme_cid(req), fid, save, dw11);

    if (save) {
        return NVME_FID_NOT_SAVEABLE | NVME_DNR;
    }

    if (!nvme_feature_support[fid]) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    if (nvme_feature_cap[fid] & NVME_FEAT_CAP_NS) {
        if (!nsid || (nsid != NVME_NSID_BROADCAST &&
                      nsid > n->num_namespaces)) {
            return NVME_INVALID_NSID | NVME_DNR;
        }
    } else if (nsid && nsid != NVME_NSID_BROADCAST) {
        if (nsid > n->num_namespaces) {
            return NVME_INVALID_NSID | NVME_DNR;
        }

        return NVME_FEAT_NOT_NS_SPEC | NVME_DNR;
    }

    if (!(nvme_feature_cap[fid] & NVME_FEAT_CAP_CHANGE)) {
        return NVME_FEAT_NOT_CHANGEABLE | NVME_DNR;
    }

    switch (fid) {
    case NVME_TEMPERATURE_THRESHOLD:
        if (NVME_TEMP_TMPSEL(dw11) != NVME_TEMP_TMPSEL_COMPOSITE) {
            break;
        }

        switch (NVME_TEMP_THSEL(dw11)) {
        case NVME_TEMP_THSEL_OVER:
            n->features.temp_thresh_hi = NVME_TEMP_TMPTH(dw11);
            break;
        case NVME_TEMP_THSEL_UNDER:
            n->features.temp_thresh_low = NVME_TEMP_TMPTH(dw11);
            break;
        default:
            return NVME_INVALID_FIELD | NVME_DNR;
        }

        if (((n->temperature >= n->features.temp_thresh_hi) ||
             (n->temperature <= n->features.temp_thresh_low)) &&
            NVME_AEC_SMART(n->features.async_config) & NVME_SMART_TEMPERATURE) {
            nvme_enqueue_event(n, NVME_AER_TYPE_SMART,
                               NVME_AER_INFO_SMART_TEMP_THRESH,
                               NVME_LOG_SMART_INFO);
        }

        break;
    case NVME_VOLATILE_WRITE_CACHE:
        if (!(dw11 & 0x1) && blk_enable_write_cache(n->conf.blk)) {
            blk_flush(n->conf.blk);
        }

        blk_set_enable_write_cache(n->conf.blk, dw11 & 1);
        break;
    case NVME_NUMBER_OF_QUEUES:
        if (n->qs_created) {
            return NVME_CMD_SEQ_ERROR | NVME_DNR;
        }

        /*
         * NVMe v1.3, Section 5.21.1.7: 0xffff is not an allowed value for NCQR
         * and NSQR.
         */
        if ((dw11 & 0xffff) == 0xffff || ((dw11 >> 16) & 0xffff) == 0xffff) {
            return NVME_INVALID_FIELD | NVME_DNR;
        }

        trace_pci_nvme_setfeat_numq((dw11 & 0xFFFF) + 1,
                                    ((dw11 >> 16) & 0xFFFF) + 1,
                                    n->params.max_ioqpairs,
                                    n->params.max_ioqpairs);
        req->cqe.result = cpu_to_le32((n->params.max_ioqpairs - 1) |
                                      ((n->params.max_ioqpairs - 1) << 16));
        break;
    case NVME_ASYNCHRONOUS_EVENT_CONF:
        n->features.async_config = dw11;
        break;
    case NVME_TIMESTAMP:
        return nvme_set_feature_timestamp(n, req);
    default:
        return NVME_FEAT_NOT_CHANGEABLE | NVME_DNR;
    }
    return NVME_SUCCESS;
}

static uint16_t nvme_aer(NvmeCtrl *n, NvmeRequest *req)
{
    trace_pci_nvme_aer(nvme_cid(req));

    if (n->outstanding_aers > n->params.aerl) {
        trace_pci_nvme_aer_aerl_exceeded();
        return NVME_AER_LIMIT_EXCEEDED;
    }

    n->aer_reqs[n->outstanding_aers] = req;
    n->outstanding_aers++;

    if (!QTAILQ_EMPTY(&n->aer_queue)) {
        nvme_process_aers(n);
    }

    return NVME_NO_COMPLETE;
}

static uint16_t nvme_admin_cmd(NvmeCtrl *n, NvmeRequest *req)
{
    trace_pci_nvme_admin_cmd(nvme_cid(req), nvme_sqid(req), req->cmd.opcode);

    switch (req->cmd.opcode) {
    case NVME_ADM_CMD_DELETE_SQ:
        return nvme_del_sq(n, req);
    case NVME_ADM_CMD_CREATE_SQ:
        return nvme_create_sq(n, req);
    case NVME_ADM_CMD_GET_LOG_PAGE:
        return nvme_get_log(n, req);
    case NVME_ADM_CMD_DELETE_CQ:
        return nvme_del_cq(n, req);
    case NVME_ADM_CMD_CREATE_CQ:
        return nvme_create_cq(n, req);
    case NVME_ADM_CMD_IDENTIFY:
        return nvme_identify(n, req);
    case NVME_ADM_CMD_ABORT:
        return nvme_abort(n, req);
    case NVME_ADM_CMD_SET_FEATURES:
        return nvme_set_feature(n, req);
    case NVME_ADM_CMD_GET_FEATURES:
        return nvme_get_feature(n, req);
    case NVME_ADM_CMD_ASYNC_EV_REQ:
        return nvme_aer(n, req);
    default:
        trace_pci_nvme_err_invalid_admin_opc(req->cmd.opcode);
        return NVME_INVALID_OPCODE | NVME_DNR;
    }
}

static void nvme_process_sq(void *opaque)
{
    NvmeSQueue *sq = opaque;
    NvmeCtrl *n = sq->ctrl;
    NvmeCQueue *cq = n->cq[sq->cqid];

    uint16_t status;
    hwaddr addr;
    NvmeCmd cmd;
    NvmeRequest *req;

    while (!(nvme_sq_empty(sq) || QTAILQ_EMPTY(&sq->req_list))) {
        addr = sq->dma_addr + sq->head * n->sqe_size;
        nvme_addr_read(n, addr, (void *)&cmd, sizeof(cmd));
        nvme_inc_sq_head(sq);

        req = QTAILQ_FIRST(&sq->req_list);
        QTAILQ_REMOVE(&sq->req_list, req, entry);
        QTAILQ_INSERT_TAIL(&sq->out_req_list, req, entry);
        nvme_req_clear(req);
        req->cqe.cid = cmd.cid;
        memcpy(&req->cmd, &cmd, sizeof(NvmeCmd));

        status = sq->sqid ? nvme_io_cmd(n, req) :
            nvme_admin_cmd(n, req);
        if (status != NVME_NO_COMPLETE) {
            req->status = status;
            nvme_enqueue_req_completion(cq, req);
        }
    }
}

static void nvme_clear_ctrl(NvmeCtrl *n)
{
    int i;

    blk_drain(n->conf.blk);

    for (i = 0; i < n->params.max_ioqpairs + 1; i++) {
        if (n->sq[i] != NULL) {
            nvme_free_sq(n->sq[i], n);
        }
    }
    for (i = 0; i < n->params.max_ioqpairs + 1; i++) {
        if (n->cq[i] != NULL) {
            nvme_free_cq(n->cq[i], n);
        }
    }

    while (!QTAILQ_EMPTY(&n->aer_queue)) {
        NvmeAsyncEvent *event = QTAILQ_FIRST(&n->aer_queue);
        QTAILQ_REMOVE(&n->aer_queue, event, entry);
        g_free(event);
    }

    n->aer_queued = 0;
    n->outstanding_aers = 0;
    n->qs_created = false;

    blk_flush(n->conf.blk);
}

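/*
 * Validate CC/AQA/ASQ/ACQ against the controller's capabilities and, on
 * success, bring up the admin queue pair. Returns 0 on success, -1 if the
 * configuration is invalid (the caller then marks the controller failed in
 * CSTS).
 */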
static int nvme_start_ctrl(NvmeCtrl *n)
{
    uint32_t page_bits = NVME_CC_MPS(n->bar.cc) + 12;
    uint32_t page_size = 1 << page_bits;

    if (unlikely(n->cq[0])) {
        trace_pci_nvme_err_startfail_cq();
        return -1;
    }
    if (unlikely(n->sq[0])) {
        trace_pci_nvme_err_startfail_sq();
        return -1;
    }
    if (unlikely(!n->bar.asq)) {
        trace_pci_nvme_err_startfail_nbarasq();
        return -1;
    }
    if (unlikely(!n->bar.acq)) {
        trace_pci_nvme_err_startfail_nbaracq();
        return -1;
    }
    if (unlikely(n->bar.asq & (page_size - 1))) {
        trace_pci_nvme_err_startfail_asq_misaligned(n->bar.asq);
        return -1;
    }
    if (unlikely(n->bar.acq & (page_size - 1))) {
        trace_pci_nvme_err_startfail_acq_misaligned(n->bar.acq);
        return -1;
    }
    if (unlikely(NVME_CC_MPS(n->bar.cc) <
                 NVME_CAP_MPSMIN(n->bar.cap))) {
        trace_pci_nvme_err_startfail_page_too_small(
                    NVME_CC_MPS(n->bar.cc),
                    NVME_CAP_MPSMIN(n->bar.cap));
        return -1;
    }
    if (unlikely(NVME_CC_MPS(n->bar.cc) >
                 NVME_CAP_MPSMAX(n->bar.cap))) {
        trace_pci_nvme_err_startfail_page_too_large(
                    NVME_CC_MPS(n->bar.cc),
                    NVME_CAP_MPSMAX(n->bar.cap));
        return -1;
    }
    if (unlikely(NVME_CC_IOCQES(n->bar.cc) <
                 NVME_CTRL_CQES_MIN(n->id_ctrl.cqes))) {
        trace_pci_nvme_err_startfail_cqent_too_small(
                    NVME_CC_IOCQES(n->bar.cc),
                    NVME_CTRL_CQES_MIN(n->bar.cap));
        return -1;
    }
    if (unlikely(NVME_CC_IOCQES(n->bar.cc) >
                 NVME_CTRL_CQES_MAX(n->id_ctrl.cqes))) {
        trace_pci_nvme_err_startfail_cqent_too_large(
                    NVME_CC_IOCQES(n->bar.cc),
                    NVME_CTRL_CQES_MAX(n->bar.cap));
        return -1;
    }
    if (unlikely(NVME_CC_IOSQES(n->bar.cc) <
                 NVME_CTRL_SQES_MIN(n->id_ctrl.sqes))) {
        trace_pci_nvme_err_startfail_sqent_too_small(
                    NVME_CC_IOSQES(n->bar.cc),
                    NVME_CTRL_SQES_MIN(n->bar.cap));
        return -1;
    }
    if (unlikely(NVME_CC_IOSQES(n->bar.cc) >
                 NVME_CTRL_SQES_MAX(n->id_ctrl.sqes))) {
        trace_pci_nvme_err_startfail_sqent_too_large(
                    NVME_CC_IOSQES(n->bar.cc),
                    NVME_CTRL_SQES_MAX(n->bar.cap));
        return -1;
    }
    if (unlikely(!NVME_AQA_ASQS(n->bar.aqa))) {
        trace_pci_nvme_err_startfail_asqent_sz_zero();
        return -1;
    }
    if (unlikely(!NVME_AQA_ACQS(n->bar.aqa))) {
        trace_pci_nvme_err_startfail_acqent_sz_zero();
        return -1;
    }

    n->page_bits = page_bits;
    n->page_size = page_size;
    n->max_prp_ents = n->page_size / sizeof(uint64_t);
    n->cqe_size = 1 << NVME_CC_IOCQES(n->bar.cc);
    n->sqe_size = 1 << NVME_CC_IOSQES(n->bar.cc);
    nvme_init_cq(&n->admin_cq, n, n->bar.acq, 0, 0,
                 NVME_AQA_ACQS(n->bar.aqa) + 1, 1);
    nvme_init_sq(&n->admin_sq, n, n->bar.asq, 0, 0,
                 NVME_AQA_ASQS(n->bar.aqa) + 1);

    nvme_set_timestamp(n, 0ULL);

    QTAILQ_INIT(&n->aer_queue);

    return 0;
}

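/*
 * Handle writes to the controller register file (BAR0). Registers here are
 * 32 bits wide; undefined or read-only accesses are logged with
 * NVME_GUEST_ERR rather than faulting the guest.
 */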
static void nvme_write_bar(NvmeCtrl *n, hwaddr offset, uint64_t data,
                           unsigned size)
{
    if (unlikely(offset & (sizeof(uint32_t) - 1))) {
        NVME_GUEST_ERR(pci_nvme_ub_mmiowr_misaligned32,
                       "MMIO write not 32-bit aligned,"
                       " offset=0x%"PRIx64"", offset);
        /* should be ignored, fall through for now */
    }

    if (unlikely(size < sizeof(uint32_t))) {
        NVME_GUEST_ERR(pci_nvme_ub_mmiowr_toosmall,
                       "MMIO write smaller than 32-bits,"
                       " offset=0x%"PRIx64", size=%u",
                       offset, size);
        /* should be ignored, fall through for now */
    }

    switch (offset) {
    case 0xc:   /* INTMS */
        if (unlikely(msix_enabled(&(n->parent_obj)))) {
            NVME_GUEST_ERR(pci_nvme_ub_mmiowr_intmask_with_msix,
                           "undefined access to interrupt mask set"
                           " when MSI-X is enabled");
            /* should be ignored, fall through for now */
        }
        n->bar.intms |= data & 0xffffffff;
        n->bar.intmc = n->bar.intms;
        trace_pci_nvme_mmio_intm_set(data & 0xffffffff, n->bar.intmc);
        nvme_irq_check(n);
        break;
    case 0x10:  /* INTMC */
        if (unlikely(msix_enabled(&(n->parent_obj)))) {
            NVME_GUEST_ERR(pci_nvme_ub_mmiowr_intmask_with_msix,
                           "undefined access to interrupt mask clr"
                           " when MSI-X is enabled");
            /* should be ignored, fall through for now */
        }
        n->bar.intms &= ~(data & 0xffffffff);
        n->bar.intmc = n->bar.intms;
        trace_pci_nvme_mmio_intm_clr(data & 0xffffffff, n->bar.intmc);
        nvme_irq_check(n);
        break;
    case 0x14:  /* CC */
        trace_pci_nvme_mmio_cfg(data & 0xffffffff);
        /* Windows first sends data, then sends enable bit */
        if (!NVME_CC_EN(data) && !NVME_CC_EN(n->bar.cc) &&
            !NVME_CC_SHN(data) && !NVME_CC_SHN(n->bar.cc))
        {
            n->bar.cc = data;
        }

        if (NVME_CC_EN(data) && !NVME_CC_EN(n->bar.cc)) {
            n->bar.cc = data;
            if (unlikely(nvme_start_ctrl(n))) {
                trace_pci_nvme_err_startfail();
                n->bar.csts = NVME_CSTS_FAILED;
            } else {
                trace_pci_nvme_mmio_start_success();
                n->bar.csts = NVME_CSTS_READY;
            }
        } else if (!NVME_CC_EN(data) && NVME_CC_EN(n->bar.cc)) {
            trace_pci_nvme_mmio_stopped();
            nvme_clear_ctrl(n);
            n->bar.csts &= ~NVME_CSTS_READY;
        }
        if (NVME_CC_SHN(data) && !(NVME_CC_SHN(n->bar.cc))) {
            trace_pci_nvme_mmio_shutdown_set();
            nvme_clear_ctrl(n);
            n->bar.cc = data;
            n->bar.csts |= NVME_CSTS_SHST_COMPLETE;
        } else if (!NVME_CC_SHN(data) && NVME_CC_SHN(n->bar.cc)) {
            trace_pci_nvme_mmio_shutdown_cleared();
            n->bar.csts &= ~NVME_CSTS_SHST_COMPLETE;
            n->bar.cc = data;
        }
        break;
    case 0x1C:  /* CSTS */
        if (data & (1 << 4)) {
            NVME_GUEST_ERR(pci_nvme_ub_mmiowr_ssreset_w1c_unsupported,
                           "attempted to W1C CSTS.NSSRO"
                           " but CAP.NSSRS is zero (not supported)");
        } else if (data != 0) {
            NVME_GUEST_ERR(pci_nvme_ub_mmiowr_ro_csts,
                           "attempted to set a read only bit"
                           " of controller status");
        }
        break;
    case 0x20:  /* NSSR */
        if (data == 0x4E564D65) {
            trace_pci_nvme_ub_mmiowr_ssreset_unsupported();
        } else {
            /* The spec says that writes of other values have no effect */
            return;
        }
        break;
    case 0x24:  /* AQA */
        n->bar.aqa = data & 0xffffffff;
        trace_pci_nvme_mmio_aqattr(data & 0xffffffff);
        break;
    case 0x28:  /* ASQ */
        n->bar.asq = data;
        trace_pci_nvme_mmio_asqaddr(data);
        break;
    case 0x2c:  /* ASQ hi */
        n->bar.asq |= data << 32;
        trace_pci_nvme_mmio_asqaddr_hi(data, n->bar.asq);
        break;
    case 0x30:  /* ACQ */
        trace_pci_nvme_mmio_acqaddr(data);
        n->bar.acq = data;
        break;
    case 0x34:  /* ACQ hi */
        n->bar.acq |= data << 32;
        trace_pci_nvme_mmio_acqaddr_hi(data, n->bar.acq);
        break;
    case 0x38:  /* CMBLOC */
        NVME_GUEST_ERR(pci_nvme_ub_mmiowr_cmbloc_reserved,
                       "invalid write to reserved CMBLOC"
                       " when CMBSZ is zero, ignored");
        return;
    case 0x3C:  /* CMBSZ */
        NVME_GUEST_ERR(pci_nvme_ub_mmiowr_cmbsz_readonly,
                       "invalid write to read only CMBSZ, ignored");
        return;
    case 0xE00: /* PMRCAP */
        NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrcap_readonly,
                       "invalid write to PMRCAP register, ignored");
        return;
    case 0xE04: /* TODO PMRCTL */
        return;
    case 0xE08: /* PMRSTS */
        NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrsts_readonly,
                       "invalid write to PMRSTS register, ignored");
        return;
    case 0xE0C: /* PMREBS */
        NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrebs_readonly,
                       "invalid write to PMREBS register, ignored");
        return;
    case 0xE10: /* PMRSWTP */
        NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrswtp_readonly,
                       "invalid write to PMRSWTP register, ignored");
        return;
    case 0xE14: /* TODO PMRMSC */
        break;
    default:
        NVME_GUEST_ERR(pci_nvme_ub_mmiowr_invalid,
                       "invalid MMIO write,"
                       " offset=0x%"PRIx64", data=%"PRIx64"",
                       offset, data);
        break;
    }
}

static uint64_t nvme_mmio_read(void *opaque, hwaddr addr, unsigned size)
{
    NvmeCtrl *n = (NvmeCtrl *)opaque;
    uint8_t *ptr = (uint8_t *)&n->bar;
    uint64_t val = 0;

    trace_pci_nvme_mmio_read(addr);

    if (unlikely(addr & (sizeof(uint32_t) - 1))) {
        NVME_GUEST_ERR(pci_nvme_ub_mmiord_misaligned32,
                       "MMIO read not 32-bit aligned,"
                       " offset=0x%"PRIx64"", addr);
        /* should RAZ, fall through for now */
    } else if (unlikely(size < sizeof(uint32_t))) {
        NVME_GUEST_ERR(pci_nvme_ub_mmiord_toosmall,
                       "MMIO read smaller than 32-bits,"
                       " offset=0x%"PRIx64"", addr);
        /* should RAZ, fall through for now */
    }

    if (addr < sizeof(n->bar)) {
        /*
         * When PMRWBM bit 1 is set, a read from PMRSTS should ensure
         * prior writes made it to persistent media
         */
        if (addr == 0xE08 &&
            (NVME_PMRCAP_PMRWBM(n->bar.pmrcap) & 0x02)) {
            memory_region_msync(&n->pmrdev->mr, 0, n->pmrdev->size);
        }
        memcpy(&val, ptr + addr, size);
    } else {
        NVME_GUEST_ERR(pci_nvme_ub_mmiord_invalid_ofs,
                       "MMIO read beyond last register,"
                       " offset=0x%"PRIx64", returning 0", addr);
    }

    return val;
}

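/*
 * Handle a doorbell write. Doorbells start at offset 0x1000 and are laid out
 * as submission tail / completion head pairs, 4 bytes each, so bit 2 of the
 * offset distinguishes completion doorbells from submission doorbells.
 */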
static void nvme_process_db(NvmeCtrl *n, hwaddr addr, int val)
{
    uint32_t qid;

    if (unlikely(addr & ((1 << 2) - 1))) {
        NVME_GUEST_ERR(pci_nvme_ub_db_wr_misaligned,
                       "doorbell write not 32-bit aligned,"
                       " offset=0x%"PRIx64", ignoring", addr);
        return;
    }

    if (((addr - 0x1000) >> 2) & 1) {
        /* Completion queue doorbell write */

        uint16_t new_head = val & 0xffff;
        int start_sqs;
        NvmeCQueue *cq;

        qid = (addr - (0x1000 + (1 << 2))) >> 3;
        if (unlikely(nvme_check_cqid(n, qid))) {
            NVME_GUEST_ERR(pci_nvme_ub_db_wr_invalid_cq,
                           "completion queue doorbell write"
                           " for nonexistent queue,"
                           " sqid=%"PRIu32", ignoring", qid);

            /*
             * NVM Express v1.3d, Section 4.1 state: "If host software writes
             * an invalid value to the Submission Queue Tail Doorbell or
             * Completion Queue Head Doorbell register and an Asynchronous
             * Event Request command is outstanding, then an asynchronous
             * event is posted to the Admin Completion Queue with a status
             * code of Invalid Doorbell Write Value."
             *
             * Also note that the spec includes the "Invalid Doorbell Register"
             * status code, but nowhere does it specify when to use it.
             * However, it seems reasonable to use it here in a similar
             * fashion.
             */
            if (n->outstanding_aers) {
                nvme_enqueue_event(n, NVME_AER_TYPE_ERROR,
                                   NVME_AER_INFO_ERR_INVALID_DB_REGISTER,
                                   NVME_LOG_ERROR_INFO);
            }

            return;
        }

        cq = n->cq[qid];
        if (unlikely(new_head >= cq->size)) {
            NVME_GUEST_ERR(pci_nvme_ub_db_wr_invalid_cqhead,
                           "completion queue doorbell write value"
                           " beyond queue size, sqid=%"PRIu32","
                           " new_head=%"PRIu16", ignoring",
                           qid, new_head);

            if (n->outstanding_aers) {
                nvme_enqueue_event(n, NVME_AER_TYPE_ERROR,
                                   NVME_AER_INFO_ERR_INVALID_DB_VALUE,
                                   NVME_LOG_ERROR_INFO);
            }

            return;
        }

        trace_pci_nvme_mmio_doorbell_cq(cq->cqid, new_head);

        start_sqs = nvme_cq_full(cq) ? 1 : 0;
        cq->head = new_head;
        if (start_sqs) {
            NvmeSQueue *sq;
            QTAILQ_FOREACH(sq, &cq->sq_list, entry) {
                timer_mod(sq->timer,
                          qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 500);
            }
            timer_mod(cq->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 500);
        }

        if (cq->tail == cq->head) {
            nvme_irq_deassert(n, cq);
        }
    } else {
        /* Submission queue doorbell write */

        uint16_t new_tail = val & 0xffff;
        NvmeSQueue *sq;

        qid = (addr - 0x1000) >> 3;
        if (unlikely(nvme_check_sqid(n, qid))) {
            NVME_GUEST_ERR(pci_nvme_ub_db_wr_invalid_sq,
                           "submission queue doorbell write"
                           " for nonexistent queue,"
                           " sqid=%"PRIu32", ignoring", qid);

            if (n->outstanding_aers) {
                nvme_enqueue_event(n, NVME_AER_TYPE_ERROR,
                                   NVME_AER_INFO_ERR_INVALID_DB_REGISTER,
                                   NVME_LOG_ERROR_INFO);
            }

            return;
        }

        sq = n->sq[qid];
        if (unlikely(new_tail >= sq->size)) {
            NVME_GUEST_ERR(pci_nvme_ub_db_wr_invalid_sqtail,
                           "submission queue doorbell write value"
                           " beyond queue size, sqid=%"PRIu32","
                           " new_tail=%"PRIu16", ignoring",
                           qid, new_tail);

            if (n->outstanding_aers) {
                nvme_enqueue_event(n, NVME_AER_TYPE_ERROR,
                                   NVME_AER_INFO_ERR_INVALID_DB_VALUE,
                                   NVME_LOG_ERROR_INFO);
            }

            return;
        }

        trace_pci_nvme_mmio_doorbell_sq(sq->sqid, new_tail);

        sq->tail = new_tail;
        timer_mod(sq->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 500);
    }
}

static void nvme_mmio_write(void *opaque, hwaddr addr, uint64_t data,
                            unsigned size)
{
    NvmeCtrl *n = (NvmeCtrl *)opaque;

    trace_pci_nvme_mmio_write(addr, data);

    if (addr < sizeof(n->bar)) {
        nvme_write_bar(n, addr, data, size);
    } else {
        nvme_process_db(n, addr, data);
    }
}

static const MemoryRegionOps nvme_mmio_ops = {
    .read = nvme_mmio_read,
    .write = nvme_mmio_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 2,
        .max_access_size = 8,
    },
};

static void nvme_cmb_write(void *opaque, hwaddr addr, uint64_t data,
                           unsigned size)
{
    NvmeCtrl *n = (NvmeCtrl *)opaque;
    stn_le_p(&n->cmbuf[addr], size, data);
}

static uint64_t nvme_cmb_read(void *opaque, hwaddr addr, unsigned size)
{
    NvmeCtrl *n = (NvmeCtrl *)opaque;
    return ldn_le_p(&n->cmbuf[addr], size);
}

static const MemoryRegionOps nvme_cmb_ops = {
    .read = nvme_cmb_read,
    .write = nvme_cmb_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};

static void nvme_check_constraints(NvmeCtrl *n, Error **errp)
{
    NvmeParams *params = &n->params;

    if (params->num_queues) {
        warn_report("num_queues is deprecated; please use max_ioqpairs "
                    "instead");

        params->max_ioqpairs = params->num_queues - 1;
    }

    if (params->max_ioqpairs < 1 ||
        params->max_ioqpairs > NVME_MAX_IOQPAIRS) {
        error_setg(errp, "max_ioqpairs must be between 1 and %d",
                   NVME_MAX_IOQPAIRS);
        return;
    }

    if (params->msix_qsize < 1 ||
        params->msix_qsize > PCI_MSIX_FLAGS_QSIZE + 1) {
        error_setg(errp, "msix_qsize must be between 1 and %d",
                   PCI_MSIX_FLAGS_QSIZE + 1);
        return;
    }

    if (!n->conf.blk) {
        error_setg(errp, "drive property not set");
        return;
    }

    if (!params->serial) {
        error_setg(errp, "serial property not set");
        return;
    }

    if (!n->params.cmb_size_mb && n->pmrdev) {
        if (host_memory_backend_is_mapped(n->pmrdev)) {
            error_setg(errp, "can't use already busy memdev: %s",
                       object_get_canonical_path_component(OBJECT(n->pmrdev)));
            return;
        }

        if (!is_power_of_2(n->pmrdev->size)) {
            error_setg(errp, "pmr backend size needs to be power of 2 in size");
            return;
        }

        host_memory_backend_set_mapped(n->pmrdev, true);
    }
}

static void nvme_init_state(NvmeCtrl *n)
{
    n->num_namespaces = 1;
    /* add one to max_ioqpairs to account for the admin queue pair */
    n->reg_size = pow2ceil(sizeof(NvmeBar) +
                           2 * (n->params.max_ioqpairs + 1) * NVME_DB_SIZE);
    n->namespaces = g_new0(NvmeNamespace, n->num_namespaces);
    n->sq = g_new0(NvmeSQueue *, n->params.max_ioqpairs + 1);
    n->cq = g_new0(NvmeCQueue *, n->params.max_ioqpairs + 1);
    n->temperature = NVME_TEMPERATURE;
    n->features.temp_thresh_hi = NVME_TEMPERATURE_WARNING;
    n->starttime_ms = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
    n->aer_reqs = g_new0(NvmeRequest *, n->params.aerl + 1);
}

2193 static void nvme_init_blk(NvmeCtrl
*n
, Error
**errp
)
2195 if (!blkconf_blocksizes(&n
->conf
, errp
)) {
2198 blkconf_apply_backend_options(&n
->conf
, blk_is_read_only(n
->conf
.blk
),
static void nvme_init_namespace(NvmeCtrl *n, NvmeNamespace *ns, Error **errp)
{
    int64_t bs_size;
    NvmeIdNs *id_ns = &ns->id_ns;

    bs_size = blk_getlength(n->conf.blk);
    if (bs_size < 0) {
        error_setg_errno(errp, -bs_size, "could not get backing file size");
        return;
    }

    n->ns_size = bs_size;

    id_ns->lbaf[0].ds = BDRV_SECTOR_BITS;
    id_ns->nsze = cpu_to_le64(nvme_ns_nlbas(n, ns));

    /* no thin provisioning */
    id_ns->ncap = id_ns->nsze;
    id_ns->nuse = id_ns->ncap;
}
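
/*
 * CMBSZ encoding sketch: SZU=2 selects a 1 MiB granule (the unit is
 * 4 KiB << (4 * SZU)), so SZ can be programmed directly from cmb_size_mb;
 * e.g. cmb_size_mb=64 exposes a 64 MiB CMB at offset 0 of BAR 2.
 */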
static void nvme_init_cmb(NvmeCtrl *n, PCIDevice *pci_dev)
{
    NVME_CMBLOC_SET_BIR(n->bar.cmbloc, NVME_CMB_BIR);
    NVME_CMBLOC_SET_OFST(n->bar.cmbloc, 0);

    NVME_CMBSZ_SET_SQS(n->bar.cmbsz, 1);
    NVME_CMBSZ_SET_CQS(n->bar.cmbsz, 0);
    NVME_CMBSZ_SET_LISTS(n->bar.cmbsz, 1);
    NVME_CMBSZ_SET_RDS(n->bar.cmbsz, 1);
    NVME_CMBSZ_SET_WDS(n->bar.cmbsz, 1);
    NVME_CMBSZ_SET_SZU(n->bar.cmbsz, 2); /* MBs */
    NVME_CMBSZ_SET_SZ(n->bar.cmbsz, n->params.cmb_size_mb);

    n->cmbuf = g_malloc0(NVME_CMBSZ_GETSIZE(n->bar.cmbsz));
    memory_region_init_io(&n->ctrl_mem, OBJECT(n), &nvme_cmb_ops, n,
                          "nvme-cmb", NVME_CMBSZ_GETSIZE(n->bar.cmbsz));
    pci_register_bar(pci_dev, NVME_CMBLOC_BIR(n->bar.cmbloc),
                     PCI_BASE_ADDRESS_SPACE_MEMORY |
                     PCI_BASE_ADDRESS_MEM_TYPE_64 |
                     PCI_BASE_ADDRESS_MEM_PREFETCH, &n->ctrl_mem);
}
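
/*
 * The only capability advertised beyond the BAR location is PMRWBM bit 1,
 * which (per the NVMe spec) tells the host that completing a read from
 * the PMR is sufficient to ensure that all prior writes to it have become
 * persistent; all other PMR registers are left at conservative zero
 * values.
 */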
static void nvme_init_pmr(NvmeCtrl *n, PCIDevice *pci_dev)
{
    /* Controller Capabilities register */
    NVME_CAP_SET_PMRS(n->bar.cap, 1);

    /* PMR Capabilities register */
    n->bar.pmrcap = 0;
    NVME_PMRCAP_SET_RDS(n->bar.pmrcap, 0);
    NVME_PMRCAP_SET_WDS(n->bar.pmrcap, 0);
    NVME_PMRCAP_SET_BIR(n->bar.pmrcap, NVME_PMR_BIR);
    NVME_PMRCAP_SET_PMRTU(n->bar.pmrcap, 0);
    /* Turn on bit 1 support */
    NVME_PMRCAP_SET_PMRWBM(n->bar.pmrcap, 0x02);
    NVME_PMRCAP_SET_PMRTO(n->bar.pmrcap, 0);
    NVME_PMRCAP_SET_CMSS(n->bar.pmrcap, 0);

    /* PMR Control register */
    n->bar.pmrctl = 0;
    NVME_PMRCTL_SET_EN(n->bar.pmrctl, 0);

    /* PMR Status register */
    n->bar.pmrsts = 0;
    NVME_PMRSTS_SET_ERR(n->bar.pmrsts, 0);
    NVME_PMRSTS_SET_NRDY(n->bar.pmrsts, 0);
    NVME_PMRSTS_SET_HSTS(n->bar.pmrsts, 0);
    NVME_PMRSTS_SET_CBAI(n->bar.pmrsts, 0);

    /* PMR Elasticity Buffer Size register */
    n->bar.pmrebs = 0;
    NVME_PMREBS_SET_PMRSZU(n->bar.pmrebs, 0);
    NVME_PMREBS_SET_RBB(n->bar.pmrebs, 0);
    NVME_PMREBS_SET_PMRWBZ(n->bar.pmrebs, 0);

    /* PMR Sustained Write Throughput register */
    n->bar.pmrswtp = 0;
    NVME_PMRSWTP_SET_PMRSWTU(n->bar.pmrswtp, 0);
    NVME_PMRSWTP_SET_PMRSWTV(n->bar.pmrswtp, 0);

    /* PMR Memory Space Control register */
    n->bar.pmrmsc = 0;
    NVME_PMRMSC_SET_CMSE(n->bar.pmrmsc, 0);
    NVME_PMRMSC_SET_CBA(n->bar.pmrmsc, 0);

    pci_register_bar(pci_dev, NVME_PMRCAP_BIR(n->bar.pmrcap),
                     PCI_BASE_ADDRESS_SPACE_MEMORY |
                     PCI_BASE_ADDRESS_MEM_TYPE_64 |
                     PCI_BASE_ADDRESS_MEM_PREFETCH, &n->pmrdev->mr);
}
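
/*
 * PCI layout sketch: BAR 0 (64-bit) carries the register block plus
 * doorbells, the MSI-X table gets its own exclusive BAR (BAR 4, as passed
 * to msix_init_exclusive_bar() below), and BAR 2 is claimed by either the
 * CMB or the PMR, which is why the two features are mutually exclusive.
 */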
static void nvme_init_pci(NvmeCtrl *n, PCIDevice *pci_dev, Error **errp)
{
    uint8_t *pci_conf = pci_dev->config;

    pci_conf[PCI_INTERRUPT_PIN] = 1;
    pci_config_set_prog_interface(pci_conf, 0x2);
    pci_config_set_class(pci_conf, PCI_CLASS_STORAGE_EXPRESS);
    pcie_endpoint_cap_init(pci_dev, 0x80);

    memory_region_init_io(&n->iomem, OBJECT(n), &nvme_mmio_ops, n, "nvme",
                          n->reg_size);
    pci_register_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY |
                     PCI_BASE_ADDRESS_MEM_TYPE_64, &n->iomem);
    if (msix_init_exclusive_bar(pci_dev, n->params.msix_qsize, 4, errp)) {
        return;
    }

    if (n->params.cmb_size_mb) {
        nvme_init_cmb(n, pci_dev);
    } else if (n->pmrdev) {
        nvme_init_pmr(n, pci_dev);
    }
}
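
/*
 * Identify Controller fields use the spec's packed encodings. For example,
 * sqes/cqes carry the maximum and required entry sizes as log2 values in
 * the high and low nibbles: (0x6 << 4) | 0x6 means submission queue
 * entries are exactly 2^6 = 64 bytes, and (0x4 << 4) | 0x4 means 2^4 = 16
 * byte completion entries. Similarly, CAP.MQES is a 0's based value, so
 * the 0x7ff programmed below advertises queues of up to 2048 entries.
 */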
static void nvme_init_ctrl(NvmeCtrl *n, PCIDevice *pci_dev)
{
    NvmeIdCtrl *id = &n->id_ctrl;
    uint8_t *pci_conf = pci_dev->config;
    char *subnqn;

    id->vid = cpu_to_le16(pci_get_word(pci_conf + PCI_VENDOR_ID));
    id->ssvid = cpu_to_le16(pci_get_word(pci_conf + PCI_SUBSYSTEM_VENDOR_ID));
    strpadcpy((char *)id->mn, sizeof(id->mn), "QEMU NVMe Ctrl", ' ');
    strpadcpy((char *)id->fr, sizeof(id->fr), "1.0", ' ');
    strpadcpy((char *)id->sn, sizeof(id->sn), n->params.serial, ' ');
    id->rab = 6;
    id->ieee[0] = 0x00;
    id->ieee[1] = 0x02;
    id->ieee[2] = 0xb3;
    id->mdts = n->params.mdts;
    id->ver = cpu_to_le32(NVME_SPEC_VER);
    id->oacs = cpu_to_le16(0);

    /*
     * Because the controller always completes the Abort command immediately,
     * there can never be more than one concurrently executing Abort command,
     * so this value is never used for anything. Note that there can easily be
     * many Abort commands in the queues, but they are not considered
     * "executing" until processed by nvme_abort.
     *
     * The specification recommends a value of 3 for Abort Command Limit (four
     * concurrently outstanding Abort commands), so let's use that though it is
     * inconsequential.
     */
    id->acl = 3;
    id->aerl = n->params.aerl;
    id->frmw = (NVME_NUM_FW_SLOTS << 1) | NVME_FRMW_SLOT1_RO;
    id->lpa = NVME_LPA_EXTENDED;

    /* recommended default value (~70 C) */
    id->wctemp = cpu_to_le16(NVME_TEMPERATURE_WARNING);
    id->cctemp = cpu_to_le16(NVME_TEMPERATURE_CRITICAL);

    id->sqes = (0x6 << 4) | 0x6;
    id->cqes = (0x4 << 4) | 0x4;
    id->nn = cpu_to_le32(n->num_namespaces);
    id->oncs = cpu_to_le16(NVME_ONCS_WRITE_ZEROES | NVME_ONCS_TIMESTAMP |
                           NVME_ONCS_FEATURES);

    subnqn = g_strdup_printf("nqn.2019-08.org.qemu:%s", n->params.serial);
    strpadcpy((char *)id->subnqn, sizeof(id->subnqn), subnqn, '\0');
    g_free(subnqn);

    id->psd[0].mp = cpu_to_le16(0x9c4);
    id->psd[0].enlat = cpu_to_le32(0x10);
    id->psd[0].exlat = cpu_to_le32(0x4);
    if (blk_enable_write_cache(n->conf.blk)) {
        id->vwc = 1;
    }

    n->bar.cap = 0;
    NVME_CAP_SET_MQES(n->bar.cap, 0x7ff);
    NVME_CAP_SET_CQR(n->bar.cap, 1);
    NVME_CAP_SET_TO(n->bar.cap, 0xf);
    NVME_CAP_SET_CSS(n->bar.cap, 1);
    NVME_CAP_SET_MPSMAX(n->bar.cap, 4);

    n->bar.vs = NVME_SPEC_VER;
    n->bar.intmc = n->bar.intms = 0;
}
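
/*
 * Realization order matters: constraints are validated before any
 * allocation, nvme_init_state() sizes the queue arrays, the block backend
 * must be configured before nvme_init_ctrl() (which probes the write
 * cache for the VWC field), and namespaces come last because their
 * geometry depends on the backing device length.
 */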
static void nvme_realize(PCIDevice *pci_dev, Error **errp)
{
    NvmeCtrl *n = NVME(pci_dev);
    Error *local_err = NULL;

    int i;

    nvme_check_constraints(n, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    nvme_init_state(n);
    nvme_init_blk(n, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    nvme_init_pci(n, pci_dev, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    nvme_init_ctrl(n, pci_dev);

    for (i = 0; i < n->num_namespaces; i++) {
        nvme_init_namespace(n, &n->namespaces[i], &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
    }
}

static void nvme_exit(PCIDevice *pci_dev)
{
    NvmeCtrl *n = NVME(pci_dev);

    nvme_clear_ctrl(n);
    g_free(n->namespaces);
    g_free(n->cq);
    g_free(n->sq);
    g_free(n->aer_reqs);

    if (n->params.cmb_size_mb) {
        g_free(n->cmbuf);
    }

    if (n->pmrdev) {
        host_memory_backend_set_mapped(n->pmrdev, false);
    }
    msix_uninit_exclusive_bar(pci_dev);
}

static Property nvme_props[] = {
    DEFINE_BLOCK_PROPERTIES(NvmeCtrl, conf),
    DEFINE_PROP_LINK("pmrdev", NvmeCtrl, pmrdev, TYPE_MEMORY_BACKEND,
                     HostMemoryBackend *),
    DEFINE_PROP_STRING("serial", NvmeCtrl, params.serial),
    DEFINE_PROP_UINT32("cmb_size_mb", NvmeCtrl, params.cmb_size_mb, 0),
    DEFINE_PROP_UINT32("num_queues", NvmeCtrl, params.num_queues, 0),
    DEFINE_PROP_UINT32("max_ioqpairs", NvmeCtrl, params.max_ioqpairs, 64),
    DEFINE_PROP_UINT16("msix_qsize", NvmeCtrl, params.msix_qsize, 65),
    DEFINE_PROP_UINT8("aerl", NvmeCtrl, params.aerl, 3),
    DEFINE_PROP_UINT32("aer_max_queued", NvmeCtrl, params.aer_max_queued, 64),
    DEFINE_PROP_UINT8("mdts", NvmeCtrl, params.mdts, 7),
    DEFINE_PROP_END_OF_LIST(),
};

static const VMStateDescription nvme_vmstate = {
    .name = "nvme",
    .unmigratable = 1,
};

static void nvme_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);
    PCIDeviceClass *pc = PCI_DEVICE_CLASS(oc);

    pc->realize = nvme_realize;
    pc->exit = nvme_exit;
    pc->class_id = PCI_CLASS_STORAGE_EXPRESS;
    pc->vendor_id = PCI_VENDOR_ID_INTEL;
    pc->device_id = 0x5845;
    pc->revision = 2;

    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
    dc->desc = "Non-Volatile Memory Express";
    device_class_set_props(dc, nvme_props);
    dc->vmsd = &nvme_vmstate;
}

static void nvme_instance_init(Object *obj)
{
    NvmeCtrl *s = NVME(obj);

    device_add_bootindex_property(obj, &s->conf.bootindex,
                                  "bootindex", "/namespace@1,0",
                                  DEVICE(obj));
}

static const TypeInfo nvme_info = {
    .name          = TYPE_NVME,
    .parent        = TYPE_PCI_DEVICE,
    .instance_size = sizeof(NvmeCtrl),
    .class_init    = nvme_class_init,
    .instance_init = nvme_instance_init,
    .interfaces = (InterfaceInfo[]) {
        { INTERFACE_PCIE_DEVICE },
        { }
    },
};

static void nvme_register_types(void)
{
    type_register_static(&nvme_info);
}

type_init(nvme_register_types)