/*
 * QEMU NVM Express Controller
 *
 * Copyright (c) 2012, Intel Corporation
 *
 * Written by Keith Busch <keith.busch@intel.com>
 *
 * This code is licensed under the GNU GPL v2 or later.
 *
 * Reference Specs: http://www.nvmexpress.org, 1.2, 1.1, 1.0e
 *
 *   https://nvmexpress.org/developers/nvme-specification/
 *
 * Usage: add options:
 *      -drive file=<file>,if=none,id=<drive_id>
 *      -device nvme,drive=<drive_id>,serial=<serial>,id=<id[optional]>, \
 *              cmb_size_mb=<cmb_size_mb[optional]>, \
 *              [pmrdev=<mem_backend_file_id>,] \
 *              max_ioqpairs=<N[optional]>, \
 *              aerl=<N[optional]>, aer_max_queued=<N[optional]>
 *
 * Note that cmb_size_mb denotes the size of the CMB in MB. The CMB is assumed
 * to be at offset 0 in BAR2 and supports only WDS, RDS and SQS for now.
 *
 * The cmb_size_mb= and pmrdev= options are mutually exclusive due to the
 * limited number of available BARs. cmb_size_mb= takes precedence over
 * pmrdev= when both are provided.
 *
 * Enabling PMR emulation is achieved by pointing pmrdev= at a
 * memory-backend-file:
 *
 *      -object memory-backend-file,id=<mem_id>,share=on,mem-path=<file_path>, \
 *              size=<size> .... -device nvme,...,pmrdev=<mem_id>
 *
 * nvme device parameters
 * ~~~~~~~~~~~~~~~~~~~~~~
 * - `aerl`
 *   The Asynchronous Event Request Limit (AERL). Indicates the maximum number
 *   of concurrently outstanding Asynchronous Event Request commands supported
 *   by the controller. This is a 0's based value.
 *
 * - `aer_max_queued`
 *   This is the maximum number of events that the device will enqueue for
 *   completion when there are no outstanding AERs. When the maximum number of
 *   enqueued events is reached, subsequent events are dropped.
 */
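/*
 * A complete example invocation (the image path, IDs and sizes below are
 * hypothetical and shown for illustration only):
 *
 *      qemu-system-x86_64 ... \
 *          -drive file=nvme.img,if=none,id=nvm \
 *          -device nvme,drive=nvm,serial=deadbeef,max_ioqpairs=8,aerl=3
 */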
#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu/error-report.h"
#include "hw/block/block.h"
#include "hw/pci/msix.h"
#include "hw/pci/pci.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"
#include "sysemu/sysemu.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "sysemu/hostmem.h"
#include "sysemu/block-backend.h"
#include "exec/memory.h"
#include "qemu/module.h"
#include "qemu/cutils.h"
#define NVME_MAX_IOQPAIRS 0xffff
#define NVME_DB_SIZE  4
#define NVME_SPEC_VER 0x00010300
#define NVME_CMB_BIR 2
#define NVME_PMR_BIR 2
#define NVME_TEMPERATURE 0x143
#define NVME_TEMPERATURE_WARNING 0x157
#define NVME_TEMPERATURE_CRITICAL 0x175
#define NVME_NUM_FW_SLOTS 1

#define NVME_GUEST_ERR(trace, fmt, ...) \
    do { \
        (trace_##trace)(__VA_ARGS__); \
        qemu_log_mask(LOG_GUEST_ERROR, #trace \
            " in %s: " fmt "\n", __func__, ## __VA_ARGS__); \
    } while (0)
static const bool nvme_feature_support[NVME_FID_MAX] = {
    [NVME_ARBITRATION]              = true,
    [NVME_POWER_MANAGEMENT]         = true,
    [NVME_TEMPERATURE_THRESHOLD]    = true,
    [NVME_ERROR_RECOVERY]           = true,
    [NVME_VOLATILE_WRITE_CACHE]     = true,
    [NVME_NUMBER_OF_QUEUES]         = true,
    [NVME_INTERRUPT_COALESCING]     = true,
    [NVME_INTERRUPT_VECTOR_CONF]    = true,
    [NVME_WRITE_ATOMICITY]          = true,
    [NVME_ASYNCHRONOUS_EVENT_CONF]  = true,
    [NVME_TIMESTAMP]                = true,
};

static const uint32_t nvme_feature_cap[NVME_FID_MAX] = {
    [NVME_TEMPERATURE_THRESHOLD]    = NVME_FEAT_CAP_CHANGE,
    [NVME_VOLATILE_WRITE_CACHE]     = NVME_FEAT_CAP_CHANGE,
    [NVME_NUMBER_OF_QUEUES]         = NVME_FEAT_CAP_CHANGE,
    [NVME_ASYNCHRONOUS_EVENT_CONF]  = NVME_FEAT_CAP_CHANGE,
    [NVME_TIMESTAMP]                = NVME_FEAT_CAP_CHANGE,
};
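/*
 * Note that only the features flagged NVME_FEAT_CAP_CHANGE above may be
 * modified with Set Features; nvme_set_feature() rejects everything else
 * with Feature Identifier Not Changeable.
 */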
static void nvme_process_sq(void *opaque);

static uint16_t nvme_cid(NvmeRequest *req)
{
    return le16_to_cpu(req->cqe.cid);
}

static uint16_t nvme_sqid(NvmeRequest *req)
{
    return le16_to_cpu(req->sq->sqid);
}
static bool nvme_addr_is_cmb(NvmeCtrl *n, hwaddr addr)
{
    hwaddr low = n->ctrl_mem.addr;
    hwaddr hi  = n->ctrl_mem.addr + int128_get64(n->ctrl_mem.size);

    return addr >= low && addr < hi;
}

static inline void *nvme_addr_to_cmb(NvmeCtrl *n, hwaddr addr)
{
    assert(nvme_addr_is_cmb(n, addr));

    return &n->cmbuf[addr - n->ctrl_mem.addr];
}
static int nvme_addr_read(NvmeCtrl *n, hwaddr addr, void *buf, int size)
{
    hwaddr hi = addr + size - 1;

    if (n->bar.cmbsz && nvme_addr_is_cmb(n, addr) && nvme_addr_is_cmb(n, hi)) {
        memcpy(buf, nvme_addr_to_cmb(n, addr), size);
        return 0;
    }

    return pci_dma_read(&n->parent_obj, addr, buf, size);
}
static int nvme_check_sqid(NvmeCtrl *n, uint16_t sqid)
{
    return sqid < n->params.max_ioqpairs + 1 && n->sq[sqid] != NULL ? 0 : -1;
}

static int nvme_check_cqid(NvmeCtrl *n, uint16_t cqid)
{
    return cqid < n->params.max_ioqpairs + 1 && n->cq[cqid] != NULL ? 0 : -1;
}
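/*
 * Each time the completion queue tail wraps, nvme_inc_cq_tail() below inverts
 * the phase tag that nvme_post_cqes() writes into each CQE status field, so
 * the host can distinguish freshly posted entries from stale ones: e.g. on
 * the first pass through the queue valid CQEs carry phase 1, on the second
 * pass phase 0, and so on.
 */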
static void nvme_inc_cq_tail(NvmeCQueue *cq)
{
    cq->tail++;
    if (cq->tail >= cq->size) {
        cq->tail = 0;
        cq->phase = !cq->phase;
    }
}

static void nvme_inc_sq_head(NvmeSQueue *sq)
{
    sq->head = (sq->head + 1) % sq->size;
}

static uint8_t nvme_cq_full(NvmeCQueue *cq)
{
    return (cq->tail + 1) % cq->size == cq->head;
}

static uint8_t nvme_sq_empty(NvmeSQueue *sq)
{
    return sq->head == sq->tail;
}
static void nvme_irq_check(NvmeCtrl *n)
{
    if (msix_enabled(&(n->parent_obj))) {
        return;
    }
    if (~n->bar.intms & n->irq_status) {
        pci_irq_assert(&n->parent_obj);
    } else {
        pci_irq_deassert(&n->parent_obj);
    }
}

static void nvme_irq_assert(NvmeCtrl *n, NvmeCQueue *cq)
{
    if (cq->irq_enabled) {
        if (msix_enabled(&(n->parent_obj))) {
            trace_pci_nvme_irq_msix(cq->vector);
            msix_notify(&(n->parent_obj), cq->vector);
        } else {
            trace_pci_nvme_irq_pin();
            assert(cq->vector < 32);
            n->irq_status |= 1 << cq->vector;
            nvme_irq_check(n);
        }
    } else {
        trace_pci_nvme_irq_masked();
    }
}

static void nvme_irq_deassert(NvmeCtrl *n, NvmeCQueue *cq)
{
    if (cq->irq_enabled) {
        if (msix_enabled(&(n->parent_obj))) {
            return;
        }
        assert(cq->vector < 32);
        n->irq_status &= ~(1 << cq->vector);
        nvme_irq_check(n);
    }
}
static void nvme_req_clear(NvmeRequest *req)
{
    memset(&req->cqe, 0x0, sizeof(req->cqe));
    req->status = NVME_SUCCESS;
}

static void nvme_req_exit(NvmeRequest *req)
{
    if (req->qsg.sg) {
        qemu_sglist_destroy(&req->qsg);
    }

    if (req->iov.iov) {
        qemu_iovec_destroy(&req->iov);
    }
}
static uint16_t nvme_map_addr_cmb(NvmeCtrl *n, QEMUIOVector *iov, hwaddr addr,
                                  size_t len)
{
    trace_pci_nvme_map_addr_cmb(addr, len);

    if (!nvme_addr_is_cmb(n, addr) || !nvme_addr_is_cmb(n, addr + len - 1)) {
        return NVME_DATA_TRAS_ERROR;
    }

    qemu_iovec_add(iov, nvme_addr_to_cmb(n, addr), len);

    return NVME_SUCCESS;
}
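/*
 * Data residing in the CMB is mapped through a QEMUIOVector over the
 * controller's own buffer, while host memory is described by a QEMUSGList
 * and goes through DMA. The two cannot be mixed within a single request,
 * which is what the NVME_INVALID_USE_OF_CMB checks below enforce.
 */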
static uint16_t nvme_map_addr(NvmeCtrl *n, QEMUSGList *qsg, QEMUIOVector *iov,
                              hwaddr addr, size_t len)
{
    trace_pci_nvme_map_addr(addr, len);

    if (nvme_addr_is_cmb(n, addr)) {
        if (qsg && qsg->sg) {
            return NVME_INVALID_USE_OF_CMB | NVME_DNR;
        }

        if (!iov->iov) {
            qemu_iovec_init(iov, 1);
        }

        return nvme_map_addr_cmb(n, iov, addr, len);
    }

    if (iov && iov->iov) {
        return NVME_INVALID_USE_OF_CMB | NVME_DNR;
    }

    if (!qsg->sg) {
        pci_dma_sglist_init(qsg, &n->parent_obj, 1);
    }

    qemu_sglist_add(qsg, addr, len);

    return NVME_SUCCESS;
}
static uint16_t nvme_map_prp(NvmeCtrl *n, uint64_t prp1, uint64_t prp2,
                             uint32_t len, NvmeRequest *req)
{
    hwaddr trans_len = n->page_size - (prp1 % n->page_size);
    trans_len = MIN(len, trans_len);
    int num_prps = (len >> n->page_bits) + 1;
    uint16_t status;
    bool prp_list_in_cmb = false;
    int ret;

    QEMUSGList *qsg = &req->qsg;
    QEMUIOVector *iov = &req->iov;

    trace_pci_nvme_map_prp(trans_len, len, prp1, prp2, num_prps);

    if (unlikely(!prp1)) {
        trace_pci_nvme_err_invalid_prp();
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    if (nvme_addr_is_cmb(n, prp1)) {
        qemu_iovec_init(iov, num_prps);
    } else {
        pci_dma_sglist_init(qsg, &n->parent_obj, num_prps);
    }

    status = nvme_map_addr(n, qsg, iov, prp1, trans_len);
    if (status) {
        return status;
    }

    len -= trans_len;
    if (len) {
        if (unlikely(!prp2)) {
            trace_pci_nvme_err_invalid_prp2_missing();
            return NVME_INVALID_FIELD | NVME_DNR;
        }

        if (len > n->page_size) {
            uint64_t prp_list[n->max_prp_ents];
            uint32_t nents, prp_trans;
            int i = 0;

            if (nvme_addr_is_cmb(n, prp2)) {
                prp_list_in_cmb = true;
            }

            nents = (len + n->page_size - 1) >> n->page_bits;
            prp_trans = MIN(n->max_prp_ents, nents) * sizeof(uint64_t);
            ret = nvme_addr_read(n, prp2, (void *)prp_list, prp_trans);
            if (ret) {
                trace_pci_nvme_err_addr_read(prp2);
                return NVME_DATA_TRAS_ERROR;
            }
            while (len != 0) {
                uint64_t prp_ent = le64_to_cpu(prp_list[i]);

                if (i == n->max_prp_ents - 1 && len > n->page_size) {
                    if (unlikely(!prp_ent || prp_ent & (n->page_size - 1))) {
                        trace_pci_nvme_err_invalid_prplist_ent(prp_ent);
                        return NVME_INVALID_FIELD | NVME_DNR;
                    }

                    if (prp_list_in_cmb != nvme_addr_is_cmb(n, prp_ent)) {
                        return NVME_INVALID_USE_OF_CMB | NVME_DNR;
                    }

                    i = 0;
                    nents = (len + n->page_size - 1) >> n->page_bits;
                    prp_trans = MIN(n->max_prp_ents, nents) * sizeof(uint64_t);
                    ret = nvme_addr_read(n, prp_ent, (void *)prp_list,
                                         prp_trans);
                    if (ret) {
                        trace_pci_nvme_err_addr_read(prp_ent);
                        return NVME_DATA_TRAS_ERROR;
                    }

                    prp_ent = le64_to_cpu(prp_list[i]);
                }

                if (unlikely(!prp_ent || prp_ent & (n->page_size - 1))) {
                    trace_pci_nvme_err_invalid_prplist_ent(prp_ent);
                    return NVME_INVALID_FIELD | NVME_DNR;
                }

                trans_len = MIN(len, n->page_size);
                status = nvme_map_addr(n, qsg, iov, prp_ent, trans_len);
                if (status) {
                    return status;
                }

                len -= trans_len;
                i++;
            }
        } else {
            if (unlikely(prp2 & (n->page_size - 1))) {
                trace_pci_nvme_err_invalid_prp2_align(prp2);
                return NVME_INVALID_FIELD | NVME_DNR;
            }
            status = nvme_map_addr(n, qsg, iov, prp2, len);
            if (status) {
                return status;
            }
        }
    }

    return NVME_SUCCESS;
}
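/*
 * A worked example of the PRP rules implemented above (illustrative only):
 * with a 4 KiB page size and a page-aligned prp1, a 4 KiB transfer uses only
 * prp1; an 8 KiB transfer uses prp2 directly as the address of the second
 * page; and a 12 KiB transfer (len > page size after the first page) makes
 * prp2 a pointer to a PRP list holding the second and third page addresses.
 */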
/*
 * Map 'nsgld' data descriptors from 'segment'. The function will subtract the
 * number of bytes mapped in len.
 */
static uint16_t nvme_map_sgl_data(NvmeCtrl *n, QEMUSGList *qsg,
                                  QEMUIOVector *iov,
                                  NvmeSglDescriptor *segment, uint64_t nsgld,
                                  size_t *len, NvmeRequest *req)
{
    dma_addr_t addr, trans_len;
    uint32_t dlen;
    uint16_t status;

    for (int i = 0; i < nsgld; i++) {
        uint8_t type = NVME_SGL_TYPE(segment[i].type);

        switch (type) {
        case NVME_SGL_DESCR_TYPE_BIT_BUCKET:
            if (req->cmd.opcode == NVME_CMD_WRITE) {
                continue;
            }
        case NVME_SGL_DESCR_TYPE_DATA_BLOCK:
            break;
        case NVME_SGL_DESCR_TYPE_SEGMENT:
        case NVME_SGL_DESCR_TYPE_LAST_SEGMENT:
            return NVME_INVALID_NUM_SGL_DESCRS | NVME_DNR;
        default:
            return NVME_SGL_DESCR_TYPE_INVALID | NVME_DNR;
        }

        dlen = le32_to_cpu(segment[i].len);

        if (!dlen) {
            continue;
        }

        if (*len == 0) {
            /*
             * All data has been mapped, but the SGL contains additional
             * segments and/or descriptors. The controller might accept
             * ignoring the rest of the SGL.
             */
            uint16_t sgls = le16_to_cpu(n->id_ctrl.sgls);
            if (sgls & NVME_CTRL_SGLS_EXCESS_LENGTH) {
                break;
            }

            trace_pci_nvme_err_invalid_sgl_excess_length(nvme_cid(req));
            return NVME_DATA_SGL_LEN_INVALID | NVME_DNR;
        }

        trans_len = MIN(*len, dlen);

        if (type == NVME_SGL_DESCR_TYPE_BIT_BUCKET) {
            goto next;
        }

        addr = le64_to_cpu(segment[i].addr);

        if (UINT64_MAX - addr < dlen) {
            return NVME_DATA_SGL_LEN_INVALID | NVME_DNR;
        }

        status = nvme_map_addr(n, qsg, iov, addr, trans_len);
        if (status) {
            return status;
        }

next:
        *len -= trans_len;
    }

    return NVME_SUCCESS;
}
static uint16_t nvme_map_sgl(NvmeCtrl *n, QEMUSGList *qsg, QEMUIOVector *iov,
                             NvmeSglDescriptor sgl, size_t len,
                             NvmeRequest *req)
{
    /*
     * Read the segment in chunks of 256 descriptors (one 4k page) to avoid
     * dynamically allocating a potentially huge SGL. The spec allows the SGL
     * to be larger (as in number of bytes required to describe the SGL
     * descriptors and segment chain) than the command transfer size, so it is
     * not bounded by MDTS.
     */
    const int SEG_CHUNK_SIZE = 256;

    NvmeSglDescriptor segment[SEG_CHUNK_SIZE], *sgld, *last_sgld;
    uint64_t nsgld;
    uint32_t seg_len;
    uint16_t status;
    bool sgl_in_cmb = false;
    hwaddr addr;
    int ret;

    sgld = &sgl;
    addr = le64_to_cpu(sgl.addr);

    trace_pci_nvme_map_sgl(nvme_cid(req), NVME_SGL_TYPE(sgl.type), len);

    /*
     * If the entire transfer can be described with a single data block it can
     * be mapped directly.
     */
    if (NVME_SGL_TYPE(sgl.type) == NVME_SGL_DESCR_TYPE_DATA_BLOCK) {
        status = nvme_map_sgl_data(n, qsg, iov, sgld, 1, &len, req);
        if (status) {
            goto unmap;
        }

        goto out;
    }

    /*
     * If the segment is located in the CMB, the submission queue of the
     * request must also reside there.
     */
    if (nvme_addr_is_cmb(n, addr)) {
        if (!nvme_addr_is_cmb(n, req->sq->dma_addr)) {
            return NVME_INVALID_USE_OF_CMB | NVME_DNR;
        }

        sgl_in_cmb = true;
    }

    for (;;) {
        switch (NVME_SGL_TYPE(sgld->type)) {
        case NVME_SGL_DESCR_TYPE_SEGMENT:
        case NVME_SGL_DESCR_TYPE_LAST_SEGMENT:
            break;
        default:
            return NVME_INVALID_SGL_SEG_DESCR | NVME_DNR;
        }

        seg_len = le32_to_cpu(sgld->len);

        /* check the length of the (Last) Segment descriptor */
        if ((!seg_len || seg_len & 0xf) &&
            (NVME_SGL_TYPE(sgld->type) != NVME_SGL_DESCR_TYPE_BIT_BUCKET)) {
            return NVME_INVALID_SGL_SEG_DESCR | NVME_DNR;
        }

        if (UINT64_MAX - addr < seg_len) {
            return NVME_DATA_SGL_LEN_INVALID | NVME_DNR;
        }

        nsgld = seg_len / sizeof(NvmeSglDescriptor);

        while (nsgld > SEG_CHUNK_SIZE) {
            if (nvme_addr_read(n, addr, segment, sizeof(segment))) {
                trace_pci_nvme_err_addr_read(addr);
                status = NVME_DATA_TRAS_ERROR;
                goto unmap;
            }

            status = nvme_map_sgl_data(n, qsg, iov, segment, SEG_CHUNK_SIZE,
                                       &len, req);
            if (status) {
                goto unmap;
            }

            nsgld -= SEG_CHUNK_SIZE;
            addr += SEG_CHUNK_SIZE * sizeof(NvmeSglDescriptor);
        }

        ret = nvme_addr_read(n, addr, segment, nsgld *
                             sizeof(NvmeSglDescriptor));
        if (ret) {
            trace_pci_nvme_err_addr_read(addr);
            status = NVME_DATA_TRAS_ERROR;
            goto unmap;
        }

        last_sgld = &segment[nsgld - 1];

        /*
         * If the segment ends with a Data Block or Bit Bucket Descriptor Type,
         * then we are done mapping the segment chain.
         */
        switch (NVME_SGL_TYPE(last_sgld->type)) {
        case NVME_SGL_DESCR_TYPE_DATA_BLOCK:
        case NVME_SGL_DESCR_TYPE_BIT_BUCKET:
            status = nvme_map_sgl_data(n, qsg, iov, segment, nsgld, &len, req);
            if (status) {
                goto unmap;
            }

            goto out;
        default:
            break;
        }

        /*
         * If the last descriptor was not a Data Block or Bit Bucket, then the
         * current segment must not be a Last Segment.
         */
        if (NVME_SGL_TYPE(sgld->type) == NVME_SGL_DESCR_TYPE_LAST_SEGMENT) {
            status = NVME_INVALID_SGL_SEG_DESCR | NVME_DNR;
            goto unmap;
        }

        sgld = last_sgld;
        addr = le64_to_cpu(sgld->addr);

        /*
         * Do not map the last descriptor; it will be a Segment or Last Segment
         * descriptor and is handled by the next iteration.
         */
        status = nvme_map_sgl_data(n, qsg, iov, segment, nsgld - 1, &len, req);
        if (status) {
            goto unmap;
        }

        /*
         * If the next segment is in the CMB, make sure that the sgl was
         * already located there.
         */
        if (sgl_in_cmb != nvme_addr_is_cmb(n, addr)) {
            status = NVME_INVALID_USE_OF_CMB | NVME_DNR;
            goto unmap;
        }
    }

out:
    /* if there is any residual left in len, the SGL was too short */
    if (len) {
        status = NVME_DATA_SGL_LEN_INVALID | NVME_DNR;
        goto unmap;
    }

    return NVME_SUCCESS;

unmap:
    if (iov->iov) {
        qemu_iovec_destroy(iov);
    }

    if (qsg->sg) {
        qemu_sglist_destroy(qsg);
    }

    return status;
}
static uint16_t nvme_map_dptr(NvmeCtrl *n, size_t len, NvmeRequest *req)
{
    uint64_t prp1, prp2;

    switch (NVME_CMD_FLAGS_PSDT(req->cmd.flags)) {
    case NVME_PSDT_PRP:
        prp1 = le64_to_cpu(req->cmd.dptr.prp1);
        prp2 = le64_to_cpu(req->cmd.dptr.prp2);

        return nvme_map_prp(n, prp1, prp2, len, req);
    case NVME_PSDT_SGL_MPTR_CONTIGUOUS:
    case NVME_PSDT_SGL_MPTR_SGL:
        /* SGLs shall not be used for Admin commands in NVMe over PCIe */
        if (!req->sq->sqid) {
            return NVME_INVALID_FIELD | NVME_DNR;
        }

        return nvme_map_sgl(n, &req->qsg, &req->iov, req->cmd.dptr.sgl, len,
                            req);
    default:
        return NVME_INVALID_FIELD;
    }
}
static uint16_t nvme_dma(NvmeCtrl *n, uint8_t *ptr, uint32_t len,
                         DMADirection dir, NvmeRequest *req)
{
    uint16_t status = NVME_SUCCESS;

    status = nvme_map_dptr(n, len, req);
    if (status) {
        return status;
    }

    /* assert that only one of qsg and iov carries data */
    assert((req->qsg.nsg > 0) != (req->iov.niov > 0));

    if (req->qsg.nsg > 0) {
        uint64_t residual;

        if (dir == DMA_DIRECTION_TO_DEVICE) {
            residual = dma_buf_write(ptr, len, &req->qsg);
        } else {
            residual = dma_buf_read(ptr, len, &req->qsg);
        }

        if (unlikely(residual)) {
            trace_pci_nvme_err_invalid_dma();
            status = NVME_INVALID_FIELD | NVME_DNR;
        }
    } else {
        size_t bytes;

        if (dir == DMA_DIRECTION_TO_DEVICE) {
            bytes = qemu_iovec_to_buf(&req->iov, 0, ptr, len);
        } else {
            bytes = qemu_iovec_from_buf(&req->iov, 0, ptr, len);
        }

        if (unlikely(bytes != len)) {
            trace_pci_nvme_err_invalid_dma();
            status = NVME_INVALID_FIELD | NVME_DNR;
        }
    }

    return status;
}
static void nvme_post_cqes(void *opaque)
{
    NvmeCQueue *cq = opaque;
    NvmeCtrl *n = cq->ctrl;
    NvmeRequest *req, *next;
    int ret;

    QTAILQ_FOREACH_SAFE(req, &cq->req_list, entry, next) {
        NvmeSQueue *sq;
        hwaddr addr;

        if (nvme_cq_full(cq)) {
            break;
        }

        sq = req->sq;
        req->cqe.status = cpu_to_le16((req->status << 1) | cq->phase);
        req->cqe.sq_id = cpu_to_le16(sq->sqid);
        req->cqe.sq_head = cpu_to_le16(sq->head);
        addr = cq->dma_addr + cq->tail * n->cqe_size;
        ret = pci_dma_write(&n->parent_obj, addr, (void *)&req->cqe,
                            sizeof(req->cqe));
        if (ret) {
            trace_pci_nvme_err_addr_write(addr);
            trace_pci_nvme_err_cfs();
            n->bar.csts = NVME_CSTS_FAILED;
            break;
        }

        QTAILQ_REMOVE(&cq->req_list, req, entry);
        nvme_inc_cq_tail(cq);
        nvme_req_exit(req);
        QTAILQ_INSERT_TAIL(&sq->req_list, req, entry);
    }
    if (cq->tail != cq->head) {
        nvme_irq_assert(n, cq);
    }
}
*cq
, NvmeRequest
*req
)
770 assert(cq
->cqid
== req
->sq
->cqid
);
771 trace_pci_nvme_enqueue_req_completion(nvme_cid(req
), cq
->cqid
,
773 QTAILQ_REMOVE(&req
->sq
->out_req_list
, req
, entry
);
774 QTAILQ_INSERT_TAIL(&cq
->req_list
, req
, entry
);
775 timer_mod(cq
->timer
, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL
) + 500);
static void nvme_process_aers(void *opaque)
{
    NvmeCtrl *n = opaque;
    NvmeAsyncEvent *event, *next;

    trace_pci_nvme_process_aers(n->aer_queued);

    QTAILQ_FOREACH_SAFE(event, &n->aer_queue, entry, next) {
        NvmeRequest *req;
        NvmeAerResult *result;

        /* can't post cqe if there is nothing to complete */
        if (!n->outstanding_aers) {
            trace_pci_nvme_no_outstanding_aers();
            break;
        }

        /* ignore if masked (cqe posted, but event not cleared) */
        if (n->aer_mask & (1 << event->result.event_type)) {
            trace_pci_nvme_aer_masked(event->result.event_type, n->aer_mask);
            continue;
        }

        QTAILQ_REMOVE(&n->aer_queue, event, entry);
        n->aer_queued--;

        n->aer_mask |= 1 << event->result.event_type;
        n->outstanding_aers--;

        req = n->aer_reqs[n->outstanding_aers];

        result = (NvmeAerResult *) &req->cqe.result;
        result->event_type = event->result.event_type;
        result->event_info = event->result.event_info;
        result->log_page = event->result.log_page;
        g_free(event);

        trace_pci_nvme_aer_post_cqe(result->event_type, result->event_info,
                                    result->log_page);

        nvme_enqueue_req_completion(&n->admin_cq, req);
    }
}
static void nvme_enqueue_event(NvmeCtrl *n, uint8_t event_type,
                               uint8_t event_info, uint8_t log_page)
{
    NvmeAsyncEvent *event;

    trace_pci_nvme_enqueue_event(event_type, event_info, log_page);

    if (n->aer_queued == n->params.aer_max_queued) {
        trace_pci_nvme_enqueue_event_noqueue(n->aer_queued);
        return;
    }

    event = g_new(NvmeAsyncEvent, 1);
    event->result = (NvmeAerResult) {
        .event_type = event_type,
        .event_info = event_info,
        .log_page   = log_page,
    };

    QTAILQ_INSERT_TAIL(&n->aer_queue, event, entry);
    n->aer_queued++;

    nvme_process_aers(n);
}
*n
, uint8_t event_type
)
849 n
->aer_mask
&= ~(1 << event_type
);
850 if (!QTAILQ_EMPTY(&n
->aer_queue
)) {
851 nvme_process_aers(n
);
855 static inline uint16_t nvme_check_mdts(NvmeCtrl
*n
, size_t len
)
857 uint8_t mdts
= n
->params
.mdts
;
859 if (mdts
&& len
> n
->page_size
<< mdts
) {
860 return NVME_INVALID_FIELD
| NVME_DNR
;
866 static inline uint16_t nvme_check_bounds(NvmeCtrl
*n
, NvmeNamespace
*ns
,
867 uint64_t slba
, uint32_t nlb
)
869 uint64_t nsze
= le64_to_cpu(ns
->id_ns
.nsze
);
871 if (unlikely(UINT64_MAX
- slba
< nlb
|| slba
+ nlb
> nsze
)) {
872 return NVME_LBA_RANGE
| NVME_DNR
;
878 static void nvme_rw_cb(void *opaque
, int ret
)
880 NvmeRequest
*req
= opaque
;
881 NvmeCtrl
*n
= nvme_ctrl(req
);
883 BlockBackend
*blk
= n
->conf
.blk
;
884 BlockAcctCookie
*acct
= &req
->acct
;
885 BlockAcctStats
*stats
= blk_get_stats(blk
);
887 Error
*local_err
= NULL
;
889 trace_pci_nvme_rw_cb(nvme_cid(req
), blk_name(blk
));
892 block_acct_done(stats
, acct
);
896 block_acct_failed(stats
, acct
);
898 switch (req
->cmd
.opcode
) {
900 status
= NVME_UNRECOVERED_READ
;
904 case NVME_CMD_WRITE_ZEROES
:
905 status
= NVME_WRITE_FAULT
;
908 status
= NVME_INTERNAL_DEV_ERROR
;
912 trace_pci_nvme_err_aio(nvme_cid(req
), strerror(ret
), status
);
914 error_setg_errno(&local_err
, -ret
, "aio failed");
915 error_report_err(local_err
);
917 req
->status
= status
;
920 nvme_enqueue_req_completion(nvme_cq(req
), req
);
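/*
 * nvme_do_aio() below dispatches on how the transfer was mapped: requests
 * described by a QEMUSGList (host memory) go through dma_blk_read/write,
 * while CMB-backed requests carry a QEMUIOVector and use
 * blk_aio_preadv/pwritev instead.
 */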
static uint16_t nvme_do_aio(BlockBackend *blk, int64_t offset, size_t len,
                            NvmeRequest *req)
{
    BlockAcctCookie *acct = &req->acct;
    BlockAcctStats *stats = blk_get_stats(blk);

    bool is_write = false;

    trace_pci_nvme_do_aio(nvme_cid(req), req->cmd.opcode,
                          nvme_io_opc_str(req->cmd.opcode), blk_name(blk),
                          offset, len);

    switch (req->cmd.opcode) {
    case NVME_CMD_FLUSH:
        block_acct_start(stats, acct, 0, BLOCK_ACCT_FLUSH);
        req->aiocb = blk_aio_flush(blk, nvme_rw_cb, req);
        break;

    case NVME_CMD_WRITE_ZEROES:
        block_acct_start(stats, acct, len, BLOCK_ACCT_WRITE);
        req->aiocb = blk_aio_pwrite_zeroes(blk, offset, len,
                                           BDRV_REQ_MAY_UNMAP, nvme_rw_cb,
                                           req);
        break;

    case NVME_CMD_WRITE:
        is_write = true;

        /* fallthrough */

    case NVME_CMD_READ:
        block_acct_start(stats, acct, len,
                         is_write ? BLOCK_ACCT_WRITE : BLOCK_ACCT_READ);

        if (req->qsg.sg) {
            if (is_write) {
                req->aiocb = dma_blk_write(blk, &req->qsg, offset,
                                           BDRV_SECTOR_SIZE, nvme_rw_cb, req);
            } else {
                req->aiocb = dma_blk_read(blk, &req->qsg, offset,
                                          BDRV_SECTOR_SIZE, nvme_rw_cb, req);
            }
        } else {
            if (is_write) {
                req->aiocb = blk_aio_pwritev(blk, offset, &req->iov, 0,
                                             nvme_rw_cb, req);
            } else {
                req->aiocb = blk_aio_preadv(blk, offset, &req->iov, 0,
                                            nvme_rw_cb, req);
            }
        }

        break;
    }

    return NVME_NO_COMPLETE;
}

static uint16_t nvme_flush(NvmeCtrl *n, NvmeRequest *req)
{
    return nvme_do_aio(n->conf.blk, 0, 0, req);
}

static uint16_t nvme_write_zeroes(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd;
    NvmeNamespace *ns = req->ns;
    uint64_t slba = le64_to_cpu(rw->slba);
    uint32_t nlb = (uint32_t)le16_to_cpu(rw->nlb) + 1;
    uint64_t offset = nvme_l2b(ns, slba);
    uint32_t count = nvme_l2b(ns, nlb);
    uint16_t status;

    trace_pci_nvme_write_zeroes(nvme_cid(req), slba, nlb);

    status = nvme_check_bounds(n, ns, slba, nlb);
    if (status) {
        trace_pci_nvme_err_invalid_lba_range(slba, nlb, ns->id_ns.nsze);
        return status;
    }

    return nvme_do_aio(n->conf.blk, offset, count, req);
}

static uint16_t nvme_rw(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd;
    NvmeNamespace *ns = req->ns;
    uint32_t nlb = (uint32_t)le16_to_cpu(rw->nlb) + 1;
    uint64_t slba = le64_to_cpu(rw->slba);

    uint64_t data_size = nvme_l2b(ns, nlb);
    uint64_t data_offset = nvme_l2b(ns, slba);
    enum BlockAcctType acct = req->cmd.opcode == NVME_CMD_WRITE ?
        BLOCK_ACCT_WRITE : BLOCK_ACCT_READ;
    uint16_t status;

    trace_pci_nvme_rw(nvme_cid(req), nvme_io_opc_str(rw->opcode), nlb,
                      data_size, slba);

    status = nvme_check_mdts(n, data_size);
    if (status) {
        trace_pci_nvme_err_mdts(nvme_cid(req), data_size);
        goto invalid;
    }

    status = nvme_check_bounds(n, ns, slba, nlb);
    if (status) {
        trace_pci_nvme_err_invalid_lba_range(slba, nlb, ns->id_ns.nsze);
        goto invalid;
    }

    status = nvme_map_dptr(n, data_size, req);
    if (status) {
        goto invalid;
    }

    return nvme_do_aio(n->conf.blk, data_offset, data_size, req);

invalid:
    block_acct_invalid(blk_get_stats(n->conf.blk), acct);
    return status;
}
static uint16_t nvme_io_cmd(NvmeCtrl *n, NvmeRequest *req)
{
    uint32_t nsid = le32_to_cpu(req->cmd.nsid);

    trace_pci_nvme_io_cmd(nvme_cid(req), nsid, nvme_sqid(req),
                          req->cmd.opcode, nvme_io_opc_str(req->cmd.opcode));

    if (unlikely(nsid == 0 || nsid > n->num_namespaces)) {
        trace_pci_nvme_err_invalid_ns(nsid, n->num_namespaces);
        return NVME_INVALID_NSID | NVME_DNR;
    }

    req->ns = &n->namespaces[nsid - 1];
    switch (req->cmd.opcode) {
    case NVME_CMD_FLUSH:
        return nvme_flush(n, req);
    case NVME_CMD_WRITE_ZEROES:
        return nvme_write_zeroes(n, req);
    case NVME_CMD_WRITE:
    case NVME_CMD_READ:
        return nvme_rw(n, req);
    default:
        trace_pci_nvme_err_invalid_opc(req->cmd.opcode);
        return NVME_INVALID_OPCODE | NVME_DNR;
    }
}
static void nvme_free_sq(NvmeSQueue *sq, NvmeCtrl *n)
{
    n->sq[sq->sqid] = NULL;
    timer_del(sq->timer);
    timer_free(sq->timer);
    g_free(sq->io_req);
    if (sq->sqid) {
        g_free(sq);
    }
}

static uint16_t nvme_del_sq(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeDeleteQ *c = (NvmeDeleteQ *)&req->cmd;
    NvmeRequest *r, *next;
    NvmeSQueue *sq;
    NvmeCQueue *cq;
    uint16_t qid = le16_to_cpu(c->qid);

    if (unlikely(!qid || nvme_check_sqid(n, qid))) {
        trace_pci_nvme_err_invalid_del_sq(qid);
        return NVME_INVALID_QID | NVME_DNR;
    }

    trace_pci_nvme_del_sq(qid);

    sq = n->sq[qid];
    while (!QTAILQ_EMPTY(&sq->out_req_list)) {
        r = QTAILQ_FIRST(&sq->out_req_list);
        assert(r->aiocb);
        blk_aio_cancel(r->aiocb);
    }
    if (!nvme_check_cqid(n, sq->cqid)) {
        cq = n->cq[sq->cqid];
        QTAILQ_REMOVE(&cq->sq_list, sq, entry);

        nvme_post_cqes(cq);
        QTAILQ_FOREACH_SAFE(r, &cq->req_list, entry, next) {
            if (r->sq == sq) {
                QTAILQ_REMOVE(&cq->req_list, r, entry);
                QTAILQ_INSERT_TAIL(&sq->req_list, r, entry);
            }
        }
    }

    nvme_free_sq(sq, n);
    return NVME_SUCCESS;
}
static void nvme_init_sq(NvmeSQueue *sq, NvmeCtrl *n, uint64_t dma_addr,
                         uint16_t sqid, uint16_t cqid, uint16_t size)
{
    int i;
    NvmeCQueue *cq;

    sq->ctrl = n;
    sq->dma_addr = dma_addr;
    sq->sqid = sqid;
    sq->size = size;
    sq->cqid = cqid;
    sq->head = sq->tail = 0;
    sq->io_req = g_new0(NvmeRequest, sq->size);

    QTAILQ_INIT(&sq->req_list);
    QTAILQ_INIT(&sq->out_req_list);
    for (i = 0; i < sq->size; i++) {
        sq->io_req[i].sq = sq;
        QTAILQ_INSERT_TAIL(&(sq->req_list), &sq->io_req[i], entry);
    }
    sq->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, nvme_process_sq, sq);

    assert(n->cq[cqid]);
    cq = n->cq[cqid];
    QTAILQ_INSERT_TAIL(&(cq->sq_list), sq, entry);
    n->sq[sqid] = sq;
}

static uint16_t nvme_create_sq(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeSQueue *sq;
    NvmeCreateSq *c = (NvmeCreateSq *)&req->cmd;

    uint16_t cqid = le16_to_cpu(c->cqid);
    uint16_t sqid = le16_to_cpu(c->sqid);
    uint16_t qsize = le16_to_cpu(c->qsize);
    uint16_t qflags = le16_to_cpu(c->sq_flags);
    uint64_t prp1 = le64_to_cpu(c->prp1);

    trace_pci_nvme_create_sq(prp1, sqid, cqid, qsize, qflags);

    if (unlikely(!cqid || nvme_check_cqid(n, cqid))) {
        trace_pci_nvme_err_invalid_create_sq_cqid(cqid);
        return NVME_INVALID_CQID | NVME_DNR;
    }
    if (unlikely(!sqid || !nvme_check_sqid(n, sqid))) {
        trace_pci_nvme_err_invalid_create_sq_sqid(sqid);
        return NVME_INVALID_QID | NVME_DNR;
    }
    if (unlikely(!qsize || qsize > NVME_CAP_MQES(n->bar.cap))) {
        trace_pci_nvme_err_invalid_create_sq_size(qsize);
        return NVME_MAX_QSIZE_EXCEEDED | NVME_DNR;
    }
    if (unlikely(!prp1 || prp1 & (n->page_size - 1))) {
        trace_pci_nvme_err_invalid_create_sq_addr(prp1);
        return NVME_INVALID_FIELD | NVME_DNR;
    }
    if (unlikely(!(NVME_SQ_FLAGS_PC(qflags)))) {
        trace_pci_nvme_err_invalid_create_sq_qflags(NVME_SQ_FLAGS_PC(qflags));
        return NVME_INVALID_FIELD | NVME_DNR;
    }
    sq = g_malloc0(sizeof(*sq));
    nvme_init_sq(sq, n, prp1, sqid, cqid, qsize + 1);
    return NVME_SUCCESS;
}

static uint16_t nvme_smart_info(NvmeCtrl *n, uint8_t rae, uint32_t buf_len,
                                uint64_t off, NvmeRequest *req)
{
    uint32_t nsid = le32_to_cpu(req->cmd.nsid);

    uint32_t trans_len;
    NvmeSmartLog smart;
    BlockAcctStats *s;
    uint64_t units_read = 0, units_written = 0;
    uint64_t read_commands = 0, write_commands = 0;
    uint64_t current_ms;

    if (nsid && nsid != 0xffffffff) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    s = blk_get_stats(n->conf.blk);

    units_read = s->nr_bytes[BLOCK_ACCT_READ] >> BDRV_SECTOR_BITS;
    units_written = s->nr_bytes[BLOCK_ACCT_WRITE] >> BDRV_SECTOR_BITS;
    read_commands = s->nr_ops[BLOCK_ACCT_READ];
    write_commands = s->nr_ops[BLOCK_ACCT_WRITE];

    if (off > sizeof(smart)) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    trans_len = MIN(sizeof(smart) - off, buf_len);

    memset(&smart, 0x0, sizeof(smart));

    smart.data_units_read[0] = cpu_to_le64(DIV_ROUND_UP(units_read, 1000));
    smart.data_units_written[0] = cpu_to_le64(DIV_ROUND_UP(units_written,
                                                           1000));
    smart.host_read_commands[0] = cpu_to_le64(read_commands);
    smart.host_write_commands[0] = cpu_to_le64(write_commands);

    smart.temperature = cpu_to_le16(n->temperature);

    if ((n->temperature >= n->features.temp_thresh_hi) ||
        (n->temperature <= n->features.temp_thresh_low)) {
        smart.critical_warning |= NVME_SMART_TEMPERATURE;
    }

    current_ms = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
    smart.power_on_hours[0] =
        cpu_to_le64((((current_ms - n->starttime_ms) / 1000) / 60) / 60);

    if (!rae) {
        nvme_clear_events(n, NVME_AER_TYPE_SMART);
    }

    return nvme_dma(n, (uint8_t *) &smart + off, trans_len,
                    DMA_DIRECTION_FROM_DEVICE, req);
}

static uint16_t nvme_fw_log_info(NvmeCtrl *n, uint32_t buf_len, uint64_t off,
                                 NvmeRequest *req)
{
    uint32_t trans_len;
    NvmeFwSlotInfoLog fw_log = {
        .afi = 0x1,
    };

    strpadcpy((char *)&fw_log.frs1, sizeof(fw_log.frs1), "1.0", ' ');

    if (off > sizeof(fw_log)) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    trans_len = MIN(sizeof(fw_log) - off, buf_len);

    return nvme_dma(n, (uint8_t *) &fw_log + off, trans_len,
                    DMA_DIRECTION_FROM_DEVICE, req);
}

static uint16_t nvme_error_info(NvmeCtrl *n, uint8_t rae, uint32_t buf_len,
                                uint64_t off, NvmeRequest *req)
{
    uint32_t trans_len;
    NvmeErrorLog errlog;

    if (!rae) {
        nvme_clear_events(n, NVME_AER_TYPE_ERROR);
    }

    if (off > sizeof(errlog)) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    memset(&errlog, 0x0, sizeof(errlog));

    trans_len = MIN(sizeof(errlog) - off, buf_len);

    return nvme_dma(n, (uint8_t *)&errlog, trans_len,
                    DMA_DIRECTION_FROM_DEVICE, req);
}

static uint16_t nvme_get_log(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeCmd *cmd = &req->cmd;

    uint32_t dw10 = le32_to_cpu(cmd->cdw10);
    uint32_t dw11 = le32_to_cpu(cmd->cdw11);
    uint32_t dw12 = le32_to_cpu(cmd->cdw12);
    uint32_t dw13 = le32_to_cpu(cmd->cdw13);
    uint8_t  lid = dw10 & 0xff;
    uint8_t  lsp = (dw10 >> 8) & 0xf;
    uint8_t  rae = (dw10 >> 15) & 0x1;
    uint32_t numdl, numdu;
    uint64_t off, lpol, lpou;
    size_t   len;
    uint16_t status;

    numdl = (dw10 >> 16);
    numdu = (dw11 & 0xffff);
    lpol = dw12;
    lpou = dw13;

    /*
     * NUMD is a 0's based dword count split across NUMDL (cdw10) and NUMDU
     * (cdw11), hence the +1 and the shift by 2 below.
     */
    len = (((numdu << 16) | numdl) + 1) << 2;
    off = (lpou << 32ULL) | lpol;

    if (off & 0x3) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    trace_pci_nvme_get_log(nvme_cid(req), lid, lsp, rae, len, off);

    status = nvme_check_mdts(n, len);
    if (status) {
        trace_pci_nvme_err_mdts(nvme_cid(req), len);
        return status;
    }

    switch (lid) {
    case NVME_LOG_ERROR_INFO:
        return nvme_error_info(n, rae, len, off, req);
    case NVME_LOG_SMART_INFO:
        return nvme_smart_info(n, rae, len, off, req);
    case NVME_LOG_FW_SLOT_INFO:
        return nvme_fw_log_info(n, len, off, req);
    default:
        trace_pci_nvme_err_invalid_log_page(nvme_cid(req), lid);
        return NVME_INVALID_FIELD | NVME_DNR;
    }
}
static void nvme_free_cq(NvmeCQueue *cq, NvmeCtrl *n)
{
    n->cq[cq->cqid] = NULL;
    timer_del(cq->timer);
    timer_free(cq->timer);
    msix_vector_unuse(&n->parent_obj, cq->vector);
    if (cq->cqid) {
        g_free(cq);
    }
}
*n
, NvmeRequest
*req
)
1349 NvmeDeleteQ
*c
= (NvmeDeleteQ
*)&req
->cmd
;
1351 uint16_t qid
= le16_to_cpu(c
->qid
);
1353 if (unlikely(!qid
|| nvme_check_cqid(n
, qid
))) {
1354 trace_pci_nvme_err_invalid_del_cq_cqid(qid
);
1355 return NVME_INVALID_CQID
| NVME_DNR
;
1359 if (unlikely(!QTAILQ_EMPTY(&cq
->sq_list
))) {
1360 trace_pci_nvme_err_invalid_del_cq_notempty(qid
);
1361 return NVME_INVALID_QUEUE_DEL
;
1363 nvme_irq_deassert(n
, cq
);
1364 trace_pci_nvme_del_cq(qid
);
1365 nvme_free_cq(cq
, n
);
1366 return NVME_SUCCESS
;
static void nvme_init_cq(NvmeCQueue *cq, NvmeCtrl *n, uint64_t dma_addr,
                         uint16_t cqid, uint16_t vector, uint16_t size,
                         uint16_t irq_enabled)
{
    int ret;

    ret = msix_vector_use(&n->parent_obj, vector);
    assert(ret == 0);
    cq->ctrl = n;
    cq->cqid = cqid;
    cq->size = size;
    cq->dma_addr = dma_addr;
    cq->phase = 1;
    cq->irq_enabled = irq_enabled;
    cq->vector = vector;
    cq->head = cq->tail = 0;
    QTAILQ_INIT(&cq->req_list);
    QTAILQ_INIT(&cq->sq_list);
    n->cq[cqid] = cq;
    cq->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, nvme_post_cqes, cq);
}
static uint16_t nvme_create_cq(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeCQueue *cq;
    NvmeCreateCq *c = (NvmeCreateCq *)&req->cmd;
    uint16_t cqid = le16_to_cpu(c->cqid);
    uint16_t vector = le16_to_cpu(c->irq_vector);
    uint16_t qsize = le16_to_cpu(c->qsize);
    uint16_t qflags = le16_to_cpu(c->cq_flags);
    uint64_t prp1 = le64_to_cpu(c->prp1);

    trace_pci_nvme_create_cq(prp1, cqid, vector, qsize, qflags,
                             NVME_CQ_FLAGS_IEN(qflags) != 0);

    if (unlikely(!cqid || !nvme_check_cqid(n, cqid))) {
        trace_pci_nvme_err_invalid_create_cq_cqid(cqid);
        return NVME_INVALID_CQID | NVME_DNR;
    }
    if (unlikely(!qsize || qsize > NVME_CAP_MQES(n->bar.cap))) {
        trace_pci_nvme_err_invalid_create_cq_size(qsize);
        return NVME_MAX_QSIZE_EXCEEDED | NVME_DNR;
    }
    if (unlikely(!prp1)) {
        trace_pci_nvme_err_invalid_create_cq_addr(prp1);
        return NVME_INVALID_FIELD | NVME_DNR;
    }
    if (unlikely(!msix_enabled(&n->parent_obj) && vector)) {
        trace_pci_nvme_err_invalid_create_cq_vector(vector);
        return NVME_INVALID_IRQ_VECTOR | NVME_DNR;
    }
    if (unlikely(vector >= n->params.msix_qsize)) {
        trace_pci_nvme_err_invalid_create_cq_vector(vector);
        return NVME_INVALID_IRQ_VECTOR | NVME_DNR;
    }
    if (unlikely(!(NVME_CQ_FLAGS_PC(qflags)))) {
        trace_pci_nvme_err_invalid_create_cq_qflags(NVME_CQ_FLAGS_PC(qflags));
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    cq = g_malloc0(sizeof(*cq));
    nvme_init_cq(cq, n, prp1, cqid, vector, qsize + 1,
                 NVME_CQ_FLAGS_IEN(qflags));

    /*
     * It is only required to set qs_created when creating a completion queue;
     * creating a submission queue without a matching completion queue will
     * fail.
     */
    n->qs_created = true;
    return NVME_SUCCESS;
}
static uint16_t nvme_identify_ctrl(NvmeCtrl *n, NvmeRequest *req)
{
    trace_pci_nvme_identify_ctrl();

    return nvme_dma(n, (uint8_t *)&n->id_ctrl, sizeof(n->id_ctrl),
                    DMA_DIRECTION_FROM_DEVICE, req);
}
static uint16_t nvme_identify_ns(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeNamespace *ns;
    NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
    uint32_t nsid = le32_to_cpu(c->nsid);

    trace_pci_nvme_identify_ns(nsid);

    if (unlikely(nsid == 0 || nsid > n->num_namespaces)) {
        trace_pci_nvme_err_invalid_ns(nsid, n->num_namespaces);
        return NVME_INVALID_NSID | NVME_DNR;
    }

    ns = &n->namespaces[nsid - 1];

    return nvme_dma(n, (uint8_t *)&ns->id_ns, sizeof(ns->id_ns),
                    DMA_DIRECTION_FROM_DEVICE, req);
}
static uint16_t nvme_identify_nslist(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
    static const int data_len = NVME_IDENTIFY_DATA_SIZE;
    uint32_t min_nsid = le32_to_cpu(c->nsid);
    uint32_t *list;
    uint16_t ret;
    int j = 0;

    trace_pci_nvme_identify_nslist(min_nsid);

    /*
     * Both 0xffffffff (NVME_NSID_BROADCAST) and 0xfffffffe are invalid values
     * since the Active Namespace ID List should return namespaces with ids
     * *higher* than the NSID specified in the command. This is also specified
     * in the spec (NVM Express v1.3d, Section 5.15.4).
     */
    if (min_nsid >= NVME_NSID_BROADCAST - 1) {
        return NVME_INVALID_NSID | NVME_DNR;
    }

    list = g_malloc0(data_len);
    for (int i = 1; i <= n->num_namespaces; i++) {
        if (i <= min_nsid) {
            continue;
        }
        list[j++] = cpu_to_le32(i);
        if (j == data_len / sizeof(uint32_t)) {
            break;
        }
    }
    ret = nvme_dma(n, (uint8_t *)list, data_len, DMA_DIRECTION_FROM_DEVICE,
                   req);
    g_free(list);
    return ret;
}
*n
, NvmeRequest
*req
)
1508 NvmeIdentify
*c
= (NvmeIdentify
*)&req
->cmd
;
1509 uint32_t nsid
= le32_to_cpu(c
->nsid
);
1511 uint8_t list
[NVME_IDENTIFY_DATA_SIZE
];
1520 struct data
*ns_descrs
= (struct data
*)list
;
1522 trace_pci_nvme_identify_ns_descr_list(nsid
);
1524 if (unlikely(nsid
== 0 || nsid
> n
->num_namespaces
)) {
1525 trace_pci_nvme_err_invalid_ns(nsid
, n
->num_namespaces
);
1526 return NVME_INVALID_NSID
| NVME_DNR
;
1529 memset(list
, 0x0, sizeof(list
));
1532 * Because the NGUID and EUI64 fields are 0 in the Identify Namespace data
1533 * structure, a Namespace UUID (nidt = 0x3) must be reported in the
1534 * Namespace Identification Descriptor. Add a very basic Namespace UUID
1537 ns_descrs
->uuid
.hdr
.nidt
= NVME_NIDT_UUID
;
1538 ns_descrs
->uuid
.hdr
.nidl
= NVME_NIDT_UUID_LEN
;
1539 stl_be_p(&ns_descrs
->uuid
.v
, nsid
);
1541 return nvme_dma(n
, list
, NVME_IDENTIFY_DATA_SIZE
,
1542 DMA_DIRECTION_FROM_DEVICE
, req
);
static uint16_t nvme_identify(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeIdentify *c = (NvmeIdentify *)&req->cmd;

    switch (le32_to_cpu(c->cns)) {
    case NVME_ID_CNS_NS:
        return nvme_identify_ns(n, req);
    case NVME_ID_CNS_CTRL:
        return nvme_identify_ctrl(n, req);
    case NVME_ID_CNS_NS_ACTIVE_LIST:
        return nvme_identify_nslist(n, req);
    case NVME_ID_CNS_NS_DESCR_LIST:
        return nvme_identify_ns_descr_list(n, req);
    default:
        trace_pci_nvme_err_invalid_identify_cns(le32_to_cpu(c->cns));
        return NVME_INVALID_FIELD | NVME_DNR;
    }
}
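/*
 * Abort is implemented below as a best-effort no-op: Dword 0 bit 0 of the
 * completion is set to 1, telling the host that the command was *not*
 * aborted, which the spec explicitly permits.
 */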
static uint16_t nvme_abort(NvmeCtrl *n, NvmeRequest *req)
{
    uint16_t sqid = le32_to_cpu(req->cmd.cdw10) & 0xffff;

    req->cqe.result = 1;
    if (nvme_check_sqid(n, sqid)) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    return NVME_SUCCESS;
}
static inline void nvme_set_timestamp(NvmeCtrl *n, uint64_t ts)
{
    trace_pci_nvme_setfeat_timestamp(ts);

    n->host_timestamp = le64_to_cpu(ts);
    n->timestamp_set_qemu_clock_ms = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
}
*n
)
1586 uint64_t current_time
= qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL
);
1587 uint64_t elapsed_time
= current_time
- n
->timestamp_set_qemu_clock_ms
;
1589 union nvme_timestamp
{
1591 uint64_t timestamp
:48;
1599 union nvme_timestamp ts
;
1601 ts
.timestamp
= n
->host_timestamp
+ elapsed_time
;
1603 /* If the host timestamp is non-zero, set the timestamp origin */
1604 ts
.origin
= n
->host_timestamp
? 0x01 : 0x00;
1606 trace_pci_nvme_getfeat_timestamp(ts
.all
);
1608 return cpu_to_le64(ts
.all
);
static uint16_t nvme_get_feature_timestamp(NvmeCtrl *n, NvmeRequest *req)
{
    uint64_t timestamp = nvme_get_timestamp(n);

    return nvme_dma(n, (uint8_t *)&timestamp, sizeof(timestamp),
                    DMA_DIRECTION_FROM_DEVICE, req);
}
static uint16_t nvme_get_feature(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeCmd *cmd = &req->cmd;
    uint32_t dw10 = le32_to_cpu(cmd->cdw10);
    uint32_t dw11 = le32_to_cpu(cmd->cdw11);
    uint32_t nsid = le32_to_cpu(cmd->nsid);
    uint32_t result;
    uint8_t fid = NVME_GETSETFEAT_FID(dw10);
    NvmeGetFeatureSelect sel = NVME_GETFEAT_SELECT(dw10);
    uint16_t iv;

    static const uint32_t nvme_feature_default[NVME_FID_MAX] = {
        [NVME_ARBITRATION] = NVME_ARB_AB_NOLIMIT,
    };

    trace_pci_nvme_getfeat(nvme_cid(req), fid, sel, dw11);

    if (!nvme_feature_support[fid]) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    if (nvme_feature_cap[fid] & NVME_FEAT_CAP_NS) {
        if (!nsid || nsid > n->num_namespaces) {
            /*
             * The Reservation Notification Mask and Reservation Persistence
             * features require a status code of Invalid Field in Command when
             * NSID is 0xFFFFFFFF. Since the device does not support those
             * features we can always return Invalid Namespace or Format as we
             * should do for all other features.
             */
            return NVME_INVALID_NSID | NVME_DNR;
        }
    }

    switch (sel) {
    case NVME_GETFEAT_SELECT_CURRENT:
        break;
    case NVME_GETFEAT_SELECT_SAVED:
        /* no features are saveable by the controller; fallthrough */
    case NVME_GETFEAT_SELECT_DEFAULT:
        goto defaults;
    case NVME_GETFEAT_SELECT_CAP:
        result = nvme_feature_cap[fid];
        goto out;
    }

    switch (fid) {
    case NVME_TEMPERATURE_THRESHOLD:
        result = 0;

        /*
         * The controller only implements the Composite Temperature sensor, so
         * return 0 for all other sensors.
         */
        if (NVME_TEMP_TMPSEL(dw11) != NVME_TEMP_TMPSEL_COMPOSITE) {
            goto out;
        }

        switch (NVME_TEMP_THSEL(dw11)) {
        case NVME_TEMP_THSEL_OVER:
            result = n->features.temp_thresh_hi;
            goto out;
        case NVME_TEMP_THSEL_UNDER:
            result = n->features.temp_thresh_low;
            goto out;
        }

        return NVME_INVALID_FIELD | NVME_DNR;
    case NVME_VOLATILE_WRITE_CACHE:
        result = blk_enable_write_cache(n->conf.blk);
        trace_pci_nvme_getfeat_vwcache(result ? "enabled" : "disabled");
        goto out;
    case NVME_ASYNCHRONOUS_EVENT_CONF:
        result = n->features.async_config;
        goto out;
    case NVME_TIMESTAMP:
        return nvme_get_feature_timestamp(n, req);
    default:
        break;
    }

defaults:
    switch (fid) {
    case NVME_TEMPERATURE_THRESHOLD:
        result = 0;

        if (NVME_TEMP_TMPSEL(dw11) != NVME_TEMP_TMPSEL_COMPOSITE) {
            break;
        }

        if (NVME_TEMP_THSEL(dw11) == NVME_TEMP_THSEL_OVER) {
            result = NVME_TEMPERATURE_WARNING;
        }

        break;
    case NVME_NUMBER_OF_QUEUES:
        result = (n->params.max_ioqpairs - 1) |
            ((n->params.max_ioqpairs - 1) << 16);
        trace_pci_nvme_getfeat_numq(result);
        break;
    case NVME_INTERRUPT_VECTOR_CONF:
        iv = dw11 & 0xffff;
        if (iv >= n->params.max_ioqpairs + 1) {
            return NVME_INVALID_FIELD | NVME_DNR;
        }

        result = iv;
        if (iv == n->admin_cq.vector) {
            result |= NVME_INTVC_NOCOALESCING;
        }

        break;
    default:
        result = nvme_feature_default[fid];
        break;
    }

out:
    req->cqe.result = cpu_to_le32(result);
    return NVME_SUCCESS;
}
static uint16_t nvme_set_feature_timestamp(NvmeCtrl *n, NvmeRequest *req)
{
    uint16_t ret;
    uint64_t timestamp;

    ret = nvme_dma(n, (uint8_t *)&timestamp, sizeof(timestamp),
                   DMA_DIRECTION_TO_DEVICE, req);
    if (ret != NVME_SUCCESS) {
        return ret;
    }

    nvme_set_timestamp(n, timestamp);

    return NVME_SUCCESS;
}
static uint16_t nvme_set_feature(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeCmd *cmd = &req->cmd;
    uint32_t dw10 = le32_to_cpu(cmd->cdw10);
    uint32_t dw11 = le32_to_cpu(cmd->cdw11);
    uint32_t nsid = le32_to_cpu(cmd->nsid);
    uint8_t fid = NVME_GETSETFEAT_FID(dw10);
    uint8_t save = NVME_SETFEAT_SAVE(dw10);

    trace_pci_nvme_setfeat(nvme_cid(req), fid, save, dw11);

    if (save) {
        return NVME_FID_NOT_SAVEABLE | NVME_DNR;
    }

    if (!nvme_feature_support[fid]) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    if (nvme_feature_cap[fid] & NVME_FEAT_CAP_NS) {
        if (!nsid || (nsid != NVME_NSID_BROADCAST &&
                      nsid > n->num_namespaces)) {
            return NVME_INVALID_NSID | NVME_DNR;
        }
    } else if (nsid && nsid != NVME_NSID_BROADCAST) {
        if (nsid > n->num_namespaces) {
            return NVME_INVALID_NSID | NVME_DNR;
        }

        return NVME_FEAT_NOT_NS_SPEC | NVME_DNR;
    }

    if (!(nvme_feature_cap[fid] & NVME_FEAT_CAP_CHANGE)) {
        return NVME_FEAT_NOT_CHANGEABLE | NVME_DNR;
    }

    switch (fid) {
    case NVME_TEMPERATURE_THRESHOLD:
        if (NVME_TEMP_TMPSEL(dw11) != NVME_TEMP_TMPSEL_COMPOSITE) {
            break;
        }

        switch (NVME_TEMP_THSEL(dw11)) {
        case NVME_TEMP_THSEL_OVER:
            n->features.temp_thresh_hi = NVME_TEMP_TMPTH(dw11);
            break;
        case NVME_TEMP_THSEL_UNDER:
            n->features.temp_thresh_low = NVME_TEMP_TMPTH(dw11);
            break;
        default:
            return NVME_INVALID_FIELD | NVME_DNR;
        }

        if (((n->temperature >= n->features.temp_thresh_hi) ||
             (n->temperature <= n->features.temp_thresh_low)) &&
            NVME_AEC_SMART(n->features.async_config) & NVME_SMART_TEMPERATURE) {
            nvme_enqueue_event(n, NVME_AER_TYPE_SMART,
                               NVME_AER_INFO_SMART_TEMP_THRESH,
                               NVME_LOG_SMART_INFO);
        }

        break;
    case NVME_VOLATILE_WRITE_CACHE:
        if (!(dw11 & 0x1) && blk_enable_write_cache(n->conf.blk)) {
            blk_flush(n->conf.blk);
        }

        blk_set_enable_write_cache(n->conf.blk, dw11 & 1);
        break;
    case NVME_NUMBER_OF_QUEUES:
        if (n->qs_created) {
            return NVME_CMD_SEQ_ERROR | NVME_DNR;
        }

        /*
         * NVMe v1.3, Section 5.21.1.7: 0xffff is not an allowed value for NCQR
         * and NSQR.
         */
        if ((dw11 & 0xffff) == 0xffff || ((dw11 >> 16) & 0xffff) == 0xffff) {
            return NVME_INVALID_FIELD | NVME_DNR;
        }

        trace_pci_nvme_setfeat_numq((dw11 & 0xFFFF) + 1,
                                    ((dw11 >> 16) & 0xFFFF) + 1,
                                    n->params.max_ioqpairs,
                                    n->params.max_ioqpairs);
        req->cqe.result = cpu_to_le32((n->params.max_ioqpairs - 1) |
                                      ((n->params.max_ioqpairs - 1) << 16));
        break;
    case NVME_ASYNCHRONOUS_EVENT_CONF:
        n->features.async_config = dw11;
        break;
    case NVME_TIMESTAMP:
        return nvme_set_feature_timestamp(n, req);
    default:
        return NVME_FEAT_NOT_CHANGEABLE | NVME_DNR;
    }
    return NVME_SUCCESS;
}
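/*
 * Note that params.aerl is a 0's based value, so the check in nvme_aer()
 * below allows up to aerl + 1 concurrently outstanding Asynchronous Event
 * Request commands.
 */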
static uint16_t nvme_aer(NvmeCtrl *n, NvmeRequest *req)
{
    trace_pci_nvme_aer(nvme_cid(req));

    if (n->outstanding_aers > n->params.aerl) {
        trace_pci_nvme_aer_aerl_exceeded();
        return NVME_AER_LIMIT_EXCEEDED;
    }

    n->aer_reqs[n->outstanding_aers] = req;
    n->outstanding_aers++;

    if (!QTAILQ_EMPTY(&n->aer_queue)) {
        nvme_process_aers(n);
    }

    return NVME_NO_COMPLETE;
}
static uint16_t nvme_admin_cmd(NvmeCtrl *n, NvmeRequest *req)
{
    trace_pci_nvme_admin_cmd(nvme_cid(req), nvme_sqid(req), req->cmd.opcode,
                             nvme_adm_opc_str(req->cmd.opcode));

    switch (req->cmd.opcode) {
    case NVME_ADM_CMD_DELETE_SQ:
        return nvme_del_sq(n, req);
    case NVME_ADM_CMD_CREATE_SQ:
        return nvme_create_sq(n, req);
    case NVME_ADM_CMD_GET_LOG_PAGE:
        return nvme_get_log(n, req);
    case NVME_ADM_CMD_DELETE_CQ:
        return nvme_del_cq(n, req);
    case NVME_ADM_CMD_CREATE_CQ:
        return nvme_create_cq(n, req);
    case NVME_ADM_CMD_IDENTIFY:
        return nvme_identify(n, req);
    case NVME_ADM_CMD_ABORT:
        return nvme_abort(n, req);
    case NVME_ADM_CMD_SET_FEATURES:
        return nvme_set_feature(n, req);
    case NVME_ADM_CMD_GET_FEATURES:
        return nvme_get_feature(n, req);
    case NVME_ADM_CMD_ASYNC_EV_REQ:
        return nvme_aer(n, req);
    default:
        trace_pci_nvme_err_invalid_admin_opc(req->cmd.opcode);
        return NVME_INVALID_OPCODE | NVME_DNR;
    }
}
static void nvme_process_sq(void *opaque)
{
    NvmeSQueue *sq = opaque;
    NvmeCtrl *n = sq->ctrl;
    NvmeCQueue *cq = n->cq[sq->cqid];

    uint16_t status;
    hwaddr addr;
    NvmeCmd cmd;
    NvmeRequest *req;

    while (!(nvme_sq_empty(sq) || QTAILQ_EMPTY(&sq->req_list))) {
        addr = sq->dma_addr + sq->head * n->sqe_size;
        if (nvme_addr_read(n, addr, (void *)&cmd, sizeof(cmd))) {
            trace_pci_nvme_err_addr_read(addr);
            trace_pci_nvme_err_cfs();
            n->bar.csts = NVME_CSTS_FAILED;
            break;
        }
        nvme_inc_sq_head(sq);

        req = QTAILQ_FIRST(&sq->req_list);
        QTAILQ_REMOVE(&sq->req_list, req, entry);
        QTAILQ_INSERT_TAIL(&sq->out_req_list, req, entry);
        nvme_req_clear(req);
        req->cqe.cid = cmd.cid;
        memcpy(&req->cmd, &cmd, sizeof(NvmeCmd));

        status = sq->sqid ? nvme_io_cmd(n, req) :
            nvme_admin_cmd(n, req);
        if (status != NVME_NO_COMPLETE) {
            req->status = status;
            nvme_enqueue_req_completion(cq, req);
        }
    }
}
static void nvme_clear_ctrl(NvmeCtrl *n)
{
    int i;

    blk_drain(n->conf.blk);

    for (i = 0; i < n->params.max_ioqpairs + 1; i++) {
        if (n->sq[i] != NULL) {
            nvme_free_sq(n->sq[i], n);
        }
    }
    for (i = 0; i < n->params.max_ioqpairs + 1; i++) {
        if (n->cq[i] != NULL) {
            nvme_free_cq(n->cq[i], n);
        }
    }

    while (!QTAILQ_EMPTY(&n->aer_queue)) {
        NvmeAsyncEvent *event = QTAILQ_FIRST(&n->aer_queue);
        QTAILQ_REMOVE(&n->aer_queue, event, entry);
        g_free(event);
    }

    n->aer_queued = 0;
    n->outstanding_aers = 0;
    n->qs_created = false;

    blk_flush(n->conf.blk);
    n->bar.cc = 0;
}
static int nvme_start_ctrl(NvmeCtrl *n)
{
    uint32_t page_bits = NVME_CC_MPS(n->bar.cc) + 12;
    uint32_t page_size = 1 << page_bits;

    if (unlikely(n->cq[0])) {
        trace_pci_nvme_err_startfail_cq();
        return -1;
    }
    if (unlikely(n->sq[0])) {
        trace_pci_nvme_err_startfail_sq();
        return -1;
    }
    if (unlikely(!n->bar.asq)) {
        trace_pci_nvme_err_startfail_nbarasq();
        return -1;
    }
    if (unlikely(!n->bar.acq)) {
        trace_pci_nvme_err_startfail_nbaracq();
        return -1;
    }
    if (unlikely(n->bar.asq & (page_size - 1))) {
        trace_pci_nvme_err_startfail_asq_misaligned(n->bar.asq);
        return -1;
    }
    if (unlikely(n->bar.acq & (page_size - 1))) {
        trace_pci_nvme_err_startfail_acq_misaligned(n->bar.acq);
        return -1;
    }
    if (unlikely(NVME_CC_MPS(n->bar.cc) <
                 NVME_CAP_MPSMIN(n->bar.cap))) {
        trace_pci_nvme_err_startfail_page_too_small(
                    NVME_CC_MPS(n->bar.cc),
                    NVME_CAP_MPSMIN(n->bar.cap));
        return -1;
    }
    if (unlikely(NVME_CC_MPS(n->bar.cc) >
                 NVME_CAP_MPSMAX(n->bar.cap))) {
        trace_pci_nvme_err_startfail_page_too_large(
                    NVME_CC_MPS(n->bar.cc),
                    NVME_CAP_MPSMAX(n->bar.cap));
        return -1;
    }
    if (unlikely(NVME_CC_IOCQES(n->bar.cc) <
                 NVME_CTRL_CQES_MIN(n->id_ctrl.cqes))) {
        trace_pci_nvme_err_startfail_cqent_too_small(
                    NVME_CC_IOCQES(n->bar.cc),
                    NVME_CTRL_CQES_MIN(n->bar.cap));
        return -1;
    }
    if (unlikely(NVME_CC_IOCQES(n->bar.cc) >
                 NVME_CTRL_CQES_MAX(n->id_ctrl.cqes))) {
        trace_pci_nvme_err_startfail_cqent_too_large(
                    NVME_CC_IOCQES(n->bar.cc),
                    NVME_CTRL_CQES_MAX(n->bar.cap));
        return -1;
    }
    if (unlikely(NVME_CC_IOSQES(n->bar.cc) <
                 NVME_CTRL_SQES_MIN(n->id_ctrl.sqes))) {
        trace_pci_nvme_err_startfail_sqent_too_small(
                    NVME_CC_IOSQES(n->bar.cc),
                    NVME_CTRL_SQES_MIN(n->bar.cap));
        return -1;
    }
    if (unlikely(NVME_CC_IOSQES(n->bar.cc) >
                 NVME_CTRL_SQES_MAX(n->id_ctrl.sqes))) {
        trace_pci_nvme_err_startfail_sqent_too_large(
                    NVME_CC_IOSQES(n->bar.cc),
                    NVME_CTRL_SQES_MAX(n->bar.cap));
        return -1;
    }
    if (unlikely(!NVME_AQA_ASQS(n->bar.aqa))) {
        trace_pci_nvme_err_startfail_asqent_sz_zero();
        return -1;
    }
    if (unlikely(!NVME_AQA_ACQS(n->bar.aqa))) {
        trace_pci_nvme_err_startfail_acqent_sz_zero();
        return -1;
    }

    n->page_bits = page_bits;
    n->page_size = page_size;
    n->max_prp_ents = n->page_size / sizeof(uint64_t);
    n->cqe_size = 1 << NVME_CC_IOCQES(n->bar.cc);
    n->sqe_size = 1 << NVME_CC_IOSQES(n->bar.cc);
    nvme_init_cq(&n->admin_cq, n, n->bar.acq, 0, 0,
                 NVME_AQA_ACQS(n->bar.aqa) + 1, 1);
    nvme_init_sq(&n->admin_sq, n, n->bar.asq, 0, 0,
                 NVME_AQA_ASQS(n->bar.aqa) + 1);

    nvme_set_timestamp(n, 0ULL);

    QTAILQ_INIT(&n->aer_queue);

    return 0;
}
static void nvme_write_bar(NvmeCtrl *n, hwaddr offset, uint64_t data,
                           unsigned size)
{
    if (unlikely(offset & (sizeof(uint32_t) - 1))) {
        NVME_GUEST_ERR(pci_nvme_ub_mmiowr_misaligned32,
                       "MMIO write not 32-bit aligned,"
                       " offset=0x%"PRIx64"", offset);
        /* should be ignored, fall through for now */
    }

    if (unlikely(size < sizeof(uint32_t))) {
        NVME_GUEST_ERR(pci_nvme_ub_mmiowr_toosmall,
                       "MMIO write smaller than 32-bits,"
                       " offset=0x%"PRIx64", size=%u",
                       offset, size);
        /* should be ignored, fall through for now */
    }

    switch (offset) {
    case 0xc:   /* INTMS */
        if (unlikely(msix_enabled(&(n->parent_obj)))) {
            NVME_GUEST_ERR(pci_nvme_ub_mmiowr_intmask_with_msix,
                           "undefined access to interrupt mask set"
                           " when MSI-X is enabled");
            /* should be ignored, fall through for now */
        }
        n->bar.intms |= data & 0xffffffff;
        n->bar.intmc = n->bar.intms;
        trace_pci_nvme_mmio_intm_set(data & 0xffffffff, n->bar.intmc);
        nvme_irq_check(n);
        break;
    case 0x10:  /* INTMC */
        if (unlikely(msix_enabled(&(n->parent_obj)))) {
            NVME_GUEST_ERR(pci_nvme_ub_mmiowr_intmask_with_msix,
                           "undefined access to interrupt mask clr"
                           " when MSI-X is enabled");
            /* should be ignored, fall through for now */
        }
        n->bar.intms &= ~(data & 0xffffffff);
        n->bar.intmc = n->bar.intms;
        trace_pci_nvme_mmio_intm_clr(data & 0xffffffff, n->bar.intmc);
        nvme_irq_check(n);
        break;
    case 0x14:  /* CC */
        trace_pci_nvme_mmio_cfg(data & 0xffffffff);
        /* Windows first sends data, then sends enable bit */
        if (!NVME_CC_EN(data) && !NVME_CC_EN(n->bar.cc) &&
            !NVME_CC_SHN(data) && !NVME_CC_SHN(n->bar.cc))
        {
            n->bar.cc = data;
        }

        if (NVME_CC_EN(data) && !NVME_CC_EN(n->bar.cc)) {
            n->bar.cc = data;
            if (unlikely(nvme_start_ctrl(n))) {
                trace_pci_nvme_err_startfail();
                n->bar.csts = NVME_CSTS_FAILED;
            } else {
                trace_pci_nvme_mmio_start_success();
                n->bar.csts = NVME_CSTS_READY;
            }
        } else if (!NVME_CC_EN(data) && NVME_CC_EN(n->bar.cc)) {
            trace_pci_nvme_mmio_stopped();
            nvme_clear_ctrl(n);
            n->bar.csts &= ~NVME_CSTS_READY;
        }
        if (NVME_CC_SHN(data) && !(NVME_CC_SHN(n->bar.cc))) {
            trace_pci_nvme_mmio_shutdown_set();
            nvme_clear_ctrl(n);
            n->bar.cc = data;
            n->bar.csts |= NVME_CSTS_SHST_COMPLETE;
        } else if (!NVME_CC_SHN(data) && NVME_CC_SHN(n->bar.cc)) {
            trace_pci_nvme_mmio_shutdown_cleared();
            n->bar.csts &= ~NVME_CSTS_SHST_COMPLETE;
            n->bar.cc = data;
        }
        break;
    case 0x1C:  /* CSTS */
        if (data & (1 << 4)) {
            NVME_GUEST_ERR(pci_nvme_ub_mmiowr_ssreset_w1c_unsupported,
                           "attempted to W1C CSTS.NSSRO"
                           " but CAP.NSSRS is zero (not supported)");
        } else if (data != 0) {
            NVME_GUEST_ERR(pci_nvme_ub_mmiowr_ro_csts,
                           "attempted to set a read only bit"
                           " of controller status");
        }
        break;
    case 0x20:  /* NSSR */
        if (data == 0x4E564D65) {
            trace_pci_nvme_ub_mmiowr_ssreset_unsupported();
        } else {
            /* The spec says that writes of other values have no effect */
            return;
        }
        break;
    case 0x24:  /* AQA */
        n->bar.aqa = data & 0xffffffff;
        trace_pci_nvme_mmio_aqattr(data & 0xffffffff);
        break;
    case 0x28:  /* ASQ */
        n->bar.asq = data;
        trace_pci_nvme_mmio_asqaddr(data);
        break;
    case 0x2c:  /* ASQ hi */
        n->bar.asq |= data << 32;
        trace_pci_nvme_mmio_asqaddr_hi(data, n->bar.asq);
        break;
    case 0x30:  /* ACQ */
        trace_pci_nvme_mmio_acqaddr(data);
        n->bar.acq = data;
        break;
    case 0x34:  /* ACQ hi */
        n->bar.acq |= data << 32;
        trace_pci_nvme_mmio_acqaddr_hi(data, n->bar.acq);
        break;
    case 0x38:  /* CMBLOC */
        NVME_GUEST_ERR(pci_nvme_ub_mmiowr_cmbloc_reserved,
                       "invalid write to reserved CMBLOC"
                       " when CMBSZ is zero, ignored");
        return;
    case 0x3C:  /* CMBSZ */
        NVME_GUEST_ERR(pci_nvme_ub_mmiowr_cmbsz_readonly,
                       "invalid write to read only CMBSZ, ignored");
        return;
    case 0xE00: /* PMRCAP */
        NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrcap_readonly,
                       "invalid write to PMRCAP register, ignored");
        return;
    case 0xE04: /* TODO PMRCTL */
        break;
    case 0xE08: /* PMRSTS */
        NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrsts_readonly,
                       "invalid write to PMRSTS register, ignored");
        return;
    case 0xE0C: /* PMREBS */
        NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrebs_readonly,
                       "invalid write to PMREBS register, ignored");
        return;
    case 0xE10: /* PMRSWTP */
        NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrswtp_readonly,
                       "invalid write to PMRSWTP register, ignored");
        return;
    case 0xE14: /* TODO PMRMSC */
        break;
    default:
        NVME_GUEST_ERR(pci_nvme_ub_mmiowr_invalid,
                       "invalid MMIO write,"
                       " offset=0x%"PRIx64", data=%"PRIx64"",
                       offset, data);
        break;
    }
}
, hwaddr addr
, unsigned size
)
2229 NvmeCtrl
*n
= (NvmeCtrl
*)opaque
;
2230 uint8_t *ptr
= (uint8_t *)&n
->bar
;
2233 trace_pci_nvme_mmio_read(addr
);
2235 if (unlikely(addr
& (sizeof(uint32_t) - 1))) {
2236 NVME_GUEST_ERR(pci_nvme_ub_mmiord_misaligned32
,
2237 "MMIO read not 32-bit aligned,"
2238 " offset=0x%"PRIx64
"", addr
);
2239 /* should RAZ, fall through for now */
2240 } else if (unlikely(size
< sizeof(uint32_t))) {
2241 NVME_GUEST_ERR(pci_nvme_ub_mmiord_toosmall
,
2242 "MMIO read smaller than 32-bits,"
2243 " offset=0x%"PRIx64
"", addr
);
2244 /* should RAZ, fall through for now */
2247 if (addr
< sizeof(n
->bar
)) {
2249 * When PMRWBM bit 1 is set then read from
2250 * from PMRSTS should ensure prior writes
2251 * made it to persistent media
2253 if (addr
== 0xE08 &&
2254 (NVME_PMRCAP_PMRWBM(n
->bar
.pmrcap
) & 0x02)) {
2255 memory_region_msync(&n
->pmrdev
->mr
, 0, n
->pmrdev
->size
);
2257 memcpy(&val
, ptr
+ addr
, size
);
2259 NVME_GUEST_ERR(pci_nvme_ub_mmiord_invalid_ofs
,
2260 "MMIO read beyond last register,"
2261 " offset=0x%"PRIx64
", returning 0", addr
);
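/*
 * Doorbell layout, assuming the 4-byte doorbell stride (CAP.DSTRD = 0) this
 * device reports: the doorbell region starts at offset 0x1000 and each queue
 * pair owns an 8-byte slot, with the SQ tail doorbell at 0x1000 + qid * 8
 * and the CQ head doorbell at 0x1000 + qid * 8 + 4. For example, a write to
 * offset 0x100c targets the head doorbell of CQ 1.
 */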
static void nvme_process_db(NvmeCtrl *n, hwaddr addr, int val)
{
    uint32_t qid;

    if (unlikely(addr & ((1 << 2) - 1))) {
        NVME_GUEST_ERR(pci_nvme_ub_db_wr_misaligned,
                       "doorbell write not 32-bit aligned,"
                       " offset=0x%"PRIx64", ignoring", addr);
        return;
    }

    if (((addr - 0x1000) >> 2) & 1) {
        /* Completion queue doorbell write */

        uint16_t new_head = val & 0xffff;
        int start_sqs;
        NvmeCQueue *cq;

        qid = (addr - (0x1000 + (1 << 2))) >> 3;
        if (unlikely(nvme_check_cqid(n, qid))) {
            NVME_GUEST_ERR(pci_nvme_ub_db_wr_invalid_cq,
                           "completion queue doorbell write"
                           " for nonexistent queue,"
                           " sqid=%"PRIu32", ignoring", qid);

            /*
             * NVM Express v1.3d, Section 4.1 states: "If host software writes
             * an invalid value to the Submission Queue Tail Doorbell or
             * Completion Queue Head Doorbell register and an Asynchronous
             * Event Request command is outstanding, then an asynchronous
             * event is posted to the Admin Completion Queue with a status
             * code of Invalid Doorbell Write Value."
             *
             * Also note that the spec includes the "Invalid Doorbell Register"
             * status code, but nowhere does it specify when to use it.
             * However, it seems reasonable to use it here in a similar
             * fashion.
             */
            if (n->outstanding_aers) {
                nvme_enqueue_event(n, NVME_AER_TYPE_ERROR,
                                   NVME_AER_INFO_ERR_INVALID_DB_REGISTER,
                                   NVME_LOG_ERROR_INFO);
            }

            return;
        }

        cq = n->cq[qid];
        if (unlikely(new_head >= cq->size)) {
            NVME_GUEST_ERR(pci_nvme_ub_db_wr_invalid_cqhead,
                           "completion queue doorbell write value"
                           " beyond queue size, sqid=%"PRIu32","
                           " new_head=%"PRIu16", ignoring",
                           qid, new_head);

            if (n->outstanding_aers) {
                nvme_enqueue_event(n, NVME_AER_TYPE_ERROR,
                                   NVME_AER_INFO_ERR_INVALID_DB_VALUE,
                                   NVME_LOG_ERROR_INFO);
            }

            return;
        }

        trace_pci_nvme_mmio_doorbell_cq(cq->cqid, new_head);

        start_sqs = nvme_cq_full(cq) ? 1 : 0;
        cq->head = new_head;
        if (start_sqs) {
            NvmeSQueue *sq;
            QTAILQ_FOREACH(sq, &cq->sq_list, entry) {
                timer_mod(sq->timer,
                          qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 500);
            }
            timer_mod(cq->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 500);
        }

        if (cq->tail == cq->head) {
            nvme_irq_deassert(n, cq);
        }
    } else {
        /* Submission queue doorbell write */

        uint16_t new_tail = val & 0xffff;
        NvmeSQueue *sq;

        qid = (addr - 0x1000) >> 3;
        if (unlikely(nvme_check_sqid(n, qid))) {
            NVME_GUEST_ERR(pci_nvme_ub_db_wr_invalid_sq,
                           "submission queue doorbell write"
                           " for nonexistent queue,"
                           " sqid=%"PRIu32", ignoring", qid);

            if (n->outstanding_aers) {
                nvme_enqueue_event(n, NVME_AER_TYPE_ERROR,
                                   NVME_AER_INFO_ERR_INVALID_DB_REGISTER,
                                   NVME_LOG_ERROR_INFO);
            }

            return;
        }

        sq = n->sq[qid];
        if (unlikely(new_tail >= sq->size)) {
            NVME_GUEST_ERR(pci_nvme_ub_db_wr_invalid_sqtail,
                           "submission queue doorbell write value"
                           " beyond queue size, sqid=%"PRIu32","
                           " new_tail=%"PRIu16", ignoring",
                           qid, new_tail);

            if (n->outstanding_aers) {
                nvme_enqueue_event(n, NVME_AER_TYPE_ERROR,
                                   NVME_AER_INFO_ERR_INVALID_DB_VALUE,
                                   NVME_LOG_ERROR_INFO);
            }

            return;
        }

        trace_pci_nvme_mmio_doorbell_sq(sq->sqid, new_tail);

        sq->tail = new_tail;
        timer_mod(sq->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 500);
    }
}
static void nvme_mmio_write(void *opaque, hwaddr addr, uint64_t data,
                            unsigned size)
{
    NvmeCtrl *n = (NvmeCtrl *)opaque;

    trace_pci_nvme_mmio_write(addr, data);

    if (addr < sizeof(n->bar)) {
        nvme_write_bar(n, addr, data, size);
    } else {
        nvme_process_db(n, addr, data);
    }
}

static const MemoryRegionOps nvme_mmio_ops = {
    .read = nvme_mmio_read,
    .write = nvme_mmio_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 2,
        .max_access_size = 8,
    },
};
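/*
 * Editorial note: .impl above makes the memory core split or widen guest
 * accesses so the handlers only ever see sizes between 2 and 8 bytes;
 * nvme_mmio_read() still warns about sub-32-bit register reads itself.
 */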
static void nvme_cmb_write(void *opaque, hwaddr addr, uint64_t data,
                           unsigned size)
{
    NvmeCtrl *n = (NvmeCtrl *)opaque;
    stn_le_p(&n->cmbuf[addr], size, data);
}

static uint64_t nvme_cmb_read(void *opaque, hwaddr addr, unsigned size)
{
    NvmeCtrl *n = (NvmeCtrl *)opaque;
    return ldn_le_p(&n->cmbuf[addr], size);
}

static const MemoryRegionOps nvme_cmb_ops = {
    .read = nvme_cmb_read,
    .write = nvme_cmb_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};
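/*
 * Editorial note: the CMB is plain guest-visible memory backed by the
 * host buffer n->cmbuf; stn_le_p()/ldn_le_p() store and load `size` bytes
 * little-endian at the given offset, so no per-register decoding is
 * needed here.
 */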
static void nvme_check_constraints(NvmeCtrl *n, Error **errp)
{
    NvmeParams *params = &n->params;

    if (params->num_queues) {
        warn_report("num_queues is deprecated; please use max_ioqpairs "
                    "instead");

        params->max_ioqpairs = params->num_queues - 1;
    }

    if (params->max_ioqpairs < 1 ||
        params->max_ioqpairs > NVME_MAX_IOQPAIRS) {
        error_setg(errp, "max_ioqpairs must be between 1 and %d",
                   NVME_MAX_IOQPAIRS);
        return;
    }

    if (params->msix_qsize < 1 ||
        params->msix_qsize > PCI_MSIX_FLAGS_QSIZE + 1) {
        error_setg(errp, "msix_qsize must be between 1 and %d",
                   PCI_MSIX_FLAGS_QSIZE + 1);
        return;
    }

    if (!n->conf.blk) {
        error_setg(errp, "drive property not set");
        return;
    }

    if (!params->serial) {
        error_setg(errp, "serial property not set");
        return;
    }

    if (!n->params.cmb_size_mb && n->pmrdev) {
        if (host_memory_backend_is_mapped(n->pmrdev)) {
            error_setg(errp, "can't use already busy memdev: %s",
                       object_get_canonical_path_component(OBJECT(n->pmrdev)));
            return;
        }

        if (!is_power_of_2(n->pmrdev->size)) {
            error_setg(errp, "pmr backend size needs to be a power of 2");
            return;
        }

        host_memory_backend_set_mapped(n->pmrdev, true);
    }
}
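/*
 * Example (illustrative, not from the original source): a command line
 * such as "-device nvme,drive=d0,serial=foo,max_ioqpairs=0" fails realize
 * with "max_ioqpairs must be between 1 and 65535", since NVME_MAX_IOQPAIRS
 * is 0xffff.
 */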
static void nvme_init_state(NvmeCtrl *n)
{
    n->num_namespaces = 1;
    /* add one to max_ioqpairs to account for the admin queue pair */
    n->reg_size = pow2ceil(sizeof(NvmeBar) +
                           2 * (n->params.max_ioqpairs + 1) * NVME_DB_SIZE);
    n->namespaces = g_new0(NvmeNamespace, n->num_namespaces);
    n->sq = g_new0(NvmeSQueue *, n->params.max_ioqpairs + 1);
    n->cq = g_new0(NvmeCQueue *, n->params.max_ioqpairs + 1);
    n->temperature = NVME_TEMPERATURE;
    n->features.temp_thresh_hi = NVME_TEMPERATURE_WARNING;
    n->starttime_ms = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
    n->aer_reqs = g_new0(NvmeRequest *, n->params.aerl + 1);
}
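/*
 * Worked example (editorial): with the default max_ioqpairs of 64, the
 * doorbell area sized above needs 2 * (64 + 1) * NVME_DB_SIZE = 520 bytes
 * after the fixed register block, and pow2ceil() rounds the resulting
 * BAR size up to the next power of two.
 */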
static void nvme_init_blk(NvmeCtrl *n, Error **errp)
{
    if (!blkconf_blocksizes(&n->conf, errp)) {
        return;
    }
    blkconf_apply_backend_options(&n->conf, blk_is_read_only(n->conf.blk),
                                  false, errp);
}
static void nvme_init_namespace(NvmeCtrl *n, NvmeNamespace *ns, Error **errp)
{
    int64_t bs_size;
    NvmeIdNs *id_ns = &ns->id_ns;

    bs_size = blk_getlength(n->conf.blk);
    if (bs_size < 0) {
        error_setg_errno(errp, -bs_size, "could not get backing file size");
        return;
    }

    n->ns_size = bs_size;

    id_ns->lbaf[0].ds = BDRV_SECTOR_BITS;
    id_ns->nsze = cpu_to_le64(nvme_ns_nlbas(n, ns));

    /* no thin provisioning */
    id_ns->ncap = id_ns->nsze;
    id_ns->nuse = id_ns->ncap;
}
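/*
 * Worked example (editorial): only LBA format 0 is populated, with
 * ds = BDRV_SECTOR_BITS = 9 (512-byte blocks), so a 1 GiB backing image
 * yields nsze = ncap = nuse = 2097152 LBAs.
 */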
static void nvme_init_cmb(NvmeCtrl *n, PCIDevice *pci_dev)
{
    NVME_CMBLOC_SET_BIR(n->bar.cmbloc, NVME_CMB_BIR);
    NVME_CMBLOC_SET_OFST(n->bar.cmbloc, 0);

    NVME_CMBSZ_SET_SQS(n->bar.cmbsz, 1);
    NVME_CMBSZ_SET_CQS(n->bar.cmbsz, 0);
    NVME_CMBSZ_SET_LISTS(n->bar.cmbsz, 1);
    NVME_CMBSZ_SET_RDS(n->bar.cmbsz, 1);
    NVME_CMBSZ_SET_WDS(n->bar.cmbsz, 1);
    NVME_CMBSZ_SET_SZU(n->bar.cmbsz, 2); /* MBs */
    NVME_CMBSZ_SET_SZ(n->bar.cmbsz, n->params.cmb_size_mb);

    n->cmbuf = g_malloc0(NVME_CMBSZ_GETSIZE(n->bar.cmbsz));
    memory_region_init_io(&n->ctrl_mem, OBJECT(n), &nvme_cmb_ops, n,
                          "nvme-cmb", NVME_CMBSZ_GETSIZE(n->bar.cmbsz));
    pci_register_bar(pci_dev, NVME_CMBLOC_BIR(n->bar.cmbloc),
                     PCI_BASE_ADDRESS_SPACE_MEMORY |
                     PCI_BASE_ADDRESS_MEM_TYPE_64 |
                     PCI_BASE_ADDRESS_MEM_PREFETCH, &n->ctrl_mem);
}
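/*
 * Editorial note: SZU = 2 selects 1 MiB granularity for CMBSZ.SZ, which
 * is what lets cmb_size_mb be written into SZ directly;
 * NVME_CMBSZ_GETSIZE() then recovers the byte size for both the
 * allocation and the memory region.
 */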
static void nvme_init_pmr(NvmeCtrl *n, PCIDevice *pci_dev)
{
    /* Controller Capabilities register */
    NVME_CAP_SET_PMRS(n->bar.cap, 1);

    /* PMR Capabilities register */
    NVME_PMRCAP_SET_RDS(n->bar.pmrcap, 0);
    NVME_PMRCAP_SET_WDS(n->bar.pmrcap, 0);
    NVME_PMRCAP_SET_BIR(n->bar.pmrcap, NVME_PMR_BIR);
    NVME_PMRCAP_SET_PMRTU(n->bar.pmrcap, 0);
    /* Turn on bit 1 support */
    NVME_PMRCAP_SET_PMRWBM(n->bar.pmrcap, 0x02);
    NVME_PMRCAP_SET_PMRTO(n->bar.pmrcap, 0);
    NVME_PMRCAP_SET_CMSS(n->bar.pmrcap, 0);

    /* PMR Control register */
    NVME_PMRCTL_SET_EN(n->bar.pmrctl, 0);

    /* PMR Status register */
    NVME_PMRSTS_SET_ERR(n->bar.pmrsts, 0);
    NVME_PMRSTS_SET_NRDY(n->bar.pmrsts, 0);
    NVME_PMRSTS_SET_HSTS(n->bar.pmrsts, 0);
    NVME_PMRSTS_SET_CBAI(n->bar.pmrsts, 0);

    /* PMR Elasticity Buffer Size register */
    NVME_PMREBS_SET_PMRSZU(n->bar.pmrebs, 0);
    NVME_PMREBS_SET_RBB(n->bar.pmrebs, 0);
    NVME_PMREBS_SET_PMRWBZ(n->bar.pmrebs, 0);

    /* PMR Sustained Write Throughput register */
    NVME_PMRSWTP_SET_PMRSWTU(n->bar.pmrswtp, 0);
    NVME_PMRSWTP_SET_PMRSWTV(n->bar.pmrswtp, 0);

    /* PMR Memory Space Control register */
    NVME_PMRMSC_SET_CMSE(n->bar.pmrmsc, 0);
    NVME_PMRMSC_SET_CBA(n->bar.pmrmsc, 0);

    pci_register_bar(pci_dev, NVME_PMRCAP_BIR(n->bar.pmrcap),
                     PCI_BASE_ADDRESS_SPACE_MEMORY |
                     PCI_BASE_ADDRESS_MEM_TYPE_64 |
                     PCI_BASE_ADDRESS_MEM_PREFETCH, &n->pmrdev->mr);
}
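/*
 * Editorial note: PMRWBM = 0x02 advertises the write barrier mechanism
 * where a read of PMRSTS guarantees that prior writes to the PMR have
 * reached persistence; nvme_mmio_read() implements this by msync'ing the
 * backing memory region on reads of offset 0xE08.
 */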
static void nvme_init_pci(NvmeCtrl *n, PCIDevice *pci_dev, Error **errp)
{
    uint8_t *pci_conf = pci_dev->config;

    pci_conf[PCI_INTERRUPT_PIN] = 1;
    pci_config_set_prog_interface(pci_conf, 0x2);
    pci_config_set_class(pci_conf, PCI_CLASS_STORAGE_EXPRESS);
    pcie_endpoint_cap_init(pci_dev, 0x80);

    memory_region_init_io(&n->iomem, OBJECT(n), &nvme_mmio_ops, n, "nvme",
                          n->reg_size);
    pci_register_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY |
                     PCI_BASE_ADDRESS_MEM_TYPE_64, &n->iomem);
    if (msix_init_exclusive_bar(pci_dev, n->params.msix_qsize, 4, errp)) {
        return;
    }

    if (n->params.cmb_size_mb) {
        nvme_init_cmb(n, pci_dev);
    } else if (n->pmrdev) {
        nvme_init_pmr(n, pci_dev);
    }
}
static void nvme_init_ctrl(NvmeCtrl *n, PCIDevice *pci_dev)
{
    NvmeIdCtrl *id = &n->id_ctrl;
    uint8_t *pci_conf = pci_dev->config;
    char *subnqn;

    id->vid = cpu_to_le16(pci_get_word(pci_conf + PCI_VENDOR_ID));
    id->ssvid = cpu_to_le16(pci_get_word(pci_conf + PCI_SUBSYSTEM_VENDOR_ID));
    strpadcpy((char *)id->mn, sizeof(id->mn), "QEMU NVMe Ctrl", ' ');
    strpadcpy((char *)id->fr, sizeof(id->fr), "1.0", ' ');
    strpadcpy((char *)id->sn, sizeof(id->sn), n->params.serial, ' ');

    id->mdts = n->params.mdts;
    id->ver = cpu_to_le32(NVME_SPEC_VER);
    id->oacs = cpu_to_le16(0);

    /*
     * Because the controller always completes the Abort command immediately,
     * there can never be more than one concurrently executing Abort command,
     * so this value is never used for anything. Note that there can easily
     * be many Abort commands in the queues, but they are not considered
     * "executing" until processed by nvme_abort.
     *
     * The specification recommends a value of 3 for Abort Command Limit
     * (four concurrently outstanding Abort commands), so let's use that,
     * though it is inconsequential.
     */
    id->acl = 3;
    id->aerl = n->params.aerl;
    id->frmw = (NVME_NUM_FW_SLOTS << 1) | NVME_FRMW_SLOT1_RO;
    id->lpa = NVME_LPA_EXTENDED;

    /* recommended default value (~70 C) */
    id->wctemp = cpu_to_le16(NVME_TEMPERATURE_WARNING);
    id->cctemp = cpu_to_le16(NVME_TEMPERATURE_CRITICAL);

    id->sqes = (0x6 << 4) | 0x6;
    id->cqes = (0x4 << 4) | 0x4;
    id->nn = cpu_to_le32(n->num_namespaces);
    id->oncs = cpu_to_le16(NVME_ONCS_WRITE_ZEROES | NVME_ONCS_TIMESTAMP |
                           NVME_ONCS_FEATURES);
    id->sgls = cpu_to_le32(NVME_CTRL_SGLS_SUPPORT_NO_ALIGN |
                           NVME_CTRL_SGLS_BITBUCKET);

    subnqn = g_strdup_printf("nqn.2019-08.org.qemu:%s", n->params.serial);
    strpadcpy((char *)id->subnqn, sizeof(id->subnqn), subnqn, '\0');
    g_free(subnqn);

    id->psd[0].mp = cpu_to_le16(0x9c4);
    id->psd[0].enlat = cpu_to_le32(0x10);
    id->psd[0].exlat = cpu_to_le32(0x4);
    if (blk_enable_write_cache(n->conf.blk)) {
        id->vwc = 1;
    }

    NVME_CAP_SET_MQES(n->bar.cap, 0x7ff);
    NVME_CAP_SET_CQR(n->bar.cap, 1);
    NVME_CAP_SET_TO(n->bar.cap, 0xf);
    NVME_CAP_SET_CSS(n->bar.cap, 1);
    NVME_CAP_SET_MPSMAX(n->bar.cap, 4);

    n->bar.vs = NVME_SPEC_VER;
    n->bar.intmc = n->bar.intms = 0;
}
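/*
 * Editorial note: decoded, the CAP values above mean: MQES = 0x7ff allows
 * queues of up to 2048 entries (the field is 0's based), CQR = 1 requires
 * physically contiguous queues, TO = 0xf advertises a 7.5 s worst-case
 * ready timeout (units of 500 ms), CSS = 1 selects the NVM command set,
 * and MPSMAX = 4 permits memory page sizes up to 64 KiB (2 ^ (12 + 4)).
 */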
static void nvme_realize(PCIDevice *pci_dev, Error **errp)
{
    NvmeCtrl *n = NVME(pci_dev);
    Error *local_err = NULL;
    int i;

    nvme_check_constraints(n, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    nvme_init_state(n);
    nvme_init_blk(n, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    nvme_init_pci(n, pci_dev, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    nvme_init_ctrl(n, pci_dev);

    for (i = 0; i < n->num_namespaces; i++) {
        nvme_init_namespace(n, &n->namespaces[i], &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
    }
}
static void nvme_exit(PCIDevice *pci_dev)
{
    NvmeCtrl *n = NVME(pci_dev);

    nvme_clear_ctrl(n);
    g_free(n->namespaces);
    g_free(n->cq);
    g_free(n->sq);
    g_free(n->aer_reqs);

    if (n->params.cmb_size_mb) {
        g_free(n->cmbuf);
    }

    if (n->pmrdev) {
        host_memory_backend_set_mapped(n->pmrdev, false);
    }
    msix_uninit_exclusive_bar(pci_dev);
}
static Property nvme_props[] = {
    DEFINE_BLOCK_PROPERTIES(NvmeCtrl, conf),
    DEFINE_PROP_LINK("pmrdev", NvmeCtrl, pmrdev, TYPE_MEMORY_BACKEND,
                     HostMemoryBackend *),
    DEFINE_PROP_STRING("serial", NvmeCtrl, params.serial),
    DEFINE_PROP_UINT32("cmb_size_mb", NvmeCtrl, params.cmb_size_mb, 0),
    DEFINE_PROP_UINT32("num_queues", NvmeCtrl, params.num_queues, 0),
    DEFINE_PROP_UINT32("max_ioqpairs", NvmeCtrl, params.max_ioqpairs, 64),
    DEFINE_PROP_UINT16("msix_qsize", NvmeCtrl, params.msix_qsize, 65),
    DEFINE_PROP_UINT8("aerl", NvmeCtrl, params.aerl, 3),
    DEFINE_PROP_UINT32("aer_max_queued", NvmeCtrl, params.aer_max_queued, 64),
    DEFINE_PROP_UINT8("mdts", NvmeCtrl, params.mdts, 7),
    DEFINE_PROP_END_OF_LIST(),
};
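/*
 * Editorial note: with only the mandatory drive and serial properties
 * set, the defaults above yield 64 I/O queue pairs, 65 MSI-X vectors
 * (one per I/O queue pair plus the admin pair), an AERL of 3 and an MDTS
 * of 7, with both CMB and PMR disabled.
 */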
static const VMStateDescription nvme_vmstate = {
    .name = "nvme",
    .unmigratable = 1,
};
static void nvme_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);
    PCIDeviceClass *pc = PCI_DEVICE_CLASS(oc);

    pc->realize = nvme_realize;
    pc->exit = nvme_exit;
    pc->class_id = PCI_CLASS_STORAGE_EXPRESS;
    pc->vendor_id = PCI_VENDOR_ID_INTEL;
    pc->device_id = 0x5845;
    pc->revision = 2;

    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
    dc->desc = "Non-Volatile Memory Express";
    device_class_set_props(dc, nvme_props);
    dc->vmsd = &nvme_vmstate;
}
static void nvme_instance_init(Object *obj)
{
    NvmeCtrl *s = NVME(obj);

    device_add_bootindex_property(obj, &s->conf.bootindex,
                                  "bootindex", "/namespace@1,0",
                                  DEVICE(obj));
}
static const TypeInfo nvme_info = {
    .name          = TYPE_NVME,
    .parent        = TYPE_PCI_DEVICE,
    .instance_size = sizeof(NvmeCtrl),
    .class_init    = nvme_class_init,
    .instance_init = nvme_instance_init,
    .interfaces = (InterfaceInfo[]) {
        { INTERFACE_PCIE_DEVICE },
        { }
    },
};

static void nvme_register_types(void)
{
    type_register_static(&nvme_info);
}

type_init(nvme_register_types)