/*
 * QEMU NVM Express Controller
 *
 * Copyright (c) 2012, Intel Corporation
 *
 * Written by Keith Busch <keith.busch@intel.com>
 *
 * This code is licensed under the GNU GPL v2 or later.
 */

/**
 * Reference Specs: http://www.nvmexpress.org, 1.2, 1.1, 1.0e
 *
 *  https://nvmexpress.org/developers/nvme-specification/
 *
 * Usage: add options:
 *      -drive file=<file>,if=none,id=<drive_id>
 *      -device nvme,drive=<drive_id>,serial=<serial>,id=<id[optional]>, \
 *              cmb_size_mb=<cmb_size_mb[optional]>, \
 *              [pmrdev=<mem_backend_file_id>,] \
 *              max_ioqpairs=<N[optional]>, \
 *              aerl=<N[optional]>, aer_max_queued=<N[optional]>, \
 *              mdts=<N[optional]>
 *
 * Note that cmb_size_mb denotes the size of the CMB in MB. The CMB is assumed
 * to be at offset 0 in BAR2 and supports only WDS, RDS and SQS for now.
 *
 * The cmb_size_mb= and pmrdev= options are mutually exclusive due to the
 * limited number of available BARs; cmb_size_mb= takes precedence over
 * pmrdev= when both are provided.
 *
 * Enabling pmr emulation can be achieved by pointing to memory-backend-file.
 * For example:
 *
 *   -object memory-backend-file,id=<mem_id>,share=on,mem-path=<file_path>, \
 *           size=<size> .... -device nvme,...,pmrdev=<mem_id>
 *
 * nvme device parameters
 * ~~~~~~~~~~~~~~~~~~~~~~
 * - `aerl`
 *   The Asynchronous Event Request Limit (AERL). Indicates the maximum number
 *   of concurrently outstanding Asynchronous Event Request commands supported
 *   by the controller. This is a 0's based value.
 *
 * - `aer_max_queued`
 *   This is the maximum number of events that the device will enqueue for
 *   completion when there are no outstanding AERs. When the maximum number of
 *   enqueued events is reached, subsequent events will be dropped.
 */
#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu/error-report.h"
#include "hw/block/block.h"
#include "hw/pci/msix.h"
#include "hw/pci/pci.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"
#include "sysemu/sysemu.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "sysemu/hostmem.h"
#include "sysemu/block-backend.h"
#include "exec/memory.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qemu/cutils.h"
#include "trace.h"
#include "nvme.h"
#define NVME_MAX_IOQPAIRS 0xffff
#define NVME_DB_SIZE  4
#define NVME_SPEC_VER 0x00010300
#define NVME_CMB_BIR 2
#define NVME_PMR_BIR 2
#define NVME_TEMPERATURE 0x143
#define NVME_TEMPERATURE_WARNING 0x157
#define NVME_TEMPERATURE_CRITICAL 0x175
#define NVME_NUM_FW_SLOTS 1
#define NVME_GUEST_ERR(trace, fmt, ...) \
    do { \
        (trace_##trace)(__VA_ARGS__); \
        qemu_log_mask(LOG_GUEST_ERROR, #trace \
                      " in %s: " fmt "\n", __func__, ## __VA_ARGS__); \
    } while (0)
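/*
 * Note that NVME_GUEST_ERR reports guest misbehaviour through two channels:
 * the named tracepoint and the LOG_GUEST_ERROR log mask, so a report is
 * visible both to tracing and to -d guest_errors.
 */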
static const bool nvme_feature_support[NVME_FID_MAX] = {
    [NVME_ARBITRATION]              = true,
    [NVME_POWER_MANAGEMENT]         = true,
    [NVME_TEMPERATURE_THRESHOLD]    = true,
    [NVME_ERROR_RECOVERY]           = true,
    [NVME_VOLATILE_WRITE_CACHE]     = true,
    [NVME_NUMBER_OF_QUEUES]         = true,
    [NVME_INTERRUPT_COALESCING]     = true,
    [NVME_INTERRUPT_VECTOR_CONF]    = true,
    [NVME_WRITE_ATOMICITY]          = true,
    [NVME_ASYNCHRONOUS_EVENT_CONF]  = true,
    [NVME_TIMESTAMP]                = true,
};
static const uint32_t nvme_feature_cap[NVME_FID_MAX] = {
    [NVME_TEMPERATURE_THRESHOLD]    = NVME_FEAT_CAP_CHANGE,
    [NVME_VOLATILE_WRITE_CACHE]     = NVME_FEAT_CAP_CHANGE,
    [NVME_NUMBER_OF_QUEUES]         = NVME_FEAT_CAP_CHANGE,
    [NVME_ASYNCHRONOUS_EVENT_CONF]  = NVME_FEAT_CAP_CHANGE,
    [NVME_TIMESTAMP]                = NVME_FEAT_CAP_CHANGE,
};
static void nvme_process_sq(void *opaque);
static uint16_t nvme_cid(NvmeRequest *req)
{
    if (!req) {
        return 0xffff;
    }

    return le16_to_cpu(req->cqe.cid);
}
static uint16_t nvme_sqid(NvmeRequest *req)
{
    return le16_to_cpu(req->sq->sqid);
}
static bool nvme_addr_is_cmb(NvmeCtrl *n, hwaddr addr)
{
    hwaddr low = n->ctrl_mem.addr;
    hwaddr hi  = n->ctrl_mem.addr + int128_get64(n->ctrl_mem.size);

    return addr >= low && addr < hi;
}
static inline void *nvme_addr_to_cmb(NvmeCtrl *n, hwaddr addr)
{
    assert(nvme_addr_is_cmb(n, addr));

    return &n->cmbuf[addr - n->ctrl_mem.addr];
}
static int nvme_addr_read(NvmeCtrl *n, hwaddr addr, void *buf, int size)
{
    if (n->bar.cmbsz && nvme_addr_is_cmb(n, addr)) {
        memcpy(buf, nvme_addr_to_cmb(n, addr), size);
        return 0;
    }

    return pci_dma_read(&n->parent_obj, addr, buf, size);
}
static int nvme_check_sqid(NvmeCtrl *n, uint16_t sqid)
{
    return sqid < n->params.max_ioqpairs + 1 && n->sq[sqid] != NULL ? 0 : -1;
}
static int nvme_check_cqid(NvmeCtrl *n, uint16_t cqid)
{
    return cqid < n->params.max_ioqpairs + 1 && n->cq[cqid] != NULL ? 0 : -1;
}
static void nvme_inc_cq_tail(NvmeCQueue *cq)
{
    cq->tail++;
    if (cq->tail >= cq->size) {
        cq->tail = 0;
        cq->phase = !cq->phase;
    }
}
static void nvme_inc_sq_head(NvmeSQueue *sq)
{
    sq->head = (sq->head + 1) % sq->size;
}
static uint8_t nvme_cq_full(NvmeCQueue *cq)
{
    return (cq->tail + 1) % cq->size == cq->head;
}
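/*
 * As with any ring buffer that distinguishes full from empty by comparing
 * indices alone, the queue counts as full with one slot still unused; a
 * queue of size 4 can therefore hold at most 3 posted entries at a time.
 */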
static uint8_t nvme_sq_empty(NvmeSQueue *sq)
{
    return sq->head == sq->tail;
}
static void nvme_irq_check(NvmeCtrl *n)
{
    if (msix_enabled(&(n->parent_obj))) {
        return;
    }
    if (~n->bar.intms & n->irq_status) {
        pci_irq_assert(&n->parent_obj);
    } else {
        pci_irq_deassert(&n->parent_obj);
    }
}
static void nvme_irq_assert(NvmeCtrl *n, NvmeCQueue *cq)
{
    if (cq->irq_enabled) {
        if (msix_enabled(&(n->parent_obj))) {
            trace_pci_nvme_irq_msix(cq->vector);
            msix_notify(&(n->parent_obj), cq->vector);
        } else {
            trace_pci_nvme_irq_pin();
            assert(cq->vector < 32);
            n->irq_status |= 1 << cq->vector;
            nvme_irq_check(n);
        }
    } else {
        trace_pci_nvme_irq_masked();
    }
}
static void nvme_irq_deassert(NvmeCtrl *n, NvmeCQueue *cq)
{
    if (cq->irq_enabled) {
        if (msix_enabled(&(n->parent_obj))) {
            return;
        }
        assert(cq->vector < 32);
        n->irq_status &= ~(1 << cq->vector);
        nvme_irq_check(n);
    }
}
static void nvme_req_clear(NvmeRequest *req)
{
    req->ns = NULL;
    memset(&req->cqe, 0x0, sizeof(req->cqe));
}
static void nvme_req_exit(NvmeRequest *req)
{
    if (req->qsg.sg) {
        qemu_sglist_destroy(&req->qsg);
    }

    if (req->iov.iov) {
        qemu_iovec_destroy(&req->iov);
    }
}
static uint16_t nvme_map_addr_cmb(NvmeCtrl *n, QEMUIOVector *iov, hwaddr addr,
                                  size_t len)
{
    if (!len) {
        return NVME_SUCCESS;
    }

    trace_pci_nvme_map_addr_cmb(addr, len);

    if (!nvme_addr_is_cmb(n, addr) || !nvme_addr_is_cmb(n, addr + len - 1)) {
        return NVME_DATA_TRAS_ERROR;
    }

    qemu_iovec_add(iov, nvme_addr_to_cmb(n, addr), len);

    return NVME_SUCCESS;
}
static uint16_t nvme_map_addr(NvmeCtrl *n, QEMUSGList *qsg, QEMUIOVector *iov,
                              hwaddr addr, size_t len)
{
    if (!len) {
        return NVME_SUCCESS;
    }

    trace_pci_nvme_map_addr(addr, len);

    if (nvme_addr_is_cmb(n, addr)) {
        if (qsg && qsg->sg) {
            return NVME_INVALID_USE_OF_CMB | NVME_DNR;
        }

        assert(iov);

        if (!iov->iov) {
            qemu_iovec_init(iov, 1);
        }

        return nvme_map_addr_cmb(n, iov, addr, len);
    }

    if (iov && iov->iov) {
        return NVME_INVALID_USE_OF_CMB | NVME_DNR;
    }

    assert(qsg);

    if (!qsg->sg) {
        pci_dma_sglist_init(qsg, &n->parent_obj, 1);
    }

    qemu_sglist_add(qsg, addr, len);

    return NVME_SUCCESS;
}
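/*
 * Note that nvme_map_addr() deliberately refuses transfers that mix CMB and
 * host memory: once a request has scatter/gather entries in host memory it
 * cannot add CMB segments, and vice versa; either direction fails with
 * Invalid Use of Controller Memory Buffer.
 */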
static uint16_t nvme_map_prp(NvmeCtrl *n, uint64_t prp1, uint64_t prp2,
                             uint32_t len, NvmeRequest *req)
{
    hwaddr trans_len = n->page_size - (prp1 % n->page_size);
    trans_len = MIN(len, trans_len);
    int num_prps = (len >> n->page_bits) + 1;
    uint16_t status;
    bool prp_list_in_cmb = false;
    int ret;

    QEMUSGList *qsg = &req->qsg;
    QEMUIOVector *iov = &req->iov;

    trace_pci_nvme_map_prp(trans_len, len, prp1, prp2, num_prps);

    if (unlikely(!prp1)) {
        trace_pci_nvme_err_invalid_prp();
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    if (nvme_addr_is_cmb(n, prp1)) {
        qemu_iovec_init(iov, num_prps);
    } else {
        pci_dma_sglist_init(qsg, &n->parent_obj, num_prps);
    }

    status = nvme_map_addr(n, qsg, iov, prp1, trans_len);
    if (status) {
        return status;
    }

    len -= trans_len;
    if (len) {
        if (unlikely(!prp2)) {
            trace_pci_nvme_err_invalid_prp2_missing();
            return NVME_INVALID_FIELD | NVME_DNR;
        }

        if (len > n->page_size) {
            uint64_t prp_list[n->max_prp_ents];
            uint32_t nents, prp_trans;
            int i = 0;

            if (nvme_addr_is_cmb(n, prp2)) {
                prp_list_in_cmb = true;
            }

            nents = (len + n->page_size - 1) >> n->page_bits;
            prp_trans = MIN(n->max_prp_ents, nents) * sizeof(uint64_t);
            ret = nvme_addr_read(n, prp2, (void *)prp_list, prp_trans);
            if (ret) {
                trace_pci_nvme_err_addr_read(prp2);
                return NVME_DATA_TRAS_ERROR;
            }
            while (len != 0) {
                uint64_t prp_ent = le64_to_cpu(prp_list[i]);

                if (i == n->max_prp_ents - 1 && len > n->page_size) {
                    if (unlikely(!prp_ent || prp_ent & (n->page_size - 1))) {
                        trace_pci_nvme_err_invalid_prplist_ent(prp_ent);
                        return NVME_INVALID_FIELD | NVME_DNR;
                    }

                    if (prp_list_in_cmb != nvme_addr_is_cmb(n, prp_ent)) {
                        return NVME_INVALID_USE_OF_CMB | NVME_DNR;
                    }

                    i = 0;
                    nents = (len + n->page_size - 1) >> n->page_bits;
                    prp_trans = MIN(n->max_prp_ents, nents) * sizeof(uint64_t);
                    ret = nvme_addr_read(n, prp_ent, (void *)prp_list,
                                         prp_trans);
                    if (ret) {
                        trace_pci_nvme_err_addr_read(prp_ent);
                        return NVME_DATA_TRAS_ERROR;
                    }
                    prp_ent = le64_to_cpu(prp_list[i]);
                }

                if (unlikely(!prp_ent || prp_ent & (n->page_size - 1))) {
                    trace_pci_nvme_err_invalid_prplist_ent(prp_ent);
                    return NVME_INVALID_FIELD | NVME_DNR;
                }

                trans_len = MIN(len, n->page_size);
                status = nvme_map_addr(n, qsg, iov, prp_ent, trans_len);
                if (status) {
                    return status;
                }

                len -= trans_len;
                i++;
            }
        } else {
            if (unlikely(prp2 & (n->page_size - 1))) {
                trace_pci_nvme_err_invalid_prp2_align(prp2);
                return NVME_INVALID_FIELD | NVME_DNR;
            }
            status = nvme_map_addr(n, qsg, iov, prp2, len);
            if (status) {
                return status;
            }
        }
    }

    return NVME_SUCCESS;
}
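/*
 * A sketch of the PRP layout handled above, per the NVM Express 1.3 spec:
 * PRP1 points at the first, possibly unaligned, page of the transfer; if the
 * data extends into a second page, PRP2 points at it directly, and if it
 * extends further, PRP2 instead points at a page-aligned list of PRP entries
 * whose final slot may chain to another such list.
 */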
static uint16_t nvme_dma_prp(NvmeCtrl *n, uint8_t *ptr, uint32_t len,
                             uint64_t prp1, uint64_t prp2, DMADirection dir,
                             NvmeRequest *req)
{
    uint16_t status = NVME_SUCCESS;

    status = nvme_map_prp(n, prp1, prp2, len, req);
    if (status) {
        return status;
    }

    /* assert that only one of qsg and iov carries data */
    assert((req->qsg.nsg > 0) != (req->iov.niov > 0));

    if (req->qsg.nsg > 0) {
        uint64_t residual;

        if (dir == DMA_DIRECTION_TO_DEVICE) {
            residual = dma_buf_write(ptr, len, &req->qsg);
        } else {
            residual = dma_buf_read(ptr, len, &req->qsg);
        }

        if (unlikely(residual)) {
            trace_pci_nvme_err_invalid_dma();
            status = NVME_INVALID_FIELD | NVME_DNR;
        }
    } else {
        size_t bytes;

        if (dir == DMA_DIRECTION_TO_DEVICE) {
            bytes = qemu_iovec_to_buf(&req->iov, 0, ptr, len);
        } else {
            bytes = qemu_iovec_from_buf(&req->iov, 0, ptr, len);
        }

        if (unlikely(bytes != len)) {
            trace_pci_nvme_err_invalid_dma();
            status = NVME_INVALID_FIELD | NVME_DNR;
        }
    }

    return status;
}
static uint16_t nvme_map_dptr(NvmeCtrl *n, size_t len, NvmeRequest *req)
{
    NvmeCmd *cmd = &req->cmd;
    uint64_t prp1 = le64_to_cpu(cmd->dptr.prp1);
    uint64_t prp2 = le64_to_cpu(cmd->dptr.prp2);

    return nvme_map_prp(n, prp1, prp2, len, req);
}
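/*
 * Note that only PRPs are handled here: this controller model maps the
 * command's data pointer through nvme_map_prp() and does not implement SGLs.
 */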
static void nvme_post_cqes(void *opaque)
{
    NvmeCQueue *cq = opaque;
    NvmeCtrl *n = cq->ctrl;
    NvmeRequest *req, *next;
    int ret;

    QTAILQ_FOREACH_SAFE(req, &cq->req_list, entry, next) {
        NvmeSQueue *sq;
        hwaddr addr;

        if (nvme_cq_full(cq)) {
            break;
        }

        sq = req->sq;
        req->cqe.status = cpu_to_le16((req->status << 1) | cq->phase);
        req->cqe.sq_id = cpu_to_le16(sq->sqid);
        req->cqe.sq_head = cpu_to_le16(sq->head);
        addr = cq->dma_addr + cq->tail * n->cqe_size;
        ret = pci_dma_write(&n->parent_obj, addr, (void *)&req->cqe,
                            sizeof(req->cqe));
        if (ret) {
            trace_pci_nvme_err_addr_write(addr);
            trace_pci_nvme_err_cfs();
            n->bar.csts = NVME_CSTS_FAILED;
            break;
        }
        QTAILQ_REMOVE(&cq->req_list, req, entry);
        nvme_inc_cq_tail(cq);
        nvme_req_exit(req);
        QTAILQ_INSERT_TAIL(&sq->req_list, req, entry);
    }
    if (cq->tail != cq->head) {
        nvme_irq_assert(n, cq);
    }
}
static void nvme_enqueue_req_completion(NvmeCQueue *cq, NvmeRequest *req)
{
    assert(cq->cqid == req->sq->cqid);
    trace_pci_nvme_enqueue_req_completion(nvme_cid(req), cq->cqid,
                                          req->status);
    QTAILQ_REMOVE(&req->sq->out_req_list, req, entry);
    QTAILQ_INSERT_TAIL(&cq->req_list, req, entry);
    timer_mod(cq->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 500);
}
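/*
 * Completions are intentionally not posted synchronously: the request is
 * parked on the CQ's list and nvme_post_cqes() runs from a virtual-clock
 * timer armed 500 ns out, which batches CQE writes and interrupt assertion.
 */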
static void nvme_process_aers(void *opaque)
{
    NvmeCtrl *n = opaque;
    NvmeAsyncEvent *event, *next;

    trace_pci_nvme_process_aers(n->aer_queued);

    QTAILQ_FOREACH_SAFE(event, &n->aer_queue, entry, next) {
        NvmeRequest *req;
        NvmeAerResult *result;

        /* can't post cqe if there is nothing to complete */
        if (!n->outstanding_aers) {
            trace_pci_nvme_no_outstanding_aers();
            break;
        }

        /* ignore if masked (cqe posted, but event not cleared) */
        if (n->aer_mask & (1 << event->result.event_type)) {
            trace_pci_nvme_aer_masked(event->result.event_type, n->aer_mask);
            continue;
        }

        QTAILQ_REMOVE(&n->aer_queue, event, entry);
        n->aer_queued--;

        n->aer_mask |= 1 << event->result.event_type;
        n->outstanding_aers--;

        req = n->aer_reqs[n->outstanding_aers];

        result = (NvmeAerResult *) &req->cqe.result;
        result->event_type = event->result.event_type;
        result->event_info = event->result.event_info;
        result->log_page = event->result.log_page;
        g_free(event);

        req->status = NVME_SUCCESS;

        trace_pci_nvme_aer_post_cqe(result->event_type, result->event_info,
                                    result->log_page);

        nvme_enqueue_req_completion(&n->admin_cq, req);
    }
}
static void nvme_enqueue_event(NvmeCtrl *n, uint8_t event_type,
                               uint8_t event_info, uint8_t log_page)
{
    NvmeAsyncEvent *event;

    trace_pci_nvme_enqueue_event(event_type, event_info, log_page);

    if (n->aer_queued == n->params.aer_max_queued) {
        trace_pci_nvme_enqueue_event_noqueue(n->aer_queued);
        return;
    }

    event = g_new(NvmeAsyncEvent, 1);
    event->result = (NvmeAerResult) {
        .event_type = event_type,
        .event_info = event_info,
        .log_page   = log_page,
    };

    QTAILQ_INSERT_TAIL(&n->aer_queue, event, entry);
    n->aer_queued++;

    nvme_process_aers(n);
}
static void nvme_clear_events(NvmeCtrl *n, uint8_t event_type)
{
    n->aer_mask &= ~(1 << event_type);
    if (!QTAILQ_EMPTY(&n->aer_queue)) {
        nvme_process_aers(n);
    }
}
static inline uint16_t nvme_check_mdts(NvmeCtrl *n, size_t len)
{
    uint8_t mdts = n->params.mdts;

    if (mdts && len > n->page_size << mdts) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    return NVME_SUCCESS;
}
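/*
 * MDTS is a power-of-two multiplier of the page size, so the largest
 * permitted transfer is page_size << mdts; e.g. mdts=7 with 4 KiB pages caps
 * transfers at 512 KiB. A zero mdts leaves transfers unlimited.
 */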
static inline uint16_t nvme_check_bounds(NvmeCtrl *n, NvmeNamespace *ns,
                                         uint64_t slba, uint32_t nlb)
{
    uint64_t nsze = le64_to_cpu(ns->id_ns.nsze);

    if (unlikely(UINT64_MAX - slba < nlb || slba + nlb > nsze)) {
        return NVME_LBA_RANGE | NVME_DNR;
    }

    return NVME_SUCCESS;
}
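/*
 * The UINT64_MAX - slba < nlb term guards against unsigned wrap-around of
 * slba + nlb; e.g. slba = UINT64_MAX with nlb = 2 would otherwise wrap to 1
 * and wrongly pass the nsze comparison.
 */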
static void nvme_rw_cb(void *opaque, int ret)
{
    NvmeRequest *req = opaque;
    NvmeSQueue *sq = req->sq;
    NvmeCtrl *n = sq->ctrl;
    NvmeCQueue *cq = n->cq[sq->cqid];

    trace_pci_nvme_rw_cb(nvme_cid(req));

    if (!ret) {
        block_acct_done(blk_get_stats(n->conf.blk), &req->acct);
        req->status = NVME_SUCCESS;
    } else {
        block_acct_failed(blk_get_stats(n->conf.blk), &req->acct);
        req->status = NVME_INTERNAL_DEV_ERROR;
    }

    nvme_enqueue_req_completion(cq, req);
}
static uint16_t nvme_flush(NvmeCtrl *n, NvmeRequest *req)
{
    block_acct_start(blk_get_stats(n->conf.blk), &req->acct, 0,
                     BLOCK_ACCT_FLUSH);
    req->aiocb = blk_aio_flush(n->conf.blk, nvme_rw_cb, req);

    return NVME_NO_COMPLETE;
}
static uint16_t nvme_write_zeroes(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd;
    NvmeNamespace *ns = req->ns;
    const uint8_t lba_index = NVME_ID_NS_FLBAS_INDEX(ns->id_ns.flbas);
    const uint8_t data_shift = ns->id_ns.lbaf[lba_index].ds;
    uint64_t slba = le64_to_cpu(rw->slba);
    uint32_t nlb  = le16_to_cpu(rw->nlb) + 1;
    uint64_t offset = slba << data_shift;
    uint32_t count = nlb << data_shift;
    uint16_t status;

    trace_pci_nvme_write_zeroes(nvme_cid(req), slba, nlb);

    status = nvme_check_bounds(n, ns, slba, nlb);
    if (status) {
        trace_pci_nvme_err_invalid_lba_range(slba, nlb, ns->id_ns.nsze);
        return status;
    }

    block_acct_start(blk_get_stats(n->conf.blk), &req->acct, 0,
                     BLOCK_ACCT_WRITE);
    req->aiocb = blk_aio_pwrite_zeroes(n->conf.blk, offset, count,
                                       BDRV_REQ_MAY_UNMAP, nvme_rw_cb, req);
    return NVME_NO_COMPLETE;
}
static uint16_t nvme_rw(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd;
    NvmeNamespace *ns = req->ns;
    uint32_t nlb  = le32_to_cpu(rw->nlb) + 1;
    uint64_t slba = le64_to_cpu(rw->slba);

    uint8_t lba_index  = NVME_ID_NS_FLBAS_INDEX(ns->id_ns.flbas);
    uint8_t data_shift = ns->id_ns.lbaf[lba_index].ds;
    uint64_t data_size = (uint64_t)nlb << data_shift;
    uint64_t data_offset = slba << data_shift;
    int is_write = rw->opcode == NVME_CMD_WRITE ? 1 : 0;
    enum BlockAcctType acct = is_write ? BLOCK_ACCT_WRITE : BLOCK_ACCT_READ;
    uint16_t status;

    trace_pci_nvme_rw(is_write ? "write" : "read", nlb, data_size, slba);

    status = nvme_check_mdts(n, data_size);
    if (status) {
        trace_pci_nvme_err_mdts(nvme_cid(req), data_size);
        block_acct_invalid(blk_get_stats(n->conf.blk), acct);
        return status;
    }

    status = nvme_check_bounds(n, ns, slba, nlb);
    if (status) {
        trace_pci_nvme_err_invalid_lba_range(slba, nlb, ns->id_ns.nsze);
        block_acct_invalid(blk_get_stats(n->conf.blk), acct);
        return status;
    }

    if (nvme_map_dptr(n, data_size, req)) {
        block_acct_invalid(blk_get_stats(n->conf.blk), acct);
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    if (req->qsg.nsg > 0) {
        block_acct_start(blk_get_stats(n->conf.blk), &req->acct, req->qsg.size,
                         acct);
        req->aiocb = is_write ?
            dma_blk_write(n->conf.blk, &req->qsg, data_offset,
                          BDRV_SECTOR_SIZE, nvme_rw_cb, req) :
            dma_blk_read(n->conf.blk, &req->qsg, data_offset,
                         BDRV_SECTOR_SIZE, nvme_rw_cb, req);
    } else {
        block_acct_start(blk_get_stats(n->conf.blk), &req->acct, req->iov.size,
                         acct);
        req->aiocb = is_write ?
            blk_aio_pwritev(n->conf.blk, data_offset, &req->iov, 0, nvme_rw_cb,
                            req) :
            blk_aio_preadv(n->conf.blk, data_offset, &req->iov, 0, nvme_rw_cb,
                           req);
    }

    return NVME_NO_COMPLETE;
}
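/*
 * The two submission paths above reflect where nvme_map_dptr() placed the
 * data: requests targeting host memory end up in the QEMUSGList and go
 * through the dma_blk_* helpers, while requests targeting the CMB end up in
 * the QEMUIOVector and go through the plain blk_aio_* calls.
 */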
static uint16_t nvme_io_cmd(NvmeCtrl *n, NvmeRequest *req)
{
    uint32_t nsid = le32_to_cpu(req->cmd.nsid);

    trace_pci_nvme_io_cmd(nvme_cid(req), nsid, nvme_sqid(req),
                          req->cmd.opcode);

    if (unlikely(nsid == 0 || nsid > n->num_namespaces)) {
        trace_pci_nvme_err_invalid_ns(nsid, n->num_namespaces);
        return NVME_INVALID_NSID | NVME_DNR;
    }

    req->ns = &n->namespaces[nsid - 1];
    switch (req->cmd.opcode) {
    case NVME_CMD_FLUSH:
        return nvme_flush(n, req);
    case NVME_CMD_WRITE_ZEROES:
        return nvme_write_zeroes(n, req);
    case NVME_CMD_WRITE:
    case NVME_CMD_READ:
        return nvme_rw(n, req);
    default:
        trace_pci_nvme_err_invalid_opc(req->cmd.opcode);
        return NVME_INVALID_OPCODE | NVME_DNR;
    }
}
754 static void nvme_free_sq(NvmeSQueue
*sq
, NvmeCtrl
*n
)
756 n
->sq
[sq
->sqid
] = NULL
;
757 timer_del(sq
->timer
);
758 timer_free(sq
->timer
);
765 static uint16_t nvme_del_sq(NvmeCtrl
*n
, NvmeRequest
*req
)
767 NvmeDeleteQ
*c
= (NvmeDeleteQ
*)&req
->cmd
;
768 NvmeRequest
*r
, *next
;
771 uint16_t qid
= le16_to_cpu(c
->qid
);
773 if (unlikely(!qid
|| nvme_check_sqid(n
, qid
))) {
774 trace_pci_nvme_err_invalid_del_sq(qid
);
775 return NVME_INVALID_QID
| NVME_DNR
;
778 trace_pci_nvme_del_sq(qid
);
781 while (!QTAILQ_EMPTY(&sq
->out_req_list
)) {
782 r
= QTAILQ_FIRST(&sq
->out_req_list
);
784 blk_aio_cancel(r
->aiocb
);
786 if (!nvme_check_cqid(n
, sq
->cqid
)) {
787 cq
= n
->cq
[sq
->cqid
];
788 QTAILQ_REMOVE(&cq
->sq_list
, sq
, entry
);
791 QTAILQ_FOREACH_SAFE(r
, &cq
->req_list
, entry
, next
) {
793 QTAILQ_REMOVE(&cq
->req_list
, r
, entry
);
794 QTAILQ_INSERT_TAIL(&sq
->req_list
, r
, entry
);
803 static void nvme_init_sq(NvmeSQueue
*sq
, NvmeCtrl
*n
, uint64_t dma_addr
,
804 uint16_t sqid
, uint16_t cqid
, uint16_t size
)
810 sq
->dma_addr
= dma_addr
;
814 sq
->head
= sq
->tail
= 0;
815 sq
->io_req
= g_new0(NvmeRequest
, sq
->size
);
817 QTAILQ_INIT(&sq
->req_list
);
818 QTAILQ_INIT(&sq
->out_req_list
);
819 for (i
= 0; i
< sq
->size
; i
++) {
820 sq
->io_req
[i
].sq
= sq
;
821 QTAILQ_INSERT_TAIL(&(sq
->req_list
), &sq
->io_req
[i
], entry
);
823 sq
->timer
= timer_new_ns(QEMU_CLOCK_VIRTUAL
, nvme_process_sq
, sq
);
827 QTAILQ_INSERT_TAIL(&(cq
->sq_list
), sq
, entry
);
831 static uint16_t nvme_create_sq(NvmeCtrl
*n
, NvmeRequest
*req
)
834 NvmeCreateSq
*c
= (NvmeCreateSq
*)&req
->cmd
;
836 uint16_t cqid
= le16_to_cpu(c
->cqid
);
837 uint16_t sqid
= le16_to_cpu(c
->sqid
);
838 uint16_t qsize
= le16_to_cpu(c
->qsize
);
839 uint16_t qflags
= le16_to_cpu(c
->sq_flags
);
840 uint64_t prp1
= le64_to_cpu(c
->prp1
);
842 trace_pci_nvme_create_sq(prp1
, sqid
, cqid
, qsize
, qflags
);
844 if (unlikely(!cqid
|| nvme_check_cqid(n
, cqid
))) {
845 trace_pci_nvme_err_invalid_create_sq_cqid(cqid
);
846 return NVME_INVALID_CQID
| NVME_DNR
;
848 if (unlikely(!sqid
|| !nvme_check_sqid(n
, sqid
))) {
849 trace_pci_nvme_err_invalid_create_sq_sqid(sqid
);
850 return NVME_INVALID_QID
| NVME_DNR
;
852 if (unlikely(!qsize
|| qsize
> NVME_CAP_MQES(n
->bar
.cap
))) {
853 trace_pci_nvme_err_invalid_create_sq_size(qsize
);
854 return NVME_MAX_QSIZE_EXCEEDED
| NVME_DNR
;
856 if (unlikely(!prp1
|| prp1
& (n
->page_size
- 1))) {
857 trace_pci_nvme_err_invalid_create_sq_addr(prp1
);
858 return NVME_INVALID_FIELD
| NVME_DNR
;
860 if (unlikely(!(NVME_SQ_FLAGS_PC(qflags
)))) {
861 trace_pci_nvme_err_invalid_create_sq_qflags(NVME_SQ_FLAGS_PC(qflags
));
862 return NVME_INVALID_FIELD
| NVME_DNR
;
864 sq
= g_malloc0(sizeof(*sq
));
865 nvme_init_sq(sq
, n
, prp1
, sqid
, cqid
, qsize
+ 1);
869 static uint16_t nvme_smart_info(NvmeCtrl
*n
, uint8_t rae
, uint32_t buf_len
,
870 uint64_t off
, NvmeRequest
*req
)
872 NvmeCmd
*cmd
= &req
->cmd
;
873 uint64_t prp1
= le64_to_cpu(cmd
->dptr
.prp1
);
874 uint64_t prp2
= le64_to_cpu(cmd
->dptr
.prp2
);
875 uint32_t nsid
= le32_to_cpu(cmd
->nsid
);
879 uint64_t units_read
= 0, units_written
= 0;
880 uint64_t read_commands
= 0, write_commands
= 0;
884 if (nsid
&& nsid
!= 0xffffffff) {
885 return NVME_INVALID_FIELD
| NVME_DNR
;
888 s
= blk_get_stats(n
->conf
.blk
);
890 units_read
= s
->nr_bytes
[BLOCK_ACCT_READ
] >> BDRV_SECTOR_BITS
;
891 units_written
= s
->nr_bytes
[BLOCK_ACCT_WRITE
] >> BDRV_SECTOR_BITS
;
892 read_commands
= s
->nr_ops
[BLOCK_ACCT_READ
];
893 write_commands
= s
->nr_ops
[BLOCK_ACCT_WRITE
];
895 if (off
> sizeof(smart
)) {
896 return NVME_INVALID_FIELD
| NVME_DNR
;
899 trans_len
= MIN(sizeof(smart
) - off
, buf_len
);
901 memset(&smart
, 0x0, sizeof(smart
));
903 smart
.data_units_read
[0] = cpu_to_le64(DIV_ROUND_UP(units_read
, 1000));
904 smart
.data_units_written
[0] = cpu_to_le64(DIV_ROUND_UP(units_written
,
906 smart
.host_read_commands
[0] = cpu_to_le64(read_commands
);
907 smart
.host_write_commands
[0] = cpu_to_le64(write_commands
);
909 smart
.temperature
= cpu_to_le16(n
->temperature
);
911 if ((n
->temperature
>= n
->features
.temp_thresh_hi
) ||
912 (n
->temperature
<= n
->features
.temp_thresh_low
)) {
913 smart
.critical_warning
|= NVME_SMART_TEMPERATURE
;
916 current_ms
= qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL
);
917 smart
.power_on_hours
[0] =
918 cpu_to_le64((((current_ms
- n
->starttime_ms
) / 1000) / 60) / 60);
921 nvme_clear_events(n
, NVME_AER_TYPE_SMART
);
924 return nvme_dma_prp(n
, (uint8_t *) &smart
+ off
, trans_len
, prp1
, prp2
,
925 DMA_DIRECTION_FROM_DEVICE
, req
);
928 static uint16_t nvme_fw_log_info(NvmeCtrl
*n
, uint32_t buf_len
, uint64_t off
,
932 NvmeCmd
*cmd
= &req
->cmd
;
933 uint64_t prp1
= le64_to_cpu(cmd
->dptr
.prp1
);
934 uint64_t prp2
= le64_to_cpu(cmd
->dptr
.prp2
);
935 NvmeFwSlotInfoLog fw_log
= {
939 strpadcpy((char *)&fw_log
.frs1
, sizeof(fw_log
.frs1
), "1.0", ' ');
941 if (off
> sizeof(fw_log
)) {
942 return NVME_INVALID_FIELD
| NVME_DNR
;
945 trans_len
= MIN(sizeof(fw_log
) - off
, buf_len
);
947 return nvme_dma_prp(n
, (uint8_t *) &fw_log
+ off
, trans_len
, prp1
, prp2
,
948 DMA_DIRECTION_FROM_DEVICE
, req
);
951 static uint16_t nvme_error_info(NvmeCtrl
*n
, uint8_t rae
, uint32_t buf_len
,
952 uint64_t off
, NvmeRequest
*req
)
955 NvmeCmd
*cmd
= &req
->cmd
;
956 uint64_t prp1
= le64_to_cpu(cmd
->dptr
.prp1
);
957 uint64_t prp2
= le64_to_cpu(cmd
->dptr
.prp2
);
961 nvme_clear_events(n
, NVME_AER_TYPE_ERROR
);
964 if (off
> sizeof(errlog
)) {
965 return NVME_INVALID_FIELD
| NVME_DNR
;
968 memset(&errlog
, 0x0, sizeof(errlog
));
970 trans_len
= MIN(sizeof(errlog
) - off
, buf_len
);
972 return nvme_dma_prp(n
, (uint8_t *)&errlog
, trans_len
, prp1
, prp2
,
973 DMA_DIRECTION_FROM_DEVICE
, req
);
976 static uint16_t nvme_get_log(NvmeCtrl
*n
, NvmeRequest
*req
)
978 NvmeCmd
*cmd
= &req
->cmd
;
980 uint32_t dw10
= le32_to_cpu(cmd
->cdw10
);
981 uint32_t dw11
= le32_to_cpu(cmd
->cdw11
);
982 uint32_t dw12
= le32_to_cpu(cmd
->cdw12
);
983 uint32_t dw13
= le32_to_cpu(cmd
->cdw13
);
984 uint8_t lid
= dw10
& 0xff;
985 uint8_t lsp
= (dw10
>> 8) & 0xf;
986 uint8_t rae
= (dw10
>> 15) & 0x1;
987 uint32_t numdl
, numdu
;
988 uint64_t off
, lpol
, lpou
;
992 numdl
= (dw10
>> 16);
993 numdu
= (dw11
& 0xffff);
997 len
= (((numdu
<< 16) | numdl
) + 1) << 2;
998 off
= (lpou
<< 32ULL) | lpol
;
1001 return NVME_INVALID_FIELD
| NVME_DNR
;
1004 trace_pci_nvme_get_log(nvme_cid(req
), lid
, lsp
, rae
, len
, off
);
1006 status
= nvme_check_mdts(n
, len
);
1008 trace_pci_nvme_err_mdts(nvme_cid(req
), len
);
1013 case NVME_LOG_ERROR_INFO
:
1014 return nvme_error_info(n
, rae
, len
, off
, req
);
1015 case NVME_LOG_SMART_INFO
:
1016 return nvme_smart_info(n
, rae
, len
, off
, req
);
1017 case NVME_LOG_FW_SLOT_INFO
:
1018 return nvme_fw_log_info(n
, len
, off
, req
);
1020 trace_pci_nvme_err_invalid_log_page(nvme_cid(req
), lid
);
1021 return NVME_INVALID_FIELD
| NVME_DNR
;
1025 static void nvme_free_cq(NvmeCQueue
*cq
, NvmeCtrl
*n
)
1027 n
->cq
[cq
->cqid
] = NULL
;
1028 timer_del(cq
->timer
);
1029 timer_free(cq
->timer
);
1030 msix_vector_unuse(&n
->parent_obj
, cq
->vector
);
1036 static uint16_t nvme_del_cq(NvmeCtrl
*n
, NvmeRequest
*req
)
1038 NvmeDeleteQ
*c
= (NvmeDeleteQ
*)&req
->cmd
;
1040 uint16_t qid
= le16_to_cpu(c
->qid
);
1042 if (unlikely(!qid
|| nvme_check_cqid(n
, qid
))) {
1043 trace_pci_nvme_err_invalid_del_cq_cqid(qid
);
1044 return NVME_INVALID_CQID
| NVME_DNR
;
1048 if (unlikely(!QTAILQ_EMPTY(&cq
->sq_list
))) {
1049 trace_pci_nvme_err_invalid_del_cq_notempty(qid
);
1050 return NVME_INVALID_QUEUE_DEL
;
1052 nvme_irq_deassert(n
, cq
);
1053 trace_pci_nvme_del_cq(qid
);
1054 nvme_free_cq(cq
, n
);
1055 return NVME_SUCCESS
;
1058 static void nvme_init_cq(NvmeCQueue
*cq
, NvmeCtrl
*n
, uint64_t dma_addr
,
1059 uint16_t cqid
, uint16_t vector
, uint16_t size
, uint16_t irq_enabled
)
1063 ret
= msix_vector_use(&n
->parent_obj
, vector
);
1068 cq
->dma_addr
= dma_addr
;
1070 cq
->irq_enabled
= irq_enabled
;
1071 cq
->vector
= vector
;
1072 cq
->head
= cq
->tail
= 0;
1073 QTAILQ_INIT(&cq
->req_list
);
1074 QTAILQ_INIT(&cq
->sq_list
);
1076 cq
->timer
= timer_new_ns(QEMU_CLOCK_VIRTUAL
, nvme_post_cqes
, cq
);
1079 static uint16_t nvme_create_cq(NvmeCtrl
*n
, NvmeRequest
*req
)
1082 NvmeCreateCq
*c
= (NvmeCreateCq
*)&req
->cmd
;
1083 uint16_t cqid
= le16_to_cpu(c
->cqid
);
1084 uint16_t vector
= le16_to_cpu(c
->irq_vector
);
1085 uint16_t qsize
= le16_to_cpu(c
->qsize
);
1086 uint16_t qflags
= le16_to_cpu(c
->cq_flags
);
1087 uint64_t prp1
= le64_to_cpu(c
->prp1
);
1089 trace_pci_nvme_create_cq(prp1
, cqid
, vector
, qsize
, qflags
,
1090 NVME_CQ_FLAGS_IEN(qflags
) != 0);
1092 if (unlikely(!cqid
|| !nvme_check_cqid(n
, cqid
))) {
1093 trace_pci_nvme_err_invalid_create_cq_cqid(cqid
);
1094 return NVME_INVALID_CQID
| NVME_DNR
;
1096 if (unlikely(!qsize
|| qsize
> NVME_CAP_MQES(n
->bar
.cap
))) {
1097 trace_pci_nvme_err_invalid_create_cq_size(qsize
);
1098 return NVME_MAX_QSIZE_EXCEEDED
| NVME_DNR
;
1100 if (unlikely(!prp1
)) {
1101 trace_pci_nvme_err_invalid_create_cq_addr(prp1
);
1102 return NVME_INVALID_FIELD
| NVME_DNR
;
1104 if (unlikely(!msix_enabled(&n
->parent_obj
) && vector
)) {
1105 trace_pci_nvme_err_invalid_create_cq_vector(vector
);
1106 return NVME_INVALID_IRQ_VECTOR
| NVME_DNR
;
1108 if (unlikely(vector
>= n
->params
.msix_qsize
)) {
1109 trace_pci_nvme_err_invalid_create_cq_vector(vector
);
1110 return NVME_INVALID_IRQ_VECTOR
| NVME_DNR
;
1112 if (unlikely(!(NVME_CQ_FLAGS_PC(qflags
)))) {
1113 trace_pci_nvme_err_invalid_create_cq_qflags(NVME_CQ_FLAGS_PC(qflags
));
1114 return NVME_INVALID_FIELD
| NVME_DNR
;
1117 cq
= g_malloc0(sizeof(*cq
));
1118 nvme_init_cq(cq
, n
, prp1
, cqid
, vector
, qsize
+ 1,
1119 NVME_CQ_FLAGS_IEN(qflags
));
1122 * It is only required to set qs_created when creating a completion queue;
1123 * creating a submission queue without a matching completion queue will
1126 n
->qs_created
= true;
1127 return NVME_SUCCESS
;
1130 static uint16_t nvme_identify_ctrl(NvmeCtrl
*n
, NvmeRequest
*req
)
1132 NvmeIdentify
*c
= (NvmeIdentify
*)&req
->cmd
;
1133 uint64_t prp1
= le64_to_cpu(c
->prp1
);
1134 uint64_t prp2
= le64_to_cpu(c
->prp2
);
1136 trace_pci_nvme_identify_ctrl();
1138 return nvme_dma_prp(n
, (uint8_t *)&n
->id_ctrl
, sizeof(n
->id_ctrl
), prp1
,
1139 prp2
, DMA_DIRECTION_FROM_DEVICE
, req
);
1142 static uint16_t nvme_identify_ns(NvmeCtrl
*n
, NvmeRequest
*req
)
1145 NvmeIdentify
*c
= (NvmeIdentify
*)&req
->cmd
;
1146 uint32_t nsid
= le32_to_cpu(c
->nsid
);
1147 uint64_t prp1
= le64_to_cpu(c
->prp1
);
1148 uint64_t prp2
= le64_to_cpu(c
->prp2
);
1150 trace_pci_nvme_identify_ns(nsid
);
1152 if (unlikely(nsid
== 0 || nsid
> n
->num_namespaces
)) {
1153 trace_pci_nvme_err_invalid_ns(nsid
, n
->num_namespaces
);
1154 return NVME_INVALID_NSID
| NVME_DNR
;
1157 ns
= &n
->namespaces
[nsid
- 1];
1159 return nvme_dma_prp(n
, (uint8_t *)&ns
->id_ns
, sizeof(ns
->id_ns
), prp1
,
1160 prp2
, DMA_DIRECTION_FROM_DEVICE
, req
);
1163 static uint16_t nvme_identify_nslist(NvmeCtrl
*n
, NvmeRequest
*req
)
1165 NvmeIdentify
*c
= (NvmeIdentify
*)&req
->cmd
;
1166 static const int data_len
= NVME_IDENTIFY_DATA_SIZE
;
1167 uint32_t min_nsid
= le32_to_cpu(c
->nsid
);
1168 uint64_t prp1
= le64_to_cpu(c
->prp1
);
1169 uint64_t prp2
= le64_to_cpu(c
->prp2
);
1174 trace_pci_nvme_identify_nslist(min_nsid
);
1177 * Both 0xffffffff (NVME_NSID_BROADCAST) and 0xfffffffe are invalid values
1178 * since the Active Namespace ID List should return namespaces with ids
1179 * *higher* than the NSID specified in the command. This is also specified
1180 * in the spec (NVM Express v1.3d, Section 5.15.4).
1182 if (min_nsid
>= NVME_NSID_BROADCAST
- 1) {
1183 return NVME_INVALID_NSID
| NVME_DNR
;
1186 list
= g_malloc0(data_len
);
1187 for (i
= 0; i
< n
->num_namespaces
; i
++) {
1191 list
[j
++] = cpu_to_le32(i
+ 1);
1192 if (j
== data_len
/ sizeof(uint32_t)) {
1196 ret
= nvme_dma_prp(n
, (uint8_t *)list
, data_len
, prp1
, prp2
,
1197 DMA_DIRECTION_FROM_DEVICE
, req
);
1202 static uint16_t nvme_identify_ns_descr_list(NvmeCtrl
*n
, NvmeRequest
*req
)
1204 NvmeIdentify
*c
= (NvmeIdentify
*)&req
->cmd
;
1205 uint32_t nsid
= le32_to_cpu(c
->nsid
);
1206 uint64_t prp1
= le64_to_cpu(c
->prp1
);
1207 uint64_t prp2
= le64_to_cpu(c
->prp2
);
1209 uint8_t list
[NVME_IDENTIFY_DATA_SIZE
];
1218 struct data
*ns_descrs
= (struct data
*)list
;
1220 trace_pci_nvme_identify_ns_descr_list(nsid
);
1222 if (unlikely(nsid
== 0 || nsid
> n
->num_namespaces
)) {
1223 trace_pci_nvme_err_invalid_ns(nsid
, n
->num_namespaces
);
1224 return NVME_INVALID_NSID
| NVME_DNR
;
1227 memset(list
, 0x0, sizeof(list
));
1230 * Because the NGUID and EUI64 fields are 0 in the Identify Namespace data
1231 * structure, a Namespace UUID (nidt = 0x3) must be reported in the
1232 * Namespace Identification Descriptor. Add a very basic Namespace UUID
1235 ns_descrs
->uuid
.hdr
.nidt
= NVME_NIDT_UUID
;
1236 ns_descrs
->uuid
.hdr
.nidl
= NVME_NIDT_UUID_LEN
;
1237 stl_be_p(&ns_descrs
->uuid
.v
, nsid
);
1239 return nvme_dma_prp(n
, list
, NVME_IDENTIFY_DATA_SIZE
, prp1
, prp2
,
1240 DMA_DIRECTION_FROM_DEVICE
, req
);
1243 static uint16_t nvme_identify(NvmeCtrl
*n
, NvmeRequest
*req
)
1245 NvmeIdentify
*c
= (NvmeIdentify
*)&req
->cmd
;
1247 switch (le32_to_cpu(c
->cns
)) {
1248 case NVME_ID_CNS_NS
:
1249 return nvme_identify_ns(n
, req
);
1250 case NVME_ID_CNS_CTRL
:
1251 return nvme_identify_ctrl(n
, req
);
1252 case NVME_ID_CNS_NS_ACTIVE_LIST
:
1253 return nvme_identify_nslist(n
, req
);
1254 case NVME_ID_CNS_NS_DESCR_LIST
:
1255 return nvme_identify_ns_descr_list(n
, req
);
1257 trace_pci_nvme_err_invalid_identify_cns(le32_to_cpu(c
->cns
));
1258 return NVME_INVALID_FIELD
| NVME_DNR
;
1262 static uint16_t nvme_abort(NvmeCtrl
*n
, NvmeRequest
*req
)
1264 uint16_t sqid
= le32_to_cpu(req
->cmd
.cdw10
) & 0xffff;
1266 req
->cqe
.result
= 1;
1267 if (nvme_check_sqid(n
, sqid
)) {
1268 return NVME_INVALID_FIELD
| NVME_DNR
;
1271 return NVME_SUCCESS
;
1274 static inline void nvme_set_timestamp(NvmeCtrl
*n
, uint64_t ts
)
1276 trace_pci_nvme_setfeat_timestamp(ts
);
1278 n
->host_timestamp
= le64_to_cpu(ts
);
1279 n
->timestamp_set_qemu_clock_ms
= qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL
);
1282 static inline uint64_t nvme_get_timestamp(const NvmeCtrl
*n
)
1284 uint64_t current_time
= qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL
);
1285 uint64_t elapsed_time
= current_time
- n
->timestamp_set_qemu_clock_ms
;
1287 union nvme_timestamp
{
1289 uint64_t timestamp
:48;
1297 union nvme_timestamp ts
;
1299 ts
.timestamp
= n
->host_timestamp
+ elapsed_time
;
1301 /* If the host timestamp is non-zero, set the timestamp origin */
1302 ts
.origin
= n
->host_timestamp
? 0x01 : 0x00;
1304 trace_pci_nvme_getfeat_timestamp(ts
.all
);
1306 return cpu_to_le64(ts
.all
);
1309 static uint16_t nvme_get_feature_timestamp(NvmeCtrl
*n
, NvmeRequest
*req
)
1311 NvmeCmd
*cmd
= &req
->cmd
;
1312 uint64_t prp1
= le64_to_cpu(cmd
->dptr
.prp1
);
1313 uint64_t prp2
= le64_to_cpu(cmd
->dptr
.prp2
);
1315 uint64_t timestamp
= nvme_get_timestamp(n
);
1317 return nvme_dma_prp(n
, (uint8_t *)×tamp
, sizeof(timestamp
), prp1
,
1318 prp2
, DMA_DIRECTION_FROM_DEVICE
, req
);
1321 static uint16_t nvme_get_feature(NvmeCtrl
*n
, NvmeRequest
*req
)
1323 NvmeCmd
*cmd
= &req
->cmd
;
1324 uint32_t dw10
= le32_to_cpu(cmd
->cdw10
);
1325 uint32_t dw11
= le32_to_cpu(cmd
->cdw11
);
1326 uint32_t nsid
= le32_to_cpu(cmd
->nsid
);
1328 uint8_t fid
= NVME_GETSETFEAT_FID(dw10
);
1329 NvmeGetFeatureSelect sel
= NVME_GETFEAT_SELECT(dw10
);
1332 static const uint32_t nvme_feature_default
[NVME_FID_MAX
] = {
1333 [NVME_ARBITRATION
] = NVME_ARB_AB_NOLIMIT
,
1336 trace_pci_nvme_getfeat(nvme_cid(req
), fid
, sel
, dw11
);
1338 if (!nvme_feature_support
[fid
]) {
1339 return NVME_INVALID_FIELD
| NVME_DNR
;
1342 if (nvme_feature_cap
[fid
] & NVME_FEAT_CAP_NS
) {
1343 if (!nsid
|| nsid
> n
->num_namespaces
) {
1345 * The Reservation Notification Mask and Reservation Persistence
1346 * features require a status code of Invalid Field in Command when
1347 * NSID is 0xFFFFFFFF. Since the device does not support those
1348 * features we can always return Invalid Namespace or Format as we
1349 * should do for all other features.
1351 return NVME_INVALID_NSID
| NVME_DNR
;
1356 case NVME_GETFEAT_SELECT_CURRENT
:
1358 case NVME_GETFEAT_SELECT_SAVED
:
1359 /* no features are saveable by the controller; fallthrough */
1360 case NVME_GETFEAT_SELECT_DEFAULT
:
1362 case NVME_GETFEAT_SELECT_CAP
:
1363 result
= nvme_feature_cap
[fid
];
1368 case NVME_TEMPERATURE_THRESHOLD
:
1372 * The controller only implements the Composite Temperature sensor, so
1373 * return 0 for all other sensors.
1375 if (NVME_TEMP_TMPSEL(dw11
) != NVME_TEMP_TMPSEL_COMPOSITE
) {
1379 switch (NVME_TEMP_THSEL(dw11
)) {
1380 case NVME_TEMP_THSEL_OVER
:
1381 result
= n
->features
.temp_thresh_hi
;
1383 case NVME_TEMP_THSEL_UNDER
:
1384 result
= n
->features
.temp_thresh_low
;
1388 return NVME_INVALID_FIELD
| NVME_DNR
;
1389 case NVME_VOLATILE_WRITE_CACHE
:
1390 result
= blk_enable_write_cache(n
->conf
.blk
);
1391 trace_pci_nvme_getfeat_vwcache(result
? "enabled" : "disabled");
1393 case NVME_ASYNCHRONOUS_EVENT_CONF
:
1394 result
= n
->features
.async_config
;
1396 case NVME_TIMESTAMP
:
1397 return nvme_get_feature_timestamp(n
, req
);
1404 case NVME_TEMPERATURE_THRESHOLD
:
1407 if (NVME_TEMP_TMPSEL(dw11
) != NVME_TEMP_TMPSEL_COMPOSITE
) {
1411 if (NVME_TEMP_THSEL(dw11
) == NVME_TEMP_THSEL_OVER
) {
1412 result
= NVME_TEMPERATURE_WARNING
;
1416 case NVME_NUMBER_OF_QUEUES
:
1417 result
= (n
->params
.max_ioqpairs
- 1) |
1418 ((n
->params
.max_ioqpairs
- 1) << 16);
1419 trace_pci_nvme_getfeat_numq(result
);
1421 case NVME_INTERRUPT_VECTOR_CONF
:
1423 if (iv
>= n
->params
.max_ioqpairs
+ 1) {
1424 return NVME_INVALID_FIELD
| NVME_DNR
;
1428 if (iv
== n
->admin_cq
.vector
) {
1429 result
|= NVME_INTVC_NOCOALESCING
;
1434 result
= nvme_feature_default
[fid
];
1439 req
->cqe
.result
= cpu_to_le32(result
);
1440 return NVME_SUCCESS
;
1443 static uint16_t nvme_set_feature_timestamp(NvmeCtrl
*n
, NvmeRequest
*req
)
1447 NvmeCmd
*cmd
= &req
->cmd
;
1448 uint64_t prp1
= le64_to_cpu(cmd
->dptr
.prp1
);
1449 uint64_t prp2
= le64_to_cpu(cmd
->dptr
.prp2
);
1451 ret
= nvme_dma_prp(n
, (uint8_t *)×tamp
, sizeof(timestamp
), prp1
,
1452 prp2
, DMA_DIRECTION_TO_DEVICE
, req
);
1453 if (ret
!= NVME_SUCCESS
) {
1457 nvme_set_timestamp(n
, timestamp
);
1459 return NVME_SUCCESS
;
1462 static uint16_t nvme_set_feature(NvmeCtrl
*n
, NvmeRequest
*req
)
1464 NvmeCmd
*cmd
= &req
->cmd
;
1465 uint32_t dw10
= le32_to_cpu(cmd
->cdw10
);
1466 uint32_t dw11
= le32_to_cpu(cmd
->cdw11
);
1467 uint32_t nsid
= le32_to_cpu(cmd
->nsid
);
1468 uint8_t fid
= NVME_GETSETFEAT_FID(dw10
);
1469 uint8_t save
= NVME_SETFEAT_SAVE(dw10
);
1471 trace_pci_nvme_setfeat(nvme_cid(req
), fid
, save
, dw11
);
1474 return NVME_FID_NOT_SAVEABLE
| NVME_DNR
;
1477 if (!nvme_feature_support
[fid
]) {
1478 return NVME_INVALID_FIELD
| NVME_DNR
;
1481 if (nvme_feature_cap
[fid
] & NVME_FEAT_CAP_NS
) {
1482 if (!nsid
|| (nsid
!= NVME_NSID_BROADCAST
&&
1483 nsid
> n
->num_namespaces
)) {
1484 return NVME_INVALID_NSID
| NVME_DNR
;
1486 } else if (nsid
&& nsid
!= NVME_NSID_BROADCAST
) {
1487 if (nsid
> n
->num_namespaces
) {
1488 return NVME_INVALID_NSID
| NVME_DNR
;
1491 return NVME_FEAT_NOT_NS_SPEC
| NVME_DNR
;
1494 if (!(nvme_feature_cap
[fid
] & NVME_FEAT_CAP_CHANGE
)) {
1495 return NVME_FEAT_NOT_CHANGEABLE
| NVME_DNR
;
1499 case NVME_TEMPERATURE_THRESHOLD
:
1500 if (NVME_TEMP_TMPSEL(dw11
) != NVME_TEMP_TMPSEL_COMPOSITE
) {
1504 switch (NVME_TEMP_THSEL(dw11
)) {
1505 case NVME_TEMP_THSEL_OVER
:
1506 n
->features
.temp_thresh_hi
= NVME_TEMP_TMPTH(dw11
);
1508 case NVME_TEMP_THSEL_UNDER
:
1509 n
->features
.temp_thresh_low
= NVME_TEMP_TMPTH(dw11
);
1512 return NVME_INVALID_FIELD
| NVME_DNR
;
1515 if (((n
->temperature
>= n
->features
.temp_thresh_hi
) ||
1516 (n
->temperature
<= n
->features
.temp_thresh_low
)) &&
1517 NVME_AEC_SMART(n
->features
.async_config
) & NVME_SMART_TEMPERATURE
) {
1518 nvme_enqueue_event(n
, NVME_AER_TYPE_SMART
,
1519 NVME_AER_INFO_SMART_TEMP_THRESH
,
1520 NVME_LOG_SMART_INFO
);
1524 case NVME_VOLATILE_WRITE_CACHE
:
1525 if (!(dw11
& 0x1) && blk_enable_write_cache(n
->conf
.blk
)) {
1526 blk_flush(n
->conf
.blk
);
1529 blk_set_enable_write_cache(n
->conf
.blk
, dw11
& 1);
1531 case NVME_NUMBER_OF_QUEUES
:
1532 if (n
->qs_created
) {
1533 return NVME_CMD_SEQ_ERROR
| NVME_DNR
;
1537 * NVMe v1.3, Section 5.21.1.7: 0xffff is not an allowed value for NCQR
1540 if ((dw11
& 0xffff) == 0xffff || ((dw11
>> 16) & 0xffff) == 0xffff) {
1541 return NVME_INVALID_FIELD
| NVME_DNR
;
1544 trace_pci_nvme_setfeat_numq((dw11
& 0xFFFF) + 1,
1545 ((dw11
>> 16) & 0xFFFF) + 1,
1546 n
->params
.max_ioqpairs
,
1547 n
->params
.max_ioqpairs
);
1548 req
->cqe
.result
= cpu_to_le32((n
->params
.max_ioqpairs
- 1) |
1549 ((n
->params
.max_ioqpairs
- 1) << 16));
1551 case NVME_ASYNCHRONOUS_EVENT_CONF
:
1552 n
->features
.async_config
= dw11
;
1554 case NVME_TIMESTAMP
:
1555 return nvme_set_feature_timestamp(n
, req
);
1557 return NVME_FEAT_NOT_CHANGEABLE
| NVME_DNR
;
1559 return NVME_SUCCESS
;
1562 static uint16_t nvme_aer(NvmeCtrl
*n
, NvmeRequest
*req
)
1564 trace_pci_nvme_aer(nvme_cid(req
));
1566 if (n
->outstanding_aers
> n
->params
.aerl
) {
1567 trace_pci_nvme_aer_aerl_exceeded();
1568 return NVME_AER_LIMIT_EXCEEDED
;
1571 n
->aer_reqs
[n
->outstanding_aers
] = req
;
1572 n
->outstanding_aers
++;
1574 if (!QTAILQ_EMPTY(&n
->aer_queue
)) {
1575 nvme_process_aers(n
);
1578 return NVME_NO_COMPLETE
;
1581 static uint16_t nvme_admin_cmd(NvmeCtrl
*n
, NvmeRequest
*req
)
1583 trace_pci_nvme_admin_cmd(nvme_cid(req
), nvme_sqid(req
), req
->cmd
.opcode
);
1585 switch (req
->cmd
.opcode
) {
1586 case NVME_ADM_CMD_DELETE_SQ
:
1587 return nvme_del_sq(n
, req
);
1588 case NVME_ADM_CMD_CREATE_SQ
:
1589 return nvme_create_sq(n
, req
);
1590 case NVME_ADM_CMD_GET_LOG_PAGE
:
1591 return nvme_get_log(n
, req
);
1592 case NVME_ADM_CMD_DELETE_CQ
:
1593 return nvme_del_cq(n
, req
);
1594 case NVME_ADM_CMD_CREATE_CQ
:
1595 return nvme_create_cq(n
, req
);
1596 case NVME_ADM_CMD_IDENTIFY
:
1597 return nvme_identify(n
, req
);
1598 case NVME_ADM_CMD_ABORT
:
1599 return nvme_abort(n
, req
);
1600 case NVME_ADM_CMD_SET_FEATURES
:
1601 return nvme_set_feature(n
, req
);
1602 case NVME_ADM_CMD_GET_FEATURES
:
1603 return nvme_get_feature(n
, req
);
1604 case NVME_ADM_CMD_ASYNC_EV_REQ
:
1605 return nvme_aer(n
, req
);
1607 trace_pci_nvme_err_invalid_admin_opc(req
->cmd
.opcode
);
1608 return NVME_INVALID_OPCODE
| NVME_DNR
;
1612 static void nvme_process_sq(void *opaque
)
1614 NvmeSQueue
*sq
= opaque
;
1615 NvmeCtrl
*n
= sq
->ctrl
;
1616 NvmeCQueue
*cq
= n
->cq
[sq
->cqid
];
1623 while (!(nvme_sq_empty(sq
) || QTAILQ_EMPTY(&sq
->req_list
))) {
1624 addr
= sq
->dma_addr
+ sq
->head
* n
->sqe_size
;
1625 if (nvme_addr_read(n
, addr
, (void *)&cmd
, sizeof(cmd
))) {
1626 trace_pci_nvme_err_addr_read(addr
);
1627 trace_pci_nvme_err_cfs();
1628 n
->bar
.csts
= NVME_CSTS_FAILED
;
1631 nvme_inc_sq_head(sq
);
1633 req
= QTAILQ_FIRST(&sq
->req_list
);
1634 QTAILQ_REMOVE(&sq
->req_list
, req
, entry
);
1635 QTAILQ_INSERT_TAIL(&sq
->out_req_list
, req
, entry
);
1636 nvme_req_clear(req
);
1637 req
->cqe
.cid
= cmd
.cid
;
1638 memcpy(&req
->cmd
, &cmd
, sizeof(NvmeCmd
));
1640 status
= sq
->sqid
? nvme_io_cmd(n
, req
) :
1641 nvme_admin_cmd(n
, req
);
1642 if (status
!= NVME_NO_COMPLETE
) {
1643 req
->status
= status
;
1644 nvme_enqueue_req_completion(cq
, req
);
1649 static void nvme_clear_ctrl(NvmeCtrl
*n
)
1653 blk_drain(n
->conf
.blk
);
1655 for (i
= 0; i
< n
->params
.max_ioqpairs
+ 1; i
++) {
1656 if (n
->sq
[i
] != NULL
) {
1657 nvme_free_sq(n
->sq
[i
], n
);
1660 for (i
= 0; i
< n
->params
.max_ioqpairs
+ 1; i
++) {
1661 if (n
->cq
[i
] != NULL
) {
1662 nvme_free_cq(n
->cq
[i
], n
);
1666 while (!QTAILQ_EMPTY(&n
->aer_queue
)) {
1667 NvmeAsyncEvent
*event
= QTAILQ_FIRST(&n
->aer_queue
);
1668 QTAILQ_REMOVE(&n
->aer_queue
, event
, entry
);
1673 n
->outstanding_aers
= 0;
1674 n
->qs_created
= false;
1676 blk_flush(n
->conf
.blk
);
1680 static int nvme_start_ctrl(NvmeCtrl
*n
)
1682 uint32_t page_bits
= NVME_CC_MPS(n
->bar
.cc
) + 12;
1683 uint32_t page_size
= 1 << page_bits
;
1685 if (unlikely(n
->cq
[0])) {
1686 trace_pci_nvme_err_startfail_cq();
1689 if (unlikely(n
->sq
[0])) {
1690 trace_pci_nvme_err_startfail_sq();
1693 if (unlikely(!n
->bar
.asq
)) {
1694 trace_pci_nvme_err_startfail_nbarasq();
1697 if (unlikely(!n
->bar
.acq
)) {
1698 trace_pci_nvme_err_startfail_nbaracq();
1701 if (unlikely(n
->bar
.asq
& (page_size
- 1))) {
1702 trace_pci_nvme_err_startfail_asq_misaligned(n
->bar
.asq
);
1705 if (unlikely(n
->bar
.acq
& (page_size
- 1))) {
1706 trace_pci_nvme_err_startfail_acq_misaligned(n
->bar
.acq
);
1709 if (unlikely(NVME_CC_MPS(n
->bar
.cc
) <
1710 NVME_CAP_MPSMIN(n
->bar
.cap
))) {
1711 trace_pci_nvme_err_startfail_page_too_small(
1712 NVME_CC_MPS(n
->bar
.cc
),
1713 NVME_CAP_MPSMIN(n
->bar
.cap
));
1716 if (unlikely(NVME_CC_MPS(n
->bar
.cc
) >
1717 NVME_CAP_MPSMAX(n
->bar
.cap
))) {
1718 trace_pci_nvme_err_startfail_page_too_large(
1719 NVME_CC_MPS(n
->bar
.cc
),
1720 NVME_CAP_MPSMAX(n
->bar
.cap
));
1723 if (unlikely(NVME_CC_IOCQES(n
->bar
.cc
) <
1724 NVME_CTRL_CQES_MIN(n
->id_ctrl
.cqes
))) {
1725 trace_pci_nvme_err_startfail_cqent_too_small(
1726 NVME_CC_IOCQES(n
->bar
.cc
),
1727 NVME_CTRL_CQES_MIN(n
->bar
.cap
));
1730 if (unlikely(NVME_CC_IOCQES(n
->bar
.cc
) >
1731 NVME_CTRL_CQES_MAX(n
->id_ctrl
.cqes
))) {
1732 trace_pci_nvme_err_startfail_cqent_too_large(
1733 NVME_CC_IOCQES(n
->bar
.cc
),
1734 NVME_CTRL_CQES_MAX(n
->bar
.cap
));
1737 if (unlikely(NVME_CC_IOSQES(n
->bar
.cc
) <
1738 NVME_CTRL_SQES_MIN(n
->id_ctrl
.sqes
))) {
1739 trace_pci_nvme_err_startfail_sqent_too_small(
1740 NVME_CC_IOSQES(n
->bar
.cc
),
1741 NVME_CTRL_SQES_MIN(n
->bar
.cap
));
1744 if (unlikely(NVME_CC_IOSQES(n
->bar
.cc
) >
1745 NVME_CTRL_SQES_MAX(n
->id_ctrl
.sqes
))) {
1746 trace_pci_nvme_err_startfail_sqent_too_large(
1747 NVME_CC_IOSQES(n
->bar
.cc
),
1748 NVME_CTRL_SQES_MAX(n
->bar
.cap
));
1751 if (unlikely(!NVME_AQA_ASQS(n
->bar
.aqa
))) {
1752 trace_pci_nvme_err_startfail_asqent_sz_zero();
1755 if (unlikely(!NVME_AQA_ACQS(n
->bar
.aqa
))) {
1756 trace_pci_nvme_err_startfail_acqent_sz_zero();
1760 n
->page_bits
= page_bits
;
1761 n
->page_size
= page_size
;
1762 n
->max_prp_ents
= n
->page_size
/ sizeof(uint64_t);
1763 n
->cqe_size
= 1 << NVME_CC_IOCQES(n
->bar
.cc
);
1764 n
->sqe_size
= 1 << NVME_CC_IOSQES(n
->bar
.cc
);
1765 nvme_init_cq(&n
->admin_cq
, n
, n
->bar
.acq
, 0, 0,
1766 NVME_AQA_ACQS(n
->bar
.aqa
) + 1, 1);
1767 nvme_init_sq(&n
->admin_sq
, n
, n
->bar
.asq
, 0, 0,
1768 NVME_AQA_ASQS(n
->bar
.aqa
) + 1);
1770 nvme_set_timestamp(n
, 0ULL);
1772 QTAILQ_INIT(&n
->aer_queue
);
1777 static void nvme_write_bar(NvmeCtrl
*n
, hwaddr offset
, uint64_t data
,
1780 if (unlikely(offset
& (sizeof(uint32_t) - 1))) {
1781 NVME_GUEST_ERR(pci_nvme_ub_mmiowr_misaligned32
,
1782 "MMIO write not 32-bit aligned,"
1783 " offset=0x%"PRIx64
"", offset
);
1784 /* should be ignored, fall through for now */
1787 if (unlikely(size
< sizeof(uint32_t))) {
1788 NVME_GUEST_ERR(pci_nvme_ub_mmiowr_toosmall
,
1789 "MMIO write smaller than 32-bits,"
1790 " offset=0x%"PRIx64
", size=%u",
1792 /* should be ignored, fall through for now */
1796 case 0xc: /* INTMS */
1797 if (unlikely(msix_enabled(&(n
->parent_obj
)))) {
1798 NVME_GUEST_ERR(pci_nvme_ub_mmiowr_intmask_with_msix
,
1799 "undefined access to interrupt mask set"
1800 " when MSI-X is enabled");
1801 /* should be ignored, fall through for now */
1803 n
->bar
.intms
|= data
& 0xffffffff;
1804 n
->bar
.intmc
= n
->bar
.intms
;
1805 trace_pci_nvme_mmio_intm_set(data
& 0xffffffff, n
->bar
.intmc
);
1808 case 0x10: /* INTMC */
1809 if (unlikely(msix_enabled(&(n
->parent_obj
)))) {
1810 NVME_GUEST_ERR(pci_nvme_ub_mmiowr_intmask_with_msix
,
1811 "undefined access to interrupt mask clr"
1812 " when MSI-X is enabled");
1813 /* should be ignored, fall through for now */
1815 n
->bar
.intms
&= ~(data
& 0xffffffff);
1816 n
->bar
.intmc
= n
->bar
.intms
;
1817 trace_pci_nvme_mmio_intm_clr(data
& 0xffffffff, n
->bar
.intmc
);
1821 trace_pci_nvme_mmio_cfg(data
& 0xffffffff);
1822 /* Windows first sends data, then sends enable bit */
1823 if (!NVME_CC_EN(data
) && !NVME_CC_EN(n
->bar
.cc
) &&
1824 !NVME_CC_SHN(data
) && !NVME_CC_SHN(n
->bar
.cc
))
1829 if (NVME_CC_EN(data
) && !NVME_CC_EN(n
->bar
.cc
)) {
1831 if (unlikely(nvme_start_ctrl(n
))) {
1832 trace_pci_nvme_err_startfail();
1833 n
->bar
.csts
= NVME_CSTS_FAILED
;
1835 trace_pci_nvme_mmio_start_success();
1836 n
->bar
.csts
= NVME_CSTS_READY
;
1838 } else if (!NVME_CC_EN(data
) && NVME_CC_EN(n
->bar
.cc
)) {
1839 trace_pci_nvme_mmio_stopped();
1841 n
->bar
.csts
&= ~NVME_CSTS_READY
;
1843 if (NVME_CC_SHN(data
) && !(NVME_CC_SHN(n
->bar
.cc
))) {
1844 trace_pci_nvme_mmio_shutdown_set();
1847 n
->bar
.csts
|= NVME_CSTS_SHST_COMPLETE
;
1848 } else if (!NVME_CC_SHN(data
) && NVME_CC_SHN(n
->bar
.cc
)) {
1849 trace_pci_nvme_mmio_shutdown_cleared();
1850 n
->bar
.csts
&= ~NVME_CSTS_SHST_COMPLETE
;
1854 case 0x1C: /* CSTS */
1855 if (data
& (1 << 4)) {
1856 NVME_GUEST_ERR(pci_nvme_ub_mmiowr_ssreset_w1c_unsupported
,
1857 "attempted to W1C CSTS.NSSRO"
1858 " but CAP.NSSRS is zero (not supported)");
1859 } else if (data
!= 0) {
1860 NVME_GUEST_ERR(pci_nvme_ub_mmiowr_ro_csts
,
1861 "attempted to set a read only bit"
1862 " of controller status");
1865 case 0x20: /* NSSR */
1866 if (data
== 0x4E564D65) {
1867 trace_pci_nvme_ub_mmiowr_ssreset_unsupported();
1869 /* The spec says that writes of other values have no effect */
1873 case 0x24: /* AQA */
1874 n
->bar
.aqa
= data
& 0xffffffff;
1875 trace_pci_nvme_mmio_aqattr(data
& 0xffffffff);
1877 case 0x28: /* ASQ */
1879 trace_pci_nvme_mmio_asqaddr(data
);
1881 case 0x2c: /* ASQ hi */
1882 n
->bar
.asq
|= data
<< 32;
1883 trace_pci_nvme_mmio_asqaddr_hi(data
, n
->bar
.asq
);
1885 case 0x30: /* ACQ */
1886 trace_pci_nvme_mmio_acqaddr(data
);
1889 case 0x34: /* ACQ hi */
1890 n
->bar
.acq
|= data
<< 32;
1891 trace_pci_nvme_mmio_acqaddr_hi(data
, n
->bar
.acq
);
1893 case 0x38: /* CMBLOC */
1894 NVME_GUEST_ERR(pci_nvme_ub_mmiowr_cmbloc_reserved
,
1895 "invalid write to reserved CMBLOC"
1896 " when CMBSZ is zero, ignored");
1898 case 0x3C: /* CMBSZ */
1899 NVME_GUEST_ERR(pci_nvme_ub_mmiowr_cmbsz_readonly
,
1900 "invalid write to read only CMBSZ, ignored");
1902 case 0xE00: /* PMRCAP */
1903 NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrcap_readonly
,
1904 "invalid write to PMRCAP register, ignored");
1906 case 0xE04: /* TODO PMRCTL */
1908 case 0xE08: /* PMRSTS */
1909 NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrsts_readonly
,
1910 "invalid write to PMRSTS register, ignored");
1912 case 0xE0C: /* PMREBS */
1913 NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrebs_readonly
,
1914 "invalid write to PMREBS register, ignored");
1916 case 0xE10: /* PMRSWTP */
1917 NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrswtp_readonly
,
1918 "invalid write to PMRSWTP register, ignored");
1920 case 0xE14: /* TODO PMRMSC */
1923 NVME_GUEST_ERR(pci_nvme_ub_mmiowr_invalid
,
1924 "invalid MMIO write,"
1925 " offset=0x%"PRIx64
", data=%"PRIx64
"",
1931 static uint64_t nvme_mmio_read(void *opaque
, hwaddr addr
, unsigned size
)
1933 NvmeCtrl
*n
= (NvmeCtrl
*)opaque
;
1934 uint8_t *ptr
= (uint8_t *)&n
->bar
;
1937 trace_pci_nvme_mmio_read(addr
);
1939 if (unlikely(addr
& (sizeof(uint32_t) - 1))) {
1940 NVME_GUEST_ERR(pci_nvme_ub_mmiord_misaligned32
,
1941 "MMIO read not 32-bit aligned,"
1942 " offset=0x%"PRIx64
"", addr
);
1943 /* should RAZ, fall through for now */
1944 } else if (unlikely(size
< sizeof(uint32_t))) {
1945 NVME_GUEST_ERR(pci_nvme_ub_mmiord_toosmall
,
1946 "MMIO read smaller than 32-bits,"
1947 " offset=0x%"PRIx64
"", addr
);
1948 /* should RAZ, fall through for now */
1951 if (addr
< sizeof(n
->bar
)) {
1953 * When PMRWBM bit 1 is set then read from
1954 * from PMRSTS should ensure prior writes
1955 * made it to persistent media
1957 if (addr
== 0xE08 &&
1958 (NVME_PMRCAP_PMRWBM(n
->bar
.pmrcap
) & 0x02)) {
1959 memory_region_msync(&n
->pmrdev
->mr
, 0, n
->pmrdev
->size
);
1961 memcpy(&val
, ptr
+ addr
, size
);
1963 NVME_GUEST_ERR(pci_nvme_ub_mmiord_invalid_ofs
,
1964 "MMIO read beyond last register,"
1965 " offset=0x%"PRIx64
", returning 0", addr
);
1971 static void nvme_process_db(NvmeCtrl
*n
, hwaddr addr
, int val
)
1975 if (unlikely(addr
& ((1 << 2) - 1))) {
1976 NVME_GUEST_ERR(pci_nvme_ub_db_wr_misaligned
,
1977 "doorbell write not 32-bit aligned,"
1978 " offset=0x%"PRIx64
", ignoring", addr
);
1982 if (((addr
- 0x1000) >> 2) & 1) {
1983 /* Completion queue doorbell write */
1985 uint16_t new_head
= val
& 0xffff;
1989 qid
= (addr
- (0x1000 + (1 << 2))) >> 3;
1990 if (unlikely(nvme_check_cqid(n
, qid
))) {
1991 NVME_GUEST_ERR(pci_nvme_ub_db_wr_invalid_cq
,
1992 "completion queue doorbell write"
1993 " for nonexistent queue,"
1994 " sqid=%"PRIu32
", ignoring", qid
);
1997 * NVM Express v1.3d, Section 4.1 state: "If host software writes
1998 * an invalid value to the Submission Queue Tail Doorbell or
1999 * Completion Queue Head Doorbell regiter and an Asynchronous Event
2000 * Request command is outstanding, then an asynchronous event is
2001 * posted to the Admin Completion Queue with a status code of
2002 * Invalid Doorbell Write Value."
2004 * Also note that the spec includes the "Invalid Doorbell Register"
2005 * status code, but nowhere does it specify when to use it.
2006 * However, it seems reasonable to use it here in a similar
2009 if (n
->outstanding_aers
) {
2010 nvme_enqueue_event(n
, NVME_AER_TYPE_ERROR
,
2011 NVME_AER_INFO_ERR_INVALID_DB_REGISTER
,
2012 NVME_LOG_ERROR_INFO
);
2019 if (unlikely(new_head
>= cq
->size
)) {
2020 NVME_GUEST_ERR(pci_nvme_ub_db_wr_invalid_cqhead
,
2021 "completion queue doorbell write value"
2022 " beyond queue size, sqid=%"PRIu32
","
2023 " new_head=%"PRIu16
", ignoring",
2026 if (n
->outstanding_aers
) {
2027 nvme_enqueue_event(n
, NVME_AER_TYPE_ERROR
,
2028 NVME_AER_INFO_ERR_INVALID_DB_VALUE
,
2029 NVME_LOG_ERROR_INFO
);
2035 trace_pci_nvme_mmio_doorbell_cq(cq
->cqid
, new_head
);
2037 start_sqs
= nvme_cq_full(cq
) ? 1 : 0;
2038 cq
->head
= new_head
;
2041 QTAILQ_FOREACH(sq
, &cq
->sq_list
, entry
) {
2042 timer_mod(sq
->timer
, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL
) + 500);
2044 timer_mod(cq
->timer
, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL
) + 500);
2047 if (cq
->tail
== cq
->head
) {
2048 nvme_irq_deassert(n
, cq
);
2051 /* Submission queue doorbell write */
2053 uint16_t new_tail
= val
& 0xffff;
2056 qid
= (addr
- 0x1000) >> 3;
2057 if (unlikely(nvme_check_sqid(n
, qid
))) {
2058 NVME_GUEST_ERR(pci_nvme_ub_db_wr_invalid_sq
,
2059 "submission queue doorbell write"
2060 " for nonexistent queue,"
2061 " sqid=%"PRIu32
", ignoring", qid
);
2063 if (n
->outstanding_aers
) {
2064 nvme_enqueue_event(n
, NVME_AER_TYPE_ERROR
,
2065 NVME_AER_INFO_ERR_INVALID_DB_REGISTER
,
2066 NVME_LOG_ERROR_INFO
);
2073 if (unlikely(new_tail
>= sq
->size
)) {
2074 NVME_GUEST_ERR(pci_nvme_ub_db_wr_invalid_sqtail
,
2075 "submission queue doorbell write value"
2076 " beyond queue size, sqid=%"PRIu32
","
2077 " new_tail=%"PRIu16
", ignoring",
2080 if (n
->outstanding_aers
) {
2081 nvme_enqueue_event(n
, NVME_AER_TYPE_ERROR
,
2082 NVME_AER_INFO_ERR_INVALID_DB_VALUE
,
2083 NVME_LOG_ERROR_INFO
);
2089 trace_pci_nvme_mmio_doorbell_sq(sq
->sqid
, new_tail
);
2091 sq
->tail
= new_tail
;
2092 timer_mod(sq
->timer
, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL
) + 500);
2096 static void nvme_mmio_write(void *opaque
, hwaddr addr
, uint64_t data
,
2099 NvmeCtrl
*n
= (NvmeCtrl
*)opaque
;
2101 trace_pci_nvme_mmio_write(addr
, data
);
2103 if (addr
< sizeof(n
->bar
)) {
2104 nvme_write_bar(n
, addr
, data
, size
);
2106 nvme_process_db(n
, addr
, data
);
2110 static const MemoryRegionOps nvme_mmio_ops
= {
2111 .read
= nvme_mmio_read
,
2112 .write
= nvme_mmio_write
,
2113 .endianness
= DEVICE_LITTLE_ENDIAN
,
2115 .min_access_size
= 2,
2116 .max_access_size
= 8,
2120 static void nvme_cmb_write(void *opaque
, hwaddr addr
, uint64_t data
,
2123 NvmeCtrl
*n
= (NvmeCtrl
*)opaque
;
2124 stn_le_p(&n
->cmbuf
[addr
], size
, data
);
2127 static uint64_t nvme_cmb_read(void *opaque
, hwaddr addr
, unsigned size
)
2129 NvmeCtrl
*n
= (NvmeCtrl
*)opaque
;
2130 return ldn_le_p(&n
->cmbuf
[addr
], size
);
2133 static const MemoryRegionOps nvme_cmb_ops
= {
2134 .read
= nvme_cmb_read
,
2135 .write
= nvme_cmb_write
,
2136 .endianness
= DEVICE_LITTLE_ENDIAN
,
2138 .min_access_size
= 1,
2139 .max_access_size
= 8,
2143 static void nvme_check_constraints(NvmeCtrl
*n
, Error
**errp
)
2145 NvmeParams
*params
= &n
->params
;
2147 if (params
->num_queues
) {
2148 warn_report("num_queues is deprecated; please use max_ioqpairs "
2151 params
->max_ioqpairs
= params
->num_queues
- 1;
2154 if (params
->max_ioqpairs
< 1 ||
2155 params
->max_ioqpairs
> NVME_MAX_IOQPAIRS
) {
2156 error_setg(errp
, "max_ioqpairs must be between 1 and %d",
2161 if (params
->msix_qsize
< 1 ||
2162 params
->msix_qsize
> PCI_MSIX_FLAGS_QSIZE
+ 1) {
2163 error_setg(errp
, "msix_qsize must be between 1 and %d",
2164 PCI_MSIX_FLAGS_QSIZE
+ 1);
2169 error_setg(errp
, "drive property not set");
2173 if (!params
->serial
) {
2174 error_setg(errp
, "serial property not set");
2178 if (!n
->params
.cmb_size_mb
&& n
->pmrdev
) {
2179 if (host_memory_backend_is_mapped(n
->pmrdev
)) {
2180 error_setg(errp
, "can't use already busy memdev: %s",
2181 object_get_canonical_path_component(OBJECT(n
->pmrdev
)));
2185 if (!is_power_of_2(n
->pmrdev
->size
)) {
2186 error_setg(errp
, "pmr backend size needs to be power of 2 in size");
2190 host_memory_backend_set_mapped(n
->pmrdev
, true);
2194 static void nvme_init_state(NvmeCtrl
*n
)
2196 n
->num_namespaces
= 1;
2197 /* add one to max_ioqpairs to account for the admin queue pair */
2198 n
->reg_size
= pow2ceil(sizeof(NvmeBar
) +
2199 2 * (n
->params
.max_ioqpairs
+ 1) * NVME_DB_SIZE
);
2200 n
->namespaces
= g_new0(NvmeNamespace
, n
->num_namespaces
);
2201 n
->sq
= g_new0(NvmeSQueue
*, n
->params
.max_ioqpairs
+ 1);
2202 n
->cq
= g_new0(NvmeCQueue
*, n
->params
.max_ioqpairs
+ 1);
2203 n
->temperature
= NVME_TEMPERATURE
;
2204 n
->features
.temp_thresh_hi
= NVME_TEMPERATURE_WARNING
;
2205 n
->starttime_ms
= qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL
);
2206 n
->aer_reqs
= g_new0(NvmeRequest
*, n
->params
.aerl
+ 1);
2209 static void nvme_init_blk(NvmeCtrl
*n
, Error
**errp
)
2211 if (!blkconf_blocksizes(&n
->conf
, errp
)) {
2214 blkconf_apply_backend_options(&n
->conf
, blk_is_read_only(n
->conf
.blk
),
static void nvme_init_namespace(NvmeCtrl *n, NvmeNamespace *ns, Error **errp)
{
    int64_t bs_size;
    NvmeIdNs *id_ns = &ns->id_ns;

    bs_size = blk_getlength(n->conf.blk);
    if (bs_size < 0) {
        error_setg_errno(errp, -bs_size, "could not get backing file size");
        return;
    }

    n->ns_size = bs_size;

    id_ns->lbaf[0].ds = BDRV_SECTOR_BITS;
    id_ns->nsze = cpu_to_le64(nvme_ns_nlbas(n, ns));

    /* no thin provisioning */
    id_ns->ncap = id_ns->nsze;
    id_ns->nuse = id_ns->ncap;
}
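
/*
 * CMBSZ.SZU = 2 selects a 1 MiB size unit, so CMBSZ.SZ below is the CMB
 * size expressed directly in megabytes (the cmb_size_mb parameter).
 */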
static void nvme_init_cmb(NvmeCtrl *n, PCIDevice *pci_dev)
{
    NVME_CMBLOC_SET_BIR(n->bar.cmbloc, NVME_CMB_BIR);
    NVME_CMBLOC_SET_OFST(n->bar.cmbloc, 0);

    NVME_CMBSZ_SET_SQS(n->bar.cmbsz, 1);
    NVME_CMBSZ_SET_CQS(n->bar.cmbsz, 0);
    NVME_CMBSZ_SET_LISTS(n->bar.cmbsz, 1);
    NVME_CMBSZ_SET_RDS(n->bar.cmbsz, 1);
    NVME_CMBSZ_SET_WDS(n->bar.cmbsz, 1);
    NVME_CMBSZ_SET_SZU(n->bar.cmbsz, 2); /* MBs */
    NVME_CMBSZ_SET_SZ(n->bar.cmbsz, n->params.cmb_size_mb);

    n->cmbuf = g_malloc0(NVME_CMBSZ_GETSIZE(n->bar.cmbsz));
    memory_region_init_io(&n->ctrl_mem, OBJECT(n), &nvme_cmb_ops, n,
                          "nvme-cmb", NVME_CMBSZ_GETSIZE(n->bar.cmbsz));
    pci_register_bar(pci_dev, NVME_CMBLOC_BIR(n->bar.cmbloc),
                     PCI_BASE_ADDRESS_SPACE_MEMORY |
                     PCI_BASE_ADDRESS_MEM_TYPE_64 |
                     PCI_BASE_ADDRESS_MEM_PREFETCH, &n->ctrl_mem);
}
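
/*
 * PMRWBM bit 1 advertises (per the spec) that a read from any Persistent
 * Memory Region address ensures all prior writes to the region have
 * completed and are persistent. The memory-backend-file region itself is
 * exposed as the BAR, so guest writes land directly in the mapped file.
 */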
static void nvme_init_pmr(NvmeCtrl *n, PCIDevice *pci_dev)
{
    /* Controller Capabilities register */
    NVME_CAP_SET_PMRS(n->bar.cap, 1);

    /* PMR Capabilities register */
    n->bar.pmrcap = 0;
    NVME_PMRCAP_SET_RDS(n->bar.pmrcap, 0);
    NVME_PMRCAP_SET_WDS(n->bar.pmrcap, 0);
    NVME_PMRCAP_SET_BIR(n->bar.pmrcap, NVME_PMR_BIR);
    NVME_PMRCAP_SET_PMRTU(n->bar.pmrcap, 0);
    /* Turn on bit 1 support */
    NVME_PMRCAP_SET_PMRWBM(n->bar.pmrcap, 0x02);
    NVME_PMRCAP_SET_PMRTO(n->bar.pmrcap, 0);
    NVME_PMRCAP_SET_CMSS(n->bar.pmrcap, 0);

    /* PMR Control register */
    n->bar.pmrctl = 0;
    NVME_PMRCTL_SET_EN(n->bar.pmrctl, 0);

    /* PMR Status register */
    n->bar.pmrsts = 0;
    NVME_PMRSTS_SET_ERR(n->bar.pmrsts, 0);
    NVME_PMRSTS_SET_NRDY(n->bar.pmrsts, 0);
    NVME_PMRSTS_SET_HSTS(n->bar.pmrsts, 0);
    NVME_PMRSTS_SET_CBAI(n->bar.pmrsts, 0);

    /* PMR Elasticity Buffer Size register */
    n->bar.pmrebs = 0;
    NVME_PMREBS_SET_PMRSZU(n->bar.pmrebs, 0);
    NVME_PMREBS_SET_RBB(n->bar.pmrebs, 0);
    NVME_PMREBS_SET_PMRWBZ(n->bar.pmrebs, 0);

    /* PMR Sustained Write Throughput register */
    n->bar.pmrswtp = 0;
    NVME_PMRSWTP_SET_PMRSWTU(n->bar.pmrswtp, 0);
    NVME_PMRSWTP_SET_PMRSWTV(n->bar.pmrswtp, 0);

    /* PMR Memory Space Control register */
    n->bar.pmrmsc = 0;
    NVME_PMRMSC_SET_CMSE(n->bar.pmrmsc, 0);
    NVME_PMRMSC_SET_CBA(n->bar.pmrmsc, 0);

    pci_register_bar(pci_dev, NVME_PMRCAP_BIR(n->bar.pmrcap),
                     PCI_BASE_ADDRESS_SPACE_MEMORY |
                     PCI_BASE_ADDRESS_MEM_TYPE_64 |
                     PCI_BASE_ADDRESS_MEM_PREFETCH, &n->pmrdev->mr);
}
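
/*
 * BAR layout: BAR0 (64-bit) holds the register file and doorbells; BAR2
 * is either the CMB or the PMR, which is why cmb_size_mb and pmrdev are
 * mutually exclusive; MSI-X lives in its own exclusive BAR (the 4 passed
 * to msix_init_exclusive_bar below).
 */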
static void nvme_init_pci(NvmeCtrl *n, PCIDevice *pci_dev, Error **errp)
{
    uint8_t *pci_conf = pci_dev->config;

    pci_conf[PCI_INTERRUPT_PIN] = 1;
    pci_config_set_prog_interface(pci_conf, 0x2);
    pci_config_set_class(pci_conf, PCI_CLASS_STORAGE_EXPRESS);
    pcie_endpoint_cap_init(pci_dev, 0x80);

    memory_region_init_io(&n->iomem, OBJECT(n), &nvme_mmio_ops, n, "nvme",
                          n->reg_size);
    pci_register_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY |
                     PCI_BASE_ADDRESS_MEM_TYPE_64, &n->iomem);
    if (msix_init_exclusive_bar(pci_dev, n->params.msix_qsize, 4, errp)) {
        return;
    }

    if (n->params.cmb_size_mb) {
        nvme_init_cmb(n, pci_dev);
    } else if (n->pmrdev) {
        nvme_init_pmr(n, pci_dev);
    }
}
static void nvme_init_ctrl(NvmeCtrl *n, PCIDevice *pci_dev)
{
    NvmeIdCtrl *id = &n->id_ctrl;
    uint8_t *pci_conf = pci_dev->config;
    char *subnqn;

    id->vid = cpu_to_le16(pci_get_word(pci_conf + PCI_VENDOR_ID));
    id->ssvid = cpu_to_le16(pci_get_word(pci_conf + PCI_SUBSYSTEM_VENDOR_ID));
    strpadcpy((char *)id->mn, sizeof(id->mn), "QEMU NVMe Ctrl", ' ');
    strpadcpy((char *)id->fr, sizeof(id->fr), "1.0", ' ');
    strpadcpy((char *)id->sn, sizeof(id->sn), n->params.serial, ' ');

    id->rab = 6;
    id->ieee[0] = 0x00;
    id->ieee[1] = 0x02;
    id->ieee[2] = 0xb3;
    id->mdts = n->params.mdts;
    id->ver = cpu_to_le32(NVME_SPEC_VER);
    id->oacs = cpu_to_le16(0);

    /*
     * Because the controller always completes the Abort command immediately,
     * there can never be more than one concurrently executing Abort command,
     * so this value is never used for anything. Note that there can easily be
     * many Abort commands in the queues, but they are not considered
     * "executing" until processed by nvme_abort.
     *
     * The specification recommends a value of 3 for Abort Command Limit (four
     * concurrently outstanding Abort commands), so let's use that though it is
     * inconsequential.
     */
    id->acl = 3;
    id->aerl = n->params.aerl;
    id->frmw = (NVME_NUM_FW_SLOTS << 1) | NVME_FRMW_SLOT1_RO;
    id->lpa = NVME_LPA_EXTENDED;

    /* recommended default value (~70 C) */
    id->wctemp = cpu_to_le16(NVME_TEMPERATURE_WARNING);
    id->cctemp = cpu_to_le16(NVME_TEMPERATURE_CRITICAL);

    id->sqes = (0x6 << 4) | 0x6;
    id->cqes = (0x4 << 4) | 0x4;
    id->nn = cpu_to_le32(n->num_namespaces);
    id->oncs = cpu_to_le16(NVME_ONCS_WRITE_ZEROES | NVME_ONCS_TIMESTAMP |
                           NVME_ONCS_FEATURES);

    subnqn = g_strdup_printf("nqn.2019-08.org.qemu:%s", n->params.serial);
    strpadcpy((char *)id->subnqn, sizeof(id->subnqn), subnqn, '\0');
    g_free(subnqn);

    id->psd[0].mp = cpu_to_le16(0x9c4);
    id->psd[0].enlat = cpu_to_le32(0x10);
    id->psd[0].exlat = cpu_to_le32(0x4);
    if (blk_enable_write_cache(n->conf.blk)) {
        id->vwc = 1;
    }

    NVME_CAP_SET_MQES(n->bar.cap, 0x7ff);
    NVME_CAP_SET_CQR(n->bar.cap, 1);
    NVME_CAP_SET_TO(n->bar.cap, 0xf);
    NVME_CAP_SET_CSS(n->bar.cap, 1);
    NVME_CAP_SET_MPSMAX(n->bar.cap, 4);

    n->bar.vs = NVME_SPEC_VER;
    n->bar.intmc = n->bar.intms = 0;
}
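
/*
 * Realize brings the controller up in dependency order: validate
 * parameters, allocate state, open the block backend, set up PCI/MSI-X,
 * fill in the identify/capability registers, then the namespaces.
 */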
static void nvme_realize(PCIDevice *pci_dev, Error **errp)
{
    NvmeCtrl *n = NVME(pci_dev);
    Error *local_err = NULL;

    int i;

    nvme_check_constraints(n, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    nvme_init_state(n);
    nvme_init_blk(n, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    nvme_init_pci(n, pci_dev, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    nvme_init_ctrl(n, pci_dev);

    for (i = 0; i < n->num_namespaces; i++) {
        nvme_init_namespace(n, &n->namespaces[i], &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
    }
}
static void nvme_exit(PCIDevice *pci_dev)
{
    NvmeCtrl *n = NVME(pci_dev);

    nvme_clear_ctrl(n);
    g_free(n->namespaces);
    g_free(n->cq);
    g_free(n->sq);
    g_free(n->aer_reqs);

    if (n->params.cmb_size_mb) {
        g_free(n->cmbuf);
    }

    if (n->pmrdev) {
        host_memory_backend_set_mapped(n->pmrdev, false);
    }
    msix_uninit_exclusive_bar(pci_dev);
}
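
/*
 * num_queues defaults to 0 here, meaning "unset"; any non-zero value
 * takes the deprecation path in nvme_check_constraints above.
 */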
static Property nvme_props[] = {
    DEFINE_BLOCK_PROPERTIES(NvmeCtrl, conf),
    DEFINE_PROP_LINK("pmrdev", NvmeCtrl, pmrdev, TYPE_MEMORY_BACKEND,
                     HostMemoryBackend *),
    DEFINE_PROP_STRING("serial", NvmeCtrl, params.serial),
    DEFINE_PROP_UINT32("cmb_size_mb", NvmeCtrl, params.cmb_size_mb, 0),
    DEFINE_PROP_UINT32("num_queues", NvmeCtrl, params.num_queues, 0),
    DEFINE_PROP_UINT32("max_ioqpairs", NvmeCtrl, params.max_ioqpairs, 64),
    DEFINE_PROP_UINT16("msix_qsize", NvmeCtrl, params.msix_qsize, 65),
    DEFINE_PROP_UINT8("aerl", NvmeCtrl, params.aerl, 3),
    DEFINE_PROP_UINT32("aer_max_queued", NvmeCtrl, params.aer_max_queued, 64),
    DEFINE_PROP_UINT8("mdts", NvmeCtrl, params.mdts, 7),
    DEFINE_PROP_END_OF_LIST(),
};
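
/* No fields are versioned; the device is simply marked unmigratable. */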
static const VMStateDescription nvme_vmstate = {
    .name = "nvme",
    .unmigratable = 1,
};
static void nvme_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);
    PCIDeviceClass *pc = PCI_DEVICE_CLASS(oc);

    pc->realize = nvme_realize;
    pc->exit = nvme_exit;
    pc->class_id = PCI_CLASS_STORAGE_EXPRESS;
    pc->vendor_id = PCI_VENDOR_ID_INTEL;
    pc->device_id = 0x5845;
    pc->revision = 2;

    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
    dc->desc = "Non-Volatile Memory Express";
    device_class_set_props(dc, nvme_props);
    dc->vmsd = &nvme_vmstate;
}
static void nvme_instance_init(Object *obj)
{
    NvmeCtrl *s = NVME(obj);

    device_add_bootindex_property(obj, &s->conf.bootindex,
                                  "bootindex", "/namespace@1,0",
                                  DEVICE(obj));
}
static const TypeInfo nvme_info = {
    .name          = TYPE_NVME,
    .parent        = TYPE_PCI_DEVICE,
    .instance_size = sizeof(NvmeCtrl),
    .class_init    = nvme_class_init,
    .instance_init = nvme_instance_init,
    .interfaces = (InterfaceInfo[]) {
        { INTERFACE_PCIE_DEVICE },
        { }
    },
};
static void nvme_register_types(void)
{
    type_register_static(&nvme_info);
}

type_init(nvme_register_types)