/*
 * QEMU NVM Express Controller
 *
 * Copyright (c) 2012, Intel Corporation
 *
 * Written by Keith Busch <keith.busch@intel.com>
 *
 * This code is licensed under the GNU GPL v2 or later.
 */

/**
 * Reference Specs: http://www.nvmexpress.org, 1.2, 1.1, 1.0e
 *
 *  http://www.nvmexpress.org/resources/
 *
 * Usage: add options:
 *      -drive file=<file>,if=none,id=<drive_id>
 *      -device nvme,drive=<drive_id>,serial=<serial>,id=<id[optional]>, \
 *              cmb_size_mb=<cmb_size_mb[optional]>, \
 *              [pmrdev=<mem_backend_file_id>,] \
 *              num_queues=<N[optional]>
 *
 * Note cmb_size_mb denotes size of CMB in MB. CMB is assumed to be at
 * offset 0 in BAR2 and supports only WDS, RDS and SQS for now.
 *
 * cmb_size_mb= and pmrdev= options are mutually exclusive due to limitation
 * in available BARs. cmb_size_mb= will take precedence over pmrdev= when
 * both are provided.
 *
 * Enabling pmr emulation can be achieved by pointing to memory-backend-file.
 * For example:
 * -object memory-backend-file,id=<mem_id>,share=on,mem-path=<file_path>, \
 *  size=<size> .... -device nvme,...,pmrdev=<mem_id>
 */
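
/*
 * A concrete invocation following the usage notes above, for illustration
 * only (the image file name, IDs and sizes are hypothetical):
 *
 *   qemu-system-x86_64 ... \
 *       -drive file=nvme.img,if=none,id=nvmedrive0 \
 *       -device nvme,drive=nvmedrive0,serial=deadbeef,cmb_size_mb=64
 */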
#include "qemu/osdep.h"
#include "qemu/units.h"
#include "hw/block/block.h"
#include "hw/pci/msix.h"
#include "hw/pci/pci.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"
#include "sysemu/sysemu.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "sysemu/hostmem.h"
#include "sysemu/block-backend.h"
#include "exec/memory.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qemu/cutils.h"
#include "trace.h"
#include "nvme.h"
#define NVME_REG_SIZE 0x1000
#define NVME_DB_SIZE  4
#define NVME_GUEST_ERR(trace, fmt, ...) \
    do { \
        (trace_##trace)(__VA_ARGS__); \
        qemu_log_mask(LOG_GUEST_ERROR, #trace \
            " in %s: " fmt "\n", __func__, ## __VA_ARGS__); \
    } while (0)
static void nvme_process_sq(void *opaque);
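
/*
 * Read from guest memory, taking the Controller Memory Buffer into account:
 * addresses that fall inside the CMB window are served directly from the
 * controller's own buffer instead of going through PCI DMA.
 */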
static void nvme_addr_read(NvmeCtrl *n, hwaddr addr, void *buf, int size)
{
    if (n->cmbsz && addr >= n->ctrl_mem.addr &&
        addr < (n->ctrl_mem.addr + int128_get64(n->ctrl_mem.size))) {
        memcpy(buf, (void *)&n->cmbuf[addr - n->ctrl_mem.addr], size);
    } else {
        pci_dma_read(&n->parent_obj, addr, buf, size);
    }
}
static int nvme_check_sqid(NvmeCtrl *n, uint16_t sqid)
{
    return sqid < n->params.num_queues && n->sq[sqid] != NULL ? 0 : -1;
}
static int nvme_check_cqid(NvmeCtrl *n, uint16_t cqid)
{
    return cqid < n->params.num_queues && n->cq[cqid] != NULL ? 0 : -1;
}
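
/*
 * Queue index helpers. The completion queue phase tag is inverted on every
 * tail wrap-around so the host can detect newly posted entries without
 * reading a head/tail register.
 */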
static void nvme_inc_cq_tail(NvmeCQueue *cq)
{
    cq->tail++;
    if (cq->tail >= cq->size) {
        cq->tail = 0;
        cq->phase = !cq->phase;
    }
}
static void nvme_inc_sq_head(NvmeSQueue *sq)
{
    sq->head = (sq->head + 1) % sq->size;
}
static uint8_t nvme_cq_full(NvmeCQueue *cq)
{
    return (cq->tail + 1) % cq->size == cq->head;
}
static uint8_t nvme_sq_empty(NvmeSQueue *sq)
{
    return sq->head == sq->tail;
}
static void nvme_irq_check(NvmeCtrl *n)
{
    if (msix_enabled(&(n->parent_obj))) {
        return;
    }
    if (~n->bar.intms & n->irq_status) {
        pci_irq_assert(&n->parent_obj);
    } else {
        pci_irq_deassert(&n->parent_obj);
    }
}
static void nvme_irq_assert(NvmeCtrl *n, NvmeCQueue *cq)
{
    if (cq->irq_enabled) {
        if (msix_enabled(&(n->parent_obj))) {
            trace_pci_nvme_irq_msix(cq->vector);
            msix_notify(&(n->parent_obj), cq->vector);
        } else {
            trace_pci_nvme_irq_pin();
            assert(cq->cqid < 64);
            n->irq_status |= 1 << cq->cqid;
            nvme_irq_check(n);
        }
    } else {
        trace_pci_nvme_irq_masked();
    }
}
static void nvme_irq_deassert(NvmeCtrl *n, NvmeCQueue *cq)
{
    if (cq->irq_enabled) {
        if (msix_enabled(&(n->parent_obj))) {
            return;
        } else {
            assert(cq->cqid < 64);
            n->irq_status &= ~(1 << cq->cqid);
            nvme_irq_check(n);
        }
    }
}
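
/*
 * Map a PRP1/PRP2 pair to either a scatter/gather list (host memory) or an
 * iovec (CMB-resident data). Per the NVMe spec, PRP2 is a second data page
 * when the transfer fits in two pages, or the address of a (possibly
 * chained) PRP list otherwise; every PRP list entry must be page aligned.
 */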
static uint16_t nvme_map_prp(QEMUSGList *qsg, QEMUIOVector *iov, uint64_t prp1,
                             uint64_t prp2, uint32_t len, NvmeCtrl *n)
{
    hwaddr trans_len = n->page_size - (prp1 % n->page_size);
    trans_len = MIN(len, trans_len);
    int num_prps = (len >> n->page_bits) + 1;

    if (unlikely(!prp1)) {
        trace_pci_nvme_err_invalid_prp();
        return NVME_INVALID_FIELD | NVME_DNR;
    } else if (n->cmbsz && prp1 >= n->ctrl_mem.addr &&
               prp1 < n->ctrl_mem.addr + int128_get64(n->ctrl_mem.size)) {
        qsg->nsg = 0;
        qemu_iovec_init(iov, num_prps);
        qemu_iovec_add(iov, (void *)&n->cmbuf[prp1 - n->ctrl_mem.addr],
                       trans_len);
    } else {
        pci_dma_sglist_init(qsg, &n->parent_obj, num_prps);
        qemu_sglist_add(qsg, prp1, trans_len);
    }

    len -= trans_len;
    if (len) {
        if (unlikely(!prp2)) {
            trace_pci_nvme_err_invalid_prp2_missing();
            goto unmap;
        }
        if (len > n->page_size) {
            uint64_t prp_list[n->max_prp_ents];
            uint32_t nents, prp_trans;
            int i = 0;

            nents = (len + n->page_size - 1) >> n->page_bits;
            prp_trans = MIN(n->max_prp_ents, nents) * sizeof(uint64_t);
            nvme_addr_read(n, prp2, (void *)prp_list, prp_trans);
            while (len != 0) {
                uint64_t prp_ent = le64_to_cpu(prp_list[i]);

                if (i == n->max_prp_ents - 1 && len > n->page_size) {
                    if (unlikely(!prp_ent || prp_ent & (n->page_size - 1))) {
                        trace_pci_nvme_err_invalid_prplist_ent(prp_ent);
                        goto unmap;
                    }

                    i = 0;
                    nents = (len + n->page_size - 1) >> n->page_bits;
                    prp_trans = MIN(n->max_prp_ents, nents) * sizeof(uint64_t);
                    nvme_addr_read(n, prp_ent, (void *)prp_list,
                                   prp_trans);
                    prp_ent = le64_to_cpu(prp_list[i]);
                }

                if (unlikely(!prp_ent || prp_ent & (n->page_size - 1))) {
                    trace_pci_nvme_err_invalid_prplist_ent(prp_ent);
                    goto unmap;
                }

                trans_len = MIN(len, n->page_size);
                if (qsg->nsg) {
                    qemu_sglist_add(qsg, prp_ent, trans_len);
                } else {
                    qemu_iovec_add(iov,
                                   (void *)&n->cmbuf[prp_ent - n->ctrl_mem.addr],
                                   trans_len);
                }
                len -= trans_len;
                i++;
            }
        } else {
            if (unlikely(prp2 & (n->page_size - 1))) {
                trace_pci_nvme_err_invalid_prp2_align(prp2);
                goto unmap;
            }
            if (qsg->nsg) {
                qemu_sglist_add(qsg, prp2, len);
            } else {
                qemu_iovec_add(iov, (void *)&n->cmbuf[prp2 - n->ctrl_mem.addr],
                               trans_len);
            }
        }
    }
    return NVME_SUCCESS;

 unmap:
    qemu_sglist_destroy(qsg);
    return NVME_INVALID_FIELD | NVME_DNR;
}
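
/*
 * The DMA helpers below pick whichever transfer path nvme_map_prp() set up:
 * a populated QEMUSGList means host memory (dma_buf_* accessors), an empty
 * one means the data lives in the CMB and goes through the iovec.
 */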
static uint16_t nvme_dma_write_prp(NvmeCtrl *n, uint8_t *ptr, uint32_t len,
                                   uint64_t prp1, uint64_t prp2)
{
    QEMUSGList qsg;
    QEMUIOVector iov;
    uint16_t status = NVME_SUCCESS;

    if (nvme_map_prp(&qsg, &iov, prp1, prp2, len, n)) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }
    if (qsg.nsg > 0) {
        if (dma_buf_write(ptr, len, &qsg)) {
            status = NVME_INVALID_FIELD | NVME_DNR;
        }
        qemu_sglist_destroy(&qsg);
    } else {
        if (qemu_iovec_to_buf(&iov, 0, ptr, len) != len) {
            status = NVME_INVALID_FIELD | NVME_DNR;
        }
        qemu_iovec_destroy(&iov);
    }
    return status;
}
static uint16_t nvme_dma_read_prp(NvmeCtrl *n, uint8_t *ptr, uint32_t len,
                                  uint64_t prp1, uint64_t prp2)
{
    QEMUSGList qsg;
    QEMUIOVector iov;
    uint16_t status = NVME_SUCCESS;

    trace_pci_nvme_dma_read(prp1, prp2);

    if (nvme_map_prp(&qsg, &iov, prp1, prp2, len, n)) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }
    if (qsg.nsg > 0) {
        if (unlikely(dma_buf_read(ptr, len, &qsg))) {
            trace_pci_nvme_err_invalid_dma();
            status = NVME_INVALID_FIELD | NVME_DNR;
        }
        qemu_sglist_destroy(&qsg);
    } else {
        if (unlikely(qemu_iovec_from_buf(&iov, 0, ptr, len) != len)) {
            trace_pci_nvme_err_invalid_dma();
            status = NVME_INVALID_FIELD | NVME_DNR;
        }
        qemu_iovec_destroy(&iov);
    }
    return status;
}
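
/*
 * Post completions to the CQ. Each CQE status word carries the current
 * phase tag in bit 0; completed requests are recycled back onto the
 * submission queue's free request list.
 */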
static void nvme_post_cqes(void *opaque)
{
    NvmeCQueue *cq = opaque;
    NvmeCtrl *n = cq->ctrl;
    NvmeRequest *req, *next;

    QTAILQ_FOREACH_SAFE(req, &cq->req_list, entry, next) {
        NvmeSQueue *sq;
        hwaddr addr;

        if (nvme_cq_full(cq)) {
            break;
        }

        QTAILQ_REMOVE(&cq->req_list, req, entry);
        sq = req->sq;
        req->cqe.status = cpu_to_le16((req->status << 1) | cq->phase);
        req->cqe.sq_id = cpu_to_le16(sq->sqid);
        req->cqe.sq_head = cpu_to_le16(sq->head);
        addr = cq->dma_addr + cq->tail * n->cqe_size;
        nvme_inc_cq_tail(cq);
        pci_dma_write(&n->parent_obj, addr, (void *)&req->cqe,
                      sizeof(req->cqe));
        QTAILQ_INSERT_TAIL(&sq->req_list, req, entry);
    }
    if (cq->tail != cq->head) {
        nvme_irq_assert(n, cq);
    }
}
static void nvme_enqueue_req_completion(NvmeCQueue *cq, NvmeRequest *req)
{
    assert(cq->cqid == req->sq->cqid);
    QTAILQ_REMOVE(&req->sq->out_req_list, req, entry);
    QTAILQ_INSERT_TAIL(&cq->req_list, req, entry);
    timer_mod(cq->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 500);
}
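
/* Common AIO completion path: finish block accounting, then queue the CQE. */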
static void nvme_rw_cb(void *opaque, int ret)
{
    NvmeRequest *req = opaque;
    NvmeSQueue *sq = req->sq;
    NvmeCtrl *n = sq->ctrl;
    NvmeCQueue *cq = n->cq[sq->cqid];

    if (!ret) {
        block_acct_done(blk_get_stats(n->conf.blk), &req->acct);
        req->status = NVME_SUCCESS;
    } else {
        block_acct_failed(blk_get_stats(n->conf.blk), &req->acct);
        req->status = NVME_INTERNAL_DEV_ERROR;
    }
    if (req->has_sg) {
        qemu_sglist_destroy(&req->qsg);
    }
    nvme_enqueue_req_completion(cq, req);
}
static uint16_t nvme_flush(NvmeCtrl *n, NvmeNamespace *ns, NvmeCmd *cmd,
    NvmeRequest *req)
{
    req->has_sg = false;
    block_acct_start(blk_get_stats(n->conf.blk), &req->acct, 0,
                     BLOCK_ACCT_FLUSH);
    req->aiocb = blk_aio_flush(n->conf.blk, nvme_rw_cb, req);

    return NVME_NO_COMPLETE;
}
static uint16_t nvme_write_zeros(NvmeCtrl *n, NvmeNamespace *ns, NvmeCmd *cmd,
                                 NvmeRequest *req)
{
    NvmeRwCmd *rw = (NvmeRwCmd *)cmd;
    const uint8_t lba_index = NVME_ID_NS_FLBAS_INDEX(ns->id_ns.flbas);
    const uint8_t data_shift = ns->id_ns.lbaf[lba_index].ds;
    uint64_t slba = le64_to_cpu(rw->slba);
    uint32_t nlb  = le16_to_cpu(rw->nlb) + 1;
    uint64_t offset = slba << data_shift;
    uint32_t count = nlb << data_shift;

    if (unlikely(slba + nlb > ns->id_ns.nsze)) {
        trace_pci_nvme_err_invalid_lba_range(slba, nlb, ns->id_ns.nsze);
        return NVME_LBA_RANGE | NVME_DNR;
    }

    req->has_sg = false;
    block_acct_start(blk_get_stats(n->conf.blk), &req->acct, 0,
                     BLOCK_ACCT_WRITE);
    req->aiocb = blk_aio_pwrite_zeroes(n->conf.blk, offset, count,
                                       BDRV_REQ_MAY_UNMAP, nvme_rw_cb, req);
    return NVME_NO_COMPLETE;
}
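
/*
 * Read/write handler. Byte offsets are derived from LBA fields by shifting
 * with the namespace's LBA data size (ds); e.g. a 512-byte LBA format gives
 * offset = slba << 9.
 */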
static uint16_t nvme_rw(NvmeCtrl *n, NvmeNamespace *ns, NvmeCmd *cmd,
    NvmeRequest *req)
{
    NvmeRwCmd *rw = (NvmeRwCmd *)cmd;
    uint32_t nlb  = le32_to_cpu(rw->nlb) + 1;
    uint64_t slba = le64_to_cpu(rw->slba);
    uint64_t prp1 = le64_to_cpu(rw->prp1);
    uint64_t prp2 = le64_to_cpu(rw->prp2);

    uint8_t lba_index  = NVME_ID_NS_FLBAS_INDEX(ns->id_ns.flbas);
    uint8_t data_shift = ns->id_ns.lbaf[lba_index].ds;
    uint64_t data_size = (uint64_t)nlb << data_shift;
    uint64_t data_offset = slba << data_shift;
    int is_write = rw->opcode == NVME_CMD_WRITE ? 1 : 0;
    enum BlockAcctType acct = is_write ? BLOCK_ACCT_WRITE : BLOCK_ACCT_READ;

    trace_pci_nvme_rw(is_write ? "write" : "read", nlb, data_size, slba);

    if (unlikely((slba + nlb) > ns->id_ns.nsze)) {
        block_acct_invalid(blk_get_stats(n->conf.blk), acct);
        trace_pci_nvme_err_invalid_lba_range(slba, nlb, ns->id_ns.nsze);
        return NVME_LBA_RANGE | NVME_DNR;
    }

    if (nvme_map_prp(&req->qsg, &req->iov, prp1, prp2, data_size, n)) {
        block_acct_invalid(blk_get_stats(n->conf.blk), acct);
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    dma_acct_start(n->conf.blk, &req->acct, &req->qsg, acct);
    if (req->qsg.nsg > 0) {
        req->has_sg = true;
        req->aiocb = is_write ?
            dma_blk_write(n->conf.blk, &req->qsg, data_offset, BDRV_SECTOR_SIZE,
                          nvme_rw_cb, req) :
            dma_blk_read(n->conf.blk, &req->qsg, data_offset, BDRV_SECTOR_SIZE,
                         nvme_rw_cb, req);
    } else {
        req->has_sg = false;
        req->aiocb = is_write ?
            blk_aio_pwritev(n->conf.blk, data_offset, &req->iov, 0, nvme_rw_cb,
                            req) :
            blk_aio_preadv(n->conf.blk, data_offset, &req->iov, 0, nvme_rw_cb,
                           req);
    }

    return NVME_NO_COMPLETE;
}
static uint16_t nvme_io_cmd(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
{
    NvmeNamespace *ns;
    uint32_t nsid = le32_to_cpu(cmd->nsid);

    if (unlikely(nsid == 0 || nsid > n->num_namespaces)) {
        trace_pci_nvme_err_invalid_ns(nsid, n->num_namespaces);
        return NVME_INVALID_NSID | NVME_DNR;
    }

    ns = &n->namespaces[nsid - 1];
    switch (cmd->opcode) {
    case NVME_CMD_FLUSH:
        return nvme_flush(n, ns, cmd, req);
    case NVME_CMD_WRITE_ZEROS:
        return nvme_write_zeros(n, ns, cmd, req);
    case NVME_CMD_WRITE:
    case NVME_CMD_READ:
        return nvme_rw(n, ns, cmd, req);
    default:
        trace_pci_nvme_err_invalid_opc(cmd->opcode);
        return NVME_INVALID_OPCODE | NVME_DNR;
    }
}
static void nvme_free_sq(NvmeSQueue *sq, NvmeCtrl *n)
{
    n->sq[sq->sqid] = NULL;
    timer_del(sq->timer);
    timer_free(sq->timer);
    g_free(sq->io_req);
    if (sq->sqid) {
        g_free(sq);
    }
}
static uint16_t nvme_del_sq(NvmeCtrl *n, NvmeCmd *cmd)
{
    NvmeDeleteQ *c = (NvmeDeleteQ *)cmd;
    NvmeRequest *req, *next;
    NvmeSQueue *sq;
    NvmeCQueue *cq;
    uint16_t qid = le16_to_cpu(c->qid);

    if (unlikely(!qid || nvme_check_sqid(n, qid))) {
        trace_pci_nvme_err_invalid_del_sq(qid);
        return NVME_INVALID_QID | NVME_DNR;
    }

    trace_pci_nvme_del_sq(qid);

    sq = n->sq[qid];
    while (!QTAILQ_EMPTY(&sq->out_req_list)) {
        req = QTAILQ_FIRST(&sq->out_req_list);
        assert(req->aiocb);
        blk_aio_cancel(req->aiocb);
    }
    if (!nvme_check_cqid(n, sq->cqid)) {
        cq = n->cq[sq->cqid];
        QTAILQ_REMOVE(&cq->sq_list, sq, entry);

        nvme_post_cqes(cq);
        QTAILQ_FOREACH_SAFE(req, &cq->req_list, entry, next) {
            if (req->sq == sq) {
                QTAILQ_REMOVE(&cq->req_list, req, entry);
                QTAILQ_INSERT_TAIL(&sq->req_list, req, entry);
            }
        }
    }

    nvme_free_sq(sq, n);
    return NVME_SUCCESS;
}
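
/*
 * Each submission queue owns a pre-allocated pool of NvmeRequest structures
 * (one per queue slot) threaded on req_list; in-flight requests move to
 * out_req_list until their completion has been posted.
 */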
static void nvme_init_sq(NvmeSQueue *sq, NvmeCtrl *n, uint64_t dma_addr,
    uint16_t sqid, uint16_t cqid, uint16_t size)
{
    int i;
    NvmeCQueue *cq;

    sq->ctrl = n;
    sq->dma_addr = dma_addr;
    sq->sqid = sqid;
    sq->size = size;
    sq->cqid = cqid;
    sq->head = sq->tail = 0;
    sq->io_req = g_new(NvmeRequest, sq->size);

    QTAILQ_INIT(&sq->req_list);
    QTAILQ_INIT(&sq->out_req_list);
    for (i = 0; i < sq->size; i++) {
        sq->io_req[i].sq = sq;
        QTAILQ_INSERT_TAIL(&(sq->req_list), &sq->io_req[i], entry);
    }
    sq->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, nvme_process_sq, sq);

    assert(n->cq[cqid]);
    cq = n->cq[cqid];
    QTAILQ_INSERT_TAIL(&(cq->sq_list), sq, entry);
    n->sq[sqid] = sq;
}
static uint16_t nvme_create_sq(NvmeCtrl *n, NvmeCmd *cmd)
{
    NvmeSQueue *sq;
    NvmeCreateSq *c = (NvmeCreateSq *)cmd;

    uint16_t cqid = le16_to_cpu(c->cqid);
    uint16_t sqid = le16_to_cpu(c->sqid);
    uint16_t qsize = le16_to_cpu(c->qsize);
    uint16_t qflags = le16_to_cpu(c->sq_flags);
    uint64_t prp1 = le64_to_cpu(c->prp1);

    trace_pci_nvme_create_sq(prp1, sqid, cqid, qsize, qflags);

    if (unlikely(!cqid || nvme_check_cqid(n, cqid))) {
        trace_pci_nvme_err_invalid_create_sq_cqid(cqid);
        return NVME_INVALID_CQID | NVME_DNR;
    }
    if (unlikely(!sqid || !nvme_check_sqid(n, sqid))) {
        trace_pci_nvme_err_invalid_create_sq_sqid(sqid);
        return NVME_INVALID_QID | NVME_DNR;
    }
    if (unlikely(!qsize || qsize > NVME_CAP_MQES(n->bar.cap))) {
        trace_pci_nvme_err_invalid_create_sq_size(qsize);
        return NVME_MAX_QSIZE_EXCEEDED | NVME_DNR;
    }
    if (unlikely(!prp1 || prp1 & (n->page_size - 1))) {
        trace_pci_nvme_err_invalid_create_sq_addr(prp1);
        return NVME_INVALID_FIELD | NVME_DNR;
    }
    if (unlikely(!(NVME_SQ_FLAGS_PC(qflags)))) {
        trace_pci_nvme_err_invalid_create_sq_qflags(NVME_SQ_FLAGS_PC(qflags));
        return NVME_INVALID_FIELD | NVME_DNR;
    }
    sq = g_malloc0(sizeof(*sq));
    nvme_init_sq(sq, n, prp1, sqid, cqid, qsize + 1);
    return NVME_SUCCESS;
}
static void nvme_free_cq(NvmeCQueue *cq, NvmeCtrl *n)
{
    n->cq[cq->cqid] = NULL;
    timer_del(cq->timer);
    timer_free(cq->timer);
    msix_vector_unuse(&n->parent_obj, cq->vector);
    if (cq->cqid) {
        g_free(cq);
    }
}
static uint16_t nvme_del_cq(NvmeCtrl *n, NvmeCmd *cmd)
{
    NvmeDeleteQ *c = (NvmeDeleteQ *)cmd;
    NvmeCQueue *cq;
    uint16_t qid = le16_to_cpu(c->qid);

    if (unlikely(!qid || nvme_check_cqid(n, qid))) {
        trace_pci_nvme_err_invalid_del_cq_cqid(qid);
        return NVME_INVALID_CQID | NVME_DNR;
    }

    cq = n->cq[qid];
    if (unlikely(!QTAILQ_EMPTY(&cq->sq_list))) {
        trace_pci_nvme_err_invalid_del_cq_notempty(qid);
        return NVME_INVALID_QUEUE_DEL;
    }
    nvme_irq_deassert(n, cq);
    trace_pci_nvme_del_cq(qid);
    nvme_free_cq(cq, n);
    return NVME_SUCCESS;
}
static void nvme_init_cq(NvmeCQueue *cq, NvmeCtrl *n, uint64_t dma_addr,
    uint16_t cqid, uint16_t vector, uint16_t size, uint16_t irq_enabled)
{
    cq->ctrl = n;
    cq->cqid = cqid;
    cq->size = size;
    cq->dma_addr = dma_addr;
    cq->phase = 1;
    cq->irq_enabled = irq_enabled;
    cq->vector = vector;
    cq->head = cq->tail = 0;
    QTAILQ_INIT(&cq->req_list);
    QTAILQ_INIT(&cq->sq_list);
    msix_vector_use(&n->parent_obj, cq->vector);
    n->cq[cqid] = cq;
    cq->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, nvme_post_cqes, cq);
}
static uint16_t nvme_create_cq(NvmeCtrl *n, NvmeCmd *cmd)
{
    NvmeCQueue *cq;
    NvmeCreateCq *c = (NvmeCreateCq *)cmd;
    uint16_t cqid = le16_to_cpu(c->cqid);
    uint16_t vector = le16_to_cpu(c->irq_vector);
    uint16_t qsize = le16_to_cpu(c->qsize);
    uint16_t qflags = le16_to_cpu(c->cq_flags);
    uint64_t prp1 = le64_to_cpu(c->prp1);

    trace_pci_nvme_create_cq(prp1, cqid, vector, qsize, qflags,
                             NVME_CQ_FLAGS_IEN(qflags) != 0);

    if (unlikely(!cqid || !nvme_check_cqid(n, cqid))) {
        trace_pci_nvme_err_invalid_create_cq_cqid(cqid);
        return NVME_INVALID_CQID | NVME_DNR;
    }
    if (unlikely(!qsize || qsize > NVME_CAP_MQES(n->bar.cap))) {
        trace_pci_nvme_err_invalid_create_cq_size(qsize);
        return NVME_MAX_QSIZE_EXCEEDED | NVME_DNR;
    }
    if (unlikely(!prp1)) {
        trace_pci_nvme_err_invalid_create_cq_addr(prp1);
        return NVME_INVALID_FIELD | NVME_DNR;
    }
    if (unlikely(vector > n->params.num_queues)) {
        trace_pci_nvme_err_invalid_create_cq_vector(vector);
        return NVME_INVALID_IRQ_VECTOR | NVME_DNR;
    }
    if (unlikely(!(NVME_CQ_FLAGS_PC(qflags)))) {
        trace_pci_nvme_err_invalid_create_cq_qflags(NVME_CQ_FLAGS_PC(qflags));
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    cq = g_malloc0(sizeof(*cq));
    nvme_init_cq(cq, n, prp1, cqid, vector, qsize + 1,
                 NVME_CQ_FLAGS_IEN(qflags));
    return NVME_SUCCESS;
}
static uint16_t nvme_identify_ctrl(NvmeCtrl *n, NvmeIdentify *c)
{
    uint64_t prp1 = le64_to_cpu(c->prp1);
    uint64_t prp2 = le64_to_cpu(c->prp2);

    trace_pci_nvme_identify_ctrl();

    return nvme_dma_read_prp(n, (uint8_t *)&n->id_ctrl, sizeof(n->id_ctrl),
                             prp1, prp2);
}
static uint16_t nvme_identify_ns(NvmeCtrl *n, NvmeIdentify *c)
{
    NvmeNamespace *ns;
    uint32_t nsid = le32_to_cpu(c->nsid);
    uint64_t prp1 = le64_to_cpu(c->prp1);
    uint64_t prp2 = le64_to_cpu(c->prp2);

    trace_pci_nvme_identify_ns(nsid);

    if (unlikely(nsid == 0 || nsid > n->num_namespaces)) {
        trace_pci_nvme_err_invalid_ns(nsid, n->num_namespaces);
        return NVME_INVALID_NSID | NVME_DNR;
    }

    ns = &n->namespaces[nsid - 1];

    return nvme_dma_read_prp(n, (uint8_t *)&ns->id_ns, sizeof(ns->id_ns),
                             prp1, prp2);
}
static uint16_t nvme_identify_nslist(NvmeCtrl *n, NvmeIdentify *c)
{
    static const int data_len = 4 * KiB;
    uint32_t min_nsid = le32_to_cpu(c->nsid);
    uint64_t prp1 = le64_to_cpu(c->prp1);
    uint64_t prp2 = le64_to_cpu(c->prp2);
    uint32_t *list;
    uint16_t ret;
    int i, j = 0;

    trace_pci_nvme_identify_nslist(min_nsid);

    list = g_malloc0(data_len);
    for (i = 0; i < n->num_namespaces; i++) {
        if (i < min_nsid) {
            continue;
        }
        list[j++] = cpu_to_le32(i + 1);
        if (j == data_len / sizeof(uint32_t)) {
            break;
        }
    }
    ret = nvme_dma_read_prp(n, (uint8_t *)list, data_len, prp1, prp2);
    g_free(list);
    return ret;
}
static uint16_t nvme_identify(NvmeCtrl *n, NvmeCmd *cmd)
{
    NvmeIdentify *c = (NvmeIdentify *)cmd;

    switch (le32_to_cpu(c->cns)) {
    case 0x00:
        return nvme_identify_ns(n, c);
    case 0x01:
        return nvme_identify_ctrl(n, c);
    case 0x02:
        return nvme_identify_nslist(n, c);
    default:
        trace_pci_nvme_err_invalid_identify_cns(le32_to_cpu(c->cns));
        return NVME_INVALID_FIELD | NVME_DNR;
    }
}
static inline void nvme_set_timestamp(NvmeCtrl *n, uint64_t ts)
{
    trace_pci_nvme_setfeat_timestamp(ts);

    n->host_timestamp = le64_to_cpu(ts);
    n->timestamp_set_qemu_clock_ms = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
}
static inline uint64_t nvme_get_timestamp(const NvmeCtrl *n)
{
    uint64_t current_time = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
    uint64_t elapsed_time = current_time - n->timestamp_set_qemu_clock_ms;

    union nvme_timestamp {
        struct {
            uint64_t timestamp:48;
            uint64_t sync:1;
            uint64_t origin:3;
            uint64_t rsvd1:12;
        };
        uint64_t all;
    };

    union nvme_timestamp ts;
    ts.all = 0;

    /*
     * If the sum of the Timestamp value set by the host and the elapsed
     * time exceeds 2^48, the value returned should be reduced modulo 2^48.
     */
    ts.timestamp = (n->host_timestamp + elapsed_time) & 0xffffffffffff;

    /* If the host timestamp is non-zero, set the timestamp origin */
    ts.origin = n->host_timestamp ? 0x01 : 0x00;

    trace_pci_nvme_getfeat_timestamp(ts.all);

    return cpu_to_le64(ts.all);
}
static uint16_t nvme_get_feature_timestamp(NvmeCtrl *n, NvmeCmd *cmd)
{
    uint64_t prp1 = le64_to_cpu(cmd->prp1);
    uint64_t prp2 = le64_to_cpu(cmd->prp2);

    uint64_t timestamp = nvme_get_timestamp(n);

    return nvme_dma_read_prp(n, (uint8_t *)&timestamp,
                             sizeof(timestamp), prp1, prp2);
}
static uint16_t nvme_get_feature(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
{
    uint32_t dw10 = le32_to_cpu(cmd->cdw10);
    uint32_t result;

    switch (dw10) {
    case NVME_VOLATILE_WRITE_CACHE:
        result = blk_enable_write_cache(n->conf.blk);
        trace_pci_nvme_getfeat_vwcache(result ? "enabled" : "disabled");
        break;
    case NVME_NUMBER_OF_QUEUES:
        result = cpu_to_le32((n->params.num_queues - 2) |
                             ((n->params.num_queues - 2) << 16));
        trace_pci_nvme_getfeat_numq(result);
        break;
    case NVME_TIMESTAMP:
        return nvme_get_feature_timestamp(n, cmd);
    default:
        trace_pci_nvme_err_invalid_getfeat(dw10);
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    req->cqe.result = result;
    return NVME_SUCCESS;
}
static uint16_t nvme_set_feature_timestamp(NvmeCtrl *n, NvmeCmd *cmd)
{
    uint16_t ret;
    uint64_t timestamp;
    uint64_t prp1 = le64_to_cpu(cmd->prp1);
    uint64_t prp2 = le64_to_cpu(cmd->prp2);

    ret = nvme_dma_write_prp(n, (uint8_t *)&timestamp,
                             sizeof(timestamp), prp1, prp2);
    if (ret != NVME_SUCCESS) {
        return ret;
    }

    nvme_set_timestamp(n, timestamp);

    return NVME_SUCCESS;
}
static uint16_t nvme_set_feature(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
{
    uint32_t dw10 = le32_to_cpu(cmd->cdw10);
    uint32_t dw11 = le32_to_cpu(cmd->cdw11);

    switch (dw10) {
    case NVME_VOLATILE_WRITE_CACHE:
        blk_set_enable_write_cache(n->conf.blk, dw11 & 1);
        break;
    case NVME_NUMBER_OF_QUEUES:
        trace_pci_nvme_setfeat_numq((dw11 & 0xFFFF) + 1,
                                    ((dw11 >> 16) & 0xFFFF) + 1,
                                    n->params.num_queues - 1,
                                    n->params.num_queues - 1);
        req->cqe.result = cpu_to_le32((n->params.num_queues - 2) |
                                      ((n->params.num_queues - 2) << 16));
        break;
    case NVME_TIMESTAMP:
        return nvme_set_feature_timestamp(n, cmd);
    default:
        trace_pci_nvme_err_invalid_setfeat(dw10);
        return NVME_INVALID_FIELD | NVME_DNR;
    }
    return NVME_SUCCESS;
}
static uint16_t nvme_admin_cmd(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
{
    switch (cmd->opcode) {
    case NVME_ADM_CMD_DELETE_SQ:
        return nvme_del_sq(n, cmd);
    case NVME_ADM_CMD_CREATE_SQ:
        return nvme_create_sq(n, cmd);
    case NVME_ADM_CMD_DELETE_CQ:
        return nvme_del_cq(n, cmd);
    case NVME_ADM_CMD_CREATE_CQ:
        return nvme_create_cq(n, cmd);
    case NVME_ADM_CMD_IDENTIFY:
        return nvme_identify(n, cmd);
    case NVME_ADM_CMD_SET_FEATURES:
        return nvme_set_feature(n, cmd, req);
    case NVME_ADM_CMD_GET_FEATURES:
        return nvme_get_feature(n, cmd, req);
    default:
        trace_pci_nvme_err_invalid_admin_opc(cmd->opcode);
        return NVME_INVALID_OPCODE | NVME_DNR;
    }
}
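
/*
 * Fetch and dispatch commands from a submission queue: read the SQE at the
 * current head, then route it to the I/O or admin handler depending on
 * whether this is the admin queue (sqid == 0).
 */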
static void nvme_process_sq(void *opaque)
{
    NvmeSQueue *sq = opaque;
    NvmeCtrl *n = sq->ctrl;
    NvmeCQueue *cq = n->cq[sq->cqid];

    uint16_t status;
    hwaddr addr;
    NvmeCmd cmd;
    NvmeRequest *req;

    while (!(nvme_sq_empty(sq) || QTAILQ_EMPTY(&sq->req_list))) {
        addr = sq->dma_addr + sq->head * n->sqe_size;
        nvme_addr_read(n, addr, (void *)&cmd, sizeof(cmd));
        nvme_inc_sq_head(sq);

        req = QTAILQ_FIRST(&sq->req_list);
        QTAILQ_REMOVE(&sq->req_list, req, entry);
        QTAILQ_INSERT_TAIL(&sq->out_req_list, req, entry);
        memset(&req->cqe, 0, sizeof(req->cqe));
        req->cqe.cid = cmd.cid;

        status = sq->sqid ? nvme_io_cmd(n, &cmd, req) :
            nvme_admin_cmd(n, &cmd, req);
        if (status != NVME_NO_COMPLETE) {
            req->status = status;
            nvme_enqueue_req_completion(cq, req);
        }
    }
}
static void nvme_clear_ctrl(NvmeCtrl *n)
{
    int i;

    blk_drain(n->conf.blk);

    for (i = 0; i < n->params.num_queues; i++) {
        if (n->sq[i] != NULL) {
            nvme_free_sq(n->sq[i], n);
        }
    }
    for (i = 0; i < n->params.num_queues; i++) {
        if (n->cq[i] != NULL) {
            nvme_free_cq(n->cq[i], n);
        }
    }

    blk_flush(n->conf.blk);
    n->bar.cc = 0;
}
static int nvme_start_ctrl(NvmeCtrl *n)
{
    uint32_t page_bits = NVME_CC_MPS(n->bar.cc) + 12;
    uint32_t page_size = 1 << page_bits;

    if (unlikely(n->cq[0])) {
        trace_pci_nvme_err_startfail_cq();
        return -1;
    }
    if (unlikely(n->sq[0])) {
        trace_pci_nvme_err_startfail_sq();
        return -1;
    }
    if (unlikely(!n->bar.asq)) {
        trace_pci_nvme_err_startfail_nbarasq();
        return -1;
    }
    if (unlikely(!n->bar.acq)) {
        trace_pci_nvme_err_startfail_nbaracq();
        return -1;
    }
    if (unlikely(n->bar.asq & (page_size - 1))) {
        trace_pci_nvme_err_startfail_asq_misaligned(n->bar.asq);
        return -1;
    }
    if (unlikely(n->bar.acq & (page_size - 1))) {
        trace_pci_nvme_err_startfail_acq_misaligned(n->bar.acq);
        return -1;
    }
    if (unlikely(NVME_CC_MPS(n->bar.cc) <
                 NVME_CAP_MPSMIN(n->bar.cap))) {
        trace_pci_nvme_err_startfail_page_too_small(
                    NVME_CC_MPS(n->bar.cc),
                    NVME_CAP_MPSMIN(n->bar.cap));
        return -1;
    }
    if (unlikely(NVME_CC_MPS(n->bar.cc) >
                 NVME_CAP_MPSMAX(n->bar.cap))) {
        trace_pci_nvme_err_startfail_page_too_large(
                    NVME_CC_MPS(n->bar.cc),
                    NVME_CAP_MPSMAX(n->bar.cap));
        return -1;
    }
    if (unlikely(NVME_CC_IOCQES(n->bar.cc) <
                 NVME_CTRL_CQES_MIN(n->id_ctrl.cqes))) {
        trace_pci_nvme_err_startfail_cqent_too_small(
                    NVME_CC_IOCQES(n->bar.cc),
                    NVME_CTRL_CQES_MIN(n->bar.cap));
        return -1;
    }
    if (unlikely(NVME_CC_IOCQES(n->bar.cc) >
                 NVME_CTRL_CQES_MAX(n->id_ctrl.cqes))) {
        trace_pci_nvme_err_startfail_cqent_too_large(
                    NVME_CC_IOCQES(n->bar.cc),
                    NVME_CTRL_CQES_MAX(n->bar.cap));
        return -1;
    }
    if (unlikely(NVME_CC_IOSQES(n->bar.cc) <
                 NVME_CTRL_SQES_MIN(n->id_ctrl.sqes))) {
        trace_pci_nvme_err_startfail_sqent_too_small(
                    NVME_CC_IOSQES(n->bar.cc),
                    NVME_CTRL_SQES_MIN(n->bar.cap));
        return -1;
    }
    if (unlikely(NVME_CC_IOSQES(n->bar.cc) >
                 NVME_CTRL_SQES_MAX(n->id_ctrl.sqes))) {
        trace_pci_nvme_err_startfail_sqent_too_large(
                    NVME_CC_IOSQES(n->bar.cc),
                    NVME_CTRL_SQES_MAX(n->bar.cap));
        return -1;
    }
    if (unlikely(!NVME_AQA_ASQS(n->bar.aqa))) {
        trace_pci_nvme_err_startfail_asqent_sz_zero();
        return -1;
    }
    if (unlikely(!NVME_AQA_ACQS(n->bar.aqa))) {
        trace_pci_nvme_err_startfail_acqent_sz_zero();
        return -1;
    }

    n->page_bits = page_bits;
    n->page_size = page_size;
    n->max_prp_ents = n->page_size / sizeof(uint64_t);
    n->cqe_size = 1 << NVME_CC_IOCQES(n->bar.cc);
    n->sqe_size = 1 << NVME_CC_IOSQES(n->bar.cc);
    nvme_init_cq(&n->admin_cq, n, n->bar.acq, 0, 0,
                 NVME_AQA_ACQS(n->bar.aqa) + 1, 1);
    nvme_init_sq(&n->admin_sq, n, n->bar.asq, 0, 0,
                 NVME_AQA_ASQS(n->bar.aqa) + 1);

    nvme_set_timestamp(n, 0ULL);

    return 0;
}
static void nvme_write_bar(NvmeCtrl *n, hwaddr offset, uint64_t data,
    unsigned size)
{
    if (unlikely(offset & (sizeof(uint32_t) - 1))) {
        NVME_GUEST_ERR(pci_nvme_ub_mmiowr_misaligned32,
                       "MMIO write not 32-bit aligned,"
                       " offset=0x%"PRIx64"", offset);
        /* should be ignored, fall through for now */
    }

    if (unlikely(size < sizeof(uint32_t))) {
        NVME_GUEST_ERR(pci_nvme_ub_mmiowr_toosmall,
                       "MMIO write smaller than 32-bits,"
                       " offset=0x%"PRIx64", size=%u",
                       offset, size);
        /* should be ignored, fall through for now */
    }

    switch (offset) {
    case 0xc:   /* INTMS */
        if (unlikely(msix_enabled(&(n->parent_obj)))) {
            NVME_GUEST_ERR(pci_nvme_ub_mmiowr_intmask_with_msix,
                           "undefined access to interrupt mask set"
                           " when MSI-X is enabled");
            /* should be ignored, fall through for now */
        }
        n->bar.intms |= data & 0xffffffff;
        n->bar.intmc = n->bar.intms;
        trace_pci_nvme_mmio_intm_set(data & 0xffffffff, n->bar.intmc);
        nvme_irq_check(n);
        break;
    case 0x10:  /* INTMC */
        if (unlikely(msix_enabled(&(n->parent_obj)))) {
            NVME_GUEST_ERR(pci_nvme_ub_mmiowr_intmask_with_msix,
                           "undefined access to interrupt mask clr"
                           " when MSI-X is enabled");
            /* should be ignored, fall through for now */
        }
        n->bar.intms &= ~(data & 0xffffffff);
        n->bar.intmc = n->bar.intms;
        trace_pci_nvme_mmio_intm_clr(data & 0xffffffff, n->bar.intmc);
        nvme_irq_check(n);
        break;
    case 0x14:  /* CC */
        trace_pci_nvme_mmio_cfg(data & 0xffffffff);
        /* Windows first sends data, then sends enable bit */
        if (!NVME_CC_EN(data) && !NVME_CC_EN(n->bar.cc) &&
            !NVME_CC_SHN(data) && !NVME_CC_SHN(n->bar.cc))
        {
            n->bar.cc = data;
        }

        if (NVME_CC_EN(data) && !NVME_CC_EN(n->bar.cc)) {
            n->bar.cc = data;
            if (unlikely(nvme_start_ctrl(n))) {
                trace_pci_nvme_err_startfail();
                n->bar.csts = NVME_CSTS_FAILED;
            } else {
                trace_pci_nvme_mmio_start_success();
                n->bar.csts = NVME_CSTS_READY;
            }
        } else if (!NVME_CC_EN(data) && NVME_CC_EN(n->bar.cc)) {
            trace_pci_nvme_mmio_stopped();
            nvme_clear_ctrl(n);
            n->bar.csts &= ~NVME_CSTS_READY;
        }
        if (NVME_CC_SHN(data) && !(NVME_CC_SHN(n->bar.cc))) {
            trace_pci_nvme_mmio_shutdown_set();
            nvme_clear_ctrl(n);
            n->bar.cc = data;
            n->bar.csts |= NVME_CSTS_SHST_COMPLETE;
        } else if (!NVME_CC_SHN(data) && NVME_CC_SHN(n->bar.cc)) {
            trace_pci_nvme_mmio_shutdown_cleared();
            n->bar.csts &= ~NVME_CSTS_SHST_COMPLETE;
            n->bar.cc = data;
        }
        break;
    case 0x1C:  /* CSTS */
        if (data & (1 << 4)) {
            NVME_GUEST_ERR(pci_nvme_ub_mmiowr_ssreset_w1c_unsupported,
                           "attempted to W1C CSTS.NSSRO"
                           " but CAP.NSSRS is zero (not supported)");
        } else if (data != 0) {
            NVME_GUEST_ERR(pci_nvme_ub_mmiowr_ro_csts,
                           "attempted to set a read only bit"
                           " of controller status");
        }
        break;
    case 0x20:  /* NSSR */
        if (data == 0x4E564D65) {
            trace_pci_nvme_ub_mmiowr_ssreset_unsupported();
        } else {
            /* The spec says that writes of other values have no effect */
        }
        break;
    case 0x24:  /* AQA */
        n->bar.aqa = data & 0xffffffff;
        trace_pci_nvme_mmio_aqattr(data & 0xffffffff);
        break;
    case 0x28:  /* ASQ */
        n->bar.asq = data;
        trace_pci_nvme_mmio_asqaddr(data);
        break;
    case 0x2c:  /* ASQ hi */
        n->bar.asq |= data << 32;
        trace_pci_nvme_mmio_asqaddr_hi(data, n->bar.asq);
        break;
    case 0x30:  /* ACQ */
        trace_pci_nvme_mmio_acqaddr(data);
        n->bar.acq = data;
        break;
    case 0x34:  /* ACQ hi */
        n->bar.acq |= data << 32;
        trace_pci_nvme_mmio_acqaddr_hi(data, n->bar.acq);
        break;
    case 0x38:  /* CMBLOC */
        NVME_GUEST_ERR(pci_nvme_ub_mmiowr_cmbloc_reserved,
                       "invalid write to reserved CMBLOC"
                       " when CMBSZ is zero, ignored");
        return;
    case 0x3C:  /* CMBSZ */
        NVME_GUEST_ERR(pci_nvme_ub_mmiowr_cmbsz_readonly,
                       "invalid write to read only CMBSZ, ignored");
        return;
    case 0xE00: /* PMRCAP */
        NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrcap_readonly,
                       "invalid write to PMRCAP register, ignored");
        return;
    case 0xE04: /* TODO PMRCTL */
        break;
    case 0xE08: /* PMRSTS */
        NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrsts_readonly,
                       "invalid write to PMRSTS register, ignored");
        return;
    case 0xE0C: /* PMREBS */
        NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrebs_readonly,
                       "invalid write to PMREBS register, ignored");
        return;
    case 0xE10: /* PMRSWTP */
        NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrswtp_readonly,
                       "invalid write to PMRSWTP register, ignored");
        return;
    case 0xE14: /* TODO PMRMSC */
        break;
    default:
        NVME_GUEST_ERR(pci_nvme_ub_mmiowr_invalid,
                       "invalid MMIO write,"
                       " offset=0x%"PRIx64", data=%"PRIx64"",
                       offset, data);
        break;
    }
}
static uint64_t nvme_mmio_read(void *opaque, hwaddr addr, unsigned size)
{
    NvmeCtrl *n = (NvmeCtrl *)opaque;
    uint8_t *ptr = (uint8_t *)&n->bar;
    uint64_t val = 0;

    if (unlikely(addr & (sizeof(uint32_t) - 1))) {
        NVME_GUEST_ERR(pci_nvme_ub_mmiord_misaligned32,
                       "MMIO read not 32-bit aligned,"
                       " offset=0x%"PRIx64"", addr);
        /* should RAZ, fall through for now */
    } else if (unlikely(size < sizeof(uint32_t))) {
        NVME_GUEST_ERR(pci_nvme_ub_mmiord_toosmall,
                       "MMIO read smaller than 32-bits,"
                       " offset=0x%"PRIx64"", addr);
        /* should RAZ, fall through for now */
    }

    if (addr < sizeof(n->bar)) {
        /*
         * When PMRWBM bit 1 is set then read from
         * from PMRSTS should ensure prior writes
         * made it to persistent media
         */
        if (addr == 0xE08 &&
            (NVME_PMRCAP_PMRWBM(n->bar.pmrcap) & 0x02)) {
            memory_region_msync(&n->pmrdev->mr, 0, n->pmrdev->size);
        }
        memcpy(&val, ptr + addr, size);
    } else {
        NVME_GUEST_ERR(pci_nvme_ub_mmiord_invalid_ofs,
                       "MMIO read beyond last register,"
                       " offset=0x%"PRIx64", returning 0", addr);
    }

    return val;
}
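
/*
 * Doorbell writes land at BAR0 offset 0x1000 onwards. With a doorbell
 * stride of 0, each queue pair owns two consecutive 32-bit doorbells
 * (SQ tail, then CQ head), so bit 2 of the offset distinguishes SQ from
 * CQ and the queue id is (offset - 0x1000) >> 3.
 */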
static void nvme_process_db(NvmeCtrl *n, hwaddr addr, int val)
{
    uint32_t qid;

    if (unlikely(addr & ((1 << 2) - 1))) {
        NVME_GUEST_ERR(pci_nvme_ub_db_wr_misaligned,
                       "doorbell write not 32-bit aligned,"
                       " offset=0x%"PRIx64", ignoring", addr);
        return;
    }

    if (((addr - 0x1000) >> 2) & 1) {
        /* Completion queue doorbell write */

        uint16_t new_head = val & 0xffff;
        int start_sqs;
        NvmeCQueue *cq;

        qid = (addr - (0x1000 + (1 << 2))) >> 3;
        if (unlikely(nvme_check_cqid(n, qid))) {
            NVME_GUEST_ERR(pci_nvme_ub_db_wr_invalid_cq,
                           "completion queue doorbell write"
                           " for nonexistent queue,"
                           " sqid=%"PRIu32", ignoring", qid);
            return;
        }

        cq = n->cq[qid];
        if (unlikely(new_head >= cq->size)) {
            NVME_GUEST_ERR(pci_nvme_ub_db_wr_invalid_cqhead,
                           "completion queue doorbell write value"
                           " beyond queue size, sqid=%"PRIu32","
                           " new_head=%"PRIu16", ignoring",
                           qid, new_head);
            return;
        }

        start_sqs = nvme_cq_full(cq) ? 1 : 0;
        cq->head = new_head;
        if (start_sqs) {
            NvmeSQueue *sq;
            QTAILQ_FOREACH(sq, &cq->sq_list, entry) {
                timer_mod(sq->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 500);
            }
            timer_mod(cq->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 500);
        }

        if (cq->tail == cq->head) {
            nvme_irq_deassert(n, cq);
        }
    } else {
        /* Submission queue doorbell write */

        uint16_t new_tail = val & 0xffff;
        NvmeSQueue *sq;

        qid = (addr - 0x1000) >> 3;
        if (unlikely(nvme_check_sqid(n, qid))) {
            NVME_GUEST_ERR(pci_nvme_ub_db_wr_invalid_sq,
                           "submission queue doorbell write"
                           " for nonexistent queue,"
                           " sqid=%"PRIu32", ignoring", qid);
            return;
        }

        sq = n->sq[qid];
        if (unlikely(new_tail >= sq->size)) {
            NVME_GUEST_ERR(pci_nvme_ub_db_wr_invalid_sqtail,
                           "submission queue doorbell write value"
                           " beyond queue size, sqid=%"PRIu32","
                           " new_tail=%"PRIu16", ignoring",
                           qid, new_tail);
            return;
        }

        sq->tail = new_tail;
        timer_mod(sq->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 500);
    }
}
static void nvme_mmio_write(void *opaque, hwaddr addr, uint64_t data,
    unsigned size)
{
    NvmeCtrl *n = (NvmeCtrl *)opaque;
    if (addr < sizeof(n->bar)) {
        nvme_write_bar(n, addr, data, size);
    } else if (addr >= 0x1000) {
        nvme_process_db(n, addr, data);
    }
}
static const MemoryRegionOps nvme_mmio_ops = {
    .read = nvme_mmio_read,
    .write = nvme_mmio_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 2,
        .max_access_size = 8,
    },
};
static void nvme_cmb_write(void *opaque, hwaddr addr, uint64_t data,
    unsigned size)
{
    NvmeCtrl *n = (NvmeCtrl *)opaque;
    stn_le_p(&n->cmbuf[addr], size, data);
}
static uint64_t nvme_cmb_read(void *opaque, hwaddr addr, unsigned size)
{
    NvmeCtrl *n = (NvmeCtrl *)opaque;
    return ldn_le_p(&n->cmbuf[addr], size);
}
static const MemoryRegionOps nvme_cmb_ops = {
    .read = nvme_cmb_read,
    .write = nvme_cmb_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};
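
/*
 * Device realization. BAR0 carries the register set and doorbells, the
 * MSI-X table gets its own exclusive BAR, and BAR2 is used for either the
 * CMB or the PMR backend, which is why cmb_size_mb and pmrdev are mutually
 * exclusive.
 */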
static void nvme_realize(PCIDevice *pci_dev, Error **errp)
{
    NvmeCtrl *n = NVME(pci_dev);
    NvmeIdCtrl *id = &n->id_ctrl;

    int i;
    int64_t bs_size;
    uint8_t *pci_conf;

    if (!n->params.num_queues) {
        error_setg(errp, "num_queues can't be zero");
        return;
    }

    if (!n->conf.blk) {
        error_setg(errp, "drive property not set");
        return;
    }

    bs_size = blk_getlength(n->conf.blk);
    if (bs_size < 0) {
        error_setg(errp, "could not get backing file size");
        return;
    }

    if (!n->params.serial) {
        error_setg(errp, "serial property not set");
        return;
    }

    if (!n->params.cmb_size_mb && n->pmrdev) {
        if (host_memory_backend_is_mapped(n->pmrdev)) {
            char *path = object_get_canonical_path_component(OBJECT(n->pmrdev));
            error_setg(errp, "can't use already busy memdev: %s", path);
            g_free(path);
            return;
        }

        if (!is_power_of_2(n->pmrdev->size)) {
            error_setg(errp, "pmr backend size needs to be power of 2 in size");
            return;
        }

        host_memory_backend_set_mapped(n->pmrdev, true);
    }

    blkconf_blocksizes(&n->conf);
    if (!blkconf_apply_backend_options(&n->conf, blk_is_read_only(n->conf.blk),
                                       false, errp)) {
        return;
    }

    pci_conf = pci_dev->config;
    pci_conf[PCI_INTERRUPT_PIN] = 1;
    pci_config_set_prog_interface(pci_dev->config, 0x2);
    pci_config_set_class(pci_dev->config, PCI_CLASS_STORAGE_EXPRESS);
    pcie_endpoint_cap_init(pci_dev, 0x80);

    n->num_namespaces = 1;

    /* num_queues is really number of pairs, so each has two doorbells */
    n->reg_size = pow2ceil(NVME_REG_SIZE +
                           2 * n->params.num_queues * NVME_DB_SIZE);
    n->ns_size = bs_size / (uint64_t)n->num_namespaces;

    n->namespaces = g_new0(NvmeNamespace, n->num_namespaces);
    n->sq = g_new0(NvmeSQueue *, n->params.num_queues);
    n->cq = g_new0(NvmeCQueue *, n->params.num_queues);

    memory_region_init_io(&n->iomem, OBJECT(n), &nvme_mmio_ops, n,
                          "nvme", n->reg_size);
    pci_register_bar(pci_dev, 0,
        PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_TYPE_64,
        &n->iomem);
    msix_init_exclusive_bar(pci_dev, n->params.num_queues, 4, NULL);

    id->vid = cpu_to_le16(pci_get_word(pci_conf + PCI_VENDOR_ID));
    id->ssvid = cpu_to_le16(pci_get_word(pci_conf + PCI_SUBSYSTEM_VENDOR_ID));
    strpadcpy((char *)id->mn, sizeof(id->mn), "QEMU NVMe Ctrl", ' ');
    strpadcpy((char *)id->fr, sizeof(id->fr), "1.0", ' ');
    strpadcpy((char *)id->sn, sizeof(id->sn), n->params.serial, ' ');
    id->oacs = cpu_to_le16(0);
    id->sqes = (0x6 << 4) | 0x6;
    id->cqes = (0x4 << 4) | 0x4;
    id->nn = cpu_to_le32(n->num_namespaces);
    id->oncs = cpu_to_le16(NVME_ONCS_WRITE_ZEROS | NVME_ONCS_TIMESTAMP);
    id->psd[0].mp = cpu_to_le16(0x9c4);
    id->psd[0].enlat = cpu_to_le32(0x10);
    id->psd[0].exlat = cpu_to_le32(0x4);
    if (blk_enable_write_cache(n->conf.blk)) {
        id->vwc = 1;
    }

    NVME_CAP_SET_MQES(n->bar.cap, 0x7ff);
    NVME_CAP_SET_CQR(n->bar.cap, 1);
    NVME_CAP_SET_TO(n->bar.cap, 0xf);
    NVME_CAP_SET_CSS(n->bar.cap, 1);
    NVME_CAP_SET_MPSMAX(n->bar.cap, 4);

    n->bar.vs = 0x00010200;
    n->bar.intmc = n->bar.intms = 0;

    if (n->params.cmb_size_mb) {
        NVME_CMBLOC_SET_BIR(n->bar.cmbloc, 2);
        NVME_CMBLOC_SET_OFST(n->bar.cmbloc, 0);

        NVME_CMBSZ_SET_SQS(n->bar.cmbsz, 1);
        NVME_CMBSZ_SET_CQS(n->bar.cmbsz, 0);
        NVME_CMBSZ_SET_LISTS(n->bar.cmbsz, 0);
        NVME_CMBSZ_SET_RDS(n->bar.cmbsz, 1);
        NVME_CMBSZ_SET_WDS(n->bar.cmbsz, 1);
        NVME_CMBSZ_SET_SZU(n->bar.cmbsz, 2); /* MBs */
        NVME_CMBSZ_SET_SZ(n->bar.cmbsz, n->params.cmb_size_mb);

        n->cmbloc = n->bar.cmbloc;
        n->cmbsz = n->bar.cmbsz;

        n->cmbuf = g_malloc0(NVME_CMBSZ_GETSIZE(n->bar.cmbsz));
        memory_region_init_io(&n->ctrl_mem, OBJECT(n), &nvme_cmb_ops, n,
                              "nvme-cmb", NVME_CMBSZ_GETSIZE(n->bar.cmbsz));
        pci_register_bar(pci_dev, NVME_CMBLOC_BIR(n->bar.cmbloc),
            PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_TYPE_64 |
            PCI_BASE_ADDRESS_MEM_PREFETCH, &n->ctrl_mem);

    } else if (n->pmrdev) {
        /* Controller Capabilities register */
        NVME_CAP_SET_PMRS(n->bar.cap, 1);

        /* PMR Capabities register */
        NVME_PMRCAP_SET_RDS(n->bar.pmrcap, 0);
        NVME_PMRCAP_SET_WDS(n->bar.pmrcap, 0);
        NVME_PMRCAP_SET_BIR(n->bar.pmrcap, 2);
        NVME_PMRCAP_SET_PMRTU(n->bar.pmrcap, 0);
        /* Turn on bit 1 support */
        NVME_PMRCAP_SET_PMRWBM(n->bar.pmrcap, 0x02);
        NVME_PMRCAP_SET_PMRTO(n->bar.pmrcap, 0);
        NVME_PMRCAP_SET_CMSS(n->bar.pmrcap, 0);

        /* PMR Control register */
        NVME_PMRCTL_SET_EN(n->bar.pmrctl, 0);

        /* PMR Status register */
        NVME_PMRSTS_SET_ERR(n->bar.pmrsts, 0);
        NVME_PMRSTS_SET_NRDY(n->bar.pmrsts, 0);
        NVME_PMRSTS_SET_HSTS(n->bar.pmrsts, 0);
        NVME_PMRSTS_SET_CBAI(n->bar.pmrsts, 0);

        /* PMR Elasticity Buffer Size register */
        NVME_PMREBS_SET_PMRSZU(n->bar.pmrebs, 0);
        NVME_PMREBS_SET_RBB(n->bar.pmrebs, 0);
        NVME_PMREBS_SET_PMRWBZ(n->bar.pmrebs, 0);

        /* PMR Sustained Write Throughput register */
        NVME_PMRSWTP_SET_PMRSWTU(n->bar.pmrswtp, 0);
        NVME_PMRSWTP_SET_PMRSWTV(n->bar.pmrswtp, 0);

        /* PMR Memory Space Control register */
        NVME_PMRMSC_SET_CMSE(n->bar.pmrmsc, 0);
        NVME_PMRMSC_SET_CBA(n->bar.pmrmsc, 0);

        pci_register_bar(pci_dev, NVME_PMRCAP_BIR(n->bar.pmrcap),
            PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_TYPE_64 |
            PCI_BASE_ADDRESS_MEM_PREFETCH, &n->pmrdev->mr);
    }

    for (i = 0; i < n->num_namespaces; i++) {
        NvmeNamespace *ns = &n->namespaces[i];
        NvmeIdNs *id_ns = &ns->id_ns;

        id_ns->lbaf[0].ds = BDRV_SECTOR_BITS;
        id_ns->ncap = id_ns->nuse = id_ns->nsze =
            cpu_to_le64(n->ns_size >>
                id_ns->lbaf[NVME_ID_NS_FLBAS_INDEX(ns->id_ns.flbas)].ds);
    }
}
static void nvme_exit(PCIDevice *pci_dev)
{
    NvmeCtrl *n = NVME(pci_dev);

    nvme_clear_ctrl(n);
    g_free(n->namespaces);
    g_free(n->cq);
    g_free(n->sq);

    if (n->params.cmb_size_mb) {
        g_free(n->cmbuf);
    }

    if (n->pmrdev) {
        host_memory_backend_set_mapped(n->pmrdev, false);
    }
    msix_uninit_exclusive_bar(pci_dev);
}
static Property nvme_props[] = {
    DEFINE_BLOCK_PROPERTIES(NvmeCtrl, conf),
    DEFINE_PROP_LINK("pmrdev", NvmeCtrl, pmrdev, TYPE_MEMORY_BACKEND,
                     HostMemoryBackend *),
    DEFINE_PROP_STRING("serial", NvmeCtrl, params.serial),
    DEFINE_PROP_UINT32("cmb_size_mb", NvmeCtrl, params.cmb_size_mb, 0),
    DEFINE_PROP_UINT32("num_queues", NvmeCtrl, params.num_queues, 64),
    DEFINE_PROP_END_OF_LIST(),
};
static const VMStateDescription nvme_vmstate = {
    .name = "nvme",
    .unmigratable = 1,
};
static void nvme_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);
    PCIDeviceClass *pc = PCI_DEVICE_CLASS(oc);

    pc->realize = nvme_realize;
    pc->exit = nvme_exit;
    pc->class_id = PCI_CLASS_STORAGE_EXPRESS;
    pc->vendor_id = PCI_VENDOR_ID_INTEL;
    pc->device_id = 0x5845;

    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
    dc->desc = "Non-Volatile Memory Express";
    device_class_set_props(dc, nvme_props);
    dc->vmsd = &nvme_vmstate;
}
static void nvme_instance_init(Object *obj)
{
    NvmeCtrl *s = NVME(obj);

    device_add_bootindex_property(obj, &s->conf.bootindex,
                                  "bootindex", "/namespace@1,0",
                                  DEVICE(obj));
}
static const TypeInfo nvme_info = {
    .name          = TYPE_NVME,
    .parent        = TYPE_PCI_DEVICE,
    .instance_size = sizeof(NvmeCtrl),
    .class_init    = nvme_class_init,
    .instance_init = nvme_instance_init,
    .interfaces = (InterfaceInfo[]) {
        { INTERFACE_PCIE_DEVICE },
        { }
    },
};
static void nvme_register_types(void)
{
    type_register_static(&nvme_info);
}

type_init(nvme_register_types)