/*
 * QEMU NVM Express Controller
 *
 * Copyright (c) 2012, Intel Corporation
 *
 * Written by Keith Busch <keith.busch@intel.com>
 *
 * This code is licensed under the GNU GPL v2 or later.
 */

/**
 * Reference Specs: http://www.nvmexpress.org, 1.2, 1.1, 1.0e
 *
 *  http://www.nvmexpress.org/resources/
 *
 * Usage: add options:
 *      -drive file=<file>,if=none,id=<drive_id>
 *      -device nvme,drive=<drive_id>,serial=<serial>,id=<id[optional]>, \
 *              cmb_size_mb=<cmb_size_mb[optional]>, \
 *              [pmrdev=<mem_backend_file_id>,] \
 *              num_queues=<N[optional]>
 *
 * Note cmb_size_mb denotes size of CMB in MB. CMB is assumed to be at
 * offset 0 in BAR2 and supports only WDS, RDS and SQS for now.
 *
 * cmb_size_mb= and pmrdev= options are mutually exclusive due to limitation
 * in available BARs. cmb_size_mb= will take precedence over pmrdev= when
 * both are provided.
 *
 * Enabling pmr emulation can be achieved by pointing to memory-backend-file.
 * For example:
 * -object memory-backend-file,id=<mem_id>,share=on,mem-path=<file_path>, \
 *  size=<size> .... -device nvme,...,pmrdev=<mem_id>
 */
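
/*
 * A minimal end-to-end invocation built from the options above. The image
 * path, serial and CMB size here are illustrative only:
 *
 *     qemu-system-x86_64 ... \
 *         -drive file=/tmp/nvme.img,if=none,id=nvm \
 *         -device nvme,drive=nvm,serial=deadbeef,cmb_size_mb=64
 */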
#include "qemu/osdep.h"
#include "qemu/units.h"
#include "hw/block/block.h"
#include "hw/pci/msix.h"
#include "hw/pci/pci.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"
#include "sysemu/sysemu.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "sysemu/hostmem.h"
#include "sysemu/block-backend.h"
#include "exec/memory.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qemu/cutils.h"
#include "trace.h"
#include "nvme.h"

#define NVME_REG_SIZE 0x1000
#define NVME_DB_SIZE  4

#define NVME_GUEST_ERR(trace, fmt, ...) \
    do { \
        (trace_##trace)(__VA_ARGS__); \
        qemu_log_mask(LOG_GUEST_ERROR, #trace \
            " in %s: " fmt "\n", __func__, ## __VA_ARGS__); \
    } while (0)

static void nvme_process_sq(void *opaque);

static bool nvme_addr_is_cmb(NvmeCtrl *n, hwaddr addr)
{
    hwaddr low = n->ctrl_mem.addr;
    hwaddr hi  = n->ctrl_mem.addr + int128_get64(n->ctrl_mem.size);

    return addr >= low && addr < hi;
}
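
/*
 * Read a buffer from a guest-visible address: addresses inside the
 * Controller Memory Buffer are served directly from n->cmbuf, everything
 * else goes through regular PCI DMA.
 */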
static void nvme_addr_read(NvmeCtrl *n, hwaddr addr, void *buf, int size)
{
    if (n->cmbsz && nvme_addr_is_cmb(n, addr)) {
        memcpy(buf, (void *)&n->cmbuf[addr - n->ctrl_mem.addr], size);
        return;
    }

    pci_dma_read(&n->parent_obj, addr, buf, size);
}

static int nvme_check_sqid(NvmeCtrl *n, uint16_t sqid)
{
    return sqid < n->params.num_queues && n->sq[sqid] != NULL ? 0 : -1;
}

static int nvme_check_cqid(NvmeCtrl *n, uint16_t cqid)
{
    return cqid < n->params.num_queues && n->cq[cqid] != NULL ? 0 : -1;
}

static void nvme_inc_cq_tail(NvmeCQueue *cq)
{
    cq->tail++;
    if (cq->tail >= cq->size) {
        cq->tail = 0;
        cq->phase = !cq->phase;
    }
}

static void nvme_inc_sq_head(NvmeSQueue *sq)
{
    sq->head = (sq->head + 1) % sq->size;
}

static uint8_t nvme_cq_full(NvmeCQueue *cq)
{
    return (cq->tail + 1) % cq->size == cq->head;
}

static uint8_t nvme_sq_empty(NvmeSQueue *sq)
{
    return sq->head == sq->tail;
}
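
/*
 * Interrupt model: with MSI-X enabled, vectors are raised directly from
 * nvme_irq_assert() and no level needs tracking. For pin-based INTx,
 * n->irq_status keeps one bit per completion queue (hence the cqid < 64
 * asserts below) and the pin is asserted while any unmasked bit is set.
 */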
static void nvme_irq_check(NvmeCtrl *n)
{
    if (msix_enabled(&(n->parent_obj))) {
        return;
    }
    if (~n->bar.intms & n->irq_status) {
        pci_irq_assert(&n->parent_obj);
    } else {
        pci_irq_deassert(&n->parent_obj);
    }
}

static void nvme_irq_assert(NvmeCtrl *n, NvmeCQueue *cq)
{
    if (cq->irq_enabled) {
        if (msix_enabled(&(n->parent_obj))) {
            trace_pci_nvme_irq_msix(cq->vector);
            msix_notify(&(n->parent_obj), cq->vector);
        } else {
            trace_pci_nvme_irq_pin();
            assert(cq->cqid < 64);
            n->irq_status |= 1 << cq->cqid;
            nvme_irq_check(n);
        }
    } else {
        trace_pci_nvme_irq_masked();
    }
}

static void nvme_irq_deassert(NvmeCtrl *n, NvmeCQueue *cq)
{
    if (cq->irq_enabled) {
        if (msix_enabled(&(n->parent_obj))) {
            return;
        } else {
            assert(cq->cqid < 64);
            n->irq_status &= ~(1 << cq->cqid);
            nvme_irq_check(n);
        }
    }
}
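
/*
 * Map an NVMe PRP pair onto a scatter/gather list. PRP1 addresses the
 * first, possibly unaligned, page of the transfer; for transfers longer
 * than one page, PRP2 is either the second page or, beyond two pages, the
 * address of a PRP list that is walked (and chained) page by page.
 * Entries in host memory accumulate in *qsg, entries that fall inside the
 * CMB accumulate in *iov.
 */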
static uint16_t nvme_map_prp(QEMUSGList *qsg, QEMUIOVector *iov, uint64_t prp1,
                             uint64_t prp2, uint32_t len, NvmeCtrl *n)
{
    hwaddr trans_len = n->page_size - (prp1 % n->page_size);
    trans_len = MIN(len, trans_len);
    int num_prps = (len >> n->page_bits) + 1;

    if (unlikely(!prp1)) {
        trace_pci_nvme_err_invalid_prp();
        return NVME_INVALID_FIELD | NVME_DNR;
    } else if (n->cmbsz && prp1 >= n->ctrl_mem.addr &&
               prp1 < n->ctrl_mem.addr + int128_get64(n->ctrl_mem.size)) {
        qsg->nsg = 0;
        qemu_iovec_init(iov, num_prps);
        qemu_iovec_add(iov, (void *)&n->cmbuf[prp1 - n->ctrl_mem.addr], trans_len);
    } else {
        pci_dma_sglist_init(qsg, &n->parent_obj, num_prps);
        qemu_sglist_add(qsg, prp1, trans_len);
    }
    len -= trans_len;
    if (len) {
        if (unlikely(!prp2)) {
            trace_pci_nvme_err_invalid_prp2_missing();
            goto unmap;
        }
        if (len > n->page_size) {
            uint64_t prp_list[n->max_prp_ents];
            uint32_t nents, prp_trans;
            int i = 0;

            nents = (len + n->page_size - 1) >> n->page_bits;
            prp_trans = MIN(n->max_prp_ents, nents) * sizeof(uint64_t);
            nvme_addr_read(n, prp2, (void *)prp_list, prp_trans);
            while (len != 0) {
                uint64_t prp_ent = le64_to_cpu(prp_list[i]);

                if (i == n->max_prp_ents - 1 && len > n->page_size) {
                    if (unlikely(!prp_ent || prp_ent & (n->page_size - 1))) {
                        trace_pci_nvme_err_invalid_prplist_ent(prp_ent);
                        goto unmap;
                    }

                    i = 0;
                    nents = (len + n->page_size - 1) >> n->page_bits;
                    prp_trans = MIN(n->max_prp_ents, nents) * sizeof(uint64_t);
                    nvme_addr_read(n, prp_ent, (void *)prp_list,
                                   prp_trans);
                    prp_ent = le64_to_cpu(prp_list[i]);
                }

                if (unlikely(!prp_ent || prp_ent & (n->page_size - 1))) {
                    trace_pci_nvme_err_invalid_prplist_ent(prp_ent);
                    goto unmap;
                }

                trans_len = MIN(len, n->page_size);
                if (qsg->nsg) {
                    qemu_sglist_add(qsg, prp_ent, trans_len);
                } else {
                    qemu_iovec_add(iov, (void *)&n->cmbuf[prp_ent - n->ctrl_mem.addr], trans_len);
                }
                len -= trans_len;
                i++;
            }
        } else {
            if (unlikely(prp2 & (n->page_size - 1))) {
                trace_pci_nvme_err_invalid_prp2_align(prp2);
                goto unmap;
            }
            if (qsg->nsg) {
                qemu_sglist_add(qsg, prp2, len);
            } else {
                qemu_iovec_add(iov, (void *)&n->cmbuf[prp2 - n->ctrl_mem.addr], trans_len);
            }
        }
    }
    return NVME_SUCCESS;

 unmap:
    qemu_sglist_destroy(qsg);
    return NVME_INVALID_FIELD | NVME_DNR;
}

static uint16_t nvme_dma_write_prp(NvmeCtrl *n, uint8_t *ptr, uint32_t len,
                                   uint64_t prp1, uint64_t prp2)
{
    QEMUSGList qsg;
    QEMUIOVector iov;
    uint16_t status = NVME_SUCCESS;

    if (nvme_map_prp(&qsg, &iov, prp1, prp2, len, n)) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }
    if (qsg.nsg > 0) {
        if (dma_buf_write(ptr, len, &qsg)) {
            status = NVME_INVALID_FIELD | NVME_DNR;
        }
        qemu_sglist_destroy(&qsg);
    } else {
        if (qemu_iovec_to_buf(&iov, 0, ptr, len) != len) {
            status = NVME_INVALID_FIELD | NVME_DNR;
        }
        qemu_iovec_destroy(&iov);
    }
    return status;
}

static uint16_t nvme_dma_read_prp(NvmeCtrl *n, uint8_t *ptr, uint32_t len,
                                  uint64_t prp1, uint64_t prp2)
{
    QEMUSGList qsg;
    QEMUIOVector iov;
    uint16_t status = NVME_SUCCESS;

    trace_pci_nvme_dma_read(prp1, prp2);

    if (nvme_map_prp(&qsg, &iov, prp1, prp2, len, n)) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }
    if (qsg.nsg > 0) {
        if (unlikely(dma_buf_read(ptr, len, &qsg))) {
            trace_pci_nvme_err_invalid_dma();
            status = NVME_INVALID_FIELD | NVME_DNR;
        }
        qemu_sglist_destroy(&qsg);
    } else {
        if (unlikely(qemu_iovec_from_buf(&iov, 0, ptr, len) != len)) {
            trace_pci_nvme_err_invalid_dma();
            status = NVME_INVALID_FIELD | NVME_DNR;
        }
        qemu_iovec_destroy(&iov);
    }
    return status;
}
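
/*
 * Post completed requests to the completion queue: each CQE is written at
 * the current tail with the queue's phase tag folded into the status
 * word, which lets the guest detect new entries without a tail register.
 */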
static void nvme_post_cqes(void *opaque)
{
    NvmeCQueue *cq = opaque;
    NvmeCtrl *n = cq->ctrl;
    NvmeRequest *req, *next;

    QTAILQ_FOREACH_SAFE(req, &cq->req_list, entry, next) {
        NvmeSQueue *sq;
        hwaddr addr;

        if (nvme_cq_full(cq)) {
            break;
        }

        QTAILQ_REMOVE(&cq->req_list, req, entry);
        sq = req->sq;
        req->cqe.status = cpu_to_le16((req->status << 1) | cq->phase);
        req->cqe.sq_id = cpu_to_le16(sq->sqid);
        req->cqe.sq_head = cpu_to_le16(sq->head);
        addr = cq->dma_addr + cq->tail * n->cqe_size;
        nvme_inc_cq_tail(cq);
        pci_dma_write(&n->parent_obj, addr, (void *)&req->cqe,
                      sizeof(req->cqe));
        QTAILQ_INSERT_TAIL(&sq->req_list, req, entry);
    }
    if (cq->tail != cq->head) {
        nvme_irq_assert(n, cq);
    }
}

static void nvme_enqueue_req_completion(NvmeCQueue *cq, NvmeRequest *req)
{
    assert(cq->cqid == req->sq->cqid);
    QTAILQ_REMOVE(&req->sq->out_req_list, req, entry);
    QTAILQ_INSERT_TAIL(&cq->req_list, req, entry);
    timer_mod(cq->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 500);
}

static void nvme_rw_cb(void *opaque, int ret)
{
    NvmeRequest *req = opaque;
    NvmeSQueue *sq = req->sq;
    NvmeCtrl *n = sq->ctrl;
    NvmeCQueue *cq = n->cq[sq->cqid];

    if (!ret) {
        block_acct_done(blk_get_stats(n->conf.blk), &req->acct);
        req->status = NVME_SUCCESS;
    } else {
        block_acct_failed(blk_get_stats(n->conf.blk), &req->acct);
        req->status = NVME_INTERNAL_DEV_ERROR;
    }
    if (req->has_sg) {
        qemu_sglist_destroy(&req->qsg);
    }
    nvme_enqueue_req_completion(cq, req);
}

static uint16_t nvme_flush(NvmeCtrl *n, NvmeNamespace *ns, NvmeCmd *cmd,
                           NvmeRequest *req)
{
    req->has_sg = false;
    block_acct_start(blk_get_stats(n->conf.blk), &req->acct, 0,
                     BLOCK_ACCT_FLUSH);
    req->aiocb = blk_aio_flush(n->conf.blk, nvme_rw_cb, req);

    return NVME_NO_COMPLETE;
}

static uint16_t nvme_write_zeros(NvmeCtrl *n, NvmeNamespace *ns, NvmeCmd *cmd,
                                 NvmeRequest *req)
{
    NvmeRwCmd *rw = (NvmeRwCmd *)cmd;
    const uint8_t lba_index = NVME_ID_NS_FLBAS_INDEX(ns->id_ns.flbas);
    const uint8_t data_shift = ns->id_ns.lbaf[lba_index].ds;
    uint64_t slba = le64_to_cpu(rw->slba);
    uint32_t nlb  = le16_to_cpu(rw->nlb) + 1;
    uint64_t offset = slba << data_shift;
    uint32_t count = nlb << data_shift;

    if (unlikely(slba + nlb > ns->id_ns.nsze)) {
        trace_pci_nvme_err_invalid_lba_range(slba, nlb, ns->id_ns.nsze);
        return NVME_LBA_RANGE | NVME_DNR;
    }

    req->has_sg = false;
    block_acct_start(blk_get_stats(n->conf.blk), &req->acct, 0,
                     BLOCK_ACCT_WRITE);
    req->aiocb = blk_aio_pwrite_zeroes(n->conf.blk, offset, count,
                                       BDRV_REQ_MAY_UNMAP, nvme_rw_cb, req);
    return NVME_NO_COMPLETE;
}
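
/*
 * Read/write path. Byte offset and length are derived from the LBA fields
 * by shifting with the namespace's formatted LBA data size (ds); for
 * example, a 512-byte LBA format gives data_size = nlb << 9.
 */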
static uint16_t nvme_rw(NvmeCtrl *n, NvmeNamespace *ns, NvmeCmd *cmd,
                        NvmeRequest *req)
{
    NvmeRwCmd *rw = (NvmeRwCmd *)cmd;
    uint32_t nlb  = le32_to_cpu(rw->nlb) + 1;
    uint64_t slba = le64_to_cpu(rw->slba);
    uint64_t prp1 = le64_to_cpu(rw->prp1);
    uint64_t prp2 = le64_to_cpu(rw->prp2);

    uint8_t lba_index  = NVME_ID_NS_FLBAS_INDEX(ns->id_ns.flbas);
    uint8_t data_shift = ns->id_ns.lbaf[lba_index].ds;
    uint64_t data_size = (uint64_t)nlb << data_shift;
    uint64_t data_offset = slba << data_shift;
    int is_write = rw->opcode == NVME_CMD_WRITE ? 1 : 0;
    enum BlockAcctType acct = is_write ? BLOCK_ACCT_WRITE : BLOCK_ACCT_READ;

    trace_pci_nvme_rw(is_write ? "write" : "read", nlb, data_size, slba);

    if (unlikely((slba + nlb) > ns->id_ns.nsze)) {
        block_acct_invalid(blk_get_stats(n->conf.blk), acct);
        trace_pci_nvme_err_invalid_lba_range(slba, nlb, ns->id_ns.nsze);
        return NVME_LBA_RANGE | NVME_DNR;
    }

    if (nvme_map_prp(&req->qsg, &req->iov, prp1, prp2, data_size, n)) {
        block_acct_invalid(blk_get_stats(n->conf.blk), acct);
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    dma_acct_start(n->conf.blk, &req->acct, &req->qsg, acct);
    if (req->qsg.nsg > 0) {
        req->has_sg = true;
        req->aiocb = is_write ?
            dma_blk_write(n->conf.blk, &req->qsg, data_offset, BDRV_SECTOR_SIZE,
                          nvme_rw_cb, req) :
            dma_blk_read(n->conf.blk, &req->qsg, data_offset, BDRV_SECTOR_SIZE,
                         nvme_rw_cb, req);
    } else {
        req->has_sg = false;
        req->aiocb = is_write ?
            blk_aio_pwritev(n->conf.blk, data_offset, &req->iov, 0, nvme_rw_cb,
                            req) :
            blk_aio_preadv(n->conf.blk, data_offset, &req->iov, 0, nvme_rw_cb,
                           req);
    }

    return NVME_NO_COMPLETE;
}

static uint16_t nvme_io_cmd(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
{
    NvmeNamespace *ns;
    uint32_t nsid = le32_to_cpu(cmd->nsid);

    if (unlikely(nsid == 0 || nsid > n->num_namespaces)) {
        trace_pci_nvme_err_invalid_ns(nsid, n->num_namespaces);
        return NVME_INVALID_NSID | NVME_DNR;
    }

    ns = &n->namespaces[nsid - 1];
    switch (cmd->opcode) {
    case NVME_CMD_FLUSH:
        return nvme_flush(n, ns, cmd, req);
    case NVME_CMD_WRITE_ZEROS:
        return nvme_write_zeros(n, ns, cmd, req);
    case NVME_CMD_WRITE:
    case NVME_CMD_READ:
        return nvme_rw(n, ns, cmd, req);
    default:
        trace_pci_nvme_err_invalid_opc(cmd->opcode);
        return NVME_INVALID_OPCODE | NVME_DNR;
    }
}

static void nvme_free_sq(NvmeSQueue *sq, NvmeCtrl *n)
{
    n->sq[sq->sqid] = NULL;
    timer_del(sq->timer);
    timer_free(sq->timer);
    g_free(sq->io_req);
    if (sq->sqid) {
        g_free(sq);
    }
}

static uint16_t nvme_del_sq(NvmeCtrl *n, NvmeCmd *cmd)
{
    NvmeDeleteQ *c = (NvmeDeleteQ *)cmd;
    NvmeRequest *req, *next;
    NvmeSQueue *sq;
    NvmeCQueue *cq;
    uint16_t qid = le16_to_cpu(c->qid);

    if (unlikely(!qid || nvme_check_sqid(n, qid))) {
        trace_pci_nvme_err_invalid_del_sq(qid);
        return NVME_INVALID_QID | NVME_DNR;
    }

    trace_pci_nvme_del_sq(qid);

    sq = n->sq[qid];
    while (!QTAILQ_EMPTY(&sq->out_req_list)) {
        req = QTAILQ_FIRST(&sq->out_req_list);
        assert(req->aiocb);
        blk_aio_cancel(req->aiocb);
    }
    if (!nvme_check_cqid(n, sq->cqid)) {
        cq = n->cq[sq->cqid];
        QTAILQ_REMOVE(&cq->sq_list, sq, entry);

        nvme_post_cqes(cq);
        QTAILQ_FOREACH_SAFE(req, &cq->req_list, entry, next) {
            if (req->sq == sq) {
                QTAILQ_REMOVE(&cq->req_list, req, entry);
                QTAILQ_INSERT_TAIL(&sq->req_list, req, entry);
            }
        }
    }

    nvme_free_sq(sq, n);
    return NVME_SUCCESS;
}
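
/*
 * One NvmeRequest is preallocated per submission queue entry; requests
 * cycle between req_list (free) and out_req_list (in flight), so command
 * processing never allocates on the hot path.
 */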
static void nvme_init_sq(NvmeSQueue *sq, NvmeCtrl *n, uint64_t dma_addr,
                         uint16_t sqid, uint16_t cqid, uint16_t size)
{
    int i;
    NvmeCQueue *cq;

    sq->ctrl = n;
    sq->dma_addr = dma_addr;
    sq->sqid = sqid;
    sq->size = size;
    sq->cqid = cqid;
    sq->head = sq->tail = 0;
    sq->io_req = g_new(NvmeRequest, sq->size);

    QTAILQ_INIT(&sq->req_list);
    QTAILQ_INIT(&sq->out_req_list);
    for (i = 0; i < sq->size; i++) {
        sq->io_req[i].sq = sq;
        QTAILQ_INSERT_TAIL(&(sq->req_list), &sq->io_req[i], entry);
    }
    sq->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, nvme_process_sq, sq);

    assert(n->cq[cqid]);
    cq = n->cq[cqid];
    QTAILQ_INSERT_TAIL(&(cq->sq_list), sq, entry);
    n->sq[sqid] = sq;
}

static uint16_t nvme_create_sq(NvmeCtrl *n, NvmeCmd *cmd)
{
    NvmeSQueue *sq;
    NvmeCreateSq *c = (NvmeCreateSq *)cmd;

    uint16_t cqid = le16_to_cpu(c->cqid);
    uint16_t sqid = le16_to_cpu(c->sqid);
    uint16_t qsize = le16_to_cpu(c->qsize);
    uint16_t qflags = le16_to_cpu(c->sq_flags);
    uint64_t prp1 = le64_to_cpu(c->prp1);

    trace_pci_nvme_create_sq(prp1, sqid, cqid, qsize, qflags);

    if (unlikely(!cqid || nvme_check_cqid(n, cqid))) {
        trace_pci_nvme_err_invalid_create_sq_cqid(cqid);
        return NVME_INVALID_CQID | NVME_DNR;
    }
    if (unlikely(!sqid || !nvme_check_sqid(n, sqid))) {
        trace_pci_nvme_err_invalid_create_sq_sqid(sqid);
        return NVME_INVALID_QID | NVME_DNR;
    }
    if (unlikely(!qsize || qsize > NVME_CAP_MQES(n->bar.cap))) {
        trace_pci_nvme_err_invalid_create_sq_size(qsize);
        return NVME_MAX_QSIZE_EXCEEDED | NVME_DNR;
    }
    if (unlikely(!prp1 || prp1 & (n->page_size - 1))) {
        trace_pci_nvme_err_invalid_create_sq_addr(prp1);
        return NVME_INVALID_FIELD | NVME_DNR;
    }
    if (unlikely(!(NVME_SQ_FLAGS_PC(qflags)))) {
        trace_pci_nvme_err_invalid_create_sq_qflags(NVME_SQ_FLAGS_PC(qflags));
        return NVME_INVALID_FIELD | NVME_DNR;
    }
    sq = g_malloc0(sizeof(*sq));
    nvme_init_sq(sq, n, prp1, sqid, cqid, qsize + 1);
    return NVME_SUCCESS;
}

static void nvme_free_cq(NvmeCQueue *cq, NvmeCtrl *n)
{
    n->cq[cq->cqid] = NULL;
    timer_del(cq->timer);
    timer_free(cq->timer);
    msix_vector_unuse(&n->parent_obj, cq->vector);
    if (cq->cqid) {
        g_free(cq);
    }
}

static uint16_t nvme_del_cq(NvmeCtrl *n, NvmeCmd *cmd)
{
    NvmeDeleteQ *c = (NvmeDeleteQ *)cmd;
    NvmeCQueue *cq;
    uint16_t qid = le16_to_cpu(c->qid);

    if (unlikely(!qid || nvme_check_cqid(n, qid))) {
        trace_pci_nvme_err_invalid_del_cq_cqid(qid);
        return NVME_INVALID_CQID | NVME_DNR;
    }

    cq = n->cq[qid];
    if (unlikely(!QTAILQ_EMPTY(&cq->sq_list))) {
        trace_pci_nvme_err_invalid_del_cq_notempty(qid);
        return NVME_INVALID_QUEUE_DEL;
    }
    nvme_irq_deassert(n, cq);
    trace_pci_nvme_del_cq(qid);
    nvme_free_cq(cq, n);
    return NVME_SUCCESS;
}

static void nvme_init_cq(NvmeCQueue *cq, NvmeCtrl *n, uint64_t dma_addr,
                         uint16_t cqid, uint16_t vector, uint16_t size,
                         uint16_t irq_enabled)
{
    cq->ctrl = n;
    cq->cqid = cqid;
    cq->size = size;
    cq->dma_addr = dma_addr;
    cq->phase = 1;
    cq->irq_enabled = irq_enabled;
    cq->vector = vector;
    cq->head = cq->tail = 0;
    QTAILQ_INIT(&cq->req_list);
    QTAILQ_INIT(&cq->sq_list);
    msix_vector_use(&n->parent_obj, cq->vector);
    n->cq[cqid] = cq;
    cq->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, nvme_post_cqes, cq);
}

static uint16_t nvme_create_cq(NvmeCtrl *n, NvmeCmd *cmd)
{
    NvmeCQueue *cq;
    NvmeCreateCq *c = (NvmeCreateCq *)cmd;
    uint16_t cqid = le16_to_cpu(c->cqid);
    uint16_t vector = le16_to_cpu(c->irq_vector);
    uint16_t qsize = le16_to_cpu(c->qsize);
    uint16_t qflags = le16_to_cpu(c->cq_flags);
    uint64_t prp1 = le64_to_cpu(c->prp1);

    trace_pci_nvme_create_cq(prp1, cqid, vector, qsize, qflags,
                             NVME_CQ_FLAGS_IEN(qflags) != 0);

    if (unlikely(!cqid || !nvme_check_cqid(n, cqid))) {
        trace_pci_nvme_err_invalid_create_cq_cqid(cqid);
        return NVME_INVALID_CQID | NVME_DNR;
    }
    if (unlikely(!qsize || qsize > NVME_CAP_MQES(n->bar.cap))) {
        trace_pci_nvme_err_invalid_create_cq_size(qsize);
        return NVME_MAX_QSIZE_EXCEEDED | NVME_DNR;
    }
    if (unlikely(!prp1)) {
        trace_pci_nvme_err_invalid_create_cq_addr(prp1);
        return NVME_INVALID_FIELD | NVME_DNR;
    }
    if (unlikely(vector > n->params.num_queues)) {
        trace_pci_nvme_err_invalid_create_cq_vector(vector);
        return NVME_INVALID_IRQ_VECTOR | NVME_DNR;
    }
    if (unlikely(!(NVME_CQ_FLAGS_PC(qflags)))) {
        trace_pci_nvme_err_invalid_create_cq_qflags(NVME_CQ_FLAGS_PC(qflags));
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    cq = g_malloc0(sizeof(*cq));
    nvme_init_cq(cq, n, prp1, cqid, vector, qsize + 1,
                 NVME_CQ_FLAGS_IEN(qflags));
    return NVME_SUCCESS;
}

static uint16_t nvme_identify_ctrl(NvmeCtrl *n, NvmeIdentify *c)
{
    uint64_t prp1 = le64_to_cpu(c->prp1);
    uint64_t prp2 = le64_to_cpu(c->prp2);

    trace_pci_nvme_identify_ctrl();

    return nvme_dma_read_prp(n, (uint8_t *)&n->id_ctrl, sizeof(n->id_ctrl),
                             prp1, prp2);
}

static uint16_t nvme_identify_ns(NvmeCtrl *n, NvmeIdentify *c)
{
    NvmeNamespace *ns;
    uint32_t nsid = le32_to_cpu(c->nsid);
    uint64_t prp1 = le64_to_cpu(c->prp1);
    uint64_t prp2 = le64_to_cpu(c->prp2);

    trace_pci_nvme_identify_ns(nsid);

    if (unlikely(nsid == 0 || nsid > n->num_namespaces)) {
        trace_pci_nvme_err_invalid_ns(nsid, n->num_namespaces);
        return NVME_INVALID_NSID | NVME_DNR;
    }

    ns = &n->namespaces[nsid - 1];

    return nvme_dma_read_prp(n, (uint8_t *)&ns->id_ns, sizeof(ns->id_ns),
                             prp1, prp2);
}

static uint16_t nvme_identify_nslist(NvmeCtrl *n, NvmeIdentify *c)
{
    static const int data_len = NVME_IDENTIFY_DATA_SIZE;
    uint32_t min_nsid = le32_to_cpu(c->nsid);
    uint64_t prp1 = le64_to_cpu(c->prp1);
    uint64_t prp2 = le64_to_cpu(c->prp2);
    uint32_t *list;
    uint16_t ret;
    int i, j = 0;

    trace_pci_nvme_identify_nslist(min_nsid);

    list = g_malloc0(data_len);
    for (i = 0; i < n->num_namespaces; i++) {
        if (i < min_nsid) {
            continue;
        }
        list[j++] = cpu_to_le32(i + 1);
        if (j == data_len / sizeof(uint32_t)) {
            break;
        }
    }
    ret = nvme_dma_read_prp(n, (uint8_t *)list, data_len, prp1, prp2);
    g_free(list);
    return ret;
}

static uint16_t nvme_identify(NvmeCtrl *n, NvmeCmd *cmd)
{
    NvmeIdentify *c = (NvmeIdentify *)cmd;

    switch (le32_to_cpu(c->cns)) {
    case NVME_ID_CNS_NS:
        return nvme_identify_ns(n, c);
    case NVME_ID_CNS_CTRL:
        return nvme_identify_ctrl(n, c);
    case NVME_ID_CNS_NS_ACTIVE_LIST:
        return nvme_identify_nslist(n, c);
    default:
        trace_pci_nvme_err_invalid_identify_cns(le32_to_cpu(c->cns));
        return NVME_INVALID_FIELD | NVME_DNR;
    }
}
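
/*
 * Timestamp feature: the host sets a millisecond value and the controller
 * reports it back advanced by the virtual time elapsed since it was set.
 */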
static inline void nvme_set_timestamp(NvmeCtrl *n, uint64_t ts)
{
    trace_pci_nvme_setfeat_timestamp(ts);

    n->host_timestamp = le64_to_cpu(ts);
    n->timestamp_set_qemu_clock_ms = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
}

static inline uint64_t nvme_get_timestamp(const NvmeCtrl *n)
{
    uint64_t current_time = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
    uint64_t elapsed_time = current_time - n->timestamp_set_qemu_clock_ms;

    union nvme_timestamp {
        struct {
            uint64_t timestamp:48;
            uint64_t sync:1;
            uint64_t origin:3;
            uint64_t rsvd1:12;
        };
        uint64_t all;
    };

    union nvme_timestamp ts;
    ts.all = 0;

    /*
     * If the sum of the Timestamp value set by the host and the elapsed
     * time exceeds 2^48, the value returned should be reduced modulo 2^48.
     */
    ts.timestamp = (n->host_timestamp + elapsed_time) & 0xffffffffffff;

    /* If the host timestamp is non-zero, set the timestamp origin */
    ts.origin = n->host_timestamp ? 0x01 : 0x00;

    trace_pci_nvme_getfeat_timestamp(ts.all);

    return cpu_to_le64(ts.all);
}

static uint16_t nvme_get_feature_timestamp(NvmeCtrl *n, NvmeCmd *cmd)
{
    uint64_t prp1 = le64_to_cpu(cmd->prp1);
    uint64_t prp2 = le64_to_cpu(cmd->prp2);

    uint64_t timestamp = nvme_get_timestamp(n);

    return nvme_dma_read_prp(n, (uint8_t *)&timestamp,
                             sizeof(timestamp), prp1, prp2);
}

static uint16_t nvme_get_feature(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
{
    uint32_t dw10 = le32_to_cpu(cmd->cdw10);
    uint32_t result;

    switch (dw10) {
    case NVME_VOLATILE_WRITE_CACHE:
        result = blk_enable_write_cache(n->conf.blk);
        trace_pci_nvme_getfeat_vwcache(result ? "enabled" : "disabled");
        break;
    case NVME_NUMBER_OF_QUEUES:
        result = cpu_to_le32((n->params.num_queues - 2) |
                             ((n->params.num_queues - 2) << 16));
        trace_pci_nvme_getfeat_numq(result);
        break;
    case NVME_TIMESTAMP:
        return nvme_get_feature_timestamp(n, cmd);
    default:
        trace_pci_nvme_err_invalid_getfeat(dw10);
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    req->cqe.result = result;
    return NVME_SUCCESS;
}

static uint16_t nvme_set_feature_timestamp(NvmeCtrl *n, NvmeCmd *cmd)
{
    uint16_t ret;
    uint64_t timestamp;
    uint64_t prp1 = le64_to_cpu(cmd->prp1);
    uint64_t prp2 = le64_to_cpu(cmd->prp2);

    ret = nvme_dma_write_prp(n, (uint8_t *)&timestamp,
                             sizeof(timestamp), prp1, prp2);
    if (ret != NVME_SUCCESS) {
        return ret;
    }

    nvme_set_timestamp(n, timestamp);

    return NVME_SUCCESS;
}

static uint16_t nvme_set_feature(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
{
    uint32_t dw10 = le32_to_cpu(cmd->cdw10);
    uint32_t dw11 = le32_to_cpu(cmd->cdw11);

    switch (dw10) {
    case NVME_VOLATILE_WRITE_CACHE:
        blk_set_enable_write_cache(n->conf.blk, dw11 & 1);
        break;
    case NVME_NUMBER_OF_QUEUES:
        trace_pci_nvme_setfeat_numq((dw11 & 0xFFFF) + 1,
                                    ((dw11 >> 16) & 0xFFFF) + 1,
                                    n->params.num_queues - 1,
                                    n->params.num_queues - 1);
        req->cqe.result = cpu_to_le32((n->params.num_queues - 2) |
                                      ((n->params.num_queues - 2) << 16));
        break;
    case NVME_TIMESTAMP:
        return nvme_set_feature_timestamp(n, cmd);
    default:
        trace_pci_nvme_err_invalid_setfeat(dw10);
        return NVME_INVALID_FIELD | NVME_DNR;
    }
    return NVME_SUCCESS;
}

static uint16_t nvme_admin_cmd(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
{
    switch (cmd->opcode) {
    case NVME_ADM_CMD_DELETE_SQ:
        return nvme_del_sq(n, cmd);
    case NVME_ADM_CMD_CREATE_SQ:
        return nvme_create_sq(n, cmd);
    case NVME_ADM_CMD_DELETE_CQ:
        return nvme_del_cq(n, cmd);
    case NVME_ADM_CMD_CREATE_CQ:
        return nvme_create_cq(n, cmd);
    case NVME_ADM_CMD_IDENTIFY:
        return nvme_identify(n, cmd);
    case NVME_ADM_CMD_SET_FEATURES:
        return nvme_set_feature(n, cmd, req);
    case NVME_ADM_CMD_GET_FEATURES:
        return nvme_get_feature(n, cmd, req);
    default:
        trace_pci_nvme_err_invalid_admin_opc(cmd->opcode);
        return NVME_INVALID_OPCODE | NVME_DNR;
    }
}

static void nvme_process_sq(void *opaque)
{
    NvmeSQueue *sq = opaque;
    NvmeCtrl *n = sq->ctrl;
    NvmeCQueue *cq = n->cq[sq->cqid];

    uint16_t status;
    hwaddr addr;
    NvmeCmd cmd;
    NvmeRequest *req;

    while (!(nvme_sq_empty(sq) || QTAILQ_EMPTY(&sq->req_list))) {
        addr = sq->dma_addr + sq->head * n->sqe_size;
        nvme_addr_read(n, addr, (void *)&cmd, sizeof(cmd));
        nvme_inc_sq_head(sq);

        req = QTAILQ_FIRST(&sq->req_list);
        QTAILQ_REMOVE(&sq->req_list, req, entry);
        QTAILQ_INSERT_TAIL(&sq->out_req_list, req, entry);
        memset(&req->cqe, 0, sizeof(req->cqe));
        req->cqe.cid = cmd.cid;

        status = sq->sqid ? nvme_io_cmd(n, &cmd, req) :
            nvme_admin_cmd(n, &cmd, req);
        if (status != NVME_NO_COMPLETE) {
            req->status = status;
            nvme_enqueue_req_completion(cq, req);
        }
    }
}

static void nvme_clear_ctrl(NvmeCtrl *n)
{
    int i;

    blk_drain(n->conf.blk);

    for (i = 0; i < n->params.num_queues; i++) {
        if (n->sq[i] != NULL) {
            nvme_free_sq(n->sq[i], n);
        }
    }
    for (i = 0; i < n->params.num_queues; i++) {
        if (n->cq[i] != NULL) {
            nvme_free_cq(n->cq[i], n);
        }
    }

    blk_flush(n->conf.blk);
    n->bar.cc = 0;
}

static int nvme_start_ctrl(NvmeCtrl *n)
{
    uint32_t page_bits = NVME_CC_MPS(n->bar.cc) + 12;
    uint32_t page_size = 1 << page_bits;

    if (unlikely(n->cq[0])) {
        trace_pci_nvme_err_startfail_cq();
        return -1;
    }
    if (unlikely(n->sq[0])) {
        trace_pci_nvme_err_startfail_sq();
        return -1;
    }
    if (unlikely(!n->bar.asq)) {
        trace_pci_nvme_err_startfail_nbarasq();
        return -1;
    }
    if (unlikely(!n->bar.acq)) {
        trace_pci_nvme_err_startfail_nbaracq();
        return -1;
    }
    if (unlikely(n->bar.asq & (page_size - 1))) {
        trace_pci_nvme_err_startfail_asq_misaligned(n->bar.asq);
        return -1;
    }
    if (unlikely(n->bar.acq & (page_size - 1))) {
        trace_pci_nvme_err_startfail_acq_misaligned(n->bar.acq);
        return -1;
    }
    if (unlikely(NVME_CC_MPS(n->bar.cc) <
                 NVME_CAP_MPSMIN(n->bar.cap))) {
        trace_pci_nvme_err_startfail_page_too_small(
                    NVME_CC_MPS(n->bar.cc),
                    NVME_CAP_MPSMIN(n->bar.cap));
        return -1;
    }
    if (unlikely(NVME_CC_MPS(n->bar.cc) >
                 NVME_CAP_MPSMAX(n->bar.cap))) {
        trace_pci_nvme_err_startfail_page_too_large(
                    NVME_CC_MPS(n->bar.cc),
                    NVME_CAP_MPSMAX(n->bar.cap));
        return -1;
    }
    if (unlikely(NVME_CC_IOCQES(n->bar.cc) <
                 NVME_CTRL_CQES_MIN(n->id_ctrl.cqes))) {
        trace_pci_nvme_err_startfail_cqent_too_small(
                    NVME_CC_IOCQES(n->bar.cc),
                    NVME_CTRL_CQES_MIN(n->bar.cap));
        return -1;
    }
    if (unlikely(NVME_CC_IOCQES(n->bar.cc) >
                 NVME_CTRL_CQES_MAX(n->id_ctrl.cqes))) {
        trace_pci_nvme_err_startfail_cqent_too_large(
                    NVME_CC_IOCQES(n->bar.cc),
                    NVME_CTRL_CQES_MAX(n->bar.cap));
        return -1;
    }
    if (unlikely(NVME_CC_IOSQES(n->bar.cc) <
                 NVME_CTRL_SQES_MIN(n->id_ctrl.sqes))) {
        trace_pci_nvme_err_startfail_sqent_too_small(
                    NVME_CC_IOSQES(n->bar.cc),
                    NVME_CTRL_SQES_MIN(n->bar.cap));
        return -1;
    }
    if (unlikely(NVME_CC_IOSQES(n->bar.cc) >
                 NVME_CTRL_SQES_MAX(n->id_ctrl.sqes))) {
        trace_pci_nvme_err_startfail_sqent_too_large(
                    NVME_CC_IOSQES(n->bar.cc),
                    NVME_CTRL_SQES_MAX(n->bar.cap));
        return -1;
    }
    if (unlikely(!NVME_AQA_ASQS(n->bar.aqa))) {
        trace_pci_nvme_err_startfail_asqent_sz_zero();
        return -1;
    }
    if (unlikely(!NVME_AQA_ACQS(n->bar.aqa))) {
        trace_pci_nvme_err_startfail_acqent_sz_zero();
        return -1;
    }

    n->page_bits = page_bits;
    n->page_size = page_size;
    n->max_prp_ents = n->page_size / sizeof(uint64_t);
    n->cqe_size = 1 << NVME_CC_IOCQES(n->bar.cc);
    n->sqe_size = 1 << NVME_CC_IOSQES(n->bar.cc);
    nvme_init_cq(&n->admin_cq, n, n->bar.acq, 0, 0,
                 NVME_AQA_ACQS(n->bar.aqa) + 1, 1);
    nvme_init_sq(&n->admin_sq, n, n->bar.asq, 0, 0,
                 NVME_AQA_ASQS(n->bar.aqa) + 1);

    nvme_set_timestamp(n, 0ULL);

    return 0;
}
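
/*
 * Writes to the controller register file in BAR0. The offsets in this
 * switch follow the NVMe register map: 0x0c INTMS, 0x10 INTMC, 0x14 CC,
 * 0x1c CSTS, 0x20 NSSR, 0x24 AQA, 0x28/0x2c ASQ, 0x30/0x34 ACQ,
 * 0x38 CMBLOC, 0x3c CMBSZ and the PMR registers from 0xe00 up.
 */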
static void nvme_write_bar(NvmeCtrl *n, hwaddr offset, uint64_t data,
                           unsigned size)
{
    if (unlikely(offset & (sizeof(uint32_t) - 1))) {
        NVME_GUEST_ERR(pci_nvme_ub_mmiowr_misaligned32,
                       "MMIO write not 32-bit aligned,"
                       " offset=0x%"PRIx64"", offset);
        /* should be ignored, fall through for now */
    }

    if (unlikely(size < sizeof(uint32_t))) {
        NVME_GUEST_ERR(pci_nvme_ub_mmiowr_toosmall,
                       "MMIO write smaller than 32-bits,"
                       " offset=0x%"PRIx64", size=%u",
                       offset, size);
        /* should be ignored, fall through for now */
    }

    switch (offset) {
    case 0xc:   /* INTMS */
        if (unlikely(msix_enabled(&(n->parent_obj)))) {
            NVME_GUEST_ERR(pci_nvme_ub_mmiowr_intmask_with_msix,
                           "undefined access to interrupt mask set"
                           " when MSI-X is enabled");
            /* should be ignored, fall through for now */
        }
        n->bar.intms |= data & 0xffffffff;
        n->bar.intmc = n->bar.intms;
        trace_pci_nvme_mmio_intm_set(data & 0xffffffff, n->bar.intmc);
        nvme_irq_check(n);
        break;
    case 0x10:  /* INTMC */
        if (unlikely(msix_enabled(&(n->parent_obj)))) {
            NVME_GUEST_ERR(pci_nvme_ub_mmiowr_intmask_with_msix,
                           "undefined access to interrupt mask clr"
                           " when MSI-X is enabled");
            /* should be ignored, fall through for now */
        }
        n->bar.intms &= ~(data & 0xffffffff);
        n->bar.intmc = n->bar.intms;
        trace_pci_nvme_mmio_intm_clr(data & 0xffffffff, n->bar.intmc);
        nvme_irq_check(n);
        break;
    case 0x14:  /* CC */
        trace_pci_nvme_mmio_cfg(data & 0xffffffff);
        /* Windows first sends data, then sends enable bit */
        if (!NVME_CC_EN(data) && !NVME_CC_EN(n->bar.cc) &&
            !NVME_CC_SHN(data) && !NVME_CC_SHN(n->bar.cc))
        {
            n->bar.cc = data;
        }

        if (NVME_CC_EN(data) && !NVME_CC_EN(n->bar.cc)) {
            n->bar.cc = data;
            if (unlikely(nvme_start_ctrl(n))) {
                trace_pci_nvme_err_startfail();
                n->bar.csts = NVME_CSTS_FAILED;
            } else {
                trace_pci_nvme_mmio_start_success();
                n->bar.csts = NVME_CSTS_READY;
            }
        } else if (!NVME_CC_EN(data) && NVME_CC_EN(n->bar.cc)) {
            trace_pci_nvme_mmio_stopped();
            nvme_clear_ctrl(n);
            n->bar.csts &= ~NVME_CSTS_READY;
        }
        if (NVME_CC_SHN(data) && !(NVME_CC_SHN(n->bar.cc))) {
            trace_pci_nvme_mmio_shutdown_set();
            nvme_clear_ctrl(n);
            n->bar.cc = data;
            n->bar.csts |= NVME_CSTS_SHST_COMPLETE;
        } else if (!NVME_CC_SHN(data) && NVME_CC_SHN(n->bar.cc)) {
            trace_pci_nvme_mmio_shutdown_cleared();
            n->bar.csts &= ~NVME_CSTS_SHST_COMPLETE;
            n->bar.cc = data;
        }
        break;
    case 0x1C:  /* CSTS */
        if (data & (1 << 4)) {
            NVME_GUEST_ERR(pci_nvme_ub_mmiowr_ssreset_w1c_unsupported,
                           "attempted to W1C CSTS.NSSRO"
                           " but CAP.NSSRS is zero (not supported)");
        } else if (data != 0) {
            NVME_GUEST_ERR(pci_nvme_ub_mmiowr_ro_csts,
                           "attempted to set a read only bit"
                           " of controller status");
        }
        break;
    case 0x20:  /* NSSR */
        if (data == 0x4E564D65) {
            trace_pci_nvme_ub_mmiowr_ssreset_unsupported();
        } else {
            /* The spec says that writes of other values have no effect */
            return;
        }
        break;
    case 0x24:  /* AQA */
        n->bar.aqa = data & 0xffffffff;
        trace_pci_nvme_mmio_aqattr(data & 0xffffffff);
        break;
    case 0x28:  /* ASQ */
        n->bar.asq = data;
        trace_pci_nvme_mmio_asqaddr(data);
        break;
    case 0x2c:  /* ASQ hi */
        n->bar.asq |= data << 32;
        trace_pci_nvme_mmio_asqaddr_hi(data, n->bar.asq);
        break;
    case 0x30:  /* ACQ */
        trace_pci_nvme_mmio_acqaddr(data);
        n->bar.acq = data;
        break;
    case 0x34:  /* ACQ hi */
        n->bar.acq |= data << 32;
        trace_pci_nvme_mmio_acqaddr_hi(data, n->bar.acq);
        break;
    case 0x38:  /* CMBLOC */
        NVME_GUEST_ERR(pci_nvme_ub_mmiowr_cmbloc_reserved,
                       "invalid write to reserved CMBLOC"
                       " when CMBSZ is zero, ignored");
        return;
    case 0x3C:  /* CMBSZ */
        NVME_GUEST_ERR(pci_nvme_ub_mmiowr_cmbsz_readonly,
                       "invalid write to read only CMBSZ, ignored");
        return;
    case 0xE00: /* PMRCAP */
        NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrcap_readonly,
                       "invalid write to PMRCAP register, ignored");
        return;
    case 0xE04: /* TODO PMRCTL */
        return;
    case 0xE08: /* PMRSTS */
        NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrsts_readonly,
                       "invalid write to PMRSTS register, ignored");
        return;
    case 0xE0C: /* PMREBS */
        NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrebs_readonly,
                       "invalid write to PMREBS register, ignored");
        return;
    case 0xE10: /* PMRSWTP */
        NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrswtp_readonly,
                       "invalid write to PMRSWTP register, ignored");
        return;
    case 0xE14: /* TODO PMRMSC */
        break;
    default:
        NVME_GUEST_ERR(pci_nvme_ub_mmiowr_invalid,
                       "invalid MMIO write,"
                       " offset=0x%"PRIx64", data=%"PRIx64"",
                       offset, data);
        break;
    }
}

static uint64_t nvme_mmio_read(void *opaque, hwaddr addr, unsigned size)
{
    NvmeCtrl *n = (NvmeCtrl *)opaque;
    uint8_t *ptr = (uint8_t *)&n->bar;
    uint64_t val = 0;

    if (unlikely(addr & (sizeof(uint32_t) - 1))) {
        NVME_GUEST_ERR(pci_nvme_ub_mmiord_misaligned32,
                       "MMIO read not 32-bit aligned,"
                       " offset=0x%"PRIx64"", addr);
        /* should RAZ, fall through for now */
    } else if (unlikely(size < sizeof(uint32_t))) {
        NVME_GUEST_ERR(pci_nvme_ub_mmiord_toosmall,
                       "MMIO read smaller than 32-bits,"
                       " offset=0x%"PRIx64"", addr);
        /* should RAZ, fall through for now */
    }

    if (addr < sizeof(n->bar)) {
        /*
         * When PMRWBM bit 1 is set then read from
         * from PMRSTS should ensure prior writes
         * made it to persistent media
         */
        if (addr == 0xE08 &&
            (NVME_PMRCAP_PMRWBM(n->bar.pmrcap) & 0x02)) {
            memory_region_msync(&n->pmrdev->mr, 0, n->pmrdev->size);
        }
        memcpy(&val, ptr + addr, size);
    } else {
        NVME_GUEST_ERR(pci_nvme_ub_mmiord_invalid_ofs,
                       "MMIO read beyond last register,"
                       " offset=0x%"PRIx64", returning 0", addr);
    }

    return val;
}
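
/*
 * Doorbell writes land past the register file, from offset 0x1000 up,
 * with a stride of 4 (CAP.DSTRD is left at zero): the SQ tail doorbell
 * for queue qid sits at 0x1000 + qid * 8 and the matching CQ head
 * doorbell 4 bytes after it, so bit 2 of the offset selects the
 * completion path below.
 */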
static void nvme_process_db(NvmeCtrl *n, hwaddr addr, int val)
{
    uint32_t qid;

    if (unlikely(addr & ((1 << 2) - 1))) {
        NVME_GUEST_ERR(pci_nvme_ub_db_wr_misaligned,
                       "doorbell write not 32-bit aligned,"
                       " offset=0x%"PRIx64", ignoring", addr);
        return;
    }

    if (((addr - 0x1000) >> 2) & 1) {
        /* Completion queue doorbell write */

        uint16_t new_head = val & 0xffff;
        int start_sqs;
        NvmeCQueue *cq;

        qid = (addr - (0x1000 + (1 << 2))) >> 3;
        if (unlikely(nvme_check_cqid(n, qid))) {
            NVME_GUEST_ERR(pci_nvme_ub_db_wr_invalid_cq,
                           "completion queue doorbell write"
                           " for nonexistent queue,"
                           " sqid=%"PRIu32", ignoring", qid);
            return;
        }

        cq = n->cq[qid];
        if (unlikely(new_head >= cq->size)) {
            NVME_GUEST_ERR(pci_nvme_ub_db_wr_invalid_cqhead,
                           "completion queue doorbell write value"
                           " beyond queue size, sqid=%"PRIu32","
                           " new_head=%"PRIu16", ignoring",
                           qid, new_head);
            return;
        }

        start_sqs = nvme_cq_full(cq) ? 1 : 0;
        cq->head = new_head;
        if (start_sqs) {
            NvmeSQueue *sq;
            QTAILQ_FOREACH(sq, &cq->sq_list, entry) {
                timer_mod(sq->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 500);
            }
            timer_mod(cq->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 500);
        }

        if (cq->tail == cq->head) {
            nvme_irq_deassert(n, cq);
        }
    } else {
        /* Submission queue doorbell write */

        uint16_t new_tail = val & 0xffff;
        NvmeSQueue *sq;

        qid = (addr - 0x1000) >> 3;
        if (unlikely(nvme_check_sqid(n, qid))) {
            NVME_GUEST_ERR(pci_nvme_ub_db_wr_invalid_sq,
                           "submission queue doorbell write"
                           " for nonexistent queue,"
                           " sqid=%"PRIu32", ignoring", qid);
            return;
        }

        sq = n->sq[qid];
        if (unlikely(new_tail >= sq->size)) {
            NVME_GUEST_ERR(pci_nvme_ub_db_wr_invalid_sqtail,
                           "submission queue doorbell write value"
                           " beyond queue size, sqid=%"PRIu32","
                           " new_tail=%"PRIu16", ignoring",
                           qid, new_tail);
            return;
        }

        sq->tail = new_tail;
        timer_mod(sq->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 500);
    }
}

static void nvme_mmio_write(void *opaque, hwaddr addr, uint64_t data,
                            unsigned size)
{
    NvmeCtrl *n = (NvmeCtrl *)opaque;
    if (addr < sizeof(n->bar)) {
        nvme_write_bar(n, addr, data, size);
    } else if (addr >= 0x1000) {
        nvme_process_db(n, addr, data);
    }
}

static const MemoryRegionOps nvme_mmio_ops = {
    .read = nvme_mmio_read,
    .write = nvme_mmio_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 2,
        .max_access_size = 8,
    },
};

static void nvme_cmb_write(void *opaque, hwaddr addr, uint64_t data,
                           unsigned size)
{
    NvmeCtrl *n = (NvmeCtrl *)opaque;
    stn_le_p(&n->cmbuf[addr], size, data);
}

static uint64_t nvme_cmb_read(void *opaque, hwaddr addr, unsigned size)
{
    NvmeCtrl *n = (NvmeCtrl *)opaque;
    return ldn_le_p(&n->cmbuf[addr], size);
}

static const MemoryRegionOps nvme_cmb_ops = {
    .read = nvme_cmb_read,
    .write = nvme_cmb_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};
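
/*
 * BAR layout set up below: BAR0 is a 64-bit region holding the register
 * file plus doorbells, MSI-X lives in its own exclusive BAR (index 4 in
 * the msix_init_exclusive_bar() call), and BAR2 holds either the CMB or
 * the PMR backend, whichever is configured.
 */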
static void nvme_realize(PCIDevice *pci_dev, Error **errp)
{
    NvmeCtrl *n = NVME(pci_dev);
    NvmeIdCtrl *id = &n->id_ctrl;

    int i;
    int64_t bs_size;
    uint8_t *pci_conf;

    if (!n->params.num_queues) {
        error_setg(errp, "num_queues can't be zero");
        return;
    }

    if (!n->conf.blk) {
        error_setg(errp, "drive property not set");
        return;
    }

    bs_size = blk_getlength(n->conf.blk);
    if (bs_size < 0) {
        error_setg(errp, "could not get backing file size");
        return;
    }

    if (!n->params.serial) {
        error_setg(errp, "serial property not set");
        return;
    }

    if (!n->params.cmb_size_mb && n->pmrdev) {
        if (host_memory_backend_is_mapped(n->pmrdev)) {
            char *path = object_get_canonical_path_component(OBJECT(n->pmrdev));
            error_setg(errp, "can't use already busy memdev: %s", path);
            g_free(path);
            return;
        }

        if (!is_power_of_2(n->pmrdev->size)) {
            error_setg(errp, "pmr backend size needs to be power of 2 in size");
            return;
        }

        host_memory_backend_set_mapped(n->pmrdev, true);
    }

    blkconf_blocksizes(&n->conf);
    if (!blkconf_apply_backend_options(&n->conf, blk_is_read_only(n->conf.blk),
                                       false, errp)) {
        return;
    }

    pci_conf = pci_dev->config;
    pci_conf[PCI_INTERRUPT_PIN] = 1;
    pci_config_set_prog_interface(pci_dev->config, 0x2);
    pci_config_set_class(pci_dev->config, PCI_CLASS_STORAGE_EXPRESS);
    pcie_endpoint_cap_init(pci_dev, 0x80);

    n->num_namespaces = 1;

    /* num_queues is really number of pairs, so each has two doorbells */
    n->reg_size = pow2ceil(NVME_REG_SIZE +
                           2 * n->params.num_queues * NVME_DB_SIZE);
    n->ns_size = bs_size / (uint64_t)n->num_namespaces;

    n->namespaces = g_new0(NvmeNamespace, n->num_namespaces);
    n->sq = g_new0(NvmeSQueue *, n->params.num_queues);
    n->cq = g_new0(NvmeCQueue *, n->params.num_queues);

    memory_region_init_io(&n->iomem, OBJECT(n), &nvme_mmio_ops, n,
                          "nvme", n->reg_size);
    pci_register_bar(pci_dev, 0,
        PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_TYPE_64,
        &n->iomem);
    msix_init_exclusive_bar(pci_dev, n->params.num_queues, 4, NULL);

    id->vid = cpu_to_le16(pci_get_word(pci_conf + PCI_VENDOR_ID));
    id->ssvid = cpu_to_le16(pci_get_word(pci_conf + PCI_SUBSYSTEM_VENDOR_ID));
    strpadcpy((char *)id->mn, sizeof(id->mn), "QEMU NVMe Ctrl", ' ');
    strpadcpy((char *)id->fr, sizeof(id->fr), "1.0", ' ');
    strpadcpy((char *)id->sn, sizeof(id->sn), n->params.serial, ' ');
    id->rab = 6;
    id->ieee[0] = 0x00;
    id->ieee[1] = 0x02;
    id->ieee[2] = 0xb3;
    id->oacs = cpu_to_le16(0);
    id->frmw = 7 << 1;
    id->lpa = 1 << 0;
    id->sqes = (0x6 << 4) | 0x6;
    id->cqes = (0x4 << 4) | 0x4;
    id->nn = cpu_to_le32(n->num_namespaces);
    id->oncs = cpu_to_le16(NVME_ONCS_WRITE_ZEROS | NVME_ONCS_TIMESTAMP);
    id->psd[0].mp = cpu_to_le16(0x9c4);
    id->psd[0].enlat = cpu_to_le32(0x10);
    id->psd[0].exlat = cpu_to_le32(0x4);
    if (blk_enable_write_cache(n->conf.blk)) {
        id->vwc = 1;
    }

    n->bar.cap = 0;
    NVME_CAP_SET_MQES(n->bar.cap, 0x7ff);
    NVME_CAP_SET_CQR(n->bar.cap, 1);
    NVME_CAP_SET_TO(n->bar.cap, 0xf);
    NVME_CAP_SET_CSS(n->bar.cap, 1);
    NVME_CAP_SET_MPSMAX(n->bar.cap, 4);

    n->bar.vs = 0x00010200;
    n->bar.intmc = n->bar.intms = 0;

    if (n->params.cmb_size_mb) {

        NVME_CMBLOC_SET_BIR(n->bar.cmbloc, 2);
        NVME_CMBLOC_SET_OFST(n->bar.cmbloc, 0);

        NVME_CMBSZ_SET_SQS(n->bar.cmbsz, 1);
        NVME_CMBSZ_SET_CQS(n->bar.cmbsz, 0);
        NVME_CMBSZ_SET_LISTS(n->bar.cmbsz, 0);
        NVME_CMBSZ_SET_RDS(n->bar.cmbsz, 1);
        NVME_CMBSZ_SET_WDS(n->bar.cmbsz, 1);
        NVME_CMBSZ_SET_SZU(n->bar.cmbsz, 2); /* MBs */
        NVME_CMBSZ_SET_SZ(n->bar.cmbsz, n->params.cmb_size_mb);

        n->cmbloc = n->bar.cmbloc;
        n->cmbsz = n->bar.cmbsz;

        n->cmbuf = g_malloc0(NVME_CMBSZ_GETSIZE(n->bar.cmbsz));
        memory_region_init_io(&n->ctrl_mem, OBJECT(n), &nvme_cmb_ops, n,
                              "nvme-cmb", NVME_CMBSZ_GETSIZE(n->bar.cmbsz));
        pci_register_bar(pci_dev, NVME_CMBLOC_BIR(n->bar.cmbloc),
            PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_TYPE_64 |
            PCI_BASE_ADDRESS_MEM_PREFETCH, &n->ctrl_mem);

    } else if (n->pmrdev) {
        /* Controller Capabilities register */
        NVME_CAP_SET_PMRS(n->bar.cap, 1);

        /* PMR Capabilities register */
        NVME_PMRCAP_SET_RDS(n->bar.pmrcap, 0);
        NVME_PMRCAP_SET_WDS(n->bar.pmrcap, 0);
        NVME_PMRCAP_SET_BIR(n->bar.pmrcap, 2);
        NVME_PMRCAP_SET_PMRTU(n->bar.pmrcap, 0);
        /* Turn on bit 1 support */
        NVME_PMRCAP_SET_PMRWBM(n->bar.pmrcap, 0x02);
        NVME_PMRCAP_SET_PMRTO(n->bar.pmrcap, 0);
        NVME_PMRCAP_SET_CMSS(n->bar.pmrcap, 0);

        /* PMR Control register */
        NVME_PMRCTL_SET_EN(n->bar.pmrctl, 0);

        /* PMR Status register */
        NVME_PMRSTS_SET_ERR(n->bar.pmrsts, 0);
        NVME_PMRSTS_SET_NRDY(n->bar.pmrsts, 0);
        NVME_PMRSTS_SET_HSTS(n->bar.pmrsts, 0);
        NVME_PMRSTS_SET_CBAI(n->bar.pmrsts, 0);

        /* PMR Elasticity Buffer Size register */
        NVME_PMREBS_SET_PMRSZU(n->bar.pmrebs, 0);
        NVME_PMREBS_SET_RBB(n->bar.pmrebs, 0);
        NVME_PMREBS_SET_PMRWBZ(n->bar.pmrebs, 0);

        /* PMR Sustained Write Throughput register */
        NVME_PMRSWTP_SET_PMRSWTU(n->bar.pmrswtp, 0);
        NVME_PMRSWTP_SET_PMRSWTV(n->bar.pmrswtp, 0);

        /* PMR Memory Space Control register */
        NVME_PMRMSC_SET_CMSE(n->bar.pmrmsc, 0);
        NVME_PMRMSC_SET_CBA(n->bar.pmrmsc, 0);

        pci_register_bar(pci_dev, NVME_PMRCAP_BIR(n->bar.pmrcap),
            PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_TYPE_64 |
            PCI_BASE_ADDRESS_MEM_PREFETCH, &n->pmrdev->mr);
    }

    for (i = 0; i < n->num_namespaces; i++) {
        NvmeNamespace *ns = &n->namespaces[i];
        NvmeIdNs *id_ns = &ns->id_ns;
        id_ns->nsfeat = 0;
        id_ns->nlbaf = 0;
        id_ns->flbas = 0;
        id_ns->mc = 0;
        id_ns->dpc = 0;
        id_ns->dps = 0;
        id_ns->lbaf[0].ds = BDRV_SECTOR_BITS;
        id_ns->ncap  = id_ns->nuse = id_ns->nsze =
            cpu_to_le64(n->ns_size >>
                id_ns->lbaf[NVME_ID_NS_FLBAS_INDEX(ns->id_ns.flbas)].ds);
    }
}

static void nvme_exit(PCIDevice *pci_dev)
{
    NvmeCtrl *n = NVME(pci_dev);

    nvme_clear_ctrl(n);
    g_free(n->namespaces);
    g_free(n->cq);
    g_free(n->sq);

    if (n->params.cmb_size_mb) {
        g_free(n->cmbuf);
    }

    if (n->pmrdev) {
        host_memory_backend_set_mapped(n->pmrdev, false);
    }
    msix_uninit_exclusive_bar(pci_dev);
}

static Property nvme_props[] = {
    DEFINE_BLOCK_PROPERTIES(NvmeCtrl, conf),
    DEFINE_PROP_LINK("pmrdev", NvmeCtrl, pmrdev, TYPE_MEMORY_BACKEND,
                     HostMemoryBackend *),
    DEFINE_PROP_STRING("serial", NvmeCtrl, params.serial),
    DEFINE_PROP_UINT32("cmb_size_mb", NvmeCtrl, params.cmb_size_mb, 0),
    DEFINE_PROP_UINT32("num_queues", NvmeCtrl, params.num_queues, 64),
    DEFINE_PROP_END_OF_LIST(),
};

static const VMStateDescription nvme_vmstate = {
    .name = "nvme",
    .unmigratable = 1,
};

static void nvme_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);
    PCIDeviceClass *pc = PCI_DEVICE_CLASS(oc);

    pc->realize = nvme_realize;
    pc->exit = nvme_exit;
    pc->class_id = PCI_CLASS_STORAGE_EXPRESS;
    pc->vendor_id = PCI_VENDOR_ID_INTEL;
    pc->device_id = 0x5845;
    pc->revision = 2;

    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
    dc->desc = "Non-Volatile Memory Express";
    device_class_set_props(dc, nvme_props);
    dc->vmsd = &nvme_vmstate;
}

static void nvme_instance_init(Object *obj)
{
    NvmeCtrl *s = NVME(obj);

    device_add_bootindex_property(obj, &s->conf.bootindex,
                                  "bootindex", "/namespace@1,0",
                                  DEVICE(obj));
}

static const TypeInfo nvme_info = {
    .name          = TYPE_NVME,
    .parent        = TYPE_PCI_DEVICE,
    .instance_size = sizeof(NvmeCtrl),
    .class_init    = nvme_class_init,
    .instance_init = nvme_instance_init,
    .interfaces = (InterfaceInfo[]) {
        { INTERFACE_PCIE_DEVICE },
        { }
    },
};

static void nvme_register_types(void)
{
    type_register_static(&nvme_info);
}

type_init(nvme_register_types)