/*
 * QEMU NVM Express Controller
 *
 * Copyright (c) 2012, Intel Corporation
 *
 * Written by Keith Busch <keith.busch@intel.com>
 *
 * This code is licensed under the GNU GPL v2 or later.
 */

/**
 * Reference Specs: http://www.nvmexpress.org, 1.2, 1.1, 1.0e
 *
 *  http://www.nvmexpress.org/resources/
 *
 * Usage: add options:
 *      -drive file=<file>,if=none,id=<drive_id>
 *      -device nvme,drive=<drive_id>,serial=<serial>,id=<id[optional]>, \
 *              cmb_size_mb=<cmb_size_mb[optional]>, \
 *              num_queues=<N[optional]>
 *
 * Note cmb_size_mb denotes size of CMB in MB. CMB is assumed to be at
 * offset 0 in BAR2 and supports only WDS, RDS and SQS for now.
 */
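/*
 * Example invocation (illustrative only; the image path, serial string and
 * sizes below are arbitrary values, not defaults):
 *
 *      -drive file=nvme.img,if=none,id=nvme0 \
 *      -device nvme,drive=nvme0,serial=deadbeef,cmb_size_mb=64,num_queues=8
 */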
#include "qemu/osdep.h"
#include "qemu/units.h"
#include "hw/block/block.h"
#include "hw/hw.h"
#include "hw/pci/msix.h"
#include "hw/pci/pci.h"
#include "sysemu/sysemu.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "sysemu/block-backend.h"

#include "qemu/log.h"
#include "qemu/module.h"
#include "qemu/cutils.h"
#include "trace.h"
#include "nvme.h"
#define NVME_GUEST_ERR(trace, fmt, ...) \
    do { \
        (trace_##trace)(__VA_ARGS__); \
        qemu_log_mask(LOG_GUEST_ERROR, #trace \
            " in %s: " fmt "\n", __func__, ## __VA_ARGS__); \
    } while (0)
static void nvme_process_sq(void *opaque);
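/*
 * Controller-side reads are routed either to the Controller Memory Buffer
 * (a plain host allocation, n->cmbuf) or to guest memory via PCI DMA.
 * Illustrative example, assuming a CMB mapped at guest address 0xfe000000:
 * a 16-byte read at 0xfe000100 is served by a memcpy from n->cmbuf[0x100],
 * while any address outside that window goes through pci_dma_read().
 */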
static void nvme_addr_read(NvmeCtrl *n, hwaddr addr, void *buf, int size)
{
    if (n->cmbsz && addr >= n->ctrl_mem.addr &&
                addr < (n->ctrl_mem.addr + int128_get64(n->ctrl_mem.size))) {
        memcpy(buf, (void *)&n->cmbuf[addr - n->ctrl_mem.addr], size);
    } else {
        pci_dma_read(&n->parent_obj, addr, buf, size);
    }
}
static int nvme_check_sqid(NvmeCtrl *n, uint16_t sqid)
{
    return sqid < n->num_queues && n->sq[sqid] != NULL ? 0 : -1;
}
static int nvme_check_cqid(NvmeCtrl *n, uint16_t cqid)
{
    return cqid < n->num_queues && n->cq[cqid] != NULL ? 0 : -1;
}
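/*
 * Completion queue entries carry a phase tag that flips every time the
 * tail wraps, letting the host detect new entries without reading a
 * controller register. A sketch of one wrap, assuming size=4: tail goes
 * 2 -> 3 -> 0, and the 3 -> 0 step flips phase from 1 to 0.
 */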
static void nvme_inc_cq_tail(NvmeCQueue *cq)
{
    cq->tail++;
    if (cq->tail >= cq->size) {
        cq->tail = 0;
        cq->phase = !cq->phase;
    }
}
static void nvme_inc_sq_head(NvmeSQueue *sq)
{
    sq->head = (sq->head + 1) % sq->size;
}
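/*
 * Classic ring-buffer convention: one slot is sacrificed so that "full"
 * ((tail + 1) % size == head) and "empty" (head == tail) remain
 * distinguishable. E.g. with size=4 and head=0 the queue reads as full
 * once tail reaches 3, i.e. with only three entries outstanding.
 */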
static uint8_t nvme_cq_full(NvmeCQueue *cq)
{
    return (cq->tail + 1) % cq->size == cq->head;
}
static uint8_t nvme_sq_empty(NvmeSQueue *sq)
{
    return sq->head == sq->tail;
}
static void nvme_irq_check(NvmeCtrl *n)
{
    if (msix_enabled(&(n->parent_obj))) {
        return;
    }
    if (~n->bar.intms & n->irq_status) {
        pci_irq_assert(&n->parent_obj);
    } else {
        pci_irq_deassert(&n->parent_obj);
    }
}
static void nvme_irq_assert(NvmeCtrl *n, NvmeCQueue *cq)
{
    if (cq->irq_enabled) {
        if (msix_enabled(&(n->parent_obj))) {
            trace_nvme_irq_msix(cq->vector);
            msix_notify(&(n->parent_obj), cq->vector);
        } else {
            trace_nvme_irq_pin();
            assert(cq->cqid < 64);
            n->irq_status |= 1 << cq->cqid;
            nvme_irq_check(n);
        }
    } else {
        trace_nvme_irq_masked();
    }
}
static void nvme_irq_deassert(NvmeCtrl *n, NvmeCQueue *cq)
{
    if (cq->irq_enabled) {
        if (msix_enabled(&(n->parent_obj))) {
            return;
        }
        assert(cq->cqid < 64);
        n->irq_status &= ~(1 << cq->cqid);
        nvme_irq_check(n);
    }
}
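/*
 * PRP mapping: PRP1 may point anywhere inside a page, but every later PRP
 * entry must be page aligned. A worked example with page_size=4096 and
 * len=12288 (illustrative numbers): if prp1 ends in offset 0x200, the
 * first segment covers 4096 - 512 = 3584 bytes; since more than one page
 * remains, prp2 points at a PRP list whose page-aligned entries cover the
 * remaining 8704 bytes. A transfer that fits in two pages skips the list
 * and uses prp2 as a direct pointer instead.
 */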
static uint16_t nvme_map_prp(QEMUSGList *qsg, QEMUIOVector *iov, uint64_t prp1,
                             uint64_t prp2, uint32_t len, NvmeCtrl *n)
{
    hwaddr trans_len = n->page_size - (prp1 % n->page_size);
    trans_len = MIN(len, trans_len);
    int num_prps = (len >> n->page_bits) + 1;

    if (unlikely(!prp1)) {
        trace_nvme_err_invalid_prp();
        return NVME_INVALID_FIELD | NVME_DNR;
    } else if (n->cmbsz && prp1 >= n->ctrl_mem.addr &&
               prp1 < n->ctrl_mem.addr + int128_get64(n->ctrl_mem.size)) {
        qsg->nsg = 0;
        qemu_iovec_init(iov, num_prps);
        qemu_iovec_add(iov, (void *)&n->cmbuf[prp1 - n->ctrl_mem.addr],
                       trans_len);
    } else {
        pci_dma_sglist_init(qsg, &n->parent_obj, num_prps);
        qemu_sglist_add(qsg, prp1, trans_len);
    }
    len -= trans_len;
    if (len) {
        if (unlikely(!prp2)) {
            trace_nvme_err_invalid_prp2_missing();
            goto unmap;
        }
        if (len > n->page_size) {
            uint64_t prp_list[n->max_prp_ents];
            uint32_t nents, prp_trans;
            int i = 0;

            nents = (len + n->page_size - 1) >> n->page_bits;
            prp_trans = MIN(n->max_prp_ents, nents) * sizeof(uint64_t);
            nvme_addr_read(n, prp2, (void *)prp_list, prp_trans);
            while (len != 0) {
                uint64_t prp_ent = le64_to_cpu(prp_list[i]);

                if (i == n->max_prp_ents - 1 && len > n->page_size) {
                    if (unlikely(!prp_ent || prp_ent & (n->page_size - 1))) {
                        trace_nvme_err_invalid_prplist_ent(prp_ent);
                        goto unmap;
                    }

                    i = 0;
                    nents = (len + n->page_size - 1) >> n->page_bits;
                    prp_trans = MIN(n->max_prp_ents, nents) * sizeof(uint64_t);
                    nvme_addr_read(n, prp_ent, (void *)prp_list,
                        prp_trans);
                    prp_ent = le64_to_cpu(prp_list[i]);
                }

                if (unlikely(!prp_ent || prp_ent & (n->page_size - 1))) {
                    trace_nvme_err_invalid_prplist_ent(prp_ent);
                    goto unmap;
                }

                trans_len = MIN(len, n->page_size);
                if (qsg->nsg) {
                    qemu_sglist_add(qsg, prp_ent, trans_len);
                } else {
                    qemu_iovec_add(iov,
                        (void *)&n->cmbuf[prp_ent - n->ctrl_mem.addr],
                        trans_len);
                }
                len -= trans_len;
                i++;
            }
        } else {
            if (unlikely(prp2 & (n->page_size - 1))) {
                trace_nvme_err_invalid_prp2_align(prp2);
                goto unmap;
            }
            if (qsg->nsg) {
                qemu_sglist_add(qsg, prp2, len);
            } else {
                qemu_iovec_add(iov,
                    (void *)&n->cmbuf[prp2 - n->ctrl_mem.addr], trans_len);
            }
        }
    }
    return NVME_SUCCESS;

 unmap:
    qemu_sglist_destroy(qsg);
    return NVME_INVALID_FIELD | NVME_DNR;
}
static uint16_t nvme_dma_write_prp(NvmeCtrl *n, uint8_t *ptr, uint32_t len,
    uint64_t prp1, uint64_t prp2)
{
    QEMUSGList qsg;
    QEMUIOVector iov;
    uint16_t status = NVME_SUCCESS;

    if (nvme_map_prp(&qsg, &iov, prp1, prp2, len, n)) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }
    if (qsg.nsg > 0) {
        if (dma_buf_write(ptr, len, &qsg)) {
            status = NVME_INVALID_FIELD | NVME_DNR;
        }
        qemu_sglist_destroy(&qsg);
    } else {
        if (qemu_iovec_to_buf(&iov, 0, ptr, len) != len) {
            status = NVME_INVALID_FIELD | NVME_DNR;
        }
        qemu_iovec_destroy(&iov);
    }
    return status;
}
static uint16_t nvme_dma_read_prp(NvmeCtrl *n, uint8_t *ptr, uint32_t len,
    uint64_t prp1, uint64_t prp2)
{
    QEMUSGList qsg;
    QEMUIOVector iov;
    uint16_t status = NVME_SUCCESS;

    trace_nvme_dma_read(prp1, prp2);

    if (nvme_map_prp(&qsg, &iov, prp1, prp2, len, n)) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }
    if (qsg.nsg > 0) {
        if (unlikely(dma_buf_read(ptr, len, &qsg))) {
            trace_nvme_err_invalid_dma();
            status = NVME_INVALID_FIELD | NVME_DNR;
        }
        qemu_sglist_destroy(&qsg);
    } else {
        if (unlikely(qemu_iovec_from_buf(&iov, 0, ptr, len) != len)) {
            trace_nvme_err_invalid_dma();
            status = NVME_INVALID_FIELD | NVME_DNR;
        }
        qemu_iovec_destroy(&iov);
    }
    return status;
}
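/*
 * Completions are not posted synchronously: nvme_enqueue_req_completion()
 * parks each finished request on cq->req_list and arms cq->timer 500 ns
 * ahead, so this callback drains whatever has accumulated, writes each
 * CQE into guest memory (the status field is shifted left by one to make
 * room for the phase bit), and raises the interrupt if the queue is
 * non-empty.
 */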
static void nvme_post_cqes(void *opaque)
{
    NvmeCQueue *cq = opaque;
    NvmeCtrl *n = cq->ctrl;
    NvmeRequest *req, *next;

    QTAILQ_FOREACH_SAFE(req, &cq->req_list, entry, next) {
        NvmeSQueue *sq;
        hwaddr addr;

        if (nvme_cq_full(cq)) {
            break;
        }

        QTAILQ_REMOVE(&cq->req_list, req, entry);
        sq = req->sq;
        req->cqe.status = cpu_to_le16((req->status << 1) | cq->phase);
        req->cqe.sq_id = cpu_to_le16(sq->sqid);
        req->cqe.sq_head = cpu_to_le16(sq->head);
        addr = cq->dma_addr + cq->tail * n->cqe_size;
        nvme_inc_cq_tail(cq);
        pci_dma_write(&n->parent_obj, addr, (void *)&req->cqe,
            sizeof(req->cqe));
        QTAILQ_INSERT_TAIL(&sq->req_list, req, entry);
    }
    if (cq->tail != cq->head) {
        nvme_irq_assert(n, cq);
    }
}
static void nvme_enqueue_req_completion(NvmeCQueue *cq, NvmeRequest *req)
{
    assert(cq->cqid == req->sq->cqid);
    QTAILQ_REMOVE(&req->sq->out_req_list, req, entry);
    QTAILQ_INSERT_TAIL(&cq->req_list, req, entry);
    timer_mod(cq->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 500);
}
static void nvme_rw_cb(void *opaque, int ret)
{
    NvmeRequest *req = opaque;
    NvmeSQueue *sq = req->sq;
    NvmeCtrl *n = sq->ctrl;
    NvmeCQueue *cq = n->cq[sq->cqid];

    if (!ret) {
        block_acct_done(blk_get_stats(n->conf.blk), &req->acct);
        req->status = NVME_SUCCESS;
    } else {
        block_acct_failed(blk_get_stats(n->conf.blk), &req->acct);
        req->status = NVME_INTERNAL_DEV_ERROR;
    }
    if (req->has_sg) {
        qemu_sglist_destroy(&req->qsg);
    }
    nvme_enqueue_req_completion(cq, req);
}
static uint16_t nvme_flush(NvmeCtrl *n, NvmeNamespace *ns, NvmeCmd *cmd,
    NvmeRequest *req)
{
    req->has_sg = false;
    block_acct_start(blk_get_stats(n->conf.blk), &req->acct, 0,
         BLOCK_ACCT_FLUSH);
    req->aiocb = blk_aio_flush(n->conf.blk, nvme_rw_cb, req);

    return NVME_NO_COMPLETE;
}
static uint16_t nvme_write_zeros(NvmeCtrl *n, NvmeNamespace *ns, NvmeCmd *cmd,
    NvmeRequest *req)
{
    NvmeRwCmd *rw = (NvmeRwCmd *)cmd;
    const uint8_t lba_index = NVME_ID_NS_FLBAS_INDEX(ns->id_ns.flbas);
    const uint8_t data_shift = ns->id_ns.lbaf[lba_index].ds;
    uint64_t slba = le64_to_cpu(rw->slba);
    uint32_t nlb  = le16_to_cpu(rw->nlb) + 1;
    uint64_t offset = slba << data_shift;
    uint32_t count = nlb << data_shift;

    if (unlikely(slba + nlb > ns->id_ns.nsze)) {
        trace_nvme_err_invalid_lba_range(slba, nlb, ns->id_ns.nsze);
        return NVME_LBA_RANGE | NVME_DNR;
    }

    req->has_sg = false;
    block_acct_start(blk_get_stats(n->conf.blk), &req->acct, 0,
                     BLOCK_ACCT_WRITE);
    req->aiocb = blk_aio_pwrite_zeroes(n->conf.blk, offset, count,
                                        BDRV_REQ_MAY_UNMAP, nvme_rw_cb, req);
    return NVME_NO_COMPLETE;
}
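/*
 * LBA-to-byte conversion used by the read/write path below: with the
 * default 512-byte LBA format data_shift is 9, so e.g. slba=8, nlb=16
 * gives data_offset = 8 << 9 = 4096 and data_size = 16 << 9 = 8192
 * (example values, not taken from a real command).
 */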
static uint16_t nvme_rw(NvmeCtrl *n, NvmeNamespace *ns, NvmeCmd *cmd,
    NvmeRequest *req)
{
    NvmeRwCmd *rw = (NvmeRwCmd *)cmd;
    uint32_t nlb  = le32_to_cpu(rw->nlb) + 1;
    uint64_t slba = le64_to_cpu(rw->slba);
    uint64_t prp1 = le64_to_cpu(rw->prp1);
    uint64_t prp2 = le64_to_cpu(rw->prp2);

    uint8_t lba_index  = NVME_ID_NS_FLBAS_INDEX(ns->id_ns.flbas);
    uint8_t data_shift = ns->id_ns.lbaf[lba_index].ds;
    uint64_t data_size = (uint64_t)nlb << data_shift;
    uint64_t data_offset = slba << data_shift;
    int is_write = rw->opcode == NVME_CMD_WRITE ? 1 : 0;
    enum BlockAcctType acct = is_write ? BLOCK_ACCT_WRITE : BLOCK_ACCT_READ;

    trace_nvme_rw(is_write ? "write" : "read", nlb, data_size, slba);

    if (unlikely((slba + nlb) > ns->id_ns.nsze)) {
        block_acct_invalid(blk_get_stats(n->conf.blk), acct);
        trace_nvme_err_invalid_lba_range(slba, nlb, ns->id_ns.nsze);
        return NVME_LBA_RANGE | NVME_DNR;
    }

    if (nvme_map_prp(&req->qsg, &req->iov, prp1, prp2, data_size, n)) {
        block_acct_invalid(blk_get_stats(n->conf.blk), acct);
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    dma_acct_start(n->conf.blk, &req->acct, &req->qsg, acct);
    if (req->qsg.nsg > 0) {
        req->has_sg = true;
        req->aiocb = is_write ?
            dma_blk_write(n->conf.blk, &req->qsg, data_offset,
                          BDRV_SECTOR_SIZE, nvme_rw_cb, req) :
            dma_blk_read(n->conf.blk, &req->qsg, data_offset,
                         BDRV_SECTOR_SIZE, nvme_rw_cb, req);
    } else {
        req->has_sg = false;
        req->aiocb = is_write ?
            blk_aio_pwritev(n->conf.blk, data_offset, &req->iov, 0, nvme_rw_cb,
                            req) :
            blk_aio_preadv(n->conf.blk, data_offset, &req->iov, 0, nvme_rw_cb,
                           req);
    }

    return NVME_NO_COMPLETE;
}
static uint16_t nvme_io_cmd(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
{
    NvmeNamespace *ns;
    uint32_t nsid = le32_to_cpu(cmd->nsid);

    if (unlikely(nsid == 0 || nsid > n->num_namespaces)) {
        trace_nvme_err_invalid_ns(nsid, n->num_namespaces);
        return NVME_INVALID_NSID | NVME_DNR;
    }

    ns = &n->namespaces[nsid - 1];
    switch (cmd->opcode) {
    case NVME_CMD_FLUSH:
        return nvme_flush(n, ns, cmd, req);
    case NVME_CMD_WRITE_ZEROS:
        return nvme_write_zeros(n, ns, cmd, req);
    case NVME_CMD_WRITE:
    case NVME_CMD_READ:
        return nvme_rw(n, ns, cmd, req);
    default:
        trace_nvme_err_invalid_opc(cmd->opcode);
        return NVME_INVALID_OPCODE | NVME_DNR;
    }
}
static void nvme_free_sq(NvmeSQueue *sq, NvmeCtrl *n)
{
    n->sq[sq->sqid] = NULL;
    timer_del(sq->timer);
    timer_free(sq->timer);
    g_free(sq->io_req);
    if (sq->sqid) {
        g_free(sq);
    }
}
static uint16_t nvme_del_sq(NvmeCtrl *n, NvmeCmd *cmd)
{
    NvmeDeleteQ *c = (NvmeDeleteQ *)cmd;
    NvmeRequest *req, *next;
    NvmeSQueue *sq;
    NvmeCQueue *cq;
    uint16_t qid = le16_to_cpu(c->qid);

    if (unlikely(!qid || nvme_check_sqid(n, qid))) {
        trace_nvme_err_invalid_del_sq(qid);
        return NVME_INVALID_QID | NVME_DNR;
    }

    trace_nvme_del_sq(qid);

    sq = n->sq[qid];
    while (!QTAILQ_EMPTY(&sq->out_req_list)) {
        req = QTAILQ_FIRST(&sq->out_req_list);
        assert(req->aiocb);
        blk_aio_cancel(req->aiocb);
    }
    if (!nvme_check_cqid(n, sq->cqid)) {
        cq = n->cq[sq->cqid];
        QTAILQ_REMOVE(&cq->sq_list, sq, entry);

        nvme_post_cqes(cq);
        QTAILQ_FOREACH_SAFE(req, &cq->req_list, entry, next) {
            if (req->sq == sq) {
                QTAILQ_REMOVE(&cq->req_list, req, entry);
                QTAILQ_INSERT_TAIL(&sq->req_list, req, entry);
            }
        }
    }

    nvme_free_sq(sq, n);
    return NVME_SUCCESS;
}
static void nvme_init_sq(NvmeSQueue *sq, NvmeCtrl *n, uint64_t dma_addr,
    uint16_t sqid, uint16_t cqid, uint16_t size)
{
    int i;
    NvmeCQueue *cq;

    sq->ctrl = n;
    sq->dma_addr = dma_addr;
    sq->sqid = sqid;
    sq->size = size;
    sq->cqid = cqid;
    sq->head = sq->tail = 0;
    sq->io_req = g_new(NvmeRequest, sq->size);

    QTAILQ_INIT(&sq->req_list);
    QTAILQ_INIT(&sq->out_req_list);
    for (i = 0; i < sq->size; i++) {
        sq->io_req[i].sq = sq;
        QTAILQ_INSERT_TAIL(&(sq->req_list), &sq->io_req[i], entry);
    }
    sq->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, nvme_process_sq, sq);

    assert(n->cq[cqid]);
    cq = n->cq[cqid];
    QTAILQ_INSERT_TAIL(&(cq->sq_list), sq, entry);
    n->sq[sqid] = sq;
}
static uint16_t nvme_create_sq(NvmeCtrl *n, NvmeCmd *cmd)
{
    NvmeSQueue *sq;
    NvmeCreateSq *c = (NvmeCreateSq *)cmd;

    uint16_t cqid = le16_to_cpu(c->cqid);
    uint16_t sqid = le16_to_cpu(c->sqid);
    uint16_t qsize = le16_to_cpu(c->qsize);
    uint16_t qflags = le16_to_cpu(c->sq_flags);
    uint64_t prp1 = le64_to_cpu(c->prp1);

    trace_nvme_create_sq(prp1, sqid, cqid, qsize, qflags);

    if (unlikely(!cqid || nvme_check_cqid(n, cqid))) {
        trace_nvme_err_invalid_create_sq_cqid(cqid);
        return NVME_INVALID_CQID | NVME_DNR;
    }
    if (unlikely(!sqid || !nvme_check_sqid(n, sqid))) {
        trace_nvme_err_invalid_create_sq_sqid(sqid);
        return NVME_INVALID_QID | NVME_DNR;
    }
    if (unlikely(!qsize || qsize > NVME_CAP_MQES(n->bar.cap))) {
        trace_nvme_err_invalid_create_sq_size(qsize);
        return NVME_MAX_QSIZE_EXCEEDED | NVME_DNR;
    }
    if (unlikely(!prp1 || prp1 & (n->page_size - 1))) {
        trace_nvme_err_invalid_create_sq_addr(prp1);
        return NVME_INVALID_FIELD | NVME_DNR;
    }
    if (unlikely(!(NVME_SQ_FLAGS_PC(qflags)))) {
        trace_nvme_err_invalid_create_sq_qflags(NVME_SQ_FLAGS_PC(qflags));
        return NVME_INVALID_FIELD | NVME_DNR;
    }
    sq = g_malloc0(sizeof(*sq));
    nvme_init_sq(sq, n, prp1, sqid, cqid, qsize + 1);
    return NVME_SUCCESS;
}
static void nvme_free_cq(NvmeCQueue *cq, NvmeCtrl *n)
{
    n->cq[cq->cqid] = NULL;
    timer_del(cq->timer);
    timer_free(cq->timer);
    msix_vector_unuse(&n->parent_obj, cq->vector);
    if (cq->cqid) {
        g_free(cq);
    }
}
static uint16_t nvme_del_cq(NvmeCtrl *n, NvmeCmd *cmd)
{
    NvmeDeleteQ *c = (NvmeDeleteQ *)cmd;
    NvmeCQueue *cq;
    uint16_t qid = le16_to_cpu(c->qid);

    if (unlikely(!qid || nvme_check_cqid(n, qid))) {
        trace_nvme_err_invalid_del_cq_cqid(qid);
        return NVME_INVALID_CQID | NVME_DNR;
    }

    cq = n->cq[qid];
    if (unlikely(!QTAILQ_EMPTY(&cq->sq_list))) {
        trace_nvme_err_invalid_del_cq_notempty(qid);
        return NVME_INVALID_QUEUE_DEL;
    }
    nvme_irq_deassert(n, cq);
    trace_nvme_del_cq(qid);
    nvme_free_cq(cq, n);
    return NVME_SUCCESS;
}
static void nvme_init_cq(NvmeCQueue *cq, NvmeCtrl *n, uint64_t dma_addr,
    uint16_t cqid, uint16_t vector, uint16_t size, uint16_t irq_enabled)
{
    cq->ctrl = n;
    cq->cqid = cqid;
    cq->size = size;
    cq->dma_addr = dma_addr;
    cq->phase = 1;
    cq->irq_enabled = irq_enabled;
    cq->vector = vector;
    cq->head = cq->tail = 0;
    QTAILQ_INIT(&cq->req_list);
    QTAILQ_INIT(&cq->sq_list);
    msix_vector_use(&n->parent_obj, cq->vector);
    n->cq[cqid] = cq;
    cq->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, nvme_post_cqes, cq);
}
static uint16_t nvme_create_cq(NvmeCtrl *n, NvmeCmd *cmd)
{
    NvmeCQueue *cq;
    NvmeCreateCq *c = (NvmeCreateCq *)cmd;
    uint16_t cqid = le16_to_cpu(c->cqid);
    uint16_t vector = le16_to_cpu(c->irq_vector);
    uint16_t qsize = le16_to_cpu(c->qsize);
    uint16_t qflags = le16_to_cpu(c->cq_flags);
    uint64_t prp1 = le64_to_cpu(c->prp1);

    trace_nvme_create_cq(prp1, cqid, vector, qsize, qflags,
                         NVME_CQ_FLAGS_IEN(qflags) != 0);

    if (unlikely(!cqid || !nvme_check_cqid(n, cqid))) {
        trace_nvme_err_invalid_create_cq_cqid(cqid);
        return NVME_INVALID_CQID | NVME_DNR;
    }
    if (unlikely(!qsize || qsize > NVME_CAP_MQES(n->bar.cap))) {
        trace_nvme_err_invalid_create_cq_size(qsize);
        return NVME_MAX_QSIZE_EXCEEDED | NVME_DNR;
    }
    if (unlikely(!prp1)) {
        trace_nvme_err_invalid_create_cq_addr(prp1);
        return NVME_INVALID_FIELD | NVME_DNR;
    }
    if (unlikely(vector > n->num_queues)) {
        trace_nvme_err_invalid_create_cq_vector(vector);
        return NVME_INVALID_IRQ_VECTOR | NVME_DNR;
    }
    if (unlikely(!(NVME_CQ_FLAGS_PC(qflags)))) {
        trace_nvme_err_invalid_create_cq_qflags(NVME_CQ_FLAGS_PC(qflags));
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    cq = g_malloc0(sizeof(*cq));
    nvme_init_cq(cq, n, prp1, cqid, vector, qsize + 1,
        NVME_CQ_FLAGS_IEN(qflags));
    return NVME_SUCCESS;
}
static uint16_t nvme_identify_ctrl(NvmeCtrl *n, NvmeIdentify *c)
{
    uint64_t prp1 = le64_to_cpu(c->prp1);
    uint64_t prp2 = le64_to_cpu(c->prp2);

    trace_nvme_identify_ctrl();

    return nvme_dma_read_prp(n, (uint8_t *)&n->id_ctrl, sizeof(n->id_ctrl),
        prp1, prp2);
}
static uint16_t nvme_identify_ns(NvmeCtrl *n, NvmeIdentify *c)
{
    NvmeNamespace *ns;
    uint32_t nsid = le32_to_cpu(c->nsid);
    uint64_t prp1 = le64_to_cpu(c->prp1);
    uint64_t prp2 = le64_to_cpu(c->prp2);

    trace_nvme_identify_ns(nsid);

    if (unlikely(nsid == 0 || nsid > n->num_namespaces)) {
        trace_nvme_err_invalid_ns(nsid, n->num_namespaces);
        return NVME_INVALID_NSID | NVME_DNR;
    }

    ns = &n->namespaces[nsid - 1];

    return nvme_dma_read_prp(n, (uint8_t *)&ns->id_ns, sizeof(ns->id_ns),
        prp1, prp2);
}
static uint16_t nvme_identify_nslist(NvmeCtrl *n, NvmeIdentify *c)
{
    static const int data_len = 4 * KiB;
    uint32_t min_nsid = le32_to_cpu(c->nsid);
    uint64_t prp1 = le64_to_cpu(c->prp1);
    uint64_t prp2 = le64_to_cpu(c->prp2);
    uint32_t *list;
    uint16_t ret;
    int i, j = 0;

    trace_nvme_identify_nslist(min_nsid);

    list = g_malloc0(data_len);
    for (i = 0; i < n->num_namespaces; i++) {
        if (i < min_nsid) {
            continue;
        }
        list[j++] = cpu_to_le32(i + 1);
        if (j == data_len / sizeof(uint32_t)) {
            break;
        }
    }
    ret = nvme_dma_read_prp(n, (uint8_t *)list, data_len, prp1, prp2);
    g_free(list);
    return ret;
}
static uint16_t nvme_identify(NvmeCtrl *n, NvmeCmd *cmd)
{
    NvmeIdentify *c = (NvmeIdentify *)cmd;

    switch (le32_to_cpu(c->cns)) {
    case 0x00:
        return nvme_identify_ns(n, c);
    case 0x01:
        return nvme_identify_ctrl(n, c);
    case 0x02:
        return nvme_identify_nslist(n, c);
    default:
        trace_nvme_err_invalid_identify_cns(le32_to_cpu(c->cns));
        return NVME_INVALID_FIELD | NVME_DNR;
    }
}
static inline void nvme_set_timestamp(NvmeCtrl *n, uint64_t ts)
{
    trace_nvme_setfeat_timestamp(ts);

    n->host_timestamp = le64_to_cpu(ts);
    n->timestamp_set_qemu_clock_ms = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
}
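/*
 * The returned timestamp is the host-set value plus the milliseconds
 * elapsed since it was set, truncated to 48 bits as the spec requires.
 * Illustrative arithmetic: host_timestamp=0xffffffffff00 and
 * elapsed_time=0x200 wrap to
 * (0xffffffffff00 + 0x200) & 0xffffffffffff = 0x100.
 */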
static inline uint64_t nvme_get_timestamp(const NvmeCtrl *n)
{
    uint64_t current_time = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
    uint64_t elapsed_time = current_time - n->timestamp_set_qemu_clock_ms;

    union nvme_timestamp {
        struct {
            uint64_t timestamp:48;
            uint64_t sync:1;
            uint64_t origin:3;
            uint64_t rsvd1:12;
        };
        uint64_t all;
    };

    union nvme_timestamp ts;
    ts.all = 0;

    /*
     * If the sum of the Timestamp value set by the host and the elapsed
     * time exceeds 2^48, the value returned should be reduced modulo 2^48.
     */
    ts.timestamp = (n->host_timestamp + elapsed_time) & 0xffffffffffff;

    /* If the host timestamp is non-zero, set the timestamp origin */
    ts.origin = n->host_timestamp ? 0x01 : 0x00;

    trace_nvme_getfeat_timestamp(ts.all);

    return cpu_to_le64(ts.all);
}
static uint16_t nvme_get_feature_timestamp(NvmeCtrl *n, NvmeCmd *cmd)
{
    uint64_t prp1 = le64_to_cpu(cmd->prp1);
    uint64_t prp2 = le64_to_cpu(cmd->prp2);

    uint64_t timestamp = nvme_get_timestamp(n);

    return nvme_dma_read_prp(n, (uint8_t *)&timestamp,
                                 sizeof(timestamp), prp1, prp2);
}
static uint16_t nvme_get_feature(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
{
    uint32_t dw10 = le32_to_cpu(cmd->cdw10);
    uint32_t result;

    switch (dw10) {
    case NVME_VOLATILE_WRITE_CACHE:
        result = blk_enable_write_cache(n->conf.blk);
        trace_nvme_getfeat_vwcache(result ? "enabled" : "disabled");
        break;
    case NVME_NUMBER_OF_QUEUES:
        result = cpu_to_le32((n->num_queues - 2) | ((n->num_queues - 2) << 16));
        trace_nvme_getfeat_numq(result);
        break;
    case NVME_TIMESTAMP:
        return nvme_get_feature_timestamp(n, cmd);
    default:
        trace_nvme_err_invalid_getfeat(dw10);
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    req->cqe.result = result;
    return NVME_SUCCESS;
}
static uint16_t nvme_set_feature_timestamp(NvmeCtrl *n, NvmeCmd *cmd)
{
    uint16_t ret;
    uint64_t timestamp;
    uint64_t prp1 = le64_to_cpu(cmd->prp1);
    uint64_t prp2 = le64_to_cpu(cmd->prp2);

    ret = nvme_dma_write_prp(n, (uint8_t *)&timestamp,
                                sizeof(timestamp), prp1, prp2);
    if (ret != NVME_SUCCESS) {
        return ret;
    }

    nvme_set_timestamp(n, timestamp);

    return NVME_SUCCESS;
}
static uint16_t nvme_set_feature(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
{
    uint32_t dw10 = le32_to_cpu(cmd->cdw10);
    uint32_t dw11 = le32_to_cpu(cmd->cdw11);

    switch (dw10) {
    case NVME_VOLATILE_WRITE_CACHE:
        blk_set_enable_write_cache(n->conf.blk, dw11 & 1);
        break;
    case NVME_NUMBER_OF_QUEUES:
        trace_nvme_setfeat_numq((dw11 & 0xFFFF) + 1,
                                ((dw11 >> 16) & 0xFFFF) + 1,
                                n->num_queues - 1, n->num_queues - 1);
        req->cqe.result =
            cpu_to_le32((n->num_queues - 2) | ((n->num_queues - 2) << 16));
        break;
    case NVME_TIMESTAMP:
        return nvme_set_feature_timestamp(n, cmd);
    default:
        trace_nvme_err_invalid_setfeat(dw10);
        return NVME_INVALID_FIELD | NVME_DNR;
    }
    return NVME_SUCCESS;
}
static uint16_t nvme_admin_cmd(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
{
    switch (cmd->opcode) {
    case NVME_ADM_CMD_DELETE_SQ:
        return nvme_del_sq(n, cmd);
    case NVME_ADM_CMD_CREATE_SQ:
        return nvme_create_sq(n, cmd);
    case NVME_ADM_CMD_DELETE_CQ:
        return nvme_del_cq(n, cmd);
    case NVME_ADM_CMD_CREATE_CQ:
        return nvme_create_cq(n, cmd);
    case NVME_ADM_CMD_IDENTIFY:
        return nvme_identify(n, cmd);
    case NVME_ADM_CMD_SET_FEATURES:
        return nvme_set_feature(n, cmd, req);
    case NVME_ADM_CMD_GET_FEATURES:
        return nvme_get_feature(n, cmd, req);
    default:
        trace_nvme_err_invalid_admin_opc(cmd->opcode);
        return NVME_INVALID_OPCODE | NVME_DNR;
    }
}
static void nvme_process_sq(void *opaque)
{
    NvmeSQueue *sq = opaque;
    NvmeCtrl *n = sq->ctrl;
    NvmeCQueue *cq = n->cq[sq->cqid];

    uint16_t status;
    hwaddr addr;
    NvmeCmd cmd;
    NvmeRequest *req;

    while (!(nvme_sq_empty(sq) || QTAILQ_EMPTY(&sq->req_list))) {
        addr = sq->dma_addr + sq->head * n->sqe_size;
        nvme_addr_read(n, addr, (void *)&cmd, sizeof(cmd));
        nvme_inc_sq_head(sq);

        req = QTAILQ_FIRST(&sq->req_list);
        QTAILQ_REMOVE(&sq->req_list, req, entry);
        QTAILQ_INSERT_TAIL(&sq->out_req_list, req, entry);
        memset(&req->cqe, 0, sizeof(req->cqe));
        req->cqe.cid = cmd.cid;

        status = sq->sqid ? nvme_io_cmd(n, &cmd, req) :
            nvme_admin_cmd(n, &cmd, req);
        if (status != NVME_NO_COMPLETE) {
            req->status = status;
            nvme_enqueue_req_completion(cq, req);
        }
    }
}
static void nvme_clear_ctrl(NvmeCtrl *n)
{
    int i;

    blk_drain(n->conf.blk);

    for (i = 0; i < n->num_queues; i++) {
        if (n->sq[i] != NULL) {
            nvme_free_sq(n->sq[i], n);
        }
    }
    for (i = 0; i < n->num_queues; i++) {
        if (n->cq[i] != NULL) {
            nvme_free_cq(n->cq[i], n);
        }
    }

    blk_flush(n->conf.blk);
    n->bar.cc = 0;
}
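/*
 * CC.MPS encodes the memory page size as a power of two starting at
 * 4 KiB: page_size = 1 << (12 + MPS). For example MPS=0 gives 4096 and
 * MPS=2 gives 16384; values outside CAP.MPSMIN..CAP.MPSMAX are rejected
 * below.
 */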
static int nvme_start_ctrl(NvmeCtrl *n)
{
    uint32_t page_bits = NVME_CC_MPS(n->bar.cc) + 12;
    uint32_t page_size = 1 << page_bits;

    if (unlikely(n->cq[0])) {
        trace_nvme_err_startfail_cq();
        return -1;
    }
    if (unlikely(n->sq[0])) {
        trace_nvme_err_startfail_sq();
        return -1;
    }
    if (unlikely(!n->bar.asq)) {
        trace_nvme_err_startfail_nbarasq();
        return -1;
    }
    if (unlikely(!n->bar.acq)) {
        trace_nvme_err_startfail_nbaracq();
        return -1;
    }
    if (unlikely(n->bar.asq & (page_size - 1))) {
        trace_nvme_err_startfail_asq_misaligned(n->bar.asq);
        return -1;
    }
    if (unlikely(n->bar.acq & (page_size - 1))) {
        trace_nvme_err_startfail_acq_misaligned(n->bar.acq);
        return -1;
    }
    if (unlikely(NVME_CC_MPS(n->bar.cc) <
                 NVME_CAP_MPSMIN(n->bar.cap))) {
        trace_nvme_err_startfail_page_too_small(
                    NVME_CC_MPS(n->bar.cc),
                    NVME_CAP_MPSMIN(n->bar.cap));
        return -1;
    }
    if (unlikely(NVME_CC_MPS(n->bar.cc) >
                 NVME_CAP_MPSMAX(n->bar.cap))) {
        trace_nvme_err_startfail_page_too_large(
                    NVME_CC_MPS(n->bar.cc),
                    NVME_CAP_MPSMAX(n->bar.cap));
        return -1;
    }
    if (unlikely(NVME_CC_IOCQES(n->bar.cc) <
                 NVME_CTRL_CQES_MIN(n->id_ctrl.cqes))) {
        trace_nvme_err_startfail_cqent_too_small(
                    NVME_CC_IOCQES(n->bar.cc),
                    NVME_CTRL_CQES_MIN(n->bar.cap));
        return -1;
    }
    if (unlikely(NVME_CC_IOCQES(n->bar.cc) >
                 NVME_CTRL_CQES_MAX(n->id_ctrl.cqes))) {
        trace_nvme_err_startfail_cqent_too_large(
                    NVME_CC_IOCQES(n->bar.cc),
                    NVME_CTRL_CQES_MAX(n->bar.cap));
        return -1;
    }
    if (unlikely(NVME_CC_IOSQES(n->bar.cc) <
                 NVME_CTRL_SQES_MIN(n->id_ctrl.sqes))) {
        trace_nvme_err_startfail_sqent_too_small(
                    NVME_CC_IOSQES(n->bar.cc),
                    NVME_CTRL_SQES_MIN(n->bar.cap));
        return -1;
    }
    if (unlikely(NVME_CC_IOSQES(n->bar.cc) >
                 NVME_CTRL_SQES_MAX(n->id_ctrl.sqes))) {
        trace_nvme_err_startfail_sqent_too_large(
                    NVME_CC_IOSQES(n->bar.cc),
                    NVME_CTRL_SQES_MAX(n->bar.cap));
        return -1;
    }
    if (unlikely(!NVME_AQA_ASQS(n->bar.aqa))) {
        trace_nvme_err_startfail_asqent_sz_zero();
        return -1;
    }
    if (unlikely(!NVME_AQA_ACQS(n->bar.aqa))) {
        trace_nvme_err_startfail_acqent_sz_zero();
        return -1;
    }

    n->page_bits = page_bits;
    n->page_size = page_size;
    n->max_prp_ents = n->page_size / sizeof(uint64_t);
    n->cqe_size = 1 << NVME_CC_IOCQES(n->bar.cc);
    n->sqe_size = 1 << NVME_CC_IOSQES(n->bar.cc);
    nvme_init_cq(&n->admin_cq, n, n->bar.acq, 0, 0,
                 NVME_AQA_ACQS(n->bar.aqa) + 1, 1);
    nvme_init_sq(&n->admin_sq, n, n->bar.asq, 0, 0,
                 NVME_AQA_ASQS(n->bar.aqa) + 1);

    nvme_set_timestamp(n, 0ULL);

    return 0;
}
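/*
 * Register offsets decoded below, per the NVMe register map: 0x0c INTMS,
 * 0x10 INTMC, 0x14 CC, 0x1c CSTS, 0x20 NSSR, 0x24 AQA, 0x28/0x2c ASQ
 * lo/hi, 0x30/0x34 ACQ lo/hi, 0x38 CMBLOC, 0x3c CMBSZ.
 */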
static void nvme_write_bar(NvmeCtrl *n, hwaddr offset, uint64_t data,
    unsigned size)
{
    if (unlikely(offset & (sizeof(uint32_t) - 1))) {
        NVME_GUEST_ERR(nvme_ub_mmiowr_misaligned32,
                       "MMIO write not 32-bit aligned,"
                       " offset=0x%"PRIx64"", offset);
        /* should be ignored, fall through for now */
    }

    if (unlikely(size < sizeof(uint32_t))) {
        NVME_GUEST_ERR(nvme_ub_mmiowr_toosmall,
                       "MMIO write smaller than 32-bits,"
                       " offset=0x%"PRIx64", size=%u",
                       offset, size);
        /* should be ignored, fall through for now */
    }

    switch (offset) {
    case 0xc:   /* INTMS */
        if (unlikely(msix_enabled(&(n->parent_obj)))) {
            NVME_GUEST_ERR(nvme_ub_mmiowr_intmask_with_msix,
                           "undefined access to interrupt mask set"
                           " when MSI-X is enabled");
            /* should be ignored, fall through for now */
        }
        n->bar.intms |= data & 0xffffffff;
        n->bar.intmc = n->bar.intms;
        trace_nvme_mmio_intm_set(data & 0xffffffff,
                                 n->bar.intmc);
        nvme_irq_check(n);
        break;
    case 0x10:  /* INTMC */
        if (unlikely(msix_enabled(&(n->parent_obj)))) {
            NVME_GUEST_ERR(nvme_ub_mmiowr_intmask_with_msix,
                           "undefined access to interrupt mask clr"
                           " when MSI-X is enabled");
            /* should be ignored, fall through for now */
        }
        n->bar.intms &= ~(data & 0xffffffff);
        n->bar.intmc = n->bar.intms;
        trace_nvme_mmio_intm_clr(data & 0xffffffff,
                                 n->bar.intmc);
        nvme_irq_check(n);
        break;
    case 0x14:  /* CC */
        trace_nvme_mmio_cfg(data & 0xffffffff);
        /* Windows first sends data, then sends enable bit */
        if (!NVME_CC_EN(data) && !NVME_CC_EN(n->bar.cc) &&
            !NVME_CC_SHN(data) && !NVME_CC_SHN(n->bar.cc))
        {
            n->bar.cc = data;
        }

        if (NVME_CC_EN(data) && !NVME_CC_EN(n->bar.cc)) {
            n->bar.cc = data;
            if (unlikely(nvme_start_ctrl(n))) {
                trace_nvme_err_startfail();
                n->bar.csts = NVME_CSTS_FAILED;
            } else {
                trace_nvme_mmio_start_success();
                n->bar.csts = NVME_CSTS_READY;
            }
        } else if (!NVME_CC_EN(data) && NVME_CC_EN(n->bar.cc)) {
            trace_nvme_mmio_stopped();
            nvme_clear_ctrl(n);
            n->bar.csts &= ~NVME_CSTS_READY;
        }
        if (NVME_CC_SHN(data) && !(NVME_CC_SHN(n->bar.cc))) {
            trace_nvme_mmio_shutdown_set();
            nvme_clear_ctrl(n);
            n->bar.cc = data;
            n->bar.csts |= NVME_CSTS_SHST_COMPLETE;
        } else if (!NVME_CC_SHN(data) && NVME_CC_SHN(n->bar.cc)) {
            trace_nvme_mmio_shutdown_cleared();
            n->bar.csts &= ~NVME_CSTS_SHST_COMPLETE;
            n->bar.cc = data;
        }
        break;
    case 0x1C:  /* CSTS */
        if (data & (1 << 4)) {
            NVME_GUEST_ERR(nvme_ub_mmiowr_ssreset_w1c_unsupported,
                           "attempted to W1C CSTS.NSSRO"
                           " but CAP.NSSRS is zero (not supported)");
        } else if (data != 0) {
            NVME_GUEST_ERR(nvme_ub_mmiowr_ro_csts,
                           "attempted to set a read only bit"
                           " of controller status");
        }
        break;
    case 0x20:  /* NSSR */
        if (data == 0x4E564D65) {
            trace_nvme_ub_mmiowr_ssreset_unsupported();
        } else {
            /* The spec says that writes of other values have no effect */
        }
        break;
    case 0x24:  /* AQA */
        n->bar.aqa = data & 0xffffffff;
        trace_nvme_mmio_aqattr(data & 0xffffffff);
        break;
    case 0x28:  /* ASQ */
        n->bar.asq = data;
        trace_nvme_mmio_asqaddr(data);
        break;
    case 0x2c:  /* ASQ hi */
        n->bar.asq |= data << 32;
        trace_nvme_mmio_asqaddr_hi(data, n->bar.asq);
        break;
    case 0x30:  /* ACQ */
        trace_nvme_mmio_acqaddr(data);
        n->bar.acq = data;
        break;
    case 0x34:  /* ACQ hi */
        n->bar.acq |= data << 32;
        trace_nvme_mmio_acqaddr_hi(data, n->bar.acq);
        break;
    case 0x38:  /* CMBLOC */
        NVME_GUEST_ERR(nvme_ub_mmiowr_cmbloc_reserved,
                       "invalid write to reserved CMBLOC"
                       " when CMBSZ is zero, ignored");
        break;
    case 0x3C:  /* CMBSZ */
        NVME_GUEST_ERR(nvme_ub_mmiowr_cmbsz_readonly,
                       "invalid write to read only CMBSZ, ignored");
        break;
    default:
        NVME_GUEST_ERR(nvme_ub_mmiowr_invalid,
                       "invalid MMIO write,"
                       " offset=0x%"PRIx64", data=%"PRIx64"",
                       offset, data);
        break;
    }
}
static uint64_t nvme_mmio_read(void *opaque, hwaddr addr, unsigned size)
{
    NvmeCtrl *n = (NvmeCtrl *)opaque;
    uint8_t *ptr = (uint8_t *)&n->bar;
    uint64_t val = 0;

    if (unlikely(addr & (sizeof(uint32_t) - 1))) {
        NVME_GUEST_ERR(nvme_ub_mmiord_misaligned32,
                       "MMIO read not 32-bit aligned,"
                       " offset=0x%"PRIx64"", addr);
        /* should RAZ, fall through for now */
    } else if (unlikely(size < sizeof(uint32_t))) {
        NVME_GUEST_ERR(nvme_ub_mmiord_toosmall,
                       "MMIO read smaller than 32-bits,"
                       " offset=0x%"PRIx64"", addr);
        /* should RAZ, fall through for now */
    }

    if (addr < sizeof(n->bar)) {
        memcpy(&val, ptr + addr, size);
    } else {
        NVME_GUEST_ERR(nvme_ub_mmiord_invalid_ofs,
                       "MMIO read beyond last register,"
                       " offset=0x%"PRIx64", returning 0", addr);
    }
    return val;
}
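/*
 * Doorbells start at offset 0x1000 and come in (SQ tail, CQ head) pairs
 * of 32-bit registers, so bit 2 selects SQ vs CQ and each pair spans 8
 * bytes. Worked examples: a write at 0x1000 is SQ 0's tail doorbell
 * ((0x1000 - 0x1000) >> 3 = 0), 0x1004 is CQ 0's head doorbell, and
 * 0x100c decodes to CQ 1 ((0x100c - 0x1004) >> 3 = 1).
 */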
static void nvme_process_db(NvmeCtrl *n, hwaddr addr, int val)
{
    uint32_t qid;

    if (unlikely(addr & ((1 << 2) - 1))) {
        NVME_GUEST_ERR(nvme_ub_db_wr_misaligned,
                       "doorbell write not 32-bit aligned,"
                       " offset=0x%"PRIx64", ignoring", addr);
        return;
    }

    if (((addr - 0x1000) >> 2) & 1) {
        /* Completion queue doorbell write */

        uint16_t new_head = val & 0xffff;
        int start_sqs;
        NvmeCQueue *cq;

        qid = (addr - (0x1000 + (1 << 2))) >> 3;
        if (unlikely(nvme_check_cqid(n, qid))) {
            NVME_GUEST_ERR(nvme_ub_db_wr_invalid_cq,
                           "completion queue doorbell write"
                           " for nonexistent queue,"
                           " sqid=%"PRIu32", ignoring", qid);
            return;
        }

        cq = n->cq[qid];
        if (unlikely(new_head >= cq->size)) {
            NVME_GUEST_ERR(nvme_ub_db_wr_invalid_cqhead,
                           "completion queue doorbell write value"
                           " beyond queue size, sqid=%"PRIu32","
                           " new_head=%"PRIu16", ignoring",
                           qid, new_head);
            return;
        }

        start_sqs = nvme_cq_full(cq) ? 1 : 0;
        cq->head = new_head;
        if (start_sqs) {
            NvmeSQueue *sq;
            QTAILQ_FOREACH(sq, &cq->sq_list, entry) {
                timer_mod(sq->timer,
                          qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 500);
            }
            timer_mod(cq->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 500);
        }

        if (cq->tail == cq->head) {
            nvme_irq_deassert(n, cq);
        }
    } else {
        /* Submission queue doorbell write */

        uint16_t new_tail = val & 0xffff;
        NvmeSQueue *sq;

        qid = (addr - 0x1000) >> 3;
        if (unlikely(nvme_check_sqid(n, qid))) {
            NVME_GUEST_ERR(nvme_ub_db_wr_invalid_sq,
                           "submission queue doorbell write"
                           " for nonexistent queue,"
                           " sqid=%"PRIu32", ignoring", qid);
            return;
        }

        sq = n->sq[qid];
        if (unlikely(new_tail >= sq->size)) {
            NVME_GUEST_ERR(nvme_ub_db_wr_invalid_sqtail,
                           "submission queue doorbell write value"
                           " beyond queue size, sqid=%"PRIu32","
                           " new_tail=%"PRIu16", ignoring",
                           qid, new_tail);
            return;
        }

        sq->tail = new_tail;
        timer_mod(sq->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 500);
    }
}
static void nvme_mmio_write(void *opaque, hwaddr addr, uint64_t data,
    unsigned size)
{
    NvmeCtrl *n = (NvmeCtrl *)opaque;
    if (addr < sizeof(n->bar)) {
        nvme_write_bar(n, addr, data, size);
    } else if (addr >= 0x1000) {
        nvme_process_db(n, addr, data);
    }
}
static const MemoryRegionOps nvme_mmio_ops = {
    .read = nvme_mmio_read,
    .write = nvme_mmio_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 2,
        .max_access_size = 8,
    },
};
static void nvme_cmb_write(void *opaque, hwaddr addr, uint64_t data,
    unsigned size)
{
    NvmeCtrl *n = (NvmeCtrl *)opaque;
    stn_le_p(&n->cmbuf[addr], size, data);
}
static uint64_t nvme_cmb_read(void *opaque, hwaddr addr, unsigned size)
{
    NvmeCtrl *n = (NvmeCtrl *)opaque;
    return ldn_le_p(&n->cmbuf[addr], size);
}
static const MemoryRegionOps nvme_cmb_ops = {
    .read = nvme_cmb_read,
    .write = nvme_cmb_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};
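/*
 * BAR0 must cover the fixed registers (offsets 0x0..0xfff) plus one SQ
 * and one CQ doorbell per queue pair, hence the
 * pow2ceil(0x1004 + 2 * (num_queues + 1) * 4) below; with the default
 * num_queues=64 that is pow2ceil(0x120c) = 8 KiB (arithmetic shown for
 * illustration).
 */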
static void nvme_realize(PCIDevice *pci_dev, Error **errp)
{
    NvmeCtrl *n = NVME(pci_dev);
    NvmeIdCtrl *id = &n->id_ctrl;

    int i;
    int64_t bs_size;
    uint8_t *pci_conf;

    if (!n->num_queues) {
        error_setg(errp, "num_queues can't be zero");
        return;
    }

    if (!n->conf.blk) {
        error_setg(errp, "drive property not set");
        return;
    }

    bs_size = blk_getlength(n->conf.blk);
    if (bs_size < 0) {
        error_setg(errp, "could not get backing file size");
        return;
    }

    if (!n->serial) {
        error_setg(errp, "serial property not set");
        return;
    }
    blkconf_blocksizes(&n->conf);
    if (!blkconf_apply_backend_options(&n->conf, blk_is_read_only(n->conf.blk),
                                       false, errp)) {
        return;
    }

    pci_conf = pci_dev->config;
    pci_conf[PCI_INTERRUPT_PIN] = 1;
    pci_config_set_prog_interface(pci_dev->config, 0x2);
    pci_config_set_class(pci_dev->config, PCI_CLASS_STORAGE_EXPRESS);
    pcie_endpoint_cap_init(pci_dev, 0x80);

    n->num_namespaces = 1;
    n->reg_size = pow2ceil(0x1004 + 2 * (n->num_queues + 1) * 4);
    n->ns_size = bs_size / (uint64_t)n->num_namespaces;

    n->namespaces = g_new0(NvmeNamespace, n->num_namespaces);
    n->sq = g_new0(NvmeSQueue *, n->num_queues);
    n->cq = g_new0(NvmeCQueue *, n->num_queues);

    memory_region_init_io(&n->iomem, OBJECT(n), &nvme_mmio_ops, n,
                          "nvme", n->reg_size);
    pci_register_bar(pci_dev, 0,
        PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_TYPE_64,
        &n->iomem);
    msix_init_exclusive_bar(pci_dev, n->num_queues, 4, NULL);

    id->vid = cpu_to_le16(pci_get_word(pci_conf + PCI_VENDOR_ID));
    id->ssvid = cpu_to_le16(pci_get_word(pci_conf + PCI_SUBSYSTEM_VENDOR_ID));
    strpadcpy((char *)id->mn, sizeof(id->mn), "QEMU NVMe Ctrl", ' ');
    strpadcpy((char *)id->fr, sizeof(id->fr), "1.0", ' ');
    strpadcpy((char *)id->sn, sizeof(id->sn), n->serial, ' ');
    id->rab = 6;
    id->ieee[0] = 0x00;
    id->ieee[1] = 0x02;
    id->ieee[2] = 0xb3;
    id->oacs = cpu_to_le16(0);
    id->frmw = 7 << 1;
    id->lpa = 1 << 0;
    id->sqes = (0x6 << 4) | 0x6;
    id->cqes = (0x4 << 4) | 0x4;
    id->nn = cpu_to_le32(n->num_namespaces);
    id->oncs = cpu_to_le16(NVME_ONCS_WRITE_ZEROS | NVME_ONCS_TIMESTAMP);
    id->psd[0].mp = cpu_to_le16(0x9c4);
    id->psd[0].enlat = cpu_to_le32(0x10);
    id->psd[0].exlat = cpu_to_le32(0x4);
    if (blk_enable_write_cache(n->conf.blk)) {
        id->vwc = 1;
    }

    n->bar.cap = 0;
    NVME_CAP_SET_MQES(n->bar.cap, 0x7ff);
    NVME_CAP_SET_CQR(n->bar.cap, 1);
    NVME_CAP_SET_TO(n->bar.cap, 0xf);
    NVME_CAP_SET_CSS(n->bar.cap, 1);
    NVME_CAP_SET_MPSMAX(n->bar.cap, 4);

    n->bar.vs = 0x00010200;
    n->bar.intmc = n->bar.intms = 0;

    if (n->cmb_size_mb) {

        NVME_CMBLOC_SET_BIR(n->bar.cmbloc, 2);
        NVME_CMBLOC_SET_OFST(n->bar.cmbloc, 0);

        NVME_CMBSZ_SET_SQS(n->bar.cmbsz, 1);
        NVME_CMBSZ_SET_CQS(n->bar.cmbsz, 0);
        NVME_CMBSZ_SET_LISTS(n->bar.cmbsz, 0);
        NVME_CMBSZ_SET_RDS(n->bar.cmbsz, 1);
        NVME_CMBSZ_SET_WDS(n->bar.cmbsz, 1);
        NVME_CMBSZ_SET_SZU(n->bar.cmbsz, 2); /* MBs */
        NVME_CMBSZ_SET_SZ(n->bar.cmbsz, n->cmb_size_mb);

        n->cmbloc = n->bar.cmbloc;
        n->cmbsz = n->bar.cmbsz;

        n->cmbuf = g_malloc0(NVME_CMBSZ_GETSIZE(n->bar.cmbsz));
        memory_region_init_io(&n->ctrl_mem, OBJECT(n), &nvme_cmb_ops, n,
                              "nvme-cmb", NVME_CMBSZ_GETSIZE(n->bar.cmbsz));
        pci_register_bar(pci_dev, NVME_CMBLOC_BIR(n->bar.cmbloc),
            PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_TYPE_64 |
            PCI_BASE_ADDRESS_MEM_PREFETCH, &n->ctrl_mem);

    }

    for (i = 0; i < n->num_namespaces; i++) {
        NvmeNamespace *ns = &n->namespaces[i];
        NvmeIdNs *id_ns = &ns->id_ns;
        id_ns->nsfeat = 0;
        id_ns->nlbaf = 0;
        id_ns->flbas = 0;
        id_ns->mc = 0;
        id_ns->dpc = 0;
        id_ns->dps = 0;
        id_ns->lbaf[0].ds = BDRV_SECTOR_BITS;
        id_ns->ncap  = id_ns->nuse = id_ns->nsze =
            cpu_to_le64(n->ns_size >>
                id_ns->lbaf[NVME_ID_NS_FLBAS_INDEX(ns->id_ns.flbas)].ds);
    }
}
static void nvme_exit(PCIDevice *pci_dev)
{
    NvmeCtrl *n = NVME(pci_dev);

    nvme_clear_ctrl(n);
    g_free(n->namespaces);
    g_free(n->cq);
    g_free(n->sq);

    if (n->cmb_size_mb) {
        g_free(n->cmbuf);
    }
    msix_uninit_exclusive_bar(pci_dev);
}
static Property nvme_props[] = {
    DEFINE_BLOCK_PROPERTIES(NvmeCtrl, conf),
    DEFINE_PROP_STRING("serial", NvmeCtrl, serial),
    DEFINE_PROP_UINT32("cmb_size_mb", NvmeCtrl, cmb_size_mb, 0),
    DEFINE_PROP_UINT32("num_queues", NvmeCtrl, num_queues, 64),
    DEFINE_PROP_END_OF_LIST(),
};
static const VMStateDescription nvme_vmstate = {
    .name = "nvme",
    .unmigratable = 1,
};
static void nvme_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);
    PCIDeviceClass *pc = PCI_DEVICE_CLASS(oc);

    pc->realize = nvme_realize;
    pc->exit = nvme_exit;
    pc->class_id = PCI_CLASS_STORAGE_EXPRESS;
    pc->vendor_id = PCI_VENDOR_ID_INTEL;
    pc->device_id = 0x5845;
    pc->revision = 2;

    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
    dc->desc = "Non-Volatile Memory Express";
    dc->props = nvme_props;
    dc->vmsd = &nvme_vmstate;
}
static void nvme_instance_init(Object *obj)
{
    NvmeCtrl *s = NVME(obj);

    device_add_bootindex_property(obj, &s->conf.bootindex,
                                  "bootindex", "/namespace@1,0",
                                  DEVICE(obj), &error_abort);
}
static const TypeInfo nvme_info = {
    .name          = TYPE_NVME,
    .parent        = TYPE_PCI_DEVICE,
    .instance_size = sizeof(NvmeCtrl),
    .class_init    = nvme_class_init,
    .instance_init = nvme_instance_init,
    .interfaces = (InterfaceInfo[]) {
        { INTERFACE_PCIE_DEVICE },
        { }
    },
};
static void nvme_register_types(void)
{
    type_register_static(&nvme_info);
}

type_init(nvme_register_types)