/*
 * QEMU NVM Express Controller
 *
 * Copyright (c) 2012, Intel Corporation
 *
 * Written by Keith Busch <keith.busch@intel.com>
 *
 * This code is licensed under the GNU GPL v2 or later.
 */

/**
 * Reference Specs: http://www.nvmexpress.org, 1.2, 1.1, 1.0e
 *
 * http://www.nvmexpress.org/resources/
 */

/**
 * Usage: add options:
 *      -drive file=<file>,if=none,id=<drive_id>
 *      -device nvme,drive=<drive_id>,serial=<serial>,id=<id[optional]>, \
 *              cmb_size_mb=<cmb_size_mb[optional]>, \
 *              num_queues=<N[optional]>
 *
 * Note that cmb_size_mb denotes the size of the CMB in MB. The CMB is
 * assumed to be at offset 0 in BAR2 and supports only WDS, RDS and SQS
 * for now.
 */

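/*
 * An illustrative invocation (the image path and serial string below are
 * arbitrary example values, not defaults):
 *
 *     qemu-system-x86_64 ... \
 *         -drive file=nvme.img,if=none,id=nvme0 \
 *         -device nvme,drive=nvme0,serial=deadbeef,cmb_size_mb=64
 */
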
#include "qemu/osdep.h"
#include "qemu/units.h"
#include "hw/block/block.h"
#include "hw/hw.h"
#include "hw/pci/msix.h"
#include "hw/pci/pci.h"
#include "sysemu/sysemu.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "sysemu/block-backend.h"

#include "qemu/log.h"
#include "qemu/module.h"
#include "qemu/cutils.h"
#include "trace.h"
#include "nvme.h"

#define NVME_GUEST_ERR(trace, fmt, ...) \
    do { \
        (trace_##trace)(__VA_ARGS__); \
        qemu_log_mask(LOG_GUEST_ERROR, #trace \
            " in %s: " fmt "\n", __func__, ## __VA_ARGS__); \
    } while (0)

static void nvme_process_sq(void *opaque);

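/*
 * Copy 'size' bytes of guest memory into 'buf'; reads that fall inside the
 * Controller Memory Buffer window are served directly from the CMB backing
 * buffer, everything else goes through PCI DMA.
 */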
static void nvme_addr_read(NvmeCtrl *n, hwaddr addr, void *buf, int size)
{
    if (n->cmbsz && addr >= n->ctrl_mem.addr &&
                addr < (n->ctrl_mem.addr + int128_get64(n->ctrl_mem.size))) {
        memcpy(buf, (void *)&n->cmbuf[addr - n->ctrl_mem.addr], size);
    } else {
        pci_dma_read(&n->parent_obj, addr, buf, size);
    }
}

static int nvme_check_sqid(NvmeCtrl *n, uint16_t sqid)
{
    return sqid < n->num_queues && n->sq[sqid] != NULL ? 0 : -1;
}

static int nvme_check_cqid(NvmeCtrl *n, uint16_t cqid)
{
    return cqid < n->num_queues && n->cq[cqid] != NULL ? 0 : -1;
}

static void nvme_inc_cq_tail(NvmeCQueue *cq)
{
    cq->tail++;
    if (cq->tail >= cq->size) {
        cq->tail = 0;
        cq->phase = !cq->phase;
    }
}

static void nvme_inc_sq_head(NvmeSQueue *sq)
{
    sq->head = (sq->head + 1) % sq->size;
}

static uint8_t nvme_cq_full(NvmeCQueue *cq)
{
    return (cq->tail + 1) % cq->size == cq->head;
}

static uint8_t nvme_sq_empty(NvmeSQueue *sq)
{
    return sq->head == sq->tail;
}

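/*
 * Recompute the level of the legacy pin-based interrupt: with MSI-X enabled
 * there is nothing to do; otherwise the pin is asserted whenever at least
 * one per-CQ bit in irq_status is not masked by INTMS.
 */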
static void nvme_irq_check(NvmeCtrl *n)
{
    if (msix_enabled(&(n->parent_obj))) {
        return;
    }
    if (~n->bar.intms & n->irq_status) {
        pci_irq_assert(&n->parent_obj);
    } else {
        pci_irq_deassert(&n->parent_obj);
    }
}

static void nvme_irq_assert(NvmeCtrl *n, NvmeCQueue *cq)
{
    if (cq->irq_enabled) {
        if (msix_enabled(&(n->parent_obj))) {
            trace_nvme_irq_msix(cq->vector);
            msix_notify(&(n->parent_obj), cq->vector);
        } else {
            trace_nvme_irq_pin();
            assert(cq->cqid < 64);
            n->irq_status |= 1 << cq->cqid;
            nvme_irq_check(n);
        }
    } else {
        trace_nvme_irq_masked();
    }
}

static void nvme_irq_deassert(NvmeCtrl *n, NvmeCQueue *cq)
{
    if (cq->irq_enabled) {
        if (msix_enabled(&(n->parent_obj))) {
            return;
        } else {
            assert(cq->cqid < 64);
            n->irq_status &= ~(1 << cq->cqid);
            nvme_irq_check(n);
        }
    }
}

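/*
 * Walk a PRP pair (prp2 may point to a PRP list) and collect the guest
 * addresses covering 'len' bytes, either as a DMA scatter/gather list or,
 * when the PRPs target the CMB, as an iovec over the CMB backing buffer.
 */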
static uint16_t nvme_map_prp(QEMUSGList *qsg, QEMUIOVector *iov, uint64_t prp1,
                             uint64_t prp2, uint32_t len, NvmeCtrl *n)
{
    hwaddr trans_len = n->page_size - (prp1 % n->page_size);
    trans_len = MIN(len, trans_len);
    int num_prps = (len >> n->page_bits) + 1;

    if (unlikely(!prp1)) {
        trace_nvme_err_invalid_prp();
        return NVME_INVALID_FIELD | NVME_DNR;
    } else if (n->cmbsz && prp1 >= n->ctrl_mem.addr &&
               prp1 < n->ctrl_mem.addr + int128_get64(n->ctrl_mem.size)) {
        qsg->nsg = 0;
        qemu_iovec_init(iov, num_prps);
        qemu_iovec_add(iov, (void *)&n->cmbuf[prp1 - n->ctrl_mem.addr], trans_len);
    } else {
        pci_dma_sglist_init(qsg, &n->parent_obj, num_prps);
        qemu_sglist_add(qsg, prp1, trans_len);
    }
    len -= trans_len;
    if (len) {
        if (unlikely(!prp2)) {
            trace_nvme_err_invalid_prp2_missing();
            goto unmap;
        }
        if (len > n->page_size) {
            uint64_t prp_list[n->max_prp_ents];
            uint32_t nents, prp_trans;
            int i = 0;

            nents = (len + n->page_size - 1) >> n->page_bits;
            prp_trans = MIN(n->max_prp_ents, nents) * sizeof(uint64_t);
            nvme_addr_read(n, prp2, (void *)prp_list, prp_trans);
            while (len != 0) {
                uint64_t prp_ent = le64_to_cpu(prp_list[i]);

                if (i == n->max_prp_ents - 1 && len > n->page_size) {
                    if (unlikely(!prp_ent || prp_ent & (n->page_size - 1))) {
                        trace_nvme_err_invalid_prplist_ent(prp_ent);
                        goto unmap;
                    }

                    i = 0;
                    nents = (len + n->page_size - 1) >> n->page_bits;
                    prp_trans = MIN(n->max_prp_ents, nents) * sizeof(uint64_t);
                    nvme_addr_read(n, prp_ent, (void *)prp_list,
                        prp_trans);
                    prp_ent = le64_to_cpu(prp_list[i]);
                }

                if (unlikely(!prp_ent || prp_ent & (n->page_size - 1))) {
                    trace_nvme_err_invalid_prplist_ent(prp_ent);
                    goto unmap;
                }

                trans_len = MIN(len, n->page_size);
                if (qsg->nsg) {
                    qemu_sglist_add(qsg, prp_ent, trans_len);
                } else {
                    qemu_iovec_add(iov, (void *)&n->cmbuf[prp_ent - n->ctrl_mem.addr], trans_len);
                }
                len -= trans_len;
                i++;
            }
        } else {
            if (unlikely(prp2 & (n->page_size - 1))) {
                trace_nvme_err_invalid_prp2_align(prp2);
                goto unmap;
            }
            if (qsg->nsg) {
                qemu_sglist_add(qsg, prp2, len);
            } else {
                /* map the remaining 'len' bytes here; the original passed
                 * the stale trans_len left over from the prp1 chunk */
                qemu_iovec_add(iov, (void *)&n->cmbuf[prp2 - n->ctrl_mem.addr], len);
            }
        }
    }
    return NVME_SUCCESS;

 unmap:
    qemu_sglist_destroy(qsg);
    return NVME_INVALID_FIELD | NVME_DNR;
}

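/*
 * Fetch 'len' bytes from the guest region described by the PRP pair into
 * 'ptr' (used e.g. by Set Features - Timestamp); nvme_dma_read_prp() below
 * transfers in the opposite direction, controller to guest.
 */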
static uint16_t nvme_dma_write_prp(NvmeCtrl *n, uint8_t *ptr, uint32_t len,
                                   uint64_t prp1, uint64_t prp2)
{
    QEMUSGList qsg;
    QEMUIOVector iov;
    uint16_t status = NVME_SUCCESS;

    if (nvme_map_prp(&qsg, &iov, prp1, prp2, len, n)) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }
    if (qsg.nsg > 0) {
        if (dma_buf_write(ptr, len, &qsg)) {
            status = NVME_INVALID_FIELD | NVME_DNR;
        }
        qemu_sglist_destroy(&qsg);
    } else {
        if (qemu_iovec_to_buf(&iov, 0, ptr, len) != len) {
            status = NVME_INVALID_FIELD | NVME_DNR;
        }
        qemu_iovec_destroy(&iov);
    }
    return status;
}

static uint16_t nvme_dma_read_prp(NvmeCtrl *n, uint8_t *ptr, uint32_t len,
    uint64_t prp1, uint64_t prp2)
{
    QEMUSGList qsg;
    QEMUIOVector iov;
    uint16_t status = NVME_SUCCESS;

    trace_nvme_dma_read(prp1, prp2);

    if (nvme_map_prp(&qsg, &iov, prp1, prp2, len, n)) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }
    if (qsg.nsg > 0) {
        if (unlikely(dma_buf_read(ptr, len, &qsg))) {
            trace_nvme_err_invalid_dma();
            status = NVME_INVALID_FIELD | NVME_DNR;
        }
        qemu_sglist_destroy(&qsg);
    } else {
        if (unlikely(qemu_iovec_from_buf(&iov, 0, ptr, len) != len)) {
            trace_nvme_err_invalid_dma();
            status = NVME_INVALID_FIELD | NVME_DNR;
        }
        qemu_iovec_destroy(&iov);
    }
    return status;
}

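/*
 * Timer callback: drain the completion-pending list into the completion
 * queue ring until the ring fills up, then assert the CQ interrupt if any
 * posted entries are still unconsumed (tail != head).
 */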
static void nvme_post_cqes(void *opaque)
{
    NvmeCQueue *cq = opaque;
    NvmeCtrl *n = cq->ctrl;
    NvmeRequest *req, *next;

    QTAILQ_FOREACH_SAFE(req, &cq->req_list, entry, next) {
        NvmeSQueue *sq;
        hwaddr addr;

        if (nvme_cq_full(cq)) {
            break;
        }

        QTAILQ_REMOVE(&cq->req_list, req, entry);
        sq = req->sq;
        req->cqe.status = cpu_to_le16((req->status << 1) | cq->phase);
        req->cqe.sq_id = cpu_to_le16(sq->sqid);
        req->cqe.sq_head = cpu_to_le16(sq->head);
        addr = cq->dma_addr + cq->tail * n->cqe_size;
        nvme_inc_cq_tail(cq);
        pci_dma_write(&n->parent_obj, addr, (void *)&req->cqe,
            sizeof(req->cqe));
        QTAILQ_INSERT_TAIL(&sq->req_list, req, entry);
    }
    if (cq->tail != cq->head) {
        nvme_irq_assert(n, cq);
    }
}

static void nvme_enqueue_req_completion(NvmeCQueue *cq, NvmeRequest *req)
{
    assert(cq->cqid == req->sq->cqid);
    QTAILQ_REMOVE(&req->sq->out_req_list, req, entry);
    QTAILQ_INSERT_TAIL(&cq->req_list, req, entry);
    timer_mod(cq->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 500);
}

static void nvme_rw_cb(void *opaque, int ret)
{
    NvmeRequest *req = opaque;
    NvmeSQueue *sq = req->sq;
    NvmeCtrl *n = sq->ctrl;
    NvmeCQueue *cq = n->cq[sq->cqid];

    if (!ret) {
        block_acct_done(blk_get_stats(n->conf.blk), &req->acct);
        req->status = NVME_SUCCESS;
    } else {
        block_acct_failed(blk_get_stats(n->conf.blk), &req->acct);
        req->status = NVME_INTERNAL_DEV_ERROR;
    }
    if (req->has_sg) {
        qemu_sglist_destroy(&req->qsg);
    }
    nvme_enqueue_req_completion(cq, req);
}

static uint16_t nvme_flush(NvmeCtrl *n, NvmeNamespace *ns, NvmeCmd *cmd,
    NvmeRequest *req)
{
    req->has_sg = false;
    block_acct_start(blk_get_stats(n->conf.blk), &req->acct, 0,
         BLOCK_ACCT_FLUSH);
    req->aiocb = blk_aio_flush(n->conf.blk, nvme_rw_cb, req);

    return NVME_NO_COMPLETE;
}

static uint16_t nvme_write_zeros(NvmeCtrl *n, NvmeNamespace *ns, NvmeCmd *cmd,
                                 NvmeRequest *req)
{
    NvmeRwCmd *rw = (NvmeRwCmd *)cmd;
    const uint8_t lba_index = NVME_ID_NS_FLBAS_INDEX(ns->id_ns.flbas);
    const uint8_t data_shift = ns->id_ns.lbaf[lba_index].ds;
    uint64_t slba = le64_to_cpu(rw->slba);
    uint32_t nlb  = le16_to_cpu(rw->nlb) + 1;
    uint64_t offset = slba << data_shift;
    uint32_t count = nlb << data_shift;

    if (unlikely(slba + nlb > ns->id_ns.nsze)) {
        trace_nvme_err_invalid_lba_range(slba, nlb, ns->id_ns.nsze);
        return NVME_LBA_RANGE | NVME_DNR;
    }

    req->has_sg = false;
    block_acct_start(blk_get_stats(n->conf.blk), &req->acct, 0,
                     BLOCK_ACCT_WRITE);
    req->aiocb = blk_aio_pwrite_zeroes(n->conf.blk, offset, count,
                                        BDRV_REQ_MAY_UNMAP, nvme_rw_cb, req);
    return NVME_NO_COMPLETE;
}

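/*
 * Handle the Read and Write I/O commands: validate the LBA range, map the
 * PRPs, and dispatch either a scatter/gather or a vectored block-layer
 * request, completing asynchronously through nvme_rw_cb().
 */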
static uint16_t nvme_rw(NvmeCtrl *n, NvmeNamespace *ns, NvmeCmd *cmd,
    NvmeRequest *req)
{
    NvmeRwCmd *rw = (NvmeRwCmd *)cmd;
    uint32_t nlb  = le32_to_cpu(rw->nlb) + 1;
    uint64_t slba = le64_to_cpu(rw->slba);
    uint64_t prp1 = le64_to_cpu(rw->prp1);
    uint64_t prp2 = le64_to_cpu(rw->prp2);

    uint8_t lba_index  = NVME_ID_NS_FLBAS_INDEX(ns->id_ns.flbas);
    uint8_t data_shift = ns->id_ns.lbaf[lba_index].ds;
    uint64_t data_size = (uint64_t)nlb << data_shift;
    uint64_t data_offset = slba << data_shift;
    int is_write = rw->opcode == NVME_CMD_WRITE ? 1 : 0;
    enum BlockAcctType acct = is_write ? BLOCK_ACCT_WRITE : BLOCK_ACCT_READ;

    trace_nvme_rw(is_write ? "write" : "read", nlb, data_size, slba);

    if (unlikely((slba + nlb) > ns->id_ns.nsze)) {
        block_acct_invalid(blk_get_stats(n->conf.blk), acct);
        trace_nvme_err_invalid_lba_range(slba, nlb, ns->id_ns.nsze);
        return NVME_LBA_RANGE | NVME_DNR;
    }

    if (nvme_map_prp(&req->qsg, &req->iov, prp1, prp2, data_size, n)) {
        block_acct_invalid(blk_get_stats(n->conf.blk), acct);
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    dma_acct_start(n->conf.blk, &req->acct, &req->qsg, acct);
    if (req->qsg.nsg > 0) {
        req->has_sg = true;
        req->aiocb = is_write ?
            dma_blk_write(n->conf.blk, &req->qsg, data_offset, BDRV_SECTOR_SIZE,
                          nvme_rw_cb, req) :
            dma_blk_read(n->conf.blk, &req->qsg, data_offset, BDRV_SECTOR_SIZE,
                         nvme_rw_cb, req);
    } else {
        req->has_sg = false;
        req->aiocb = is_write ?
            blk_aio_pwritev(n->conf.blk, data_offset, &req->iov, 0, nvme_rw_cb,
                            req) :
            blk_aio_preadv(n->conf.blk, data_offset, &req->iov, 0, nvme_rw_cb,
                           req);
    }

    return NVME_NO_COMPLETE;
}

static uint16_t nvme_io_cmd(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
{
    NvmeNamespace *ns;
    uint32_t nsid = le32_to_cpu(cmd->nsid);

    if (unlikely(nsid == 0 || nsid > n->num_namespaces)) {
        trace_nvme_err_invalid_ns(nsid, n->num_namespaces);
        return NVME_INVALID_NSID | NVME_DNR;
    }

    ns = &n->namespaces[nsid - 1];
    switch (cmd->opcode) {
    case NVME_CMD_FLUSH:
        return nvme_flush(n, ns, cmd, req);
    case NVME_CMD_WRITE_ZEROS:
        return nvme_write_zeros(n, ns, cmd, req);
    case NVME_CMD_WRITE:
    case NVME_CMD_READ:
        return nvme_rw(n, ns, cmd, req);
    default:
        trace_nvme_err_invalid_opc(cmd->opcode);
        return NVME_INVALID_OPCODE | NVME_DNR;
    }
}

static void nvme_free_sq(NvmeSQueue *sq, NvmeCtrl *n)
{
    n->sq[sq->sqid] = NULL;
    timer_del(sq->timer);
    timer_free(sq->timer);
    g_free(sq->io_req);
    if (sq->sqid) {
        g_free(sq);
    }
}

static uint16_t nvme_del_sq(NvmeCtrl *n, NvmeCmd *cmd)
{
    NvmeDeleteQ *c = (NvmeDeleteQ *)cmd;
    NvmeRequest *req, *next;
    NvmeSQueue *sq;
    NvmeCQueue *cq;
    uint16_t qid = le16_to_cpu(c->qid);

    if (unlikely(!qid || nvme_check_sqid(n, qid))) {
        trace_nvme_err_invalid_del_sq(qid);
        return NVME_INVALID_QID | NVME_DNR;
    }

    trace_nvme_del_sq(qid);

    sq = n->sq[qid];
    while (!QTAILQ_EMPTY(&sq->out_req_list)) {
        req = QTAILQ_FIRST(&sq->out_req_list);
        assert(req->aiocb);
        blk_aio_cancel(req->aiocb);
    }
    if (!nvme_check_cqid(n, sq->cqid)) {
        cq = n->cq[sq->cqid];
        QTAILQ_REMOVE(&cq->sq_list, sq, entry);

        nvme_post_cqes(cq);
        QTAILQ_FOREACH_SAFE(req, &cq->req_list, entry, next) {
            if (req->sq == sq) {
                QTAILQ_REMOVE(&cq->req_list, req, entry);
                QTAILQ_INSERT_TAIL(&sq->req_list, req, entry);
            }
        }
    }

    nvme_free_sq(sq, n);
    return NVME_SUCCESS;
}

static void nvme_init_sq(NvmeSQueue *sq, NvmeCtrl *n, uint64_t dma_addr,
    uint16_t sqid, uint16_t cqid, uint16_t size)
{
    int i;
    NvmeCQueue *cq;

    sq->ctrl = n;
    sq->dma_addr = dma_addr;
    sq->sqid = sqid;
    sq->size = size;
    sq->cqid = cqid;
    sq->head = sq->tail = 0;
    sq->io_req = g_new(NvmeRequest, sq->size);

    QTAILQ_INIT(&sq->req_list);
    QTAILQ_INIT(&sq->out_req_list);
    for (i = 0; i < sq->size; i++) {
        sq->io_req[i].sq = sq;
        QTAILQ_INSERT_TAIL(&(sq->req_list), &sq->io_req[i], entry);
    }
    sq->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, nvme_process_sq, sq);

    assert(n->cq[cqid]);
    cq = n->cq[cqid];
    QTAILQ_INSERT_TAIL(&(cq->sq_list), sq, entry);
    n->sq[sqid] = sq;
}

static uint16_t nvme_create_sq(NvmeCtrl *n, NvmeCmd *cmd)
{
    NvmeSQueue *sq;
    NvmeCreateSq *c = (NvmeCreateSq *)cmd;

    uint16_t cqid = le16_to_cpu(c->cqid);
    uint16_t sqid = le16_to_cpu(c->sqid);
    uint16_t qsize = le16_to_cpu(c->qsize);
    uint16_t qflags = le16_to_cpu(c->sq_flags);
    uint64_t prp1 = le64_to_cpu(c->prp1);

    trace_nvme_create_sq(prp1, sqid, cqid, qsize, qflags);

    if (unlikely(!cqid || nvme_check_cqid(n, cqid))) {
        trace_nvme_err_invalid_create_sq_cqid(cqid);
        return NVME_INVALID_CQID | NVME_DNR;
    }
    if (unlikely(!sqid || !nvme_check_sqid(n, sqid))) {
        trace_nvme_err_invalid_create_sq_sqid(sqid);
        return NVME_INVALID_QID | NVME_DNR;
    }
    if (unlikely(!qsize || qsize > NVME_CAP_MQES(n->bar.cap))) {
        trace_nvme_err_invalid_create_sq_size(qsize);
        return NVME_MAX_QSIZE_EXCEEDED | NVME_DNR;
    }
    if (unlikely(!prp1 || prp1 & (n->page_size - 1))) {
        trace_nvme_err_invalid_create_sq_addr(prp1);
        return NVME_INVALID_FIELD | NVME_DNR;
    }
    if (unlikely(!(NVME_SQ_FLAGS_PC(qflags)))) {
        trace_nvme_err_invalid_create_sq_qflags(NVME_SQ_FLAGS_PC(qflags));
        return NVME_INVALID_FIELD | NVME_DNR;
    }
    sq = g_malloc0(sizeof(*sq));
    nvme_init_sq(sq, n, prp1, sqid, cqid, qsize + 1);
    return NVME_SUCCESS;
}

static void nvme_free_cq(NvmeCQueue *cq, NvmeCtrl *n)
{
    n->cq[cq->cqid] = NULL;
    timer_del(cq->timer);
    timer_free(cq->timer);
    msix_vector_unuse(&n->parent_obj, cq->vector);
    if (cq->cqid) {
        g_free(cq);
    }
}

static uint16_t nvme_del_cq(NvmeCtrl *n, NvmeCmd *cmd)
{
    NvmeDeleteQ *c = (NvmeDeleteQ *)cmd;
    NvmeCQueue *cq;
    uint16_t qid = le16_to_cpu(c->qid);

    if (unlikely(!qid || nvme_check_cqid(n, qid))) {
        trace_nvme_err_invalid_del_cq_cqid(qid);
        return NVME_INVALID_CQID | NVME_DNR;
    }

    cq = n->cq[qid];
    if (unlikely(!QTAILQ_EMPTY(&cq->sq_list))) {
        trace_nvme_err_invalid_del_cq_notempty(qid);
        return NVME_INVALID_QUEUE_DEL;
    }
    nvme_irq_deassert(n, cq);
    trace_nvme_del_cq(qid);
    nvme_free_cq(cq, n);
    return NVME_SUCCESS;
}

static void nvme_init_cq(NvmeCQueue *cq, NvmeCtrl *n, uint64_t dma_addr,
    uint16_t cqid, uint16_t vector, uint16_t size, uint16_t irq_enabled)
{
    cq->ctrl = n;
    cq->cqid = cqid;
    cq->size = size;
    cq->dma_addr = dma_addr;
    cq->phase = 1;
    cq->irq_enabled = irq_enabled;
    cq->vector = vector;
    cq->head = cq->tail = 0;
    QTAILQ_INIT(&cq->req_list);
    QTAILQ_INIT(&cq->sq_list);
    msix_vector_use(&n->parent_obj, cq->vector);
    n->cq[cqid] = cq;
    cq->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, nvme_post_cqes, cq);
}

static uint16_t nvme_create_cq(NvmeCtrl *n, NvmeCmd *cmd)
{
    NvmeCQueue *cq;
    NvmeCreateCq *c = (NvmeCreateCq *)cmd;
    uint16_t cqid = le16_to_cpu(c->cqid);
    uint16_t vector = le16_to_cpu(c->irq_vector);
    uint16_t qsize = le16_to_cpu(c->qsize);
    uint16_t qflags = le16_to_cpu(c->cq_flags);
    uint64_t prp1 = le64_to_cpu(c->prp1);

    trace_nvme_create_cq(prp1, cqid, vector, qsize, qflags,
                         NVME_CQ_FLAGS_IEN(qflags) != 0);

    if (unlikely(!cqid || !nvme_check_cqid(n, cqid))) {
        trace_nvme_err_invalid_create_cq_cqid(cqid);
        return NVME_INVALID_CQID | NVME_DNR;
    }
    if (unlikely(!qsize || qsize > NVME_CAP_MQES(n->bar.cap))) {
        trace_nvme_err_invalid_create_cq_size(qsize);
        return NVME_MAX_QSIZE_EXCEEDED | NVME_DNR;
    }
    if (unlikely(!prp1)) {
        trace_nvme_err_invalid_create_cq_addr(prp1);
        return NVME_INVALID_FIELD | NVME_DNR;
    }
    if (unlikely(vector > n->num_queues)) {
        trace_nvme_err_invalid_create_cq_vector(vector);
        return NVME_INVALID_IRQ_VECTOR | NVME_DNR;
    }
    if (unlikely(!(NVME_CQ_FLAGS_PC(qflags)))) {
        trace_nvme_err_invalid_create_cq_qflags(NVME_CQ_FLAGS_PC(qflags));
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    cq = g_malloc0(sizeof(*cq));
    nvme_init_cq(cq, n, prp1, cqid, vector, qsize + 1,
        NVME_CQ_FLAGS_IEN(qflags));
    return NVME_SUCCESS;
}

static uint16_t nvme_identify_ctrl(NvmeCtrl *n, NvmeIdentify *c)
{
    uint64_t prp1 = le64_to_cpu(c->prp1);
    uint64_t prp2 = le64_to_cpu(c->prp2);

    trace_nvme_identify_ctrl();

    return nvme_dma_read_prp(n, (uint8_t *)&n->id_ctrl, sizeof(n->id_ctrl),
        prp1, prp2);
}

static uint16_t nvme_identify_ns(NvmeCtrl *n, NvmeIdentify *c)
{
    NvmeNamespace *ns;
    uint32_t nsid = le32_to_cpu(c->nsid);
    uint64_t prp1 = le64_to_cpu(c->prp1);
    uint64_t prp2 = le64_to_cpu(c->prp2);

    trace_nvme_identify_ns(nsid);

    if (unlikely(nsid == 0 || nsid > n->num_namespaces)) {
        trace_nvme_err_invalid_ns(nsid, n->num_namespaces);
        return NVME_INVALID_NSID | NVME_DNR;
    }

    ns = &n->namespaces[nsid - 1];

    return nvme_dma_read_prp(n, (uint8_t *)&ns->id_ns, sizeof(ns->id_ns),
        prp1, prp2);
}

static uint16_t nvme_identify_nslist(NvmeCtrl *n, NvmeIdentify *c)
{
    static const int data_len = 4 * KiB;
    uint32_t min_nsid = le32_to_cpu(c->nsid);
    uint64_t prp1 = le64_to_cpu(c->prp1);
    uint64_t prp2 = le64_to_cpu(c->prp2);
    uint32_t *list;
    uint16_t ret;
    int i, j = 0;

    trace_nvme_identify_nslist(min_nsid);

    list = g_malloc0(data_len);
    for (i = 0; i < n->num_namespaces; i++) {
        if (i < min_nsid) {
            continue;
        }
        list[j++] = cpu_to_le32(i + 1);
        if (j == data_len / sizeof(uint32_t)) {
            break;
        }
    }
    ret = nvme_dma_read_prp(n, (uint8_t *)list, data_len, prp1, prp2);
    g_free(list);
    return ret;
}

static uint16_t nvme_identify(NvmeCtrl *n, NvmeCmd *cmd)
{
    NvmeIdentify *c = (NvmeIdentify *)cmd;

    switch (le32_to_cpu(c->cns)) {
    case 0x00:
        return nvme_identify_ns(n, c);
    case 0x01:
        return nvme_identify_ctrl(n, c);
    case 0x02:
        return nvme_identify_nslist(n, c);
    default:
        trace_nvme_err_invalid_identify_cns(le32_to_cpu(c->cns));
        return NVME_INVALID_FIELD | NVME_DNR;
    }
}

static inline void nvme_set_timestamp(NvmeCtrl *n, uint64_t ts)
{
    trace_nvme_setfeat_timestamp(ts);

    n->host_timestamp = le64_to_cpu(ts);
    n->timestamp_set_qemu_clock_ms = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
}

static inline uint64_t nvme_get_timestamp(const NvmeCtrl *n)
{
    uint64_t current_time = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
    uint64_t elapsed_time = current_time - n->timestamp_set_qemu_clock_ms;

    union nvme_timestamp {
        struct {
            uint64_t timestamp:48;
            uint64_t sync:1;
            uint64_t origin:3;
            uint64_t rsvd1:12;
        };
        uint64_t all;
    };

    union nvme_timestamp ts;
    ts.all = 0;

    /*
     * If the sum of the Timestamp value set by the host and the elapsed
     * time exceeds 2^48, the value returned should be reduced modulo 2^48.
     */
    ts.timestamp = (n->host_timestamp + elapsed_time) & 0xffffffffffff;

    /* If the host timestamp is non-zero, set the timestamp origin */
    ts.origin = n->host_timestamp ? 0x01 : 0x00;

    trace_nvme_getfeat_timestamp(ts.all);

    return cpu_to_le64(ts.all);
}

static uint16_t nvme_get_feature_timestamp(NvmeCtrl *n, NvmeCmd *cmd)
{
    uint64_t prp1 = le64_to_cpu(cmd->prp1);
    uint64_t prp2 = le64_to_cpu(cmd->prp2);

    uint64_t timestamp = nvme_get_timestamp(n);

    return nvme_dma_read_prp(n, (uint8_t *)&timestamp,
                             sizeof(timestamp), prp1, prp2);
}

static uint16_t nvme_get_feature(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
{
    uint32_t dw10 = le32_to_cpu(cmd->cdw10);
    uint32_t result;

    switch (dw10) {
    case NVME_VOLATILE_WRITE_CACHE:
        result = blk_enable_write_cache(n->conf.blk);
        trace_nvme_getfeat_vwcache(result ? "enabled" : "disabled");
        break;
    case NVME_NUMBER_OF_QUEUES:
        result = cpu_to_le32((n->num_queues - 2) | ((n->num_queues - 2) << 16));
        trace_nvme_getfeat_numq(result);
        break;
    case NVME_TIMESTAMP:
        return nvme_get_feature_timestamp(n, cmd);
    default:
        trace_nvme_err_invalid_getfeat(dw10);
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    req->cqe.result = result;
    return NVME_SUCCESS;
}

static uint16_t nvme_set_feature_timestamp(NvmeCtrl *n, NvmeCmd *cmd)
{
    uint16_t ret;
    uint64_t timestamp;
    uint64_t prp1 = le64_to_cpu(cmd->prp1);
    uint64_t prp2 = le64_to_cpu(cmd->prp2);

    ret = nvme_dma_write_prp(n, (uint8_t *)&timestamp,
                             sizeof(timestamp), prp1, prp2);
    if (ret != NVME_SUCCESS) {
        return ret;
    }

    nvme_set_timestamp(n, timestamp);

    return NVME_SUCCESS;
}

static uint16_t nvme_set_feature(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
{
    uint32_t dw10 = le32_to_cpu(cmd->cdw10);
    uint32_t dw11 = le32_to_cpu(cmd->cdw11);

    switch (dw10) {
    case NVME_VOLATILE_WRITE_CACHE:
        blk_set_enable_write_cache(n->conf.blk, dw11 & 1);
        break;
    case NVME_NUMBER_OF_QUEUES:
        trace_nvme_setfeat_numq((dw11 & 0xFFFF) + 1,
                                ((dw11 >> 16) & 0xFFFF) + 1,
                                n->num_queues - 1, n->num_queues - 1);
        req->cqe.result =
            cpu_to_le32((n->num_queues - 2) | ((n->num_queues - 2) << 16));
        break;
    case NVME_TIMESTAMP:
        return nvme_set_feature_timestamp(n, cmd);
    default:
        trace_nvme_err_invalid_setfeat(dw10);
        return NVME_INVALID_FIELD | NVME_DNR;
    }
    return NVME_SUCCESS;
}

static uint16_t nvme_admin_cmd(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
{
    switch (cmd->opcode) {
    case NVME_ADM_CMD_DELETE_SQ:
        return nvme_del_sq(n, cmd);
    case NVME_ADM_CMD_CREATE_SQ:
        return nvme_create_sq(n, cmd);
    case NVME_ADM_CMD_DELETE_CQ:
        return nvme_del_cq(n, cmd);
    case NVME_ADM_CMD_CREATE_CQ:
        return nvme_create_cq(n, cmd);
    case NVME_ADM_CMD_IDENTIFY:
        return nvme_identify(n, cmd);
    case NVME_ADM_CMD_SET_FEATURES:
        return nvme_set_feature(n, cmd, req);
    case NVME_ADM_CMD_GET_FEATURES:
        return nvme_get_feature(n, cmd, req);
    default:
        trace_nvme_err_invalid_admin_opc(cmd->opcode);
        return NVME_INVALID_OPCODE | NVME_DNR;
    }
}

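/*
 * Timer callback for a submission queue doorbell: fetch SQEs from guest
 * memory (or the CMB), dispatch them to the admin or I/O handlers, and
 * enqueue a completion for any command that finished synchronously.
 */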
static void nvme_process_sq(void *opaque)
{
    NvmeSQueue *sq = opaque;
    NvmeCtrl *n = sq->ctrl;
    NvmeCQueue *cq = n->cq[sq->cqid];

    uint16_t status;
    hwaddr addr;
    NvmeCmd cmd;
    NvmeRequest *req;

    while (!(nvme_sq_empty(sq) || QTAILQ_EMPTY(&sq->req_list))) {
        addr = sq->dma_addr + sq->head * n->sqe_size;
        nvme_addr_read(n, addr, (void *)&cmd, sizeof(cmd));
        nvme_inc_sq_head(sq);

        req = QTAILQ_FIRST(&sq->req_list);
        QTAILQ_REMOVE(&sq->req_list, req, entry);
        QTAILQ_INSERT_TAIL(&sq->out_req_list, req, entry);
        memset(&req->cqe, 0, sizeof(req->cqe));
        req->cqe.cid = cmd.cid;

        status = sq->sqid ? nvme_io_cmd(n, &cmd, req) :
            nvme_admin_cmd(n, &cmd, req);
        if (status != NVME_NO_COMPLETE) {
            req->status = status;
            nvme_enqueue_req_completion(cq, req);
        }
    }
}

static void nvme_clear_ctrl(NvmeCtrl *n)
{
    int i;

    blk_drain(n->conf.blk);

    for (i = 0; i < n->num_queues; i++) {
        if (n->sq[i] != NULL) {
            nvme_free_sq(n->sq[i], n);
        }
    }
    for (i = 0; i < n->num_queues; i++) {
        if (n->cq[i] != NULL) {
            nvme_free_cq(n->cq[i], n);
        }
    }

    blk_flush(n->conf.blk);
    n->bar.cc = 0;
}

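/*
 * Enable the controller (CC.EN 0 -> 1): sanity-check the admin queue
 * addresses and the queue entry and memory page size fields against CAP,
 * then set up the admin queue pair.
 */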
static int nvme_start_ctrl(NvmeCtrl *n)
{
    uint32_t page_bits = NVME_CC_MPS(n->bar.cc) + 12;
    uint32_t page_size = 1 << page_bits;

    if (unlikely(n->cq[0])) {
        trace_nvme_err_startfail_cq();
        return -1;
    }
    if (unlikely(n->sq[0])) {
        trace_nvme_err_startfail_sq();
        return -1;
    }
    if (unlikely(!n->bar.asq)) {
        trace_nvme_err_startfail_nbarasq();
        return -1;
    }
    if (unlikely(!n->bar.acq)) {
        trace_nvme_err_startfail_nbaracq();
        return -1;
    }
    if (unlikely(n->bar.asq & (page_size - 1))) {
        trace_nvme_err_startfail_asq_misaligned(n->bar.asq);
        return -1;
    }
    if (unlikely(n->bar.acq & (page_size - 1))) {
        trace_nvme_err_startfail_acq_misaligned(n->bar.acq);
        return -1;
    }
    if (unlikely(NVME_CC_MPS(n->bar.cc) <
                 NVME_CAP_MPSMIN(n->bar.cap))) {
        trace_nvme_err_startfail_page_too_small(
                    NVME_CC_MPS(n->bar.cc),
                    NVME_CAP_MPSMIN(n->bar.cap));
        return -1;
    }
    if (unlikely(NVME_CC_MPS(n->bar.cc) >
                 NVME_CAP_MPSMAX(n->bar.cap))) {
        trace_nvme_err_startfail_page_too_large(
                    NVME_CC_MPS(n->bar.cc),
                    NVME_CAP_MPSMAX(n->bar.cap));
        return -1;
    }
    /*
     * The traces below report the CQES/SQES limits from id_ctrl, matching
     * what the conditions actually test (the original passed n->bar.cap to
     * the NVME_CTRL_*_MIN/MAX macros by mistake).
     */
    if (unlikely(NVME_CC_IOCQES(n->bar.cc) <
                 NVME_CTRL_CQES_MIN(n->id_ctrl.cqes))) {
        trace_nvme_err_startfail_cqent_too_small(
                    NVME_CC_IOCQES(n->bar.cc),
                    NVME_CTRL_CQES_MIN(n->id_ctrl.cqes));
        return -1;
    }
    if (unlikely(NVME_CC_IOCQES(n->bar.cc) >
                 NVME_CTRL_CQES_MAX(n->id_ctrl.cqes))) {
        trace_nvme_err_startfail_cqent_too_large(
                    NVME_CC_IOCQES(n->bar.cc),
                    NVME_CTRL_CQES_MAX(n->id_ctrl.cqes));
        return -1;
    }
    if (unlikely(NVME_CC_IOSQES(n->bar.cc) <
                 NVME_CTRL_SQES_MIN(n->id_ctrl.sqes))) {
        trace_nvme_err_startfail_sqent_too_small(
                    NVME_CC_IOSQES(n->bar.cc),
                    NVME_CTRL_SQES_MIN(n->id_ctrl.sqes));
        return -1;
    }
    if (unlikely(NVME_CC_IOSQES(n->bar.cc) >
                 NVME_CTRL_SQES_MAX(n->id_ctrl.sqes))) {
        trace_nvme_err_startfail_sqent_too_large(
                    NVME_CC_IOSQES(n->bar.cc),
                    NVME_CTRL_SQES_MAX(n->id_ctrl.sqes));
        return -1;
    }
    if (unlikely(!NVME_AQA_ASQS(n->bar.aqa))) {
        trace_nvme_err_startfail_asqent_sz_zero();
        return -1;
    }
    if (unlikely(!NVME_AQA_ACQS(n->bar.aqa))) {
        trace_nvme_err_startfail_acqent_sz_zero();
        return -1;
    }

    n->page_bits = page_bits;
    n->page_size = page_size;
    n->max_prp_ents = n->page_size / sizeof(uint64_t);
    n->cqe_size = 1 << NVME_CC_IOCQES(n->bar.cc);
    n->sqe_size = 1 << NVME_CC_IOSQES(n->bar.cc);
    nvme_init_cq(&n->admin_cq, n, n->bar.acq, 0, 0,
        NVME_AQA_ACQS(n->bar.aqa) + 1, 1);
    nvme_init_sq(&n->admin_sq, n, n->bar.asq, 0, 0,
        NVME_AQA_ASQS(n->bar.aqa) + 1);

    nvme_set_timestamp(n, 0ULL);

    return 0;
}

static void nvme_write_bar(NvmeCtrl *n, hwaddr offset, uint64_t data,
    unsigned size)
{
    if (unlikely(offset & (sizeof(uint32_t) - 1))) {
        NVME_GUEST_ERR(nvme_ub_mmiowr_misaligned32,
                       "MMIO write not 32-bit aligned,"
                       " offset=0x%"PRIx64"", offset);
        /* should be ignored, fall through for now */
    }

    if (unlikely(size < sizeof(uint32_t))) {
        NVME_GUEST_ERR(nvme_ub_mmiowr_toosmall,
                       "MMIO write smaller than 32-bits,"
                       " offset=0x%"PRIx64", size=%u",
                       offset, size);
        /* should be ignored, fall through for now */
    }

    switch (offset) {
    case 0xc:   /* INTMS */
        if (unlikely(msix_enabled(&(n->parent_obj)))) {
            NVME_GUEST_ERR(nvme_ub_mmiowr_intmask_with_msix,
                           "undefined access to interrupt mask set"
                           " when MSI-X is enabled");
            /* should be ignored, fall through for now */
        }
        n->bar.intms |= data & 0xffffffff;
        n->bar.intmc = n->bar.intms;
        trace_nvme_mmio_intm_set(data & 0xffffffff,
                                 n->bar.intmc);
        nvme_irq_check(n);
        break;
    case 0x10:  /* INTMC */
        if (unlikely(msix_enabled(&(n->parent_obj)))) {
            NVME_GUEST_ERR(nvme_ub_mmiowr_intmask_with_msix,
                           "undefined access to interrupt mask clr"
                           " when MSI-X is enabled");
            /* should be ignored, fall through for now */
        }
        n->bar.intms &= ~(data & 0xffffffff);
        n->bar.intmc = n->bar.intms;
        trace_nvme_mmio_intm_clr(data & 0xffffffff,
                                 n->bar.intmc);
        nvme_irq_check(n);
        break;
    case 0x14:  /* CC */
        trace_nvme_mmio_cfg(data & 0xffffffff);
        /* Windows first sends data, then sends enable bit */
        if (!NVME_CC_EN(data) && !NVME_CC_EN(n->bar.cc) &&
            !NVME_CC_SHN(data) && !NVME_CC_SHN(n->bar.cc))
        {
            n->bar.cc = data;
        }

        if (NVME_CC_EN(data) && !NVME_CC_EN(n->bar.cc)) {
            n->bar.cc = data;
            if (unlikely(nvme_start_ctrl(n))) {
                trace_nvme_err_startfail();
                n->bar.csts = NVME_CSTS_FAILED;
            } else {
                trace_nvme_mmio_start_success();
                n->bar.csts = NVME_CSTS_READY;
            }
        } else if (!NVME_CC_EN(data) && NVME_CC_EN(n->bar.cc)) {
            trace_nvme_mmio_stopped();
            nvme_clear_ctrl(n);
            n->bar.csts &= ~NVME_CSTS_READY;
        }
        if (NVME_CC_SHN(data) && !(NVME_CC_SHN(n->bar.cc))) {
            trace_nvme_mmio_shutdown_set();
            nvme_clear_ctrl(n);
            n->bar.cc = data;
            n->bar.csts |= NVME_CSTS_SHST_COMPLETE;
        } else if (!NVME_CC_SHN(data) && NVME_CC_SHN(n->bar.cc)) {
            trace_nvme_mmio_shutdown_cleared();
            n->bar.csts &= ~NVME_CSTS_SHST_COMPLETE;
            n->bar.cc = data;
        }
        break;
    case 0x1C:  /* CSTS */
        if (data & (1 << 4)) {
            NVME_GUEST_ERR(nvme_ub_mmiowr_ssreset_w1c_unsupported,
                           "attempted to W1C CSTS.NSSRO"
                           " but CAP.NSSRS is zero (not supported)");
        } else if (data != 0) {
            NVME_GUEST_ERR(nvme_ub_mmiowr_ro_csts,
                           "attempted to set a read only bit"
                           " of controller status");
        }
        break;
    case 0x20:  /* NSSR */
        if (data == 0x4E564D65) {
            trace_nvme_ub_mmiowr_ssreset_unsupported();
        } else {
            /* The spec says that writes of other values have no effect */
            return;
        }
        break;
    case 0x24:  /* AQA */
        n->bar.aqa = data & 0xffffffff;
        trace_nvme_mmio_aqattr(data & 0xffffffff);
        break;
    case 0x28:  /* ASQ */
        n->bar.asq = data;
        trace_nvme_mmio_asqaddr(data);
        break;
    case 0x2c:  /* ASQ hi */
        n->bar.asq |= data << 32;
        trace_nvme_mmio_asqaddr_hi(data, n->bar.asq);
        break;
    case 0x30:  /* ACQ */
        trace_nvme_mmio_acqaddr(data);
        n->bar.acq = data;
        break;
    case 0x34:  /* ACQ hi */
        n->bar.acq |= data << 32;
        trace_nvme_mmio_acqaddr_hi(data, n->bar.acq);
        break;
    case 0x38:  /* CMBLOC */
        NVME_GUEST_ERR(nvme_ub_mmiowr_cmbloc_reserved,
                       "invalid write to reserved CMBLOC"
                       " when CMBSZ is zero, ignored");
        return;
    case 0x3C:  /* CMBSZ */
        NVME_GUEST_ERR(nvme_ub_mmiowr_cmbsz_readonly,
                       "invalid write to read only CMBSZ, ignored");
        return;
    default:
        NVME_GUEST_ERR(nvme_ub_mmiowr_invalid,
                       "invalid MMIO write,"
                       " offset=0x%"PRIx64", data=%"PRIx64"",
                       offset, data);
        break;
    }
}

static uint64_t nvme_mmio_read(void *opaque, hwaddr addr, unsigned size)
{
    NvmeCtrl *n = (NvmeCtrl *)opaque;
    uint8_t *ptr = (uint8_t *)&n->bar;
    uint64_t val = 0;

    if (unlikely(addr & (sizeof(uint32_t) - 1))) {
        NVME_GUEST_ERR(nvme_ub_mmiord_misaligned32,
                       "MMIO read not 32-bit aligned,"
                       " offset=0x%"PRIx64"", addr);
        /* should RAZ, fall through for now */
    } else if (unlikely(size < sizeof(uint32_t))) {
        NVME_GUEST_ERR(nvme_ub_mmiord_toosmall,
                       "MMIO read smaller than 32-bits,"
                       " offset=0x%"PRIx64"", addr);
        /* should RAZ, fall through for now */
    }

    if (addr < sizeof(n->bar)) {
        memcpy(&val, ptr + addr, size);
    } else {
        NVME_GUEST_ERR(nvme_ub_mmiord_invalid_ofs,
                       "MMIO read beyond last register,"
                       " offset=0x%"PRIx64", returning 0", addr);
    }

    return val;
}

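/*
 * Doorbell write: with the 4-byte doorbell stride used here, even slots
 * past offset 0x1000 are submission queue tails and odd slots are
 * completion queue heads. Kick the matching queue's timer so the new
 * entries are processed outside the MMIO handler.
 */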
static void nvme_process_db(NvmeCtrl *n, hwaddr addr, int val)
{
    uint32_t qid;

    if (unlikely(addr & ((1 << 2) - 1))) {
        NVME_GUEST_ERR(nvme_ub_db_wr_misaligned,
                       "doorbell write not 32-bit aligned,"
                       " offset=0x%"PRIx64", ignoring", addr);
        return;
    }

    if (((addr - 0x1000) >> 2) & 1) {
        /* Completion queue doorbell write */

        uint16_t new_head = val & 0xffff;
        int start_sqs;
        NvmeCQueue *cq;

        qid = (addr - (0x1000 + (1 << 2))) >> 3;
        if (unlikely(nvme_check_cqid(n, qid))) {
            NVME_GUEST_ERR(nvme_ub_db_wr_invalid_cq,
                           "completion queue doorbell write"
                           " for nonexistent queue,"
                           " sqid=%"PRIu32", ignoring", qid);
            return;
        }

        cq = n->cq[qid];
        if (unlikely(new_head >= cq->size)) {
            NVME_GUEST_ERR(nvme_ub_db_wr_invalid_cqhead,
                           "completion queue doorbell write value"
                           " beyond queue size, sqid=%"PRIu32","
                           " new_head=%"PRIu16", ignoring",
                           qid, new_head);
            return;
        }

        start_sqs = nvme_cq_full(cq) ? 1 : 0;
        cq->head = new_head;
        if (start_sqs) {
            NvmeSQueue *sq;
            QTAILQ_FOREACH(sq, &cq->sq_list, entry) {
                timer_mod(sq->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 500);
            }
            timer_mod(cq->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 500);
        }

        if (cq->tail == cq->head) {
            nvme_irq_deassert(n, cq);
        }
    } else {
        /* Submission queue doorbell write */

        uint16_t new_tail = val & 0xffff;
        NvmeSQueue *sq;

        qid = (addr - 0x1000) >> 3;
        if (unlikely(nvme_check_sqid(n, qid))) {
            NVME_GUEST_ERR(nvme_ub_db_wr_invalid_sq,
                           "submission queue doorbell write"
                           " for nonexistent queue,"
                           " sqid=%"PRIu32", ignoring", qid);
            return;
        }

        sq = n->sq[qid];
        if (unlikely(new_tail >= sq->size)) {
            NVME_GUEST_ERR(nvme_ub_db_wr_invalid_sqtail,
                           "submission queue doorbell write value"
                           " beyond queue size, sqid=%"PRIu32","
                           " new_tail=%"PRIu16", ignoring",
                           qid, new_tail);
            return;
        }

        sq->tail = new_tail;
        timer_mod(sq->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 500);
    }
}

static void nvme_mmio_write(void *opaque, hwaddr addr, uint64_t data,
    unsigned size)
{
    NvmeCtrl *n = (NvmeCtrl *)opaque;
    if (addr < sizeof(n->bar)) {
        nvme_write_bar(n, addr, data, size);
    } else if (addr >= 0x1000) {
        nvme_process_db(n, addr, data);
    }
}

static const MemoryRegionOps nvme_mmio_ops = {
    .read = nvme_mmio_read,
    .write = nvme_mmio_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 2,
        .max_access_size = 8,
    },
};

static void nvme_cmb_write(void *opaque, hwaddr addr, uint64_t data,
    unsigned size)
{
    NvmeCtrl *n = (NvmeCtrl *)opaque;
    stn_le_p(&n->cmbuf[addr], size, data);
}

static uint64_t nvme_cmb_read(void *opaque, hwaddr addr, unsigned size)
{
    NvmeCtrl *n = (NvmeCtrl *)opaque;
    return ldn_le_p(&n->cmbuf[addr], size);
}

static const MemoryRegionOps nvme_cmb_ops = {
    .read = nvme_cmb_read,
    .write = nvme_cmb_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};

static void nvme_realize(PCIDevice *pci_dev, Error **errp)
{
    NvmeCtrl *n = NVME(pci_dev);
    NvmeIdCtrl *id = &n->id_ctrl;

    int i;
    int64_t bs_size;
    uint8_t *pci_conf;

    if (!n->num_queues) {
        error_setg(errp, "num_queues can't be zero");
        return;
    }

    if (!n->conf.blk) {
        error_setg(errp, "drive property not set");
        return;
    }

    bs_size = blk_getlength(n->conf.blk);
    if (bs_size < 0) {
        error_setg(errp, "could not get backing file size");
        return;
    }

    if (!n->serial) {
        error_setg(errp, "serial property not set");
        return;
    }
    blkconf_blocksizes(&n->conf);
    if (!blkconf_apply_backend_options(&n->conf, blk_is_read_only(n->conf.blk),
                                       false, errp)) {
        return;
    }

    pci_conf = pci_dev->config;
    pci_conf[PCI_INTERRUPT_PIN] = 1;
    pci_config_set_prog_interface(pci_dev->config, 0x2);
    pci_config_set_class(pci_dev->config, PCI_CLASS_STORAGE_EXPRESS);
    pcie_endpoint_cap_init(pci_dev, 0x80);

    n->num_namespaces = 1;
    n->reg_size = pow2ceil(0x1004 + 2 * (n->num_queues + 1) * 4);
    n->ns_size = bs_size / (uint64_t)n->num_namespaces;

    n->namespaces = g_new0(NvmeNamespace, n->num_namespaces);
    n->sq = g_new0(NvmeSQueue *, n->num_queues);
    n->cq = g_new0(NvmeCQueue *, n->num_queues);

    memory_region_init_io(&n->iomem, OBJECT(n), &nvme_mmio_ops, n,
                          "nvme", n->reg_size);
    pci_register_bar(pci_dev, 0,
        PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_TYPE_64,
        &n->iomem);
    msix_init_exclusive_bar(pci_dev, n->num_queues, 4, NULL);

    id->vid = cpu_to_le16(pci_get_word(pci_conf + PCI_VENDOR_ID));
    id->ssvid = cpu_to_le16(pci_get_word(pci_conf + PCI_SUBSYSTEM_VENDOR_ID));
    strpadcpy((char *)id->mn, sizeof(id->mn), "QEMU NVMe Ctrl", ' ');
    strpadcpy((char *)id->fr, sizeof(id->fr), "1.0", ' ');
    strpadcpy((char *)id->sn, sizeof(id->sn), n->serial, ' ');
    id->rab = 6;
    id->ieee[0] = 0x00;
    id->ieee[1] = 0x02;
    id->ieee[2] = 0xb3;
    id->oacs = cpu_to_le16(0);
    id->frmw = 7 << 1;
    id->lpa = 1 << 0;
    id->sqes = (0x6 << 4) | 0x6;
    id->cqes = (0x4 << 4) | 0x4;
    id->nn = cpu_to_le32(n->num_namespaces);
    id->oncs = cpu_to_le16(NVME_ONCS_WRITE_ZEROS | NVME_ONCS_TIMESTAMP);
    id->psd[0].mp = cpu_to_le16(0x9c4);
    id->psd[0].enlat = cpu_to_le32(0x10);
    id->psd[0].exlat = cpu_to_le32(0x4);
    if (blk_enable_write_cache(n->conf.blk)) {
        id->vwc = 1;
    }

    n->bar.cap = 0;
    NVME_CAP_SET_MQES(n->bar.cap, 0x7ff);
    NVME_CAP_SET_CQR(n->bar.cap, 1);
    NVME_CAP_SET_AMS(n->bar.cap, 1);
    NVME_CAP_SET_TO(n->bar.cap, 0xf);
    NVME_CAP_SET_CSS(n->bar.cap, 1);
    NVME_CAP_SET_MPSMAX(n->bar.cap, 4);

    n->bar.vs = 0x00010200;
    n->bar.intmc = n->bar.intms = 0;

    if (n->cmb_size_mb) {

        NVME_CMBLOC_SET_BIR(n->bar.cmbloc, 2);
        NVME_CMBLOC_SET_OFST(n->bar.cmbloc, 0);

        NVME_CMBSZ_SET_SQS(n->bar.cmbsz, 1);
        NVME_CMBSZ_SET_CQS(n->bar.cmbsz, 0);
        NVME_CMBSZ_SET_LISTS(n->bar.cmbsz, 0);
        NVME_CMBSZ_SET_RDS(n->bar.cmbsz, 1);
        NVME_CMBSZ_SET_WDS(n->bar.cmbsz, 1);
        NVME_CMBSZ_SET_SZU(n->bar.cmbsz, 2); /* MBs */
        NVME_CMBSZ_SET_SZ(n->bar.cmbsz, n->cmb_size_mb);

        n->cmbloc = n->bar.cmbloc;
        n->cmbsz = n->bar.cmbsz;

        n->cmbuf = g_malloc0(NVME_CMBSZ_GETSIZE(n->bar.cmbsz));
        memory_region_init_io(&n->ctrl_mem, OBJECT(n), &nvme_cmb_ops, n,
                              "nvme-cmb", NVME_CMBSZ_GETSIZE(n->bar.cmbsz));
        pci_register_bar(pci_dev, NVME_CMBLOC_BIR(n->bar.cmbloc),
            PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_TYPE_64 |
            PCI_BASE_ADDRESS_MEM_PREFETCH, &n->ctrl_mem);

    }

    for (i = 0; i < n->num_namespaces; i++) {
        NvmeNamespace *ns = &n->namespaces[i];
        NvmeIdNs *id_ns = &ns->id_ns;
        id_ns->nsfeat = 0;
        id_ns->nlbaf = 0;
        id_ns->flbas = 0;
        id_ns->mc = 0;
        id_ns->dpc = 0;
        id_ns->dps = 0;
        id_ns->lbaf[0].ds = BDRV_SECTOR_BITS;
        id_ns->ncap  = id_ns->nuse = id_ns->nsze =
            cpu_to_le64(n->ns_size >>
                id_ns->lbaf[NVME_ID_NS_FLBAS_INDEX(ns->id_ns.flbas)].ds);
    }
}

static void nvme_exit(PCIDevice *pci_dev)
{
    NvmeCtrl *n = NVME(pci_dev);

    nvme_clear_ctrl(n);
    g_free(n->namespaces);
    g_free(n->cq);
    g_free(n->sq);

    if (n->cmb_size_mb) {
        g_free(n->cmbuf);
    }
    msix_uninit_exclusive_bar(pci_dev);
}

static Property nvme_props[] = {
    DEFINE_BLOCK_PROPERTIES(NvmeCtrl, conf),
    DEFINE_PROP_STRING("serial", NvmeCtrl, serial),
    DEFINE_PROP_UINT32("cmb_size_mb", NvmeCtrl, cmb_size_mb, 0),
    DEFINE_PROP_UINT32("num_queues", NvmeCtrl, num_queues, 64),
    DEFINE_PROP_END_OF_LIST(),
};

static const VMStateDescription nvme_vmstate = {
    .name = "nvme",
    .unmigratable = 1,
};

static void nvme_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);
    PCIDeviceClass *pc = PCI_DEVICE_CLASS(oc);

    pc->realize = nvme_realize;
    pc->exit = nvme_exit;
    pc->class_id = PCI_CLASS_STORAGE_EXPRESS;
    pc->vendor_id = PCI_VENDOR_ID_INTEL;
    pc->device_id = 0x5845;
    pc->revision = 2;

    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
    dc->desc = "Non-Volatile Memory Express";
    dc->props = nvme_props;
    dc->vmsd = &nvme_vmstate;
}

static void nvme_instance_init(Object *obj)
{
    NvmeCtrl *s = NVME(obj);

    device_add_bootindex_property(obj, &s->conf.bootindex,
                                  "bootindex", "/namespace@1,0",
                                  DEVICE(obj), &error_abort);
}

static const TypeInfo nvme_info = {
    .name          = TYPE_NVME,
    .parent        = TYPE_PCI_DEVICE,
    .instance_size = sizeof(NvmeCtrl),
    .class_init    = nvme_class_init,
    .instance_init = nvme_instance_init,
    .interfaces = (InterfaceInfo[]) {
        { INTERFACE_PCIE_DEVICE },
        { }
    },
};

static void nvme_register_types(void)
{
    type_register_static(&nvme_info);
}

type_init(nvme_register_types)