/*
 * QEMU NVM Express Controller
 *
 * Copyright (c) 2012, Intel Corporation
 *
 * Written by Keith Busch <keith.busch@intel.com>
 *
 * This code is licensed under the GNU GPL v2 or later.
 *
 * Reference Specs: http://www.nvmexpress.org, 1.4, 1.3, 1.2, 1.1, 1.0e
 *
 *   https://nvmexpress.org/developers/nvme-specification/
 *
 * Usage: add options:
 *      -drive file=<file>,if=none,id=<drive_id>
 *      -device nvme-subsys,id=<subsys_id>,nqn=<nqn_id>
 *      -device nvme,serial=<serial>,id=<bus_name>, \
 *              cmb_size_mb=<cmb_size_mb[optional]>, \
 *              [pmrdev=<mem_backend_file_id>,] \
 *              max_ioqpairs=<N[optional]>, \
 *              aerl=<N[optional]>,aer_max_queued=<N[optional]>, \
 *              mdts=<N[optional]>,zoned.zasl=<N[optional]>, \
 *              subsys=<subsys_id>
 *      -device nvme-ns,drive=<drive_id>,bus=<bus_name>,nsid=<nsid>,\
 *              zoned=<true|false[optional]>, \
 *              subsys=<subsys_id>,shared=<true|false[optional]>
 *
 * Note that cmb_size_mb denotes the size of the CMB in MB. The CMB is assumed
 * to be at offset 0 in BAR2 and supports only WDS, RDS and SQS for now. By
 * default, the device will use the "v1.4 CMB scheme" - use the `legacy-cmb`
 * parameter to always enable the CMBLOC and CMBSZ registers (v1.3 behavior).
 *
 * PMR emulation can be enabled by pointing pmrdev at a memory-backend-file.
 * For example:
 *
 *  -object memory-backend-file,id=<mem_id>,share=on,mem-path=<file_path>, \
 *          size=<size> .... -device nvme,...,pmrdev=<mem_id>
 *
 * The PMR will use BAR 4/5 exclusively.
 *
 * To place controller(s) and namespace(s) in a subsystem, provide the
 * nvme-subsys device as shown above.
 *
 * nvme subsystem device parameters
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 * - `nqn`
 *   This parameter provides the `<nqn_id>` part of the string
 *   `nqn.2019-08.org.qemu:<nqn_id>` which will be reported in the SUBNQN field
 *   of subsystem controllers. Note that `<nqn_id>` should be unique per
 *   subsystem, but this is not enforced by QEMU. If not specified, it will
 *   default to the value of the `id` parameter (`<subsys_id>`).
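 *
 *   For example (with hypothetical ids), `-device nvme-subsys,id=subsys0,nqn=foo`
 *   makes the subsystem controllers report a SUBNQN of
 *   `nqn.2019-08.org.qemu:foo`.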
 *
 * nvme device parameters
 * ~~~~~~~~~~~~~~~~~~~~~~
 * - `subsys`
 *   Specifying this parameter attaches the controller to the subsystem and
 *   the SUBNQN field in the controller will report the NQN of the subsystem
 *   device. This also enables the multi controller capability represented in
 *   the Identify Controller data structure in CMIC (Controller Multi-path I/O
 *   and Namespace Sharing Capabilities).
 *
 * - `aerl`
 *   The Asynchronous Event Request Limit (AERL). Indicates the maximum number
 *   of concurrently outstanding Asynchronous Event Request commands supported
 *   by the controller. This is a 0's based value.
 *
 * - `aer_max_queued`
 *   This is the maximum number of events that the device will enqueue for
 *   completion when there are no outstanding AERs. When the maximum number of
 *   enqueued events is reached, subsequent events will be dropped.
 *
 * - `mdts`
 *   Indicates the maximum data transfer size for a command that transfers data
 *   between host-accessible memory and the controller. The value is specified
 *   as a power of two (2^n) and is in units of the minimum memory page size
 *   (CAP.MPSMIN). The default value is 7 (i.e. 512 KiB).
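 *   For example, assuming the usual CAP.MPSMIN page size of 4 KiB, the default
 *   of 7 allows transfers of up to 2^7 * 4 KiB = 512 KiB per command.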
 *
 * - `zoned.zasl`
 *   Indicates the maximum data transfer size for the Zone Append command. Like
 *   `mdts`, the value is specified as a power of two (2^n) and is in units of
 *   the minimum memory page size (CAP.MPSMIN). The default value is 0 (i.e.
 *   defaulting to the value of `mdts`).
 *
 * nvme namespace device parameters
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 * - `shared`
 *   If given, the namespace will be attached to all controllers in the
 *   subsystem. Otherwise, `bus` must be given to attach this namespace to a
 *   specific controller as a non-shared namespace.
 *
 * Setting `zoned` to true selects the Zoned Command Set for the namespace.
 * In this case, the following namespace properties are available to configure
 * zoned operation:
 *
 *   zoned.zone_size=<zone size in bytes, default: 128MiB>
 *     The number may be followed by K, M, G as in kilo-, mega- or giga-.
 *
 *   zoned.zone_capacity=<zone capacity in bytes, default: zone size>
 *     The value 0 (default) forces zone capacity to be the same as zone
 *     size. The value of this property may not exceed zone size.
 *
 *   zoned.descr_ext_size=<zone descriptor extension size, default 0>
 *     This value needs to be specified in 64B units. If it is zero,
 *     namespace(s) will not support zone descriptor extensions.
 *
 *   zoned.max_active=<Maximum Active Resources (zones), default: 0>
 *     The default value means there is no limit to the number of
 *     concurrently active zones.
 *
 *   zoned.max_open=<Maximum Open Resources (zones), default: 0>
 *     The default value means there is no limit to the number of
 *     concurrently open zones.
 *
 *   zoned.cross_read=<enable RAZB, default: false>
 *     Setting this property to true enables Read Across Zone Boundaries.
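 *
 * As a sketch, a zoned namespace might be set up like this (illustrative ids,
 * sizes and limits; adjust as needed):
 *
 *      -drive file=<file>,if=none,id=nvm0
 *      -device nvme,serial=deadbeef,id=nvme0
 *      -device nvme-ns,drive=nvm0,bus=nvme0,nsid=1,zoned=true, \
 *              zoned.zone_size=64M,zoned.zone_capacity=48M, \
 *              zoned.max_open=16,zoned.max_active=32
 */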
120 #include "qemu/osdep.h"
121 #include "qemu/units.h"
122 #include "qemu/error-report.h"
123 #include "hw/block/block.h"
124 #include "hw/pci/msix.h"
125 #include "hw/pci/pci.h"
126 #include "hw/qdev-properties.h"
127 #include "migration/vmstate.h"
128 #include "sysemu/sysemu.h"
129 #include "qapi/error.h"
130 #include "qapi/visitor.h"
131 #include "sysemu/hostmem.h"
132 #include "sysemu/block-backend.h"
133 #include "exec/memory.h"
134 #include "qemu/log.h"
135 #include "qemu/module.h"
136 #include "qemu/cutils.h"
141 #define NVME_MAX_IOQPAIRS 0xffff
142 #define NVME_DB_SIZE 4
143 #define NVME_SPEC_VER 0x00010400
144 #define NVME_CMB_BIR 2
145 #define NVME_PMR_BIR 4
146 #define NVME_TEMPERATURE 0x143
147 #define NVME_TEMPERATURE_WARNING 0x157
148 #define NVME_TEMPERATURE_CRITICAL 0x175
149 #define NVME_NUM_FW_SLOTS 1
151 #define NVME_GUEST_ERR(trace, fmt, ...) \
153 (trace_##trace)(__VA_ARGS__); \
154 qemu_log_mask(LOG_GUEST_ERROR, #trace \
155 " in %s: " fmt "\n", __func__, ## __VA_ARGS__); \
static const bool nvme_feature_support[NVME_FID_MAX] = {
    [NVME_ARBITRATION]              = true,
    [NVME_POWER_MANAGEMENT]         = true,
    [NVME_TEMPERATURE_THRESHOLD]    = true,
    [NVME_ERROR_RECOVERY]           = true,
    [NVME_VOLATILE_WRITE_CACHE]     = true,
    [NVME_NUMBER_OF_QUEUES]         = true,
    [NVME_INTERRUPT_COALESCING]     = true,
    [NVME_INTERRUPT_VECTOR_CONF]    = true,
    [NVME_WRITE_ATOMICITY]          = true,
    [NVME_ASYNCHRONOUS_EVENT_CONF]  = true,
    [NVME_TIMESTAMP]                = true,
};
static const uint32_t nvme_feature_cap[NVME_FID_MAX] = {
    [NVME_TEMPERATURE_THRESHOLD]    = NVME_FEAT_CAP_CHANGE,
    [NVME_ERROR_RECOVERY]           = NVME_FEAT_CAP_CHANGE | NVME_FEAT_CAP_NS,
    [NVME_VOLATILE_WRITE_CACHE]     = NVME_FEAT_CAP_CHANGE,
    [NVME_NUMBER_OF_QUEUES]         = NVME_FEAT_CAP_CHANGE,
    [NVME_ASYNCHRONOUS_EVENT_CONF]  = NVME_FEAT_CAP_CHANGE,
    [NVME_TIMESTAMP]                = NVME_FEAT_CAP_CHANGE,
};
static const uint32_t nvme_cse_acs[256] = {
    [NVME_ADM_CMD_DELETE_SQ]        = NVME_CMD_EFF_CSUPP,
    [NVME_ADM_CMD_CREATE_SQ]        = NVME_CMD_EFF_CSUPP,
    [NVME_ADM_CMD_GET_LOG_PAGE]     = NVME_CMD_EFF_CSUPP,
    [NVME_ADM_CMD_DELETE_CQ]        = NVME_CMD_EFF_CSUPP,
    [NVME_ADM_CMD_CREATE_CQ]        = NVME_CMD_EFF_CSUPP,
    [NVME_ADM_CMD_IDENTIFY]         = NVME_CMD_EFF_CSUPP,
    [NVME_ADM_CMD_ABORT]            = NVME_CMD_EFF_CSUPP,
    [NVME_ADM_CMD_SET_FEATURES]     = NVME_CMD_EFF_CSUPP,
    [NVME_ADM_CMD_GET_FEATURES]     = NVME_CMD_EFF_CSUPP,
    [NVME_ADM_CMD_ASYNC_EV_REQ]     = NVME_CMD_EFF_CSUPP,
};
static const uint32_t nvme_cse_iocs_none[256];

static const uint32_t nvme_cse_iocs_nvm[256] = {
    [NVME_CMD_FLUSH]                = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
    [NVME_CMD_WRITE_ZEROES]         = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
    [NVME_CMD_WRITE]                = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
    [NVME_CMD_READ]                 = NVME_CMD_EFF_CSUPP,
    [NVME_CMD_DSM]                  = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
    [NVME_CMD_COPY]                 = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
    [NVME_CMD_COMPARE]              = NVME_CMD_EFF_CSUPP,
};
static const uint32_t nvme_cse_iocs_zoned[256] = {
    [NVME_CMD_FLUSH]                = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
    [NVME_CMD_WRITE_ZEROES]         = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
    [NVME_CMD_WRITE]                = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
    [NVME_CMD_READ]                 = NVME_CMD_EFF_CSUPP,
    [NVME_CMD_DSM]                  = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
    [NVME_CMD_COPY]                 = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
    [NVME_CMD_COMPARE]              = NVME_CMD_EFF_CSUPP,
    [NVME_CMD_ZONE_APPEND]          = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
    [NVME_CMD_ZONE_MGMT_SEND]       = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
    [NVME_CMD_ZONE_MGMT_RECV]       = NVME_CMD_EFF_CSUPP,
};
static void nvme_process_sq(void *opaque);

static uint16_t nvme_cid(NvmeRequest *req)
{
    return le16_to_cpu(req->cqe.cid);
}

static uint16_t nvme_sqid(NvmeRequest *req)
{
    return le16_to_cpu(req->sq->sqid);
}
235 static void nvme_assign_zone_state(NvmeNamespace
*ns
, NvmeZone
*zone
,
238 if (QTAILQ_IN_USE(zone
, entry
)) {
239 switch (nvme_get_zone_state(zone
)) {
240 case NVME_ZONE_STATE_EXPLICITLY_OPEN
:
241 QTAILQ_REMOVE(&ns
->exp_open_zones
, zone
, entry
);
243 case NVME_ZONE_STATE_IMPLICITLY_OPEN
:
244 QTAILQ_REMOVE(&ns
->imp_open_zones
, zone
, entry
);
246 case NVME_ZONE_STATE_CLOSED
:
247 QTAILQ_REMOVE(&ns
->closed_zones
, zone
, entry
);
249 case NVME_ZONE_STATE_FULL
:
250 QTAILQ_REMOVE(&ns
->full_zones
, zone
, entry
);
256 nvme_set_zone_state(zone
, state
);
259 case NVME_ZONE_STATE_EXPLICITLY_OPEN
:
260 QTAILQ_INSERT_TAIL(&ns
->exp_open_zones
, zone
, entry
);
262 case NVME_ZONE_STATE_IMPLICITLY_OPEN
:
263 QTAILQ_INSERT_TAIL(&ns
->imp_open_zones
, zone
, entry
);
265 case NVME_ZONE_STATE_CLOSED
:
266 QTAILQ_INSERT_TAIL(&ns
->closed_zones
, zone
, entry
);
268 case NVME_ZONE_STATE_FULL
:
269 QTAILQ_INSERT_TAIL(&ns
->full_zones
, zone
, entry
);
270 case NVME_ZONE_STATE_READ_ONLY
:
/*
 * Check if we can open a zone without exceeding open/active limits.
 * AOR stands for "Active and Open Resources" (see TP 4053 section 2.5).
 */
static int nvme_aor_check(NvmeNamespace *ns, uint32_t act, uint32_t opn)
{
    if (ns->params.max_active_zones != 0 &&
        ns->nr_active_zones + act > ns->params.max_active_zones) {
        trace_pci_nvme_err_insuff_active_res(ns->params.max_active_zones);
        return NVME_ZONE_TOO_MANY_ACTIVE | NVME_DNR;
    }
    if (ns->params.max_open_zones != 0 &&
        ns->nr_open_zones + opn > ns->params.max_open_zones) {
        trace_pci_nvme_err_insuff_open_res(ns->params.max_open_zones);
        return NVME_ZONE_TOO_MANY_OPEN | NVME_DNR;
    }

    return NVME_SUCCESS;
}
297 static bool nvme_addr_is_cmb(NvmeCtrl
*n
, hwaddr addr
)
305 lo
= n
->params
.legacy_cmb
? n
->cmb
.mem
.addr
: n
->cmb
.cba
;
306 hi
= lo
+ int128_get64(n
->cmb
.mem
.size
);
308 return addr
>= lo
&& addr
< hi
;
311 static inline void *nvme_addr_to_cmb(NvmeCtrl
*n
, hwaddr addr
)
313 hwaddr base
= n
->params
.legacy_cmb
? n
->cmb
.mem
.addr
: n
->cmb
.cba
;
314 return &n
->cmb
.buf
[addr
- base
];
317 static bool nvme_addr_is_pmr(NvmeCtrl
*n
, hwaddr addr
)
325 hi
= n
->pmr
.cba
+ int128_get64(n
->pmr
.dev
->mr
.size
);
327 return addr
>= n
->pmr
.cba
&& addr
< hi
;
330 static inline void *nvme_addr_to_pmr(NvmeCtrl
*n
, hwaddr addr
)
332 return memory_region_get_ram_ptr(&n
->pmr
.dev
->mr
) + (addr
- n
->pmr
.cba
);
335 static int nvme_addr_read(NvmeCtrl
*n
, hwaddr addr
, void *buf
, int size
)
337 hwaddr hi
= addr
+ size
- 1;
342 if (n
->bar
.cmbsz
&& nvme_addr_is_cmb(n
, addr
) && nvme_addr_is_cmb(n
, hi
)) {
343 memcpy(buf
, nvme_addr_to_cmb(n
, addr
), size
);
347 if (nvme_addr_is_pmr(n
, addr
) && nvme_addr_is_pmr(n
, hi
)) {
348 memcpy(buf
, nvme_addr_to_pmr(n
, addr
), size
);
352 return pci_dma_read(&n
->parent_obj
, addr
, buf
, size
);
355 static bool nvme_nsid_valid(NvmeCtrl
*n
, uint32_t nsid
)
357 return nsid
&& (nsid
== NVME_NSID_BROADCAST
|| nsid
<= n
->num_namespaces
);
360 static int nvme_check_sqid(NvmeCtrl
*n
, uint16_t sqid
)
362 return sqid
< n
->params
.max_ioqpairs
+ 1 && n
->sq
[sqid
] != NULL
? 0 : -1;
365 static int nvme_check_cqid(NvmeCtrl
*n
, uint16_t cqid
)
367 return cqid
< n
->params
.max_ioqpairs
+ 1 && n
->cq
[cqid
] != NULL
? 0 : -1;
370 static void nvme_inc_cq_tail(NvmeCQueue
*cq
)
373 if (cq
->tail
>= cq
->size
) {
375 cq
->phase
= !cq
->phase
;
379 static void nvme_inc_sq_head(NvmeSQueue
*sq
)
381 sq
->head
= (sq
->head
+ 1) % sq
->size
;
384 static uint8_t nvme_cq_full(NvmeCQueue
*cq
)
386 return (cq
->tail
+ 1) % cq
->size
== cq
->head
;
389 static uint8_t nvme_sq_empty(NvmeSQueue
*sq
)
391 return sq
->head
== sq
->tail
;
394 static void nvme_irq_check(NvmeCtrl
*n
)
396 if (msix_enabled(&(n
->parent_obj
))) {
399 if (~n
->bar
.intms
& n
->irq_status
) {
400 pci_irq_assert(&n
->parent_obj
);
402 pci_irq_deassert(&n
->parent_obj
);
406 static void nvme_irq_assert(NvmeCtrl
*n
, NvmeCQueue
*cq
)
408 if (cq
->irq_enabled
) {
409 if (msix_enabled(&(n
->parent_obj
))) {
410 trace_pci_nvme_irq_msix(cq
->vector
);
411 msix_notify(&(n
->parent_obj
), cq
->vector
);
413 trace_pci_nvme_irq_pin();
414 assert(cq
->vector
< 32);
415 n
->irq_status
|= 1 << cq
->vector
;
419 trace_pci_nvme_irq_masked();
423 static void nvme_irq_deassert(NvmeCtrl
*n
, NvmeCQueue
*cq
)
425 if (cq
->irq_enabled
) {
426 if (msix_enabled(&(n
->parent_obj
))) {
429 assert(cq
->vector
< 32);
430 n
->irq_status
&= ~(1 << cq
->vector
);
436 static void nvme_req_clear(NvmeRequest
*req
)
440 memset(&req
->cqe
, 0x0, sizeof(req
->cqe
));
441 req
->status
= NVME_SUCCESS
;
444 static void nvme_req_exit(NvmeRequest
*req
)
447 qemu_sglist_destroy(&req
->qsg
);
451 qemu_iovec_destroy(&req
->iov
);
455 static uint16_t nvme_map_addr_cmb(NvmeCtrl
*n
, QEMUIOVector
*iov
, hwaddr addr
,
462 trace_pci_nvme_map_addr_cmb(addr
, len
);
464 if (!nvme_addr_is_cmb(n
, addr
) || !nvme_addr_is_cmb(n
, addr
+ len
- 1)) {
465 return NVME_DATA_TRAS_ERROR
;
468 qemu_iovec_add(iov
, nvme_addr_to_cmb(n
, addr
), len
);
473 static uint16_t nvme_map_addr_pmr(NvmeCtrl
*n
, QEMUIOVector
*iov
, hwaddr addr
,
480 if (!nvme_addr_is_pmr(n
, addr
) || !nvme_addr_is_pmr(n
, addr
+ len
- 1)) {
481 return NVME_DATA_TRAS_ERROR
;
484 qemu_iovec_add(iov
, nvme_addr_to_pmr(n
, addr
), len
);
489 static uint16_t nvme_map_addr(NvmeCtrl
*n
, QEMUSGList
*qsg
, QEMUIOVector
*iov
,
490 hwaddr addr
, size_t len
)
492 bool cmb
= false, pmr
= false;
498 trace_pci_nvme_map_addr(addr
, len
);
500 if (nvme_addr_is_cmb(n
, addr
)) {
502 } else if (nvme_addr_is_pmr(n
, addr
)) {
507 if (qsg
&& qsg
->sg
) {
508 return NVME_INVALID_USE_OF_CMB
| NVME_DNR
;
514 qemu_iovec_init(iov
, 1);
518 return nvme_map_addr_cmb(n
, iov
, addr
, len
);
520 return nvme_map_addr_pmr(n
, iov
, addr
, len
);
524 if (iov
&& iov
->iov
) {
525 return NVME_INVALID_USE_OF_CMB
| NVME_DNR
;
531 pci_dma_sglist_init(qsg
, &n
->parent_obj
, 1);
534 qemu_sglist_add(qsg
, addr
, len
);
539 static uint16_t nvme_map_prp(NvmeCtrl
*n
, uint64_t prp1
, uint64_t prp2
,
540 uint32_t len
, NvmeRequest
*req
)
542 hwaddr trans_len
= n
->page_size
- (prp1
% n
->page_size
);
543 trans_len
= MIN(len
, trans_len
);
544 int num_prps
= (len
>> n
->page_bits
) + 1;
548 QEMUSGList
*qsg
= &req
->qsg
;
549 QEMUIOVector
*iov
= &req
->iov
;
551 trace_pci_nvme_map_prp(trans_len
, len
, prp1
, prp2
, num_prps
);
553 if (nvme_addr_is_cmb(n
, prp1
) || (nvme_addr_is_pmr(n
, prp1
))) {
554 qemu_iovec_init(iov
, num_prps
);
556 pci_dma_sglist_init(qsg
, &n
->parent_obj
, num_prps
);
559 status
= nvme_map_addr(n
, qsg
, iov
, prp1
, trans_len
);
566 if (len
> n
->page_size
) {
567 uint64_t prp_list
[n
->max_prp_ents
];
568 uint32_t nents
, prp_trans
;
571 nents
= (len
+ n
->page_size
- 1) >> n
->page_bits
;
572 prp_trans
= MIN(n
->max_prp_ents
, nents
) * sizeof(uint64_t);
573 ret
= nvme_addr_read(n
, prp2
, (void *)prp_list
, prp_trans
);
575 trace_pci_nvme_err_addr_read(prp2
);
576 return NVME_DATA_TRAS_ERROR
;
579 uint64_t prp_ent
= le64_to_cpu(prp_list
[i
]);
581 if (i
== n
->max_prp_ents
- 1 && len
> n
->page_size
) {
582 if (unlikely(prp_ent
& (n
->page_size
- 1))) {
583 trace_pci_nvme_err_invalid_prplist_ent(prp_ent
);
584 return NVME_INVALID_PRP_OFFSET
| NVME_DNR
;
588 nents
= (len
+ n
->page_size
- 1) >> n
->page_bits
;
589 prp_trans
= MIN(n
->max_prp_ents
, nents
) * sizeof(uint64_t);
590 ret
= nvme_addr_read(n
, prp_ent
, (void *)prp_list
,
593 trace_pci_nvme_err_addr_read(prp_ent
);
594 return NVME_DATA_TRAS_ERROR
;
596 prp_ent
= le64_to_cpu(prp_list
[i
]);
599 if (unlikely(prp_ent
& (n
->page_size
- 1))) {
600 trace_pci_nvme_err_invalid_prplist_ent(prp_ent
);
601 return NVME_INVALID_PRP_OFFSET
| NVME_DNR
;
604 trans_len
= MIN(len
, n
->page_size
);
605 status
= nvme_map_addr(n
, qsg
, iov
, prp_ent
, trans_len
);
614 if (unlikely(prp2
& (n
->page_size
- 1))) {
615 trace_pci_nvme_err_invalid_prp2_align(prp2
);
616 return NVME_INVALID_PRP_OFFSET
| NVME_DNR
;
618 status
= nvme_map_addr(n
, qsg
, iov
, prp2
, len
);
 * Map 'nsgld' data descriptors from 'segment'. The function will subtract the
 * number of bytes mapped from *len.
632 static uint16_t nvme_map_sgl_data(NvmeCtrl
*n
, QEMUSGList
*qsg
,
634 NvmeSglDescriptor
*segment
, uint64_t nsgld
,
635 size_t *len
, NvmeRequest
*req
)
637 dma_addr_t addr
, trans_len
;
641 for (int i
= 0; i
< nsgld
; i
++) {
642 uint8_t type
= NVME_SGL_TYPE(segment
[i
].type
);
645 case NVME_SGL_DESCR_TYPE_BIT_BUCKET
:
646 if (req
->cmd
.opcode
== NVME_CMD_WRITE
) {
649 case NVME_SGL_DESCR_TYPE_DATA_BLOCK
:
651 case NVME_SGL_DESCR_TYPE_SEGMENT
:
652 case NVME_SGL_DESCR_TYPE_LAST_SEGMENT
:
653 return NVME_INVALID_NUM_SGL_DESCRS
| NVME_DNR
;
655 return NVME_SGL_DESCR_TYPE_INVALID
| NVME_DNR
;
658 dlen
= le32_to_cpu(segment
[i
].len
);
666 * All data has been mapped, but the SGL contains additional
667 * segments and/or descriptors. The controller might accept
668 * ignoring the rest of the SGL.
670 uint32_t sgls
= le32_to_cpu(n
->id_ctrl
.sgls
);
671 if (sgls
& NVME_CTRL_SGLS_EXCESS_LENGTH
) {
675 trace_pci_nvme_err_invalid_sgl_excess_length(nvme_cid(req
));
676 return NVME_DATA_SGL_LEN_INVALID
| NVME_DNR
;
679 trans_len
= MIN(*len
, dlen
);
681 if (type
== NVME_SGL_DESCR_TYPE_BIT_BUCKET
) {
685 addr
= le64_to_cpu(segment
[i
].addr
);
687 if (UINT64_MAX
- addr
< dlen
) {
688 return NVME_DATA_SGL_LEN_INVALID
| NVME_DNR
;
691 status
= nvme_map_addr(n
, qsg
, iov
, addr
, trans_len
);
703 static uint16_t nvme_map_sgl(NvmeCtrl
*n
, QEMUSGList
*qsg
, QEMUIOVector
*iov
,
704 NvmeSglDescriptor sgl
, size_t len
,
708 * Read the segment in chunks of 256 descriptors (one 4k page) to avoid
709 * dynamically allocating a potentially huge SGL. The spec allows the SGL
710 * to be larger (as in number of bytes required to describe the SGL
711 * descriptors and segment chain) than the command transfer size, so it is
712 * not bounded by MDTS.
714 const int SEG_CHUNK_SIZE
= 256;
716 NvmeSglDescriptor segment
[SEG_CHUNK_SIZE
], *sgld
, *last_sgld
;
724 addr
= le64_to_cpu(sgl
.addr
);
726 trace_pci_nvme_map_sgl(nvme_cid(req
), NVME_SGL_TYPE(sgl
.type
), len
);
729 * If the entire transfer can be described with a single data block it can
730 * be mapped directly.
732 if (NVME_SGL_TYPE(sgl
.type
) == NVME_SGL_DESCR_TYPE_DATA_BLOCK
) {
733 status
= nvme_map_sgl_data(n
, qsg
, iov
, sgld
, 1, &len
, req
);
742 switch (NVME_SGL_TYPE(sgld
->type
)) {
743 case NVME_SGL_DESCR_TYPE_SEGMENT
:
744 case NVME_SGL_DESCR_TYPE_LAST_SEGMENT
:
747 return NVME_INVALID_SGL_SEG_DESCR
| NVME_DNR
;
750 seg_len
= le32_to_cpu(sgld
->len
);
752 /* check the length of the (Last) Segment descriptor */
753 if ((!seg_len
|| seg_len
& 0xf) &&
754 (NVME_SGL_TYPE(sgld
->type
) != NVME_SGL_DESCR_TYPE_BIT_BUCKET
)) {
755 return NVME_INVALID_SGL_SEG_DESCR
| NVME_DNR
;
758 if (UINT64_MAX
- addr
< seg_len
) {
759 return NVME_DATA_SGL_LEN_INVALID
| NVME_DNR
;
762 nsgld
= seg_len
/ sizeof(NvmeSglDescriptor
);
764 while (nsgld
> SEG_CHUNK_SIZE
) {
765 if (nvme_addr_read(n
, addr
, segment
, sizeof(segment
))) {
766 trace_pci_nvme_err_addr_read(addr
);
767 status
= NVME_DATA_TRAS_ERROR
;
771 status
= nvme_map_sgl_data(n
, qsg
, iov
, segment
, SEG_CHUNK_SIZE
,
777 nsgld
-= SEG_CHUNK_SIZE
;
778 addr
+= SEG_CHUNK_SIZE
* sizeof(NvmeSglDescriptor
);
781 ret
= nvme_addr_read(n
, addr
, segment
, nsgld
*
782 sizeof(NvmeSglDescriptor
));
784 trace_pci_nvme_err_addr_read(addr
);
785 status
= NVME_DATA_TRAS_ERROR
;
789 last_sgld
= &segment
[nsgld
- 1];
792 * If the segment ends with a Data Block or Bit Bucket Descriptor Type,
795 switch (NVME_SGL_TYPE(last_sgld
->type
)) {
796 case NVME_SGL_DESCR_TYPE_DATA_BLOCK
:
797 case NVME_SGL_DESCR_TYPE_BIT_BUCKET
:
798 status
= nvme_map_sgl_data(n
, qsg
, iov
, segment
, nsgld
, &len
, req
);
810 * If the last descriptor was not a Data Block or Bit Bucket, then the
811 * current segment must not be a Last Segment.
813 if (NVME_SGL_TYPE(sgld
->type
) == NVME_SGL_DESCR_TYPE_LAST_SEGMENT
) {
814 status
= NVME_INVALID_SGL_SEG_DESCR
| NVME_DNR
;
819 addr
= le64_to_cpu(sgld
->addr
);
822 * Do not map the last descriptor; it will be a Segment or Last Segment
823 * descriptor and is handled by the next iteration.
825 status
= nvme_map_sgl_data(n
, qsg
, iov
, segment
, nsgld
- 1, &len
, req
);
832 /* if there is any residual left in len, the SGL was too short */
834 status
= NVME_DATA_SGL_LEN_INVALID
| NVME_DNR
;
842 qemu_iovec_destroy(iov
);
846 qemu_sglist_destroy(qsg
);
852 static uint16_t nvme_map_dptr(NvmeCtrl
*n
, size_t len
, NvmeRequest
*req
)
856 switch (NVME_CMD_FLAGS_PSDT(req
->cmd
.flags
)) {
858 prp1
= le64_to_cpu(req
->cmd
.dptr
.prp1
);
859 prp2
= le64_to_cpu(req
->cmd
.dptr
.prp2
);
861 return nvme_map_prp(n
, prp1
, prp2
, len
, req
);
862 case NVME_PSDT_SGL_MPTR_CONTIGUOUS
:
863 case NVME_PSDT_SGL_MPTR_SGL
:
864 /* SGLs shall not be used for Admin commands in NVMe over PCIe */
865 if (!req
->sq
->sqid
) {
866 return NVME_INVALID_FIELD
| NVME_DNR
;
869 return nvme_map_sgl(n
, &req
->qsg
, &req
->iov
, req
->cmd
.dptr
.sgl
, len
,
872 return NVME_INVALID_FIELD
;
876 static uint16_t nvme_dma(NvmeCtrl
*n
, uint8_t *ptr
, uint32_t len
,
877 DMADirection dir
, NvmeRequest
*req
)
879 uint16_t status
= NVME_SUCCESS
;
881 status
= nvme_map_dptr(n
, len
, req
);
886 /* assert that only one of qsg and iov carries data */
887 assert((req
->qsg
.nsg
> 0) != (req
->iov
.niov
> 0));
889 if (req
->qsg
.nsg
> 0) {
892 if (dir
== DMA_DIRECTION_TO_DEVICE
) {
893 residual
= dma_buf_write(ptr
, len
, &req
->qsg
);
895 residual
= dma_buf_read(ptr
, len
, &req
->qsg
);
898 if (unlikely(residual
)) {
899 trace_pci_nvme_err_invalid_dma();
900 status
= NVME_INVALID_FIELD
| NVME_DNR
;
905 if (dir
== DMA_DIRECTION_TO_DEVICE
) {
906 bytes
= qemu_iovec_to_buf(&req
->iov
, 0, ptr
, len
);
908 bytes
= qemu_iovec_from_buf(&req
->iov
, 0, ptr
, len
);
911 if (unlikely(bytes
!= len
)) {
912 trace_pci_nvme_err_invalid_dma();
913 status
= NVME_INVALID_FIELD
| NVME_DNR
;
920 static void nvme_post_cqes(void *opaque
)
922 NvmeCQueue
*cq
= opaque
;
923 NvmeCtrl
*n
= cq
->ctrl
;
924 NvmeRequest
*req
, *next
;
927 QTAILQ_FOREACH_SAFE(req
, &cq
->req_list
, entry
, next
) {
931 if (nvme_cq_full(cq
)) {
936 req
->cqe
.status
= cpu_to_le16((req
->status
<< 1) | cq
->phase
);
937 req
->cqe
.sq_id
= cpu_to_le16(sq
->sqid
);
938 req
->cqe
.sq_head
= cpu_to_le16(sq
->head
);
939 addr
= cq
->dma_addr
+ cq
->tail
* n
->cqe_size
;
940 ret
= pci_dma_write(&n
->parent_obj
, addr
, (void *)&req
->cqe
,
943 trace_pci_nvme_err_addr_write(addr
);
944 trace_pci_nvme_err_cfs();
945 n
->bar
.csts
= NVME_CSTS_FAILED
;
948 QTAILQ_REMOVE(&cq
->req_list
, req
, entry
);
949 nvme_inc_cq_tail(cq
);
951 QTAILQ_INSERT_TAIL(&sq
->req_list
, req
, entry
);
953 if (cq
->tail
!= cq
->head
) {
954 nvme_irq_assert(n
, cq
);
958 static void nvme_enqueue_req_completion(NvmeCQueue
*cq
, NvmeRequest
*req
)
960 assert(cq
->cqid
== req
->sq
->cqid
);
961 trace_pci_nvme_enqueue_req_completion(nvme_cid(req
), cq
->cqid
,
965 trace_pci_nvme_err_req_status(nvme_cid(req
), nvme_nsid(req
->ns
),
966 req
->status
, req
->cmd
.opcode
);
969 QTAILQ_REMOVE(&req
->sq
->out_req_list
, req
, entry
);
970 QTAILQ_INSERT_TAIL(&cq
->req_list
, req
, entry
);
971 timer_mod(cq
->timer
, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL
) + 500);
974 static void nvme_process_aers(void *opaque
)
976 NvmeCtrl
*n
= opaque
;
977 NvmeAsyncEvent
*event
, *next
;
979 trace_pci_nvme_process_aers(n
->aer_queued
);
981 QTAILQ_FOREACH_SAFE(event
, &n
->aer_queue
, entry
, next
) {
983 NvmeAerResult
*result
;
985 /* can't post cqe if there is nothing to complete */
986 if (!n
->outstanding_aers
) {
987 trace_pci_nvme_no_outstanding_aers();
991 /* ignore if masked (cqe posted, but event not cleared) */
992 if (n
->aer_mask
& (1 << event
->result
.event_type
)) {
993 trace_pci_nvme_aer_masked(event
->result
.event_type
, n
->aer_mask
);
997 QTAILQ_REMOVE(&n
->aer_queue
, event
, entry
);
1000 n
->aer_mask
|= 1 << event
->result
.event_type
;
1001 n
->outstanding_aers
--;
1003 req
= n
->aer_reqs
[n
->outstanding_aers
];
1005 result
= (NvmeAerResult
*) &req
->cqe
.result
;
1006 result
->event_type
= event
->result
.event_type
;
1007 result
->event_info
= event
->result
.event_info
;
1008 result
->log_page
= event
->result
.log_page
;
1011 trace_pci_nvme_aer_post_cqe(result
->event_type
, result
->event_info
,
1014 nvme_enqueue_req_completion(&n
->admin_cq
, req
);
1018 static void nvme_enqueue_event(NvmeCtrl
*n
, uint8_t event_type
,
1019 uint8_t event_info
, uint8_t log_page
)
1021 NvmeAsyncEvent
*event
;
1023 trace_pci_nvme_enqueue_event(event_type
, event_info
, log_page
);
1025 if (n
->aer_queued
== n
->params
.aer_max_queued
) {
1026 trace_pci_nvme_enqueue_event_noqueue(n
->aer_queued
);
1030 event
= g_new(NvmeAsyncEvent
, 1);
1031 event
->result
= (NvmeAerResult
) {
1032 .event_type
= event_type
,
1033 .event_info
= event_info
,
1034 .log_page
= log_page
,
1037 QTAILQ_INSERT_TAIL(&n
->aer_queue
, event
, entry
);
1040 nvme_process_aers(n
);
1043 static void nvme_smart_event(NvmeCtrl
*n
, uint8_t event
)
    /* Ref SPEC <Asynchronous Event Information - SMART / Health Status> */
1048 if (!(NVME_AEC_SMART(n
->features
.async_config
) & event
)) {
1053 case NVME_SMART_SPARE
:
1054 aer_info
= NVME_AER_INFO_SMART_SPARE_THRESH
;
1056 case NVME_SMART_TEMPERATURE
:
1057 aer_info
= NVME_AER_INFO_SMART_TEMP_THRESH
;
1059 case NVME_SMART_RELIABILITY
:
1060 case NVME_SMART_MEDIA_READ_ONLY
:
1061 case NVME_SMART_FAILED_VOLATILE_MEDIA
:
1062 case NVME_SMART_PMR_UNRELIABLE
:
1063 aer_info
= NVME_AER_INFO_SMART_RELIABILITY
;
1069 nvme_enqueue_event(n
, NVME_AER_TYPE_SMART
, aer_info
, NVME_LOG_SMART_INFO
);
1072 static void nvme_clear_events(NvmeCtrl
*n
, uint8_t event_type
)
1074 n
->aer_mask
&= ~(1 << event_type
);
1075 if (!QTAILQ_EMPTY(&n
->aer_queue
)) {
1076 nvme_process_aers(n
);
1080 static inline uint16_t nvme_check_mdts(NvmeCtrl
*n
, size_t len
)
1082 uint8_t mdts
= n
->params
.mdts
;
1084 if (mdts
&& len
> n
->page_size
<< mdts
) {
1085 trace_pci_nvme_err_mdts(len
);
1086 return NVME_INVALID_FIELD
| NVME_DNR
;
1089 return NVME_SUCCESS
;
1092 static inline uint16_t nvme_check_bounds(NvmeNamespace
*ns
, uint64_t slba
,
1095 uint64_t nsze
= le64_to_cpu(ns
->id_ns
.nsze
);
1097 if (unlikely(UINT64_MAX
- slba
< nlb
|| slba
+ nlb
> nsze
)) {
1098 return NVME_LBA_RANGE
| NVME_DNR
;
1101 return NVME_SUCCESS
;
1104 static uint16_t nvme_check_dulbe(NvmeNamespace
*ns
, uint64_t slba
,
1107 BlockDriverState
*bs
= blk_bs(ns
->blkconf
.blk
);
1109 int64_t pnum
= 0, bytes
= nvme_l2b(ns
, nlb
);
1110 int64_t offset
= nvme_l2b(ns
, slba
);
1114 Error
*local_err
= NULL
;
     * `pnum` holds the number of bytes after offset that share the same
     * allocation status as the byte at offset. If `pnum` is different from
     * `bytes`, we should check the allocation status of the next range and
     * continue this until all bytes have been checked.
1125 ret
= bdrv_block_status(bs
, offset
, bytes
, &pnum
, NULL
, NULL
);
1127 error_setg_errno(&local_err
, -ret
, "unable to get block status");
1128 error_report_err(local_err
);
1130 return NVME_INTERNAL_DEV_ERROR
;
1133 zeroed
= !!(ret
& BDRV_BLOCK_ZERO
);
1135 trace_pci_nvme_block_status(offset
, bytes
, pnum
, ret
, zeroed
);
1142 } while (pnum
!= bytes
);
1144 return NVME_SUCCESS
;
1147 static void nvme_aio_err(NvmeRequest
*req
, int ret
)
1149 uint16_t status
= NVME_SUCCESS
;
1150 Error
*local_err
= NULL
;
1152 switch (req
->cmd
.opcode
) {
1154 status
= NVME_UNRECOVERED_READ
;
1156 case NVME_CMD_FLUSH
:
1157 case NVME_CMD_WRITE
:
1158 case NVME_CMD_WRITE_ZEROES
:
1159 case NVME_CMD_ZONE_APPEND
:
1160 status
= NVME_WRITE_FAULT
;
1163 status
= NVME_INTERNAL_DEV_ERROR
;
1167 trace_pci_nvme_err_aio(nvme_cid(req
), strerror(ret
), status
);
1169 error_setg_errno(&local_err
, -ret
, "aio failed");
1170 error_report_err(local_err
);
1173 * Set the command status code to the first encountered error but allow a
1174 * subsequent Internal Device Error to trump it.
1176 if (req
->status
&& status
!= NVME_INTERNAL_DEV_ERROR
) {
1180 req
->status
= status
;
1183 static inline uint32_t nvme_zone_idx(NvmeNamespace
*ns
, uint64_t slba
)
1185 return ns
->zone_size_log2
> 0 ? slba
>> ns
->zone_size_log2
:
1186 slba
/ ns
->zone_size
;
1189 static inline NvmeZone
*nvme_get_zone_by_slba(NvmeNamespace
*ns
, uint64_t slba
)
1191 uint32_t zone_idx
= nvme_zone_idx(ns
, slba
);
1193 assert(zone_idx
< ns
->num_zones
);
1194 return &ns
->zone_array
[zone_idx
];
1197 static uint16_t nvme_check_zone_state_for_write(NvmeZone
*zone
)
1199 uint64_t zslba
= zone
->d
.zslba
;
1201 switch (nvme_get_zone_state(zone
)) {
1202 case NVME_ZONE_STATE_EMPTY
:
1203 case NVME_ZONE_STATE_IMPLICITLY_OPEN
:
1204 case NVME_ZONE_STATE_EXPLICITLY_OPEN
:
1205 case NVME_ZONE_STATE_CLOSED
:
1206 return NVME_SUCCESS
;
1207 case NVME_ZONE_STATE_FULL
:
1208 trace_pci_nvme_err_zone_is_full(zslba
);
1209 return NVME_ZONE_FULL
;
1210 case NVME_ZONE_STATE_OFFLINE
:
1211 trace_pci_nvme_err_zone_is_offline(zslba
);
1212 return NVME_ZONE_OFFLINE
;
1213 case NVME_ZONE_STATE_READ_ONLY
:
1214 trace_pci_nvme_err_zone_is_read_only(zslba
);
1215 return NVME_ZONE_READ_ONLY
;
1220 return NVME_INTERNAL_DEV_ERROR
;
1223 static uint16_t nvme_check_zone_write(NvmeNamespace
*ns
, NvmeZone
*zone
,
1224 uint64_t slba
, uint32_t nlb
)
1226 uint64_t zcap
= nvme_zone_wr_boundary(zone
);
1229 status
= nvme_check_zone_state_for_write(zone
);
1234 if (unlikely(slba
!= zone
->w_ptr
)) {
1235 trace_pci_nvme_err_write_not_at_wp(slba
, zone
->d
.zslba
, zone
->w_ptr
);
1236 return NVME_ZONE_INVALID_WRITE
;
1239 if (unlikely((slba
+ nlb
) > zcap
)) {
1240 trace_pci_nvme_err_zone_boundary(slba
, nlb
, zcap
);
1241 return NVME_ZONE_BOUNDARY_ERROR
;
1244 return NVME_SUCCESS
;
1247 static uint16_t nvme_check_zone_state_for_read(NvmeZone
*zone
)
1249 switch (nvme_get_zone_state(zone
)) {
1250 case NVME_ZONE_STATE_EMPTY
:
1251 case NVME_ZONE_STATE_IMPLICITLY_OPEN
:
1252 case NVME_ZONE_STATE_EXPLICITLY_OPEN
:
1253 case NVME_ZONE_STATE_FULL
:
1254 case NVME_ZONE_STATE_CLOSED
:
1255 case NVME_ZONE_STATE_READ_ONLY
:
1256 return NVME_SUCCESS
;
1257 case NVME_ZONE_STATE_OFFLINE
:
1258 trace_pci_nvme_err_zone_is_offline(zone
->d
.zslba
);
1259 return NVME_ZONE_OFFLINE
;
1264 return NVME_INTERNAL_DEV_ERROR
;
1267 static uint16_t nvme_check_zone_read(NvmeNamespace
*ns
, uint64_t slba
,
1270 NvmeZone
*zone
= nvme_get_zone_by_slba(ns
, slba
);
1271 uint64_t bndry
= nvme_zone_rd_boundary(ns
, zone
);
1272 uint64_t end
= slba
+ nlb
;
1275 status
= nvme_check_zone_state_for_read(zone
);
1278 } else if (unlikely(end
> bndry
)) {
1279 if (!ns
->params
.cross_zone_read
) {
1280 status
= NVME_ZONE_BOUNDARY_ERROR
;
1283 * Read across zone boundary - check that all subsequent
1284 * zones that are being read have an appropriate state.
1288 status
= nvme_check_zone_state_for_read(zone
);
1292 } while (end
> nvme_zone_rd_boundary(ns
, zone
));
1299 static uint16_t nvme_zrm_finish(NvmeNamespace
*ns
, NvmeZone
*zone
)
1301 switch (nvme_get_zone_state(zone
)) {
1302 case NVME_ZONE_STATE_FULL
:
1303 return NVME_SUCCESS
;
1305 case NVME_ZONE_STATE_IMPLICITLY_OPEN
:
1306 case NVME_ZONE_STATE_EXPLICITLY_OPEN
:
1307 nvme_aor_dec_open(ns
);
1309 case NVME_ZONE_STATE_CLOSED
:
1310 nvme_aor_dec_active(ns
);
1312 case NVME_ZONE_STATE_EMPTY
:
1313 nvme_assign_zone_state(ns
, zone
, NVME_ZONE_STATE_FULL
);
1314 return NVME_SUCCESS
;
1317 return NVME_ZONE_INVAL_TRANSITION
;
1321 static uint16_t nvme_zrm_close(NvmeNamespace
*ns
, NvmeZone
*zone
)
1323 switch (nvme_get_zone_state(zone
)) {
1324 case NVME_ZONE_STATE_EXPLICITLY_OPEN
:
1325 case NVME_ZONE_STATE_IMPLICITLY_OPEN
:
1326 nvme_aor_dec_open(ns
);
1327 nvme_assign_zone_state(ns
, zone
, NVME_ZONE_STATE_CLOSED
);
1329 case NVME_ZONE_STATE_CLOSED
:
1330 return NVME_SUCCESS
;
1333 return NVME_ZONE_INVAL_TRANSITION
;
1337 static void nvme_zrm_auto_transition_zone(NvmeNamespace
*ns
)
1341 if (ns
->params
.max_open_zones
&&
1342 ns
->nr_open_zones
== ns
->params
.max_open_zones
) {
1343 zone
= QTAILQ_FIRST(&ns
->imp_open_zones
);
1346 * Automatically close this implicitly open zone.
1348 QTAILQ_REMOVE(&ns
->imp_open_zones
, zone
, entry
);
1349 nvme_zrm_close(ns
, zone
);
1354 static uint16_t __nvme_zrm_open(NvmeNamespace
*ns
, NvmeZone
*zone
,
1360 switch (nvme_get_zone_state(zone
)) {
1361 case NVME_ZONE_STATE_EMPTY
:
1366 case NVME_ZONE_STATE_CLOSED
:
1367 nvme_zrm_auto_transition_zone(ns
);
1368 status
= nvme_aor_check(ns
, act
, 1);
1374 nvme_aor_inc_active(ns
);
1377 nvme_aor_inc_open(ns
);
1380 nvme_assign_zone_state(ns
, zone
, NVME_ZONE_STATE_IMPLICITLY_OPEN
);
1381 return NVME_SUCCESS
;
1386 case NVME_ZONE_STATE_IMPLICITLY_OPEN
:
1388 return NVME_SUCCESS
;
1391 nvme_assign_zone_state(ns
, zone
, NVME_ZONE_STATE_EXPLICITLY_OPEN
);
1395 case NVME_ZONE_STATE_EXPLICITLY_OPEN
:
1396 return NVME_SUCCESS
;
1399 return NVME_ZONE_INVAL_TRANSITION
;
1403 static inline uint16_t nvme_zrm_auto(NvmeNamespace
*ns
, NvmeZone
*zone
)
1405 return __nvme_zrm_open(ns
, zone
, true);
1408 static inline uint16_t nvme_zrm_open(NvmeNamespace
*ns
, NvmeZone
*zone
)
1410 return __nvme_zrm_open(ns
, zone
, false);
1413 static void __nvme_advance_zone_wp(NvmeNamespace
*ns
, NvmeZone
*zone
,
1418 if (zone
->d
.wp
== nvme_zone_wr_boundary(zone
)) {
1419 nvme_zrm_finish(ns
, zone
);
1423 static void nvme_finalize_zoned_write(NvmeNamespace
*ns
, NvmeRequest
*req
)
1425 NvmeRwCmd
*rw
= (NvmeRwCmd
*)&req
->cmd
;
1430 slba
= le64_to_cpu(rw
->slba
);
1431 nlb
= le16_to_cpu(rw
->nlb
) + 1;
1432 zone
= nvme_get_zone_by_slba(ns
, slba
);
1434 __nvme_advance_zone_wp(ns
, zone
, nlb
);
1437 static inline bool nvme_is_write(NvmeRequest
*req
)
1439 NvmeRwCmd
*rw
= (NvmeRwCmd
*)&req
->cmd
;
1441 return rw
->opcode
== NVME_CMD_WRITE
||
1442 rw
->opcode
== NVME_CMD_ZONE_APPEND
||
1443 rw
->opcode
== NVME_CMD_WRITE_ZEROES
;
1446 static void nvme_rw_cb(void *opaque
, int ret
)
1448 NvmeRequest
*req
= opaque
;
1449 NvmeNamespace
*ns
= req
->ns
;
1451 BlockBackend
*blk
= ns
->blkconf
.blk
;
1452 BlockAcctCookie
*acct
= &req
->acct
;
1453 BlockAcctStats
*stats
= blk_get_stats(blk
);
1455 trace_pci_nvme_rw_cb(nvme_cid(req
), blk_name(blk
));
1457 if (ns
->params
.zoned
&& nvme_is_write(req
)) {
1458 nvme_finalize_zoned_write(ns
, req
);
1462 block_acct_done(stats
, acct
);
1464 block_acct_failed(stats
, acct
);
1465 nvme_aio_err(req
, ret
);
1468 nvme_enqueue_req_completion(nvme_cq(req
), req
);
1471 struct nvme_aio_flush_ctx
{
1474 BlockAcctCookie acct
;
1477 static void nvme_aio_flush_cb(void *opaque
, int ret
)
1479 struct nvme_aio_flush_ctx
*ctx
= opaque
;
1480 NvmeRequest
*req
= ctx
->req
;
1481 uintptr_t *num_flushes
= (uintptr_t *)&req
->opaque
;
1483 BlockBackend
*blk
= ctx
->ns
->blkconf
.blk
;
1484 BlockAcctCookie
*acct
= &ctx
->acct
;
1485 BlockAcctStats
*stats
= blk_get_stats(blk
);
1487 trace_pci_nvme_aio_flush_cb(nvme_cid(req
), blk_name(blk
));
1490 block_acct_done(stats
, acct
);
1492 block_acct_failed(stats
, acct
);
1493 nvme_aio_err(req
, ret
);
1503 nvme_enqueue_req_completion(nvme_cq(req
), req
);
1506 static void nvme_aio_discard_cb(void *opaque
, int ret
)
1508 NvmeRequest
*req
= opaque
;
1509 uintptr_t *discards
= (uintptr_t *)&req
->opaque
;
1511 trace_pci_nvme_aio_discard_cb(nvme_cid(req
));
1514 nvme_aio_err(req
, ret
);
1523 nvme_enqueue_req_completion(nvme_cq(req
), req
);
1526 struct nvme_zone_reset_ctx
{
1531 static void nvme_aio_zone_reset_cb(void *opaque
, int ret
)
1533 struct nvme_zone_reset_ctx
*ctx
= opaque
;
1534 NvmeRequest
*req
= ctx
->req
;
1535 NvmeNamespace
*ns
= req
->ns
;
1536 NvmeZone
*zone
= ctx
->zone
;
1537 uintptr_t *resets
= (uintptr_t *)&req
->opaque
;
1541 trace_pci_nvme_aio_zone_reset_cb(nvme_cid(req
), zone
->d
.zslba
);
1544 switch (nvme_get_zone_state(zone
)) {
1545 case NVME_ZONE_STATE_EXPLICITLY_OPEN
:
1546 case NVME_ZONE_STATE_IMPLICITLY_OPEN
:
1547 nvme_aor_dec_open(ns
);
1549 case NVME_ZONE_STATE_CLOSED
:
1550 nvme_aor_dec_active(ns
);
1552 case NVME_ZONE_STATE_FULL
:
1553 zone
->w_ptr
= zone
->d
.zslba
;
1554 zone
->d
.wp
= zone
->w_ptr
;
1555 nvme_assign_zone_state(ns
, zone
, NVME_ZONE_STATE_EMPTY
);
1561 nvme_aio_err(req
, ret
);
1570 nvme_enqueue_req_completion(nvme_cq(req
), req
);
1573 struct nvme_copy_ctx
{
1579 struct nvme_copy_in_ctx
{
1584 static void nvme_copy_cb(void *opaque
, int ret
)
1586 NvmeRequest
*req
= opaque
;
1587 NvmeNamespace
*ns
= req
->ns
;
1588 struct nvme_copy_ctx
*ctx
= req
->opaque
;
1590 trace_pci_nvme_copy_cb(nvme_cid(req
));
1592 if (ns
->params
.zoned
) {
1593 NvmeCopyCmd
*copy
= (NvmeCopyCmd
*)&req
->cmd
;
1594 uint64_t sdlba
= le64_to_cpu(copy
->sdlba
);
1595 NvmeZone
*zone
= nvme_get_zone_by_slba(ns
, sdlba
);
1597 __nvme_advance_zone_wp(ns
, zone
, ctx
->nlb
);
1601 block_acct_done(blk_get_stats(ns
->blkconf
.blk
), &req
->acct
);
1603 block_acct_failed(blk_get_stats(ns
->blkconf
.blk
), &req
->acct
);
1604 nvme_aio_err(req
, ret
);
1607 g_free(ctx
->bounce
);
1610 nvme_enqueue_req_completion(nvme_cq(req
), req
);
1613 static void nvme_copy_in_complete(NvmeRequest
*req
)
1615 NvmeNamespace
*ns
= req
->ns
;
1616 NvmeCopyCmd
*copy
= (NvmeCopyCmd
*)&req
->cmd
;
1617 struct nvme_copy_ctx
*ctx
= req
->opaque
;
1618 uint64_t sdlba
= le64_to_cpu(copy
->sdlba
);
1621 trace_pci_nvme_copy_in_complete(nvme_cid(req
));
1623 block_acct_done(blk_get_stats(ns
->blkconf
.blk
), &req
->acct
);
1625 status
= nvme_check_bounds(ns
, sdlba
, ctx
->nlb
);
1627 trace_pci_nvme_err_invalid_lba_range(sdlba
, ctx
->nlb
, ns
->id_ns
.nsze
);
1631 if (ns
->params
.zoned
) {
1632 NvmeZone
*zone
= nvme_get_zone_by_slba(ns
, sdlba
);
1634 status
= nvme_check_zone_write(ns
, zone
, sdlba
, ctx
->nlb
);
1639 status
= nvme_zrm_auto(ns
, zone
);
1644 zone
->w_ptr
+= ctx
->nlb
;
1647 qemu_iovec_init(&req
->iov
, 1);
1648 qemu_iovec_add(&req
->iov
, ctx
->bounce
, nvme_l2b(ns
, ctx
->nlb
));
1650 block_acct_start(blk_get_stats(ns
->blkconf
.blk
), &req
->acct
, 0,
1653 req
->aiocb
= blk_aio_pwritev(ns
->blkconf
.blk
, nvme_l2b(ns
, sdlba
),
1654 &req
->iov
, 0, nvme_copy_cb
, req
);
1659 req
->status
= status
;
1661 g_free(ctx
->bounce
);
1664 nvme_enqueue_req_completion(nvme_cq(req
), req
);
1667 static void nvme_aio_copy_in_cb(void *opaque
, int ret
)
1669 struct nvme_copy_in_ctx
*in_ctx
= opaque
;
1670 NvmeRequest
*req
= in_ctx
->req
;
1671 NvmeNamespace
*ns
= req
->ns
;
1672 struct nvme_copy_ctx
*ctx
= req
->opaque
;
1674 qemu_iovec_destroy(&in_ctx
->iov
);
1677 trace_pci_nvme_aio_copy_in_cb(nvme_cid(req
));
1680 nvme_aio_err(req
, ret
);
1690 block_acct_failed(blk_get_stats(ns
->blkconf
.blk
), &req
->acct
);
1692 g_free(ctx
->bounce
);
1695 nvme_enqueue_req_completion(nvme_cq(req
), req
);
1700 nvme_copy_in_complete(req
);
1703 struct nvme_compare_ctx
{
1708 static void nvme_compare_cb(void *opaque
, int ret
)
1710 NvmeRequest
*req
= opaque
;
1711 NvmeNamespace
*ns
= req
->ns
;
1712 struct nvme_compare_ctx
*ctx
= req
->opaque
;
1713 g_autofree
uint8_t *buf
= NULL
;
1716 trace_pci_nvme_compare_cb(nvme_cid(req
));
1719 block_acct_done(blk_get_stats(ns
->blkconf
.blk
), &req
->acct
);
1721 block_acct_failed(blk_get_stats(ns
->blkconf
.blk
), &req
->acct
);
1722 nvme_aio_err(req
, ret
);
1726 buf
= g_malloc(ctx
->iov
.size
);
1728 status
= nvme_dma(nvme_ctrl(req
), buf
, ctx
->iov
.size
,
1729 DMA_DIRECTION_TO_DEVICE
, req
);
1731 req
->status
= status
;
1735 if (memcmp(buf
, ctx
->bounce
, ctx
->iov
.size
)) {
1736 req
->status
= NVME_CMP_FAILURE
;
1740 qemu_iovec_destroy(&ctx
->iov
);
1741 g_free(ctx
->bounce
);
1744 nvme_enqueue_req_completion(nvme_cq(req
), req
);
1747 static uint16_t nvme_dsm(NvmeCtrl
*n
, NvmeRequest
*req
)
1749 NvmeNamespace
*ns
= req
->ns
;
1750 NvmeDsmCmd
*dsm
= (NvmeDsmCmd
*) &req
->cmd
;
1752 uint32_t attr
= le32_to_cpu(dsm
->attributes
);
1753 uint32_t nr
= (le32_to_cpu(dsm
->nr
) & 0xff) + 1;
1755 uint16_t status
= NVME_SUCCESS
;
1757 trace_pci_nvme_dsm(nvme_cid(req
), nvme_nsid(ns
), nr
, attr
);
1759 if (attr
& NVME_DSMGMT_AD
) {
1762 NvmeDsmRange range
[nr
];
1763 uintptr_t *discards
= (uintptr_t *)&req
->opaque
;
1765 status
= nvme_dma(n
, (uint8_t *)range
, sizeof(range
),
1766 DMA_DIRECTION_TO_DEVICE
, req
);
         * AIO callbacks may be called immediately, so initialize discards to 1
         * to make sure the callback does not complete the request before all
         * discards have been issued.
1778 for (int i
= 0; i
< nr
; i
++) {
1779 uint64_t slba
= le64_to_cpu(range
[i
].slba
);
1780 uint32_t nlb
= le32_to_cpu(range
[i
].nlb
);
1782 if (nvme_check_bounds(ns
, slba
, nlb
)) {
1783 trace_pci_nvme_err_invalid_lba_range(slba
, nlb
,
1788 trace_pci_nvme_dsm_deallocate(nvme_cid(req
), nvme_nsid(ns
), slba
,
1791 if (nlb
> n
->dmrsl
) {
1792 trace_pci_nvme_dsm_single_range_limit_exceeded(nlb
, n
->dmrsl
);
1795 offset
= nvme_l2b(ns
, slba
);
1796 len
= nvme_l2b(ns
, nlb
);
1799 size_t bytes
= MIN(BDRV_REQUEST_MAX_BYTES
, len
);
1803 blk_aio_pdiscard(ns
->blkconf
.blk
, offset
, bytes
,
1804 nvme_aio_discard_cb
, req
);
1811 /* account for the 1-initialization */
1815 status
= NVME_NO_COMPLETE
;
1817 status
= req
->status
;
1824 static uint16_t nvme_copy(NvmeCtrl
*n
, NvmeRequest
*req
)
1826 NvmeNamespace
*ns
= req
->ns
;
1827 NvmeCopyCmd
*copy
= (NvmeCopyCmd
*)&req
->cmd
;
1828 g_autofree NvmeCopySourceRange
*range
= NULL
;
1830 uint16_t nr
= copy
->nr
+ 1;
1831 uint8_t format
= copy
->control
[0] & 0xf;
1834 uint8_t *bounce
= NULL
, *bouncep
= NULL
;
1835 struct nvme_copy_ctx
*ctx
;
1839 trace_pci_nvme_copy(nvme_cid(req
), nvme_nsid(ns
), nr
, format
);
1841 if (!(n
->id_ctrl
.ocfs
& (1 << format
))) {
1842 trace_pci_nvme_err_copy_invalid_format(format
);
1843 return NVME_INVALID_FIELD
| NVME_DNR
;
1846 if (nr
> ns
->id_ns
.msrc
+ 1) {
1847 return NVME_CMD_SIZE_LIMIT
| NVME_DNR
;
1850 range
= g_new(NvmeCopySourceRange
, nr
);
1852 status
= nvme_dma(n
, (uint8_t *)range
, nr
* sizeof(NvmeCopySourceRange
),
1853 DMA_DIRECTION_TO_DEVICE
, req
);
1858 for (i
= 0; i
< nr
; i
++) {
1859 uint64_t slba
= le64_to_cpu(range
[i
].slba
);
1860 uint32_t _nlb
= le16_to_cpu(range
[i
].nlb
) + 1;
1862 if (_nlb
> le16_to_cpu(ns
->id_ns
.mssrl
)) {
1863 return NVME_CMD_SIZE_LIMIT
| NVME_DNR
;
1866 status
= nvme_check_bounds(ns
, slba
, _nlb
);
1868 trace_pci_nvme_err_invalid_lba_range(slba
, _nlb
, ns
->id_ns
.nsze
);
1872 if (NVME_ERR_REC_DULBE(ns
->features
.err_rec
)) {
1873 status
= nvme_check_dulbe(ns
, slba
, _nlb
);
1879 if (ns
->params
.zoned
) {
1880 status
= nvme_check_zone_read(ns
, slba
, _nlb
);
1889 if (nlb
> le32_to_cpu(ns
->id_ns
.mcl
)) {
1890 return NVME_CMD_SIZE_LIMIT
| NVME_DNR
;
1893 bounce
= bouncep
= g_malloc(nvme_l2b(ns
, nlb
));
1895 block_acct_start(blk_get_stats(ns
->blkconf
.blk
), &req
->acct
, 0,
1898 ctx
= g_new(struct nvme_copy_ctx
, 1);
1900 ctx
->bounce
= bounce
;
1906 for (i
= 0; i
< nr
; i
++) {
1907 uint64_t slba
= le64_to_cpu(range
[i
].slba
);
1908 uint32_t nlb
= le16_to_cpu(range
[i
].nlb
) + 1;
1910 size_t len
= nvme_l2b(ns
, nlb
);
1911 int64_t offset
= nvme_l2b(ns
, slba
);
1913 trace_pci_nvme_copy_source_range(slba
, nlb
);
1915 struct nvme_copy_in_ctx
*in_ctx
= g_new(struct nvme_copy_in_ctx
, 1);
1918 qemu_iovec_init(&in_ctx
->iov
, 1);
1919 qemu_iovec_add(&in_ctx
->iov
, bouncep
, len
);
1923 blk_aio_preadv(ns
->blkconf
.blk
, offset
, &in_ctx
->iov
, 0,
1924 nvme_aio_copy_in_cb
, in_ctx
);
1929 /* account for the 1-initialization */
1933 nvme_copy_in_complete(req
);
1936 return NVME_NO_COMPLETE
;
1939 static uint16_t nvme_compare(NvmeCtrl
*n
, NvmeRequest
*req
)
1941 NvmeRwCmd
*rw
= (NvmeRwCmd
*)&req
->cmd
;
1942 NvmeNamespace
*ns
= req
->ns
;
1943 BlockBackend
*blk
= ns
->blkconf
.blk
;
1944 uint64_t slba
= le64_to_cpu(rw
->slba
);
1945 uint32_t nlb
= le16_to_cpu(rw
->nlb
) + 1;
1946 size_t len
= nvme_l2b(ns
, nlb
);
1947 int64_t offset
= nvme_l2b(ns
, slba
);
1948 uint8_t *bounce
= NULL
;
1949 struct nvme_compare_ctx
*ctx
= NULL
;
1952 trace_pci_nvme_compare(nvme_cid(req
), nvme_nsid(ns
), slba
, nlb
);
1954 status
= nvme_check_mdts(n
, len
);
1959 status
= nvme_check_bounds(ns
, slba
, nlb
);
1961 trace_pci_nvme_err_invalid_lba_range(slba
, nlb
, ns
->id_ns
.nsze
);
1965 if (NVME_ERR_REC_DULBE(ns
->features
.err_rec
)) {
1966 status
= nvme_check_dulbe(ns
, slba
, nlb
);
1972 bounce
= g_malloc(len
);
1974 ctx
= g_new(struct nvme_compare_ctx
, 1);
1975 ctx
->bounce
= bounce
;
1979 qemu_iovec_init(&ctx
->iov
, 1);
1980 qemu_iovec_add(&ctx
->iov
, bounce
, len
);
1982 block_acct_start(blk_get_stats(blk
), &req
->acct
, len
, BLOCK_ACCT_READ
);
1983 blk_aio_preadv(blk
, offset
, &ctx
->iov
, 0, nvme_compare_cb
, req
);
1985 return NVME_NO_COMPLETE
;
1988 static uint16_t nvme_flush(NvmeCtrl
*n
, NvmeRequest
*req
)
1990 uint32_t nsid
= le32_to_cpu(req
->cmd
.nsid
);
1991 uintptr_t *num_flushes
= (uintptr_t *)&req
->opaque
;
1993 struct nvme_aio_flush_ctx
*ctx
;
1996 trace_pci_nvme_flush(nvme_cid(req
), nsid
);
1998 if (nsid
!= NVME_NSID_BROADCAST
) {
1999 req
->ns
= nvme_ns(n
, nsid
);
2000 if (unlikely(!req
->ns
)) {
2001 return NVME_INVALID_FIELD
| NVME_DNR
;
2004 block_acct_start(blk_get_stats(req
->ns
->blkconf
.blk
), &req
->acct
, 0,
2006 req
->aiocb
= blk_aio_flush(req
->ns
->blkconf
.blk
, nvme_rw_cb
, req
);
2007 return NVME_NO_COMPLETE
;
2010 /* 1-initialize; see comment in nvme_dsm */
2013 for (int i
= 1; i
<= n
->num_namespaces
; i
++) {
2019 ctx
= g_new(struct nvme_aio_flush_ctx
, 1);
2025 block_acct_start(blk_get_stats(ns
->blkconf
.blk
), &ctx
->acct
, 0,
2027 blk_aio_flush(ns
->blkconf
.blk
, nvme_aio_flush_cb
, ctx
);
2030 /* account for the 1-initialization */
2034 status
= NVME_NO_COMPLETE
;
2036 status
= req
->status
;
2042 static uint16_t nvme_read(NvmeCtrl
*n
, NvmeRequest
*req
)
2044 NvmeRwCmd
*rw
= (NvmeRwCmd
*)&req
->cmd
;
2045 NvmeNamespace
*ns
= req
->ns
;
2046 uint64_t slba
= le64_to_cpu(rw
->slba
);
2047 uint32_t nlb
= (uint32_t)le16_to_cpu(rw
->nlb
) + 1;
2048 uint64_t data_size
= nvme_l2b(ns
, nlb
);
2049 uint64_t data_offset
;
2050 BlockBackend
*blk
= ns
->blkconf
.blk
;
2053 trace_pci_nvme_read(nvme_cid(req
), nvme_nsid(ns
), nlb
, data_size
, slba
);
2055 status
= nvme_check_mdts(n
, data_size
);
2060 status
= nvme_check_bounds(ns
, slba
, nlb
);
2062 trace_pci_nvme_err_invalid_lba_range(slba
, nlb
, ns
->id_ns
.nsze
);
2066 if (ns
->params
.zoned
) {
2067 status
= nvme_check_zone_read(ns
, slba
, nlb
);
2069 trace_pci_nvme_err_zone_read_not_ok(slba
, nlb
, status
);
2074 status
= nvme_map_dptr(n
, data_size
, req
);
2079 if (NVME_ERR_REC_DULBE(ns
->features
.err_rec
)) {
2080 status
= nvme_check_dulbe(ns
, slba
, nlb
);
2086 data_offset
= nvme_l2b(ns
, slba
);
2088 block_acct_start(blk_get_stats(blk
), &req
->acct
, data_size
,
2091 req
->aiocb
= dma_blk_read(blk
, &req
->qsg
, data_offset
,
2092 BDRV_SECTOR_SIZE
, nvme_rw_cb
, req
);
2094 req
->aiocb
= blk_aio_preadv(blk
, data_offset
, &req
->iov
, 0,
2097 return NVME_NO_COMPLETE
;
2100 block_acct_invalid(blk_get_stats(blk
), BLOCK_ACCT_READ
);
2101 return status
| NVME_DNR
;
2104 static uint16_t nvme_do_write(NvmeCtrl
*n
, NvmeRequest
*req
, bool append
,
2107 NvmeRwCmd
*rw
= (NvmeRwCmd
*)&req
->cmd
;
2108 NvmeNamespace
*ns
= req
->ns
;
2109 uint64_t slba
= le64_to_cpu(rw
->slba
);
2110 uint32_t nlb
= (uint32_t)le16_to_cpu(rw
->nlb
) + 1;
2111 uint64_t data_size
= nvme_l2b(ns
, nlb
);
2112 uint64_t data_offset
;
2114 NvmeZonedResult
*res
= (NvmeZonedResult
*)&req
->cqe
;
2115 BlockBackend
*blk
= ns
->blkconf
.blk
;
2118 trace_pci_nvme_write(nvme_cid(req
), nvme_io_opc_str(rw
->opcode
),
2119 nvme_nsid(ns
), nlb
, data_size
, slba
);
2122 status
= nvme_check_mdts(n
, data_size
);
2128 status
= nvme_check_bounds(ns
, slba
, nlb
);
2130 trace_pci_nvme_err_invalid_lba_range(slba
, nlb
, ns
->id_ns
.nsze
);
2134 if (ns
->params
.zoned
) {
2135 zone
= nvme_get_zone_by_slba(ns
, slba
);
2138 if (unlikely(slba
!= zone
->d
.zslba
)) {
2139 trace_pci_nvme_err_append_not_at_start(slba
, zone
->d
.zslba
);
2140 status
= NVME_INVALID_FIELD
;
2144 if (n
->params
.zasl
&& data_size
> n
->page_size
<< n
->params
.zasl
) {
2145 trace_pci_nvme_err_zasl(data_size
);
2146 return NVME_INVALID_FIELD
| NVME_DNR
;
2150 res
->slba
= cpu_to_le64(slba
);
2153 status
= nvme_check_zone_write(ns
, zone
, slba
, nlb
);
2158 status
= nvme_zrm_auto(ns
, zone
);
2166 data_offset
= nvme_l2b(ns
, slba
);
2169 status
= nvme_map_dptr(n
, data_size
, req
);
2174 block_acct_start(blk_get_stats(blk
), &req
->acct
, data_size
,
2177 req
->aiocb
= dma_blk_write(blk
, &req
->qsg
, data_offset
,
2178 BDRV_SECTOR_SIZE
, nvme_rw_cb
, req
);
2180 req
->aiocb
= blk_aio_pwritev(blk
, data_offset
, &req
->iov
, 0,
2184 req
->aiocb
= blk_aio_pwrite_zeroes(blk
, data_offset
, data_size
,
2185 BDRV_REQ_MAY_UNMAP
, nvme_rw_cb
,
2188 return NVME_NO_COMPLETE
;
2191 block_acct_invalid(blk_get_stats(blk
), BLOCK_ACCT_WRITE
);
2192 return status
| NVME_DNR
;
2195 static inline uint16_t nvme_write(NvmeCtrl
*n
, NvmeRequest
*req
)
2197 return nvme_do_write(n
, req
, false, false);
2200 static inline uint16_t nvme_write_zeroes(NvmeCtrl
*n
, NvmeRequest
*req
)
2202 return nvme_do_write(n
, req
, false, true);
2205 static inline uint16_t nvme_zone_append(NvmeCtrl
*n
, NvmeRequest
*req
)
2207 return nvme_do_write(n
, req
, true, false);
2210 static uint16_t nvme_get_mgmt_zone_slba_idx(NvmeNamespace
*ns
, NvmeCmd
*c
,
2211 uint64_t *slba
, uint32_t *zone_idx
)
2213 uint32_t dw10
= le32_to_cpu(c
->cdw10
);
2214 uint32_t dw11
= le32_to_cpu(c
->cdw11
);
2216 if (!ns
->params
.zoned
) {
2217 trace_pci_nvme_err_invalid_opc(c
->opcode
);
2218 return NVME_INVALID_OPCODE
| NVME_DNR
;
2221 *slba
= ((uint64_t)dw11
) << 32 | dw10
;
2222 if (unlikely(*slba
>= ns
->id_ns
.nsze
)) {
2223 trace_pci_nvme_err_invalid_lba_range(*slba
, 0, ns
->id_ns
.nsze
);
2225 return NVME_LBA_RANGE
| NVME_DNR
;
2228 *zone_idx
= nvme_zone_idx(ns
, *slba
);
2229 assert(*zone_idx
< ns
->num_zones
);
2231 return NVME_SUCCESS
;
2234 typedef uint16_t (*op_handler_t
)(NvmeNamespace
*, NvmeZone
*, NvmeZoneState
,
2237 enum NvmeZoneProcessingMask
{
2238 NVME_PROC_CURRENT_ZONE
= 0,
2239 NVME_PROC_OPENED_ZONES
= 1 << 0,
2240 NVME_PROC_CLOSED_ZONES
= 1 << 1,
2241 NVME_PROC_READ_ONLY_ZONES
= 1 << 2,
2242 NVME_PROC_FULL_ZONES
= 1 << 3,
2245 static uint16_t nvme_open_zone(NvmeNamespace
*ns
, NvmeZone
*zone
,
2246 NvmeZoneState state
, NvmeRequest
*req
)
2248 return nvme_zrm_open(ns
, zone
);
2251 static uint16_t nvme_close_zone(NvmeNamespace
*ns
, NvmeZone
*zone
,
2252 NvmeZoneState state
, NvmeRequest
*req
)
2254 return nvme_zrm_close(ns
, zone
);
2257 static uint16_t nvme_finish_zone(NvmeNamespace
*ns
, NvmeZone
*zone
,
2258 NvmeZoneState state
, NvmeRequest
*req
)
2260 return nvme_zrm_finish(ns
, zone
);
2263 static uint16_t nvme_reset_zone(NvmeNamespace
*ns
, NvmeZone
*zone
,
2264 NvmeZoneState state
, NvmeRequest
*req
)
2266 uintptr_t *resets
= (uintptr_t *)&req
->opaque
;
2267 struct nvme_zone_reset_ctx
*ctx
;
2270 case NVME_ZONE_STATE_EMPTY
:
2271 return NVME_SUCCESS
;
2272 case NVME_ZONE_STATE_EXPLICITLY_OPEN
:
2273 case NVME_ZONE_STATE_IMPLICITLY_OPEN
:
2274 case NVME_ZONE_STATE_CLOSED
:
2275 case NVME_ZONE_STATE_FULL
:
2278 return NVME_ZONE_INVAL_TRANSITION
;
2282 * The zone reset aio callback needs to know the zone that is being reset
2283 * in order to transition the zone on completion.
2285 ctx
= g_new(struct nvme_zone_reset_ctx
, 1);
2291 blk_aio_pwrite_zeroes(ns
->blkconf
.blk
, nvme_l2b(ns
, zone
->d
.zslba
),
2292 nvme_l2b(ns
, ns
->zone_size
), BDRV_REQ_MAY_UNMAP
,
2293 nvme_aio_zone_reset_cb
, ctx
);
2295 return NVME_NO_COMPLETE
;
2298 static uint16_t nvme_offline_zone(NvmeNamespace
*ns
, NvmeZone
*zone
,
2299 NvmeZoneState state
, NvmeRequest
*req
)
2302 case NVME_ZONE_STATE_READ_ONLY
:
2303 nvme_assign_zone_state(ns
, zone
, NVME_ZONE_STATE_OFFLINE
);
2305 case NVME_ZONE_STATE_OFFLINE
:
2306 return NVME_SUCCESS
;
2308 return NVME_ZONE_INVAL_TRANSITION
;
2312 static uint16_t nvme_set_zd_ext(NvmeNamespace
*ns
, NvmeZone
*zone
)
2315 uint8_t state
= nvme_get_zone_state(zone
);
2317 if (state
== NVME_ZONE_STATE_EMPTY
) {
2318 status
= nvme_aor_check(ns
, 1, 0);
2322 nvme_aor_inc_active(ns
);
2323 zone
->d
.za
|= NVME_ZA_ZD_EXT_VALID
;
2324 nvme_assign_zone_state(ns
, zone
, NVME_ZONE_STATE_CLOSED
);
2325 return NVME_SUCCESS
;
2328 return NVME_ZONE_INVAL_TRANSITION
;
2331 static uint16_t nvme_bulk_proc_zone(NvmeNamespace
*ns
, NvmeZone
*zone
,
2332 enum NvmeZoneProcessingMask proc_mask
,
2333 op_handler_t op_hndlr
, NvmeRequest
*req
)
2335 uint16_t status
= NVME_SUCCESS
;
2336 NvmeZoneState zs
= nvme_get_zone_state(zone
);
2340 case NVME_ZONE_STATE_IMPLICITLY_OPEN
:
2341 case NVME_ZONE_STATE_EXPLICITLY_OPEN
:
2342 proc_zone
= proc_mask
& NVME_PROC_OPENED_ZONES
;
2344 case NVME_ZONE_STATE_CLOSED
:
2345 proc_zone
= proc_mask
& NVME_PROC_CLOSED_ZONES
;
2347 case NVME_ZONE_STATE_READ_ONLY
:
2348 proc_zone
= proc_mask
& NVME_PROC_READ_ONLY_ZONES
;
2350 case NVME_ZONE_STATE_FULL
:
2351 proc_zone
= proc_mask
& NVME_PROC_FULL_ZONES
;
2358 status
= op_hndlr(ns
, zone
, zs
, req
);
2364 static uint16_t nvme_do_zone_op(NvmeNamespace
*ns
, NvmeZone
*zone
,
2365 enum NvmeZoneProcessingMask proc_mask
,
2366 op_handler_t op_hndlr
, NvmeRequest
*req
)
2369 uint16_t status
= NVME_SUCCESS
;
2373 status
= op_hndlr(ns
, zone
, nvme_get_zone_state(zone
), req
);
2375 if (proc_mask
& NVME_PROC_CLOSED_ZONES
) {
2376 QTAILQ_FOREACH_SAFE(zone
, &ns
->closed_zones
, entry
, next
) {
2377 status
= nvme_bulk_proc_zone(ns
, zone
, proc_mask
, op_hndlr
,
2379 if (status
&& status
!= NVME_NO_COMPLETE
) {
2384 if (proc_mask
& NVME_PROC_OPENED_ZONES
) {
2385 QTAILQ_FOREACH_SAFE(zone
, &ns
->imp_open_zones
, entry
, next
) {
2386 status
= nvme_bulk_proc_zone(ns
, zone
, proc_mask
, op_hndlr
,
2388 if (status
&& status
!= NVME_NO_COMPLETE
) {
2393 QTAILQ_FOREACH_SAFE(zone
, &ns
->exp_open_zones
, entry
, next
) {
2394 status
= nvme_bulk_proc_zone(ns
, zone
, proc_mask
, op_hndlr
,
2396 if (status
&& status
!= NVME_NO_COMPLETE
) {
2401 if (proc_mask
& NVME_PROC_FULL_ZONES
) {
2402 QTAILQ_FOREACH_SAFE(zone
, &ns
->full_zones
, entry
, next
) {
2403 status
= nvme_bulk_proc_zone(ns
, zone
, proc_mask
, op_hndlr
,
2405 if (status
&& status
!= NVME_NO_COMPLETE
) {
2411 if (proc_mask
& NVME_PROC_READ_ONLY_ZONES
) {
2412 for (i
= 0; i
< ns
->num_zones
; i
++, zone
++) {
2413 status
= nvme_bulk_proc_zone(ns
, zone
, proc_mask
, op_hndlr
,
2415 if (status
&& status
!= NVME_NO_COMPLETE
) {
static uint16_t nvme_zone_mgmt_send(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeCmd *cmd = (NvmeCmd *)&req->cmd;
    NvmeNamespace *ns = req->ns;
    NvmeZone *zone;
    uintptr_t *resets;
    uint8_t *zd_ext;
    uint32_t dw13 = le32_to_cpu(cmd->cdw13);
    uint64_t slba = 0;
    uint32_t zone_idx = 0;
    uint16_t status;
    uint8_t action;
    bool all;
    enum NvmeZoneProcessingMask proc_mask = NVME_PROC_CURRENT_ZONE;

    action = dw13 & 0xff;
    all = dw13 & 0x100;

    req->status = NVME_SUCCESS;

    if (!all) {
        status = nvme_get_mgmt_zone_slba_idx(ns, cmd, &slba, &zone_idx);
        if (status) {
            return status;
        }
    }

    zone = &ns->zone_array[zone_idx];
    if (slba != zone->d.zslba) {
        trace_pci_nvme_err_unaligned_zone_cmd(action, slba, zone->d.zslba);
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    switch (action) {
    case NVME_ZONE_ACTION_OPEN:
        if (all) {
            proc_mask = NVME_PROC_CLOSED_ZONES;
        }
        trace_pci_nvme_open_zone(slba, zone_idx, all);
        status = nvme_do_zone_op(ns, zone, proc_mask, nvme_open_zone, req);
        break;

    case NVME_ZONE_ACTION_CLOSE:
        if (all) {
            proc_mask = NVME_PROC_OPENED_ZONES;
        }
        trace_pci_nvme_close_zone(slba, zone_idx, all);
        status = nvme_do_zone_op(ns, zone, proc_mask, nvme_close_zone, req);
        break;

    case NVME_ZONE_ACTION_FINISH:
        if (all) {
            proc_mask = NVME_PROC_OPENED_ZONES | NVME_PROC_CLOSED_ZONES;
        }
        trace_pci_nvme_finish_zone(slba, zone_idx, all);
        status = nvme_do_zone_op(ns, zone, proc_mask, nvme_finish_zone, req);
        break;

    case NVME_ZONE_ACTION_RESET:
        resets = (uintptr_t *)&req->opaque;

        if (all) {
            proc_mask = NVME_PROC_OPENED_ZONES | NVME_PROC_CLOSED_ZONES |
                NVME_PROC_FULL_ZONES;
        }
        trace_pci_nvme_reset_zone(slba, zone_idx, all);

        *resets = 1;

        status = nvme_do_zone_op(ns, zone, proc_mask, nvme_reset_zone, req);

        (*resets)--;

        return *resets ? NVME_NO_COMPLETE : req->status;

    case NVME_ZONE_ACTION_OFFLINE:
        if (all) {
            proc_mask = NVME_PROC_READ_ONLY_ZONES;
        }
        trace_pci_nvme_offline_zone(slba, zone_idx, all);
        status = nvme_do_zone_op(ns, zone, proc_mask, nvme_offline_zone, req);
        break;

    case NVME_ZONE_ACTION_SET_ZD_EXT:
        trace_pci_nvme_set_descriptor_extension(slba, zone_idx);
        if (all || !ns->params.zd_extension_size) {
            return NVME_INVALID_FIELD | NVME_DNR;
        }
        zd_ext = nvme_get_zd_extension(ns, zone_idx);
        status = nvme_dma(n, zd_ext, ns->params.zd_extension_size,
                          DMA_DIRECTION_TO_DEVICE, req);
        if (status) {
            trace_pci_nvme_err_zd_extension_map_error(zone_idx);
            return status;
        }

        status = nvme_set_zd_ext(ns, zone);
        if (status == NVME_SUCCESS) {
            trace_pci_nvme_zd_extension_set(zone_idx);
            return status;
        }
        break;

    default:
        trace_pci_nvme_err_invalid_mgmt_action(action);
        status = NVME_INVALID_FIELD;
    }

    if (status == NVME_ZONE_INVAL_TRANSITION) {
        trace_pci_nvme_err_invalid_zone_state_transition(action, slba,
                                                         zone->d.zslba);
    }

    return status;
}
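/*
 * Zone Management Send: the action is taken from CDW13[7:0], and the Select
 * All bit widens proc_mask so nvme_do_zone_op() walks every zone in an
 * eligible state instead of only the zone addressed by SLBA.
 */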
static bool nvme_zone_matches_filter(uint32_t zafs, NvmeZone *zl)
{
    NvmeZoneState zs = nvme_get_zone_state(zl);

    switch (zafs) {
    case NVME_ZONE_REPORT_ALL:
        return true;
    case NVME_ZONE_REPORT_EMPTY:
        return zs == NVME_ZONE_STATE_EMPTY;
    case NVME_ZONE_REPORT_IMPLICITLY_OPEN:
        return zs == NVME_ZONE_STATE_IMPLICITLY_OPEN;
    case NVME_ZONE_REPORT_EXPLICITLY_OPEN:
        return zs == NVME_ZONE_STATE_EXPLICITLY_OPEN;
    case NVME_ZONE_REPORT_CLOSED:
        return zs == NVME_ZONE_STATE_CLOSED;
    case NVME_ZONE_REPORT_FULL:
        return zs == NVME_ZONE_STATE_FULL;
    case NVME_ZONE_REPORT_READ_ONLY:
        return zs == NVME_ZONE_STATE_READ_ONLY;
    case NVME_ZONE_REPORT_OFFLINE:
        return zs == NVME_ZONE_STATE_OFFLINE;
    default:
        return false;
    }
}
static uint16_t nvme_zone_mgmt_recv(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeCmd *cmd = (NvmeCmd *)&req->cmd;
    NvmeNamespace *ns = req->ns;
    /* cdw12 is zero-based number of dwords to return. Convert to bytes */
    uint32_t data_size = (le32_to_cpu(cmd->cdw12) + 1) << 2;
    uint32_t dw13 = le32_to_cpu(cmd->cdw13);
    uint32_t zone_idx, zra, zrasf, partial;
    uint64_t max_zones, nr_zones = 0;
    uint16_t status;
    uint64_t slba, capacity = nvme_ns_nlbas(ns);
    NvmeZoneDescr *z;
    NvmeZone *zone;
    NvmeZoneReportHeader *header;
    void *buf, *buf_p;
    size_t zone_entry_sz;

    req->status = NVME_SUCCESS;

    status = nvme_get_mgmt_zone_slba_idx(ns, cmd, &slba, &zone_idx);
    if (status) {
        return status;
    }

    zra = dw13 & 0xff;
    if (zra != NVME_ZONE_REPORT && zra != NVME_ZONE_REPORT_EXTENDED) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }
    if (zra == NVME_ZONE_REPORT_EXTENDED && !ns->params.zd_extension_size) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    zrasf = (dw13 >> 8) & 0xff;
    if (zrasf > NVME_ZONE_REPORT_OFFLINE) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    if (data_size < sizeof(NvmeZoneReportHeader)) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    status = nvme_check_mdts(n, data_size);
    if (status) {
        return status;
    }

    partial = (dw13 >> 16) & 0x01;

    zone_entry_sz = sizeof(NvmeZoneDescr);
    if (zra == NVME_ZONE_REPORT_EXTENDED) {
        zone_entry_sz += ns->params.zd_extension_size;
    }

    max_zones = (data_size - sizeof(NvmeZoneReportHeader)) / zone_entry_sz;
    buf = g_malloc0(data_size);

    zone = &ns->zone_array[zone_idx];
    for (; slba < capacity; slba += ns->zone_size) {
        if (partial && nr_zones >= max_zones) {
            break;
        }
        if (nvme_zone_matches_filter(zrasf, zone++)) {
            nr_zones++;
        }
    }
    header = (NvmeZoneReportHeader *)buf;
    header->nr_zones = cpu_to_le64(nr_zones);

    buf_p = buf + sizeof(NvmeZoneReportHeader);
    for (; zone_idx < ns->num_zones && max_zones > 0; zone_idx++) {
        zone = &ns->zone_array[zone_idx];
        if (nvme_zone_matches_filter(zrasf, zone)) {
            z = (NvmeZoneDescr *)buf_p;
            buf_p += sizeof(NvmeZoneDescr);

            z->zcap = cpu_to_le64(zone->d.zcap);
            z->zslba = cpu_to_le64(zone->d.zslba);

            if (nvme_wp_is_valid(zone)) {
                z->wp = cpu_to_le64(zone->d.wp);
            } else {
                z->wp = cpu_to_le64(~0ULL);
            }

            if (zra == NVME_ZONE_REPORT_EXTENDED) {
                if (zone->d.za & NVME_ZA_ZD_EXT_VALID) {
                    memcpy(buf_p, nvme_get_zd_extension(ns, zone_idx),
                           ns->params.zd_extension_size);
                }
                buf_p += ns->params.zd_extension_size;
            }

            max_zones--;
        }
    }

    status = nvme_dma(n, (uint8_t *)buf, data_size,
                      DMA_DIRECTION_FROM_DEVICE, req);

    g_free(buf);

    return status;
}
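/*
 * The Zone Management Receive payload built above is an NvmeZoneReportHeader
 * followed by one NvmeZoneDescr per matching zone (each followed by the zone
 * descriptor extension when an extended report is requested), which is why
 * max_zones is derived by dividing the remaining payload by zone_entry_sz.
 */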
static uint16_t nvme_io_cmd(NvmeCtrl *n, NvmeRequest *req)
{
    uint32_t nsid = le32_to_cpu(req->cmd.nsid);

    trace_pci_nvme_io_cmd(nvme_cid(req), nsid, nvme_sqid(req),
                          req->cmd.opcode, nvme_io_opc_str(req->cmd.opcode));

    if (!nvme_nsid_valid(n, nsid)) {
        return NVME_INVALID_NSID | NVME_DNR;
    }

    /*
     * In the base NVM command set, Flush may apply to all namespaces
     * (indicated by NSID being set to 0xFFFFFFFF). But if that feature is
     * used along with TP 4056 (Namespace Types), it may be pretty screwed up.
     *
     * If NSID is indeed set to 0xFFFFFFFF, we simply cannot associate the
     * opcode with a specific command since we cannot determine a unique I/O
     * command set. Opcode 0x0 could have any other meaning than something
     * equivalent to flushing and say it DOES have completely different
     * semantics in some other command set - does an NSID of 0xFFFFFFFF then
     * mean "for all namespaces, apply whatever command set specific command
     * that uses the 0x0 opcode?" Or does it mean "for all namespaces, apply
     * whatever command that uses the 0x0 opcode if, and only if, it allows
     * NSID to be 0xFFFFFFFF"?
     *
     * Anyway (and luckily), for now, we do not care about this since the
     * device only supports namespace types that includes the NVM Flush
     * command (NVM and Zoned), so always do an NVM Flush.
     */
    if (req->cmd.opcode == NVME_CMD_FLUSH) {
        return nvme_flush(n, req);
    }

    req->ns = nvme_ns(n, nsid);
    if (unlikely(!req->ns)) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    if (!(req->ns->iocs[req->cmd.opcode] & NVME_CMD_EFF_CSUPP)) {
        trace_pci_nvme_err_invalid_opc(req->cmd.opcode);
        return NVME_INVALID_OPCODE | NVME_DNR;
    }

    switch (req->cmd.opcode) {
    case NVME_CMD_WRITE_ZEROES:
        return nvme_write_zeroes(n, req);
    case NVME_CMD_ZONE_APPEND:
        return nvme_zone_append(n, req);
    case NVME_CMD_WRITE:
        return nvme_write(n, req);
    case NVME_CMD_READ:
        return nvme_read(n, req);
    case NVME_CMD_COMPARE:
        return nvme_compare(n, req);
    case NVME_CMD_DSM:
        return nvme_dsm(n, req);
    case NVME_CMD_COPY:
        return nvme_copy(n, req);
    case NVME_CMD_ZONE_MGMT_SEND:
        return nvme_zone_mgmt_send(n, req);
    case NVME_CMD_ZONE_MGMT_RECV:
        return nvme_zone_mgmt_recv(n, req);
    }

    return NVME_INVALID_OPCODE | NVME_DNR;
}
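/*
 * I/O opcodes are gated by the per-namespace command effects table
 * (req->ns->iocs), which nvme_select_ns_iocs() fills in from the command set
 * selected in CC.CSS; any opcode without the CSUPP bit is rejected above
 * before dispatch.
 */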
static void nvme_free_sq(NvmeSQueue *sq, NvmeCtrl *n)
{
    n->sq[sq->sqid] = NULL;
    timer_free(sq->timer);
    g_free(sq->io_req);
    if (sq->sqid) {
        g_free(sq);
    }
}

static uint16_t nvme_del_sq(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeDeleteQ *c = (NvmeDeleteQ *)&req->cmd;
    NvmeRequest *r, *next;
    NvmeSQueue *sq;
    NvmeCQueue *cq;
    uint16_t qid = le16_to_cpu(c->qid);

    if (unlikely(!qid || nvme_check_sqid(n, qid))) {
        trace_pci_nvme_err_invalid_del_sq(qid);
        return NVME_INVALID_QID | NVME_DNR;
    }

    trace_pci_nvme_del_sq(qid);

    sq = n->sq[qid];
    while (!QTAILQ_EMPTY(&sq->out_req_list)) {
        r = QTAILQ_FIRST(&sq->out_req_list);
        blk_aio_cancel(r->aiocb);
    }
    if (!nvme_check_cqid(n, sq->cqid)) {
        cq = n->cq[sq->cqid];
        QTAILQ_REMOVE(&cq->sq_list, sq, entry);

        QTAILQ_FOREACH_SAFE(r, &cq->req_list, entry, next) {
            if (r->sq == sq) {
                QTAILQ_REMOVE(&cq->req_list, r, entry);
                QTAILQ_INSERT_TAIL(&sq->req_list, r, entry);
            }
        }
    }

    nvme_free_sq(sq, n);
    return NVME_SUCCESS;
}
static void nvme_init_sq(NvmeSQueue *sq, NvmeCtrl *n, uint64_t dma_addr,
                         uint16_t sqid, uint16_t cqid, uint16_t size)
{
    int i;
    NvmeCQueue *cq;

    sq->ctrl = n;
    sq->dma_addr = dma_addr;
    sq->sqid = sqid;
    sq->size = size;
    sq->cqid = cqid;
    sq->head = sq->tail = 0;
    sq->io_req = g_new0(NvmeRequest, sq->size);

    QTAILQ_INIT(&sq->req_list);
    QTAILQ_INIT(&sq->out_req_list);
    for (i = 0; i < sq->size; i++) {
        sq->io_req[i].sq = sq;
        QTAILQ_INSERT_TAIL(&(sq->req_list), &sq->io_req[i], entry);
    }
    sq->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, nvme_process_sq, sq);

    assert(n->cq[cqid]);
    cq = n->cq[cqid];
    QTAILQ_INSERT_TAIL(&(cq->sq_list), sq, entry);
    n->sq[sqid] = sq;
}

static uint16_t nvme_create_sq(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeSQueue *sq;
    NvmeCreateSq *c = (NvmeCreateSq *)&req->cmd;

    uint16_t cqid = le16_to_cpu(c->cqid);
    uint16_t sqid = le16_to_cpu(c->sqid);
    uint16_t qsize = le16_to_cpu(c->qsize);
    uint16_t qflags = le16_to_cpu(c->sq_flags);
    uint64_t prp1 = le64_to_cpu(c->prp1);

    trace_pci_nvme_create_sq(prp1, sqid, cqid, qsize, qflags);

    if (unlikely(!cqid || nvme_check_cqid(n, cqid))) {
        trace_pci_nvme_err_invalid_create_sq_cqid(cqid);
        return NVME_INVALID_CQID | NVME_DNR;
    }
    if (unlikely(!sqid || sqid > n->params.max_ioqpairs ||
        n->sq[sqid] != NULL)) {
        trace_pci_nvme_err_invalid_create_sq_sqid(sqid);
        return NVME_INVALID_QID | NVME_DNR;
    }
    if (unlikely(!qsize || qsize > NVME_CAP_MQES(n->bar.cap))) {
        trace_pci_nvme_err_invalid_create_sq_size(qsize);
        return NVME_MAX_QSIZE_EXCEEDED | NVME_DNR;
    }
    if (unlikely(prp1 & (n->page_size - 1))) {
        trace_pci_nvme_err_invalid_create_sq_addr(prp1);
        return NVME_INVALID_PRP_OFFSET | NVME_DNR;
    }
    if (unlikely(!(NVME_SQ_FLAGS_PC(qflags)))) {
        trace_pci_nvme_err_invalid_create_sq_qflags(NVME_SQ_FLAGS_PC(qflags));
        return NVME_INVALID_FIELD | NVME_DNR;
    }
    sq = g_malloc0(sizeof(*sq));
    nvme_init_sq(sq, n, prp1, sqid, cqid, qsize + 1);
    return NVME_SUCCESS;
}
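/*
 * QSIZE in Create I/O Submission Queue is a 0's based value, hence the
 * "qsize + 1" when sizing the queue above; CAP.MQES is 0's based as well,
 * which is why qsize is compared directly against NVME_CAP_MQES().
 */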
struct nvme_stats {
    uint64_t units_read;
    uint64_t units_written;
    uint64_t read_commands;
    uint64_t write_commands;
};

static void nvme_set_blk_stats(NvmeNamespace *ns, struct nvme_stats *stats)
{
    BlockAcctStats *s = blk_get_stats(ns->blkconf.blk);

    stats->units_read += s->nr_bytes[BLOCK_ACCT_READ] >> BDRV_SECTOR_BITS;
    stats->units_written += s->nr_bytes[BLOCK_ACCT_WRITE] >> BDRV_SECTOR_BITS;
    stats->read_commands += s->nr_ops[BLOCK_ACCT_READ];
    stats->write_commands += s->nr_ops[BLOCK_ACCT_WRITE];
}
static uint16_t nvme_smart_info(NvmeCtrl *n, uint8_t rae, uint32_t buf_len,
                                uint64_t off, NvmeRequest *req)
{
    uint32_t nsid = le32_to_cpu(req->cmd.nsid);
    struct nvme_stats stats = { 0 };
    NvmeSmartLog smart = { 0 };
    uint32_t trans_len;
    NvmeNamespace *ns;

    if (off >= sizeof(smart)) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    if (nsid != 0xffffffff) {
        ns = nvme_ns(n, nsid);
        if (!ns) {
            return NVME_INVALID_NSID | NVME_DNR;
        }
        nvme_set_blk_stats(ns, &stats);
    } else {
        int i;

        for (i = 1; i <= n->num_namespaces; i++) {
            ns = nvme_ns(n, i);
            if (!ns) {
                continue;
            }
            nvme_set_blk_stats(ns, &stats);
        }
    }

    trans_len = MIN(sizeof(smart) - off, buf_len);
    smart.critical_warning = n->smart_critical_warning;

    smart.data_units_read[0] = cpu_to_le64(DIV_ROUND_UP(stats.units_read,
                                                        1000));
    smart.data_units_written[0] = cpu_to_le64(DIV_ROUND_UP(stats.units_written,
                                                           1000));
    smart.host_read_commands[0] = cpu_to_le64(stats.read_commands);
    smart.host_write_commands[0] = cpu_to_le64(stats.write_commands);

    smart.temperature = cpu_to_le16(n->temperature);

    if ((n->temperature >= n->features.temp_thresh_hi) ||
        (n->temperature <= n->features.temp_thresh_low)) {
        smart.critical_warning |= NVME_SMART_TEMPERATURE;
    }

    current_ms = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
    smart.power_on_hours[0] =
        cpu_to_le64((((current_ms - n->starttime_ms) / 1000) / 60) / 60);

    if (!rae) {
        nvme_clear_events(n, NVME_AER_TYPE_SMART);
    }

    return nvme_dma(n, (uint8_t *) &smart + off, trans_len,
                    DMA_DIRECTION_FROM_DEVICE, req);
}
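/*
 * Data Units Read/Written in the SMART log are expressed in units of 1000
 * 512-byte blocks, rounded up; the DIV_ROUND_UP(..., 1000) above converts the
 * sector counts accumulated from the block-layer accounting statistics.
 */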
static uint16_t nvme_fw_log_info(NvmeCtrl *n, uint32_t buf_len, uint64_t off,
                                 NvmeRequest *req)
{
    uint32_t trans_len;
    NvmeFwSlotInfoLog fw_log = {
        .afi = 0x1,
    };

    if (off >= sizeof(fw_log)) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    strpadcpy((char *)&fw_log.frs1, sizeof(fw_log.frs1), "1.0", ' ');
    trans_len = MIN(sizeof(fw_log) - off, buf_len);

    return nvme_dma(n, (uint8_t *) &fw_log + off, trans_len,
                    DMA_DIRECTION_FROM_DEVICE, req);
}

static uint16_t nvme_error_info(NvmeCtrl *n, uint8_t rae, uint32_t buf_len,
                                uint64_t off, NvmeRequest *req)
{
    uint32_t trans_len;
    NvmeErrorLog errlog;

    if (off >= sizeof(errlog)) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    if (!rae) {
        nvme_clear_events(n, NVME_AER_TYPE_ERROR);
    }

    memset(&errlog, 0x0, sizeof(errlog));
    trans_len = MIN(sizeof(errlog) - off, buf_len);

    return nvme_dma(n, (uint8_t *)&errlog, trans_len,
                    DMA_DIRECTION_FROM_DEVICE, req);
}
static uint16_t nvme_cmd_effects(NvmeCtrl *n, uint8_t csi, uint32_t buf_len,
                                 uint64_t off, NvmeRequest *req)
{
    NvmeEffectsLog log = {};
    const uint32_t *src_iocs = NULL;
    uint32_t trans_len;

    if (off >= sizeof(log)) {
        trace_pci_nvme_err_invalid_log_page_offset(off, sizeof(log));
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    switch (NVME_CC_CSS(n->bar.cc)) {
    case NVME_CC_CSS_NVM:
        src_iocs = nvme_cse_iocs_nvm;
        /* fall through */
    case NVME_CC_CSS_ADMIN_ONLY:
        break;
    case NVME_CC_CSS_CSI:
        switch (csi) {
        case NVME_CSI_NVM:
            src_iocs = nvme_cse_iocs_nvm;
            break;
        case NVME_CSI_ZONED:
            src_iocs = nvme_cse_iocs_zoned;
            break;
        }
    }

    memcpy(log.acs, nvme_cse_acs, sizeof(nvme_cse_acs));

    if (src_iocs) {
        memcpy(log.iocs, src_iocs, sizeof(log.iocs));
    }

    trans_len = MIN(sizeof(log) - off, buf_len);

    return nvme_dma(n, ((uint8_t *)&log) + off, trans_len,
                    DMA_DIRECTION_FROM_DEVICE, req);
}
static uint16_t nvme_get_log(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeCmd *cmd = &req->cmd;

    uint32_t dw10 = le32_to_cpu(cmd->cdw10);
    uint32_t dw11 = le32_to_cpu(cmd->cdw11);
    uint32_t dw12 = le32_to_cpu(cmd->cdw12);
    uint32_t dw13 = le32_to_cpu(cmd->cdw13);
    uint8_t  lid = dw10 & 0xff;
    uint8_t  lsp = (dw10 >> 8) & 0xf;
    uint8_t  rae = (dw10 >> 15) & 0x1;
    uint8_t  csi = le32_to_cpu(cmd->cdw14) >> 24;
    uint32_t numdl, numdu;
    uint64_t off, lpol, lpou;
    size_t   len;
    uint16_t status;

    numdl = (dw10 >> 16);
    numdu = (dw11 & 0xffff);
    lpol = dw12;
    lpou = dw13;

    len = (((numdu << 16) | numdl) + 1) << 2;
    off = (lpou << 32ULL) | lpol;

    if (off & 0x3) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    trace_pci_nvme_get_log(nvme_cid(req), lid, lsp, rae, len, off);

    status = nvme_check_mdts(n, len);
    if (status) {
        return status;
    }

    switch (lid) {
    case NVME_LOG_ERROR_INFO:
        return nvme_error_info(n, rae, len, off, req);
    case NVME_LOG_SMART_INFO:
        return nvme_smart_info(n, rae, len, off, req);
    case NVME_LOG_FW_SLOT_INFO:
        return nvme_fw_log_info(n, len, off, req);
    case NVME_LOG_CMD_EFFECTS:
        return nvme_cmd_effects(n, csi, len, off, req);
    default:
        trace_pci_nvme_err_invalid_log_page(nvme_cid(req), lid);
        return NVME_INVALID_FIELD | NVME_DNR;
    }
}
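/*
 * Worked example of the NUMD/LPO decoding in nvme_get_log(): NUMDL/NUMDU form
 * a 0's based dword count, so dw10 = 0x03ff0002 (NUMDL = 0x3ff, LID = 0x02)
 * with NUMDU = 0 requests (0x3ff + 1) * 4 = 4096 bytes of the SMART log, and
 * LPOL/LPOU form the 64-bit byte offset into that log page.
 */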
static void nvme_free_cq(NvmeCQueue *cq, NvmeCtrl *n)
{
    n->cq[cq->cqid] = NULL;
    timer_free(cq->timer);
    if (msix_enabled(&n->parent_obj)) {
        msix_vector_unuse(&n->parent_obj, cq->vector);
    }
    if (cq->cqid) {
        g_free(cq);
    }
}

static uint16_t nvme_del_cq(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeDeleteQ *c = (NvmeDeleteQ *)&req->cmd;
    NvmeCQueue *cq;
    uint16_t qid = le16_to_cpu(c->qid);

    if (unlikely(!qid || nvme_check_cqid(n, qid))) {
        trace_pci_nvme_err_invalid_del_cq_cqid(qid);
        return NVME_INVALID_CQID | NVME_DNR;
    }

    cq = n->cq[qid];
    if (unlikely(!QTAILQ_EMPTY(&cq->sq_list))) {
        trace_pci_nvme_err_invalid_del_cq_notempty(qid);
        return NVME_INVALID_QUEUE_DEL;
    }
    nvme_irq_deassert(n, cq);
    trace_pci_nvme_del_cq(qid);
    nvme_free_cq(cq, n);
    return NVME_SUCCESS;
}
static void nvme_init_cq(NvmeCQueue *cq, NvmeCtrl *n, uint64_t dma_addr,
                         uint16_t cqid, uint16_t vector, uint16_t size,
                         uint16_t irq_enabled)
{
    int ret;

    if (msix_enabled(&n->parent_obj)) {
        ret = msix_vector_use(&n->parent_obj, vector);
        assert(ret == 0);
    }
    cq->ctrl = n;
    cq->cqid = cqid;
    cq->size = size;
    cq->dma_addr = dma_addr;
    cq->phase = 1;
    cq->irq_enabled = irq_enabled;
    cq->vector = vector;
    cq->head = cq->tail = 0;
    QTAILQ_INIT(&cq->req_list);
    QTAILQ_INIT(&cq->sq_list);
    n->cq[cqid] = cq;
    cq->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, nvme_post_cqes, cq);
}

static uint16_t nvme_create_cq(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeCQueue *cq;
    NvmeCreateCq *c = (NvmeCreateCq *)&req->cmd;
    uint16_t cqid = le16_to_cpu(c->cqid);
    uint16_t vector = le16_to_cpu(c->irq_vector);
    uint16_t qsize = le16_to_cpu(c->qsize);
    uint16_t qflags = le16_to_cpu(c->cq_flags);
    uint64_t prp1 = le64_to_cpu(c->prp1);

    trace_pci_nvme_create_cq(prp1, cqid, vector, qsize, qflags,
                             NVME_CQ_FLAGS_IEN(qflags) != 0);

    if (unlikely(!cqid || cqid > n->params.max_ioqpairs ||
        n->cq[cqid] != NULL)) {
        trace_pci_nvme_err_invalid_create_cq_cqid(cqid);
        return NVME_INVALID_QID | NVME_DNR;
    }
    if (unlikely(!qsize || qsize > NVME_CAP_MQES(n->bar.cap))) {
        trace_pci_nvme_err_invalid_create_cq_size(qsize);
        return NVME_MAX_QSIZE_EXCEEDED | NVME_DNR;
    }
    if (unlikely(prp1 & (n->page_size - 1))) {
        trace_pci_nvme_err_invalid_create_cq_addr(prp1);
        return NVME_INVALID_PRP_OFFSET | NVME_DNR;
    }
    if (unlikely(!msix_enabled(&n->parent_obj) && vector)) {
        trace_pci_nvme_err_invalid_create_cq_vector(vector);
        return NVME_INVALID_IRQ_VECTOR | NVME_DNR;
    }
    if (unlikely(vector >= n->params.msix_qsize)) {
        trace_pci_nvme_err_invalid_create_cq_vector(vector);
        return NVME_INVALID_IRQ_VECTOR | NVME_DNR;
    }
    if (unlikely(!(NVME_CQ_FLAGS_PC(qflags)))) {
        trace_pci_nvme_err_invalid_create_cq_qflags(NVME_CQ_FLAGS_PC(qflags));
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    cq = g_malloc0(sizeof(*cq));
    nvme_init_cq(cq, n, prp1, cqid, vector, qsize + 1,
                 NVME_CQ_FLAGS_IEN(qflags));

    /*
     * It is only required to set qs_created when creating a completion queue;
     * creating a submission queue without a matching completion queue will
     * fail.
     */
    n->qs_created = true;
    return NVME_SUCCESS;
}
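/*
 * qs_created latches once the host has created an I/O completion queue;
 * nvme_set_feature() relies on it to reject a Number of Queues change after
 * any I/O queue exists.
 */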
static uint16_t nvme_rpt_empty_id_struct(NvmeCtrl *n, NvmeRequest *req)
{
    uint8_t id[NVME_IDENTIFY_DATA_SIZE] = {};

    return nvme_dma(n, id, sizeof(id), DMA_DIRECTION_FROM_DEVICE, req);
}

static inline bool nvme_csi_has_nvm_support(NvmeNamespace *ns)
{
    switch (ns->csi) {
    case NVME_CSI_NVM:
    case NVME_CSI_ZONED:
        return true;
    }
    return false;
}

static uint16_t nvme_identify_ctrl(NvmeCtrl *n, NvmeRequest *req)
{
    trace_pci_nvme_identify_ctrl();

    return nvme_dma(n, (uint8_t *)&n->id_ctrl, sizeof(n->id_ctrl),
                    DMA_DIRECTION_FROM_DEVICE, req);
}

static uint16_t nvme_identify_ctrl_csi(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
    uint8_t id[NVME_IDENTIFY_DATA_SIZE] = {};

    trace_pci_nvme_identify_ctrl_csi(c->csi);

    switch (c->csi) {
    case NVME_CSI_NVM:
        ((NvmeIdCtrlNvm *)&id)->dmrsl = cpu_to_le32(n->dmrsl);
        break;

    case NVME_CSI_ZONED:
        ((NvmeIdCtrlZoned *)&id)->zasl = n->params.zasl;
        break;

    default:
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    return nvme_dma(n, id, sizeof(id), DMA_DIRECTION_FROM_DEVICE, req);
}
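/*
 * Identify with CNS 06h (I/O command set specific controller data structure)
 * returns a zeroed 4 KiB buffer with only the fields relevant to the selected
 * command set filled in: DMRSL for the NVM command set, ZASL for the Zoned
 * command set.
 */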
3232 static uint16_t nvme_identify_ns(NvmeCtrl
*n
, NvmeRequest
*req
)
3235 NvmeIdentify
*c
= (NvmeIdentify
*)&req
->cmd
;
3236 uint32_t nsid
= le32_to_cpu(c
->nsid
);
3238 trace_pci_nvme_identify_ns(nsid
);
3240 if (!nvme_nsid_valid(n
, nsid
) || nsid
== NVME_NSID_BROADCAST
) {
3241 return NVME_INVALID_NSID
| NVME_DNR
;
3244 ns
= nvme_ns(n
, nsid
);
3245 if (unlikely(!ns
)) {
3246 return nvme_rpt_empty_id_struct(n
, req
);
3249 if (c
->csi
== NVME_CSI_NVM
&& nvme_csi_has_nvm_support(ns
)) {
3250 return nvme_dma(n
, (uint8_t *)&ns
->id_ns
, sizeof(NvmeIdNs
),
3251 DMA_DIRECTION_FROM_DEVICE
, req
);
3254 return NVME_INVALID_CMD_SET
| NVME_DNR
;
3257 static uint16_t nvme_identify_ns_csi(NvmeCtrl
*n
, NvmeRequest
*req
)
3260 NvmeIdentify
*c
= (NvmeIdentify
*)&req
->cmd
;
3261 uint32_t nsid
= le32_to_cpu(c
->nsid
);
3263 trace_pci_nvme_identify_ns_csi(nsid
, c
->csi
);
3265 if (!nvme_nsid_valid(n
, nsid
) || nsid
== NVME_NSID_BROADCAST
) {
3266 return NVME_INVALID_NSID
| NVME_DNR
;
3269 ns
= nvme_ns(n
, nsid
);
3270 if (unlikely(!ns
)) {
3271 return nvme_rpt_empty_id_struct(n
, req
);
3274 if (c
->csi
== NVME_CSI_NVM
&& nvme_csi_has_nvm_support(ns
)) {
3275 return nvme_rpt_empty_id_struct(n
, req
);
3276 } else if (c
->csi
== NVME_CSI_ZONED
&& ns
->csi
== NVME_CSI_ZONED
) {
3277 return nvme_dma(n
, (uint8_t *)ns
->id_ns_zoned
, sizeof(NvmeIdNsZoned
),
3278 DMA_DIRECTION_FROM_DEVICE
, req
);
3281 return NVME_INVALID_FIELD
| NVME_DNR
;
3284 static uint16_t nvme_identify_nslist(NvmeCtrl
*n
, NvmeRequest
*req
)
3287 NvmeIdentify
*c
= (NvmeIdentify
*)&req
->cmd
;
3288 uint32_t min_nsid
= le32_to_cpu(c
->nsid
);
3289 uint8_t list
[NVME_IDENTIFY_DATA_SIZE
] = {};
3290 static const int data_len
= sizeof(list
);
3291 uint32_t *list_ptr
= (uint32_t *)list
;
3294 trace_pci_nvme_identify_nslist(min_nsid
);
3297 * Both 0xffffffff (NVME_NSID_BROADCAST) and 0xfffffffe are invalid values
3298 * since the Active Namespace ID List should return namespaces with ids
3299 * *higher* than the NSID specified in the command. This is also specified
3300 * in the spec (NVM Express v1.3d, Section 5.15.4).
3302 if (min_nsid
>= NVME_NSID_BROADCAST
- 1) {
3303 return NVME_INVALID_NSID
| NVME_DNR
;
3306 for (i
= 1; i
<= n
->num_namespaces
; i
++) {
3311 if (ns
->params
.nsid
<= min_nsid
) {
3314 list_ptr
[j
++] = cpu_to_le32(ns
->params
.nsid
);
3315 if (j
== data_len
/ sizeof(uint32_t)) {
3320 return nvme_dma(n
, list
, data_len
, DMA_DIRECTION_FROM_DEVICE
, req
);
3323 static uint16_t nvme_identify_nslist_csi(NvmeCtrl
*n
, NvmeRequest
*req
)
3326 NvmeIdentify
*c
= (NvmeIdentify
*)&req
->cmd
;
3327 uint32_t min_nsid
= le32_to_cpu(c
->nsid
);
3328 uint8_t list
[NVME_IDENTIFY_DATA_SIZE
] = {};
3329 static const int data_len
= sizeof(list
);
3330 uint32_t *list_ptr
= (uint32_t *)list
;
3333 trace_pci_nvme_identify_nslist_csi(min_nsid
, c
->csi
);
3336 * Same as in nvme_identify_nslist(), 0xffffffff/0xfffffffe are invalid.
3338 if (min_nsid
>= NVME_NSID_BROADCAST
- 1) {
3339 return NVME_INVALID_NSID
| NVME_DNR
;
3342 if (c
->csi
!= NVME_CSI_NVM
&& c
->csi
!= NVME_CSI_ZONED
) {
3343 return NVME_INVALID_FIELD
| NVME_DNR
;
3346 for (i
= 1; i
<= n
->num_namespaces
; i
++) {
3351 if (ns
->params
.nsid
<= min_nsid
|| c
->csi
!= ns
->csi
) {
3354 list_ptr
[j
++] = cpu_to_le32(ns
->params
.nsid
);
3355 if (j
== data_len
/ sizeof(uint32_t)) {
3360 return nvme_dma(n
, list
, data_len
, DMA_DIRECTION_FROM_DEVICE
, req
);
3363 static uint16_t nvme_identify_ns_descr_list(NvmeCtrl
*n
, NvmeRequest
*req
)
3366 NvmeIdentify
*c
= (NvmeIdentify
*)&req
->cmd
;
3367 uint32_t nsid
= le32_to_cpu(c
->nsid
);
3368 uint8_t list
[NVME_IDENTIFY_DATA_SIZE
] = {};
3373 uint8_t v
[NVME_NIDL_UUID
];
3381 struct data
*ns_descrs
= (struct data
*)list
;
3383 trace_pci_nvme_identify_ns_descr_list(nsid
);
3385 if (!nvme_nsid_valid(n
, nsid
) || nsid
== NVME_NSID_BROADCAST
) {
3386 return NVME_INVALID_NSID
| NVME_DNR
;
3389 ns
= nvme_ns(n
, nsid
);
3390 if (unlikely(!ns
)) {
3391 return NVME_INVALID_FIELD
| NVME_DNR
;
3395 * Because the NGUID and EUI64 fields are 0 in the Identify Namespace data
3396 * structure, a Namespace UUID (nidt = 0x3) must be reported in the
3397 * Namespace Identification Descriptor. Add the namespace UUID here.
3399 ns_descrs
->uuid
.hdr
.nidt
= NVME_NIDT_UUID
;
3400 ns_descrs
->uuid
.hdr
.nidl
= NVME_NIDL_UUID
;
3401 memcpy(&ns_descrs
->uuid
.v
, ns
->params
.uuid
.data
, NVME_NIDL_UUID
);
3403 ns_descrs
->csi
.hdr
.nidt
= NVME_NIDT_CSI
;
3404 ns_descrs
->csi
.hdr
.nidl
= NVME_NIDL_CSI
;
3405 ns_descrs
->csi
.v
= ns
->csi
;
3407 return nvme_dma(n
, list
, sizeof(list
), DMA_DIRECTION_FROM_DEVICE
, req
);
3410 static uint16_t nvme_identify_cmd_set(NvmeCtrl
*n
, NvmeRequest
*req
)
3412 uint8_t list
[NVME_IDENTIFY_DATA_SIZE
] = {};
3413 static const int data_len
= sizeof(list
);
3415 trace_pci_nvme_identify_cmd_set();
3417 NVME_SET_CSI(*list
, NVME_CSI_NVM
);
3418 NVME_SET_CSI(*list
, NVME_CSI_ZONED
);
3420 return nvme_dma(n
, list
, data_len
, DMA_DIRECTION_FROM_DEVICE
, req
);
3423 static uint16_t nvme_identify(NvmeCtrl
*n
, NvmeRequest
*req
)
3425 NvmeIdentify
*c
= (NvmeIdentify
*)&req
->cmd
;
3427 trace_pci_nvme_identify(nvme_cid(req
), c
->cns
, le16_to_cpu(c
->ctrlid
),
3431 case NVME_ID_CNS_NS
:
3433 case NVME_ID_CNS_NS_PRESENT
:
3434 return nvme_identify_ns(n
, req
);
3435 case NVME_ID_CNS_CS_NS
:
3437 case NVME_ID_CNS_CS_NS_PRESENT
:
3438 return nvme_identify_ns_csi(n
, req
);
3439 case NVME_ID_CNS_CTRL
:
3440 return nvme_identify_ctrl(n
, req
);
3441 case NVME_ID_CNS_CS_CTRL
:
3442 return nvme_identify_ctrl_csi(n
, req
);
3443 case NVME_ID_CNS_NS_ACTIVE_LIST
:
3445 case NVME_ID_CNS_NS_PRESENT_LIST
:
3446 return nvme_identify_nslist(n
, req
);
3447 case NVME_ID_CNS_CS_NS_ACTIVE_LIST
:
3449 case NVME_ID_CNS_CS_NS_PRESENT_LIST
:
3450 return nvme_identify_nslist_csi(n
, req
);
3451 case NVME_ID_CNS_NS_DESCR_LIST
:
3452 return nvme_identify_ns_descr_list(n
, req
);
3453 case NVME_ID_CNS_IO_COMMAND_SET
:
3454 return nvme_identify_cmd_set(n
, req
);
3456 trace_pci_nvme_err_invalid_identify_cns(le32_to_cpu(c
->cns
));
3457 return NVME_INVALID_FIELD
| NVME_DNR
;
3461 static uint16_t nvme_abort(NvmeCtrl
*n
, NvmeRequest
*req
)
3463 uint16_t sqid
= le32_to_cpu(req
->cmd
.cdw10
) & 0xffff;
3465 req
->cqe
.result
= 1;
3466 if (nvme_check_sqid(n
, sqid
)) {
3467 return NVME_INVALID_FIELD
| NVME_DNR
;
3470 return NVME_SUCCESS
;
3473 static inline void nvme_set_timestamp(NvmeCtrl
*n
, uint64_t ts
)
3475 trace_pci_nvme_setfeat_timestamp(ts
);
3477 n
->host_timestamp
= le64_to_cpu(ts
);
3478 n
->timestamp_set_qemu_clock_ms
= qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL
);
3481 static inline uint64_t nvme_get_timestamp(const NvmeCtrl
*n
)
3483 uint64_t current_time
= qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL
);
3484 uint64_t elapsed_time
= current_time
- n
->timestamp_set_qemu_clock_ms
;
3486 union nvme_timestamp
{
3488 uint64_t timestamp
:48;
3496 union nvme_timestamp ts
;
3498 ts
.timestamp
= n
->host_timestamp
+ elapsed_time
;
3500 /* If the host timestamp is non-zero, set the timestamp origin */
3501 ts
.origin
= n
->host_timestamp
? 0x01 : 0x00;
3503 trace_pci_nvme_getfeat_timestamp(ts
.all
);
3505 return cpu_to_le64(ts
.all
);
static uint16_t nvme_get_feature_timestamp(NvmeCtrl *n, NvmeRequest *req)
{
    uint64_t timestamp = nvme_get_timestamp(n);

    return nvme_dma(n, (uint8_t *)&timestamp, sizeof(timestamp),
                    DMA_DIRECTION_FROM_DEVICE, req);
}
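/*
 * The value returned above packs the host-programmed timestamp plus the
 * virtual-clock milliseconds elapsed since it was set into the low 48 bits;
 * the origin field reports 0x01 once a non-zero timestamp has been set by
 * the host.
 */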
3516 static uint16_t nvme_get_feature(NvmeCtrl
*n
, NvmeRequest
*req
)
3518 NvmeCmd
*cmd
= &req
->cmd
;
3519 uint32_t dw10
= le32_to_cpu(cmd
->cdw10
);
3520 uint32_t dw11
= le32_to_cpu(cmd
->cdw11
);
3521 uint32_t nsid
= le32_to_cpu(cmd
->nsid
);
3523 uint8_t fid
= NVME_GETSETFEAT_FID(dw10
);
3524 NvmeGetFeatureSelect sel
= NVME_GETFEAT_SELECT(dw10
);
3529 static const uint32_t nvme_feature_default
[NVME_FID_MAX
] = {
3530 [NVME_ARBITRATION
] = NVME_ARB_AB_NOLIMIT
,
3533 trace_pci_nvme_getfeat(nvme_cid(req
), nsid
, fid
, sel
, dw11
);
3535 if (!nvme_feature_support
[fid
]) {
3536 return NVME_INVALID_FIELD
| NVME_DNR
;
3539 if (nvme_feature_cap
[fid
] & NVME_FEAT_CAP_NS
) {
3540 if (!nvme_nsid_valid(n
, nsid
) || nsid
== NVME_NSID_BROADCAST
) {
3542 * The Reservation Notification Mask and Reservation Persistence
3543 * features require a status code of Invalid Field in Command when
3544 * NSID is 0xFFFFFFFF. Since the device does not support those
3545 * features we can always return Invalid Namespace or Format as we
3546 * should do for all other features.
3548 return NVME_INVALID_NSID
| NVME_DNR
;
3551 if (!nvme_ns(n
, nsid
)) {
3552 return NVME_INVALID_FIELD
| NVME_DNR
;
3557 case NVME_GETFEAT_SELECT_CURRENT
:
3559 case NVME_GETFEAT_SELECT_SAVED
:
3560 /* no features are saveable by the controller; fallthrough */
3561 case NVME_GETFEAT_SELECT_DEFAULT
:
3563 case NVME_GETFEAT_SELECT_CAP
:
3564 result
= nvme_feature_cap
[fid
];
3569 case NVME_TEMPERATURE_THRESHOLD
:
3573 * The controller only implements the Composite Temperature sensor, so
3574 * return 0 for all other sensors.
3576 if (NVME_TEMP_TMPSEL(dw11
) != NVME_TEMP_TMPSEL_COMPOSITE
) {
3580 switch (NVME_TEMP_THSEL(dw11
)) {
3581 case NVME_TEMP_THSEL_OVER
:
3582 result
= n
->features
.temp_thresh_hi
;
3584 case NVME_TEMP_THSEL_UNDER
:
3585 result
= n
->features
.temp_thresh_low
;
3589 return NVME_INVALID_FIELD
| NVME_DNR
;
3590 case NVME_ERROR_RECOVERY
:
3591 if (!nvme_nsid_valid(n
, nsid
)) {
3592 return NVME_INVALID_NSID
| NVME_DNR
;
3595 ns
= nvme_ns(n
, nsid
);
3596 if (unlikely(!ns
)) {
3597 return NVME_INVALID_FIELD
| NVME_DNR
;
3600 result
= ns
->features
.err_rec
;
3602 case NVME_VOLATILE_WRITE_CACHE
:
3604 for (i
= 1; i
<= n
->num_namespaces
; i
++) {
3610 result
= blk_enable_write_cache(ns
->blkconf
.blk
);
3615 trace_pci_nvme_getfeat_vwcache(result
? "enabled" : "disabled");
3617 case NVME_ASYNCHRONOUS_EVENT_CONF
:
3618 result
= n
->features
.async_config
;
3620 case NVME_TIMESTAMP
:
3621 return nvme_get_feature_timestamp(n
, req
);
3628 case NVME_TEMPERATURE_THRESHOLD
:
3631 if (NVME_TEMP_TMPSEL(dw11
) != NVME_TEMP_TMPSEL_COMPOSITE
) {
3635 if (NVME_TEMP_THSEL(dw11
) == NVME_TEMP_THSEL_OVER
) {
3636 result
= NVME_TEMPERATURE_WARNING
;
3640 case NVME_NUMBER_OF_QUEUES
:
3641 result
= (n
->params
.max_ioqpairs
- 1) |
3642 ((n
->params
.max_ioqpairs
- 1) << 16);
3643 trace_pci_nvme_getfeat_numq(result
);
3645 case NVME_INTERRUPT_VECTOR_CONF
:
3647 if (iv
>= n
->params
.max_ioqpairs
+ 1) {
3648 return NVME_INVALID_FIELD
| NVME_DNR
;
3652 if (iv
== n
->admin_cq
.vector
) {
3653 result
|= NVME_INTVC_NOCOALESCING
;
3656 case NVME_COMMAND_SET_PROFILE
:
3660 result
= nvme_feature_default
[fid
];
3665 req
->cqe
.result
= cpu_to_le32(result
);
3666 return NVME_SUCCESS
;
static uint16_t nvme_set_feature_timestamp(NvmeCtrl *n, NvmeRequest *req)
{
    uint16_t ret;
    uint64_t timestamp;

    ret = nvme_dma(n, (uint8_t *)&timestamp, sizeof(timestamp),
                   DMA_DIRECTION_TO_DEVICE, req);
    if (ret) {
        return ret;
    }

    nvme_set_timestamp(n, timestamp);

    return NVME_SUCCESS;
}
3685 static uint16_t nvme_set_feature(NvmeCtrl
*n
, NvmeRequest
*req
)
3687 NvmeNamespace
*ns
= NULL
;
3689 NvmeCmd
*cmd
= &req
->cmd
;
3690 uint32_t dw10
= le32_to_cpu(cmd
->cdw10
);
3691 uint32_t dw11
= le32_to_cpu(cmd
->cdw11
);
3692 uint32_t nsid
= le32_to_cpu(cmd
->nsid
);
3693 uint8_t fid
= NVME_GETSETFEAT_FID(dw10
);
3694 uint8_t save
= NVME_SETFEAT_SAVE(dw10
);
3697 trace_pci_nvme_setfeat(nvme_cid(req
), nsid
, fid
, save
, dw11
);
3699 if (save
&& !(nvme_feature_cap
[fid
] & NVME_FEAT_CAP_SAVE
)) {
3700 return NVME_FID_NOT_SAVEABLE
| NVME_DNR
;
3703 if (!nvme_feature_support
[fid
]) {
3704 return NVME_INVALID_FIELD
| NVME_DNR
;
3707 if (nvme_feature_cap
[fid
] & NVME_FEAT_CAP_NS
) {
3708 if (nsid
!= NVME_NSID_BROADCAST
) {
3709 if (!nvme_nsid_valid(n
, nsid
)) {
3710 return NVME_INVALID_NSID
| NVME_DNR
;
3713 ns
= nvme_ns(n
, nsid
);
3714 if (unlikely(!ns
)) {
3715 return NVME_INVALID_FIELD
| NVME_DNR
;
3718 } else if (nsid
&& nsid
!= NVME_NSID_BROADCAST
) {
3719 if (!nvme_nsid_valid(n
, nsid
)) {
3720 return NVME_INVALID_NSID
| NVME_DNR
;
3723 return NVME_FEAT_NOT_NS_SPEC
| NVME_DNR
;
3726 if (!(nvme_feature_cap
[fid
] & NVME_FEAT_CAP_CHANGE
)) {
3727 return NVME_FEAT_NOT_CHANGEABLE
| NVME_DNR
;
3731 case NVME_TEMPERATURE_THRESHOLD
:
3732 if (NVME_TEMP_TMPSEL(dw11
) != NVME_TEMP_TMPSEL_COMPOSITE
) {
3736 switch (NVME_TEMP_THSEL(dw11
)) {
3737 case NVME_TEMP_THSEL_OVER
:
3738 n
->features
.temp_thresh_hi
= NVME_TEMP_TMPTH(dw11
);
3740 case NVME_TEMP_THSEL_UNDER
:
3741 n
->features
.temp_thresh_low
= NVME_TEMP_TMPTH(dw11
);
3744 return NVME_INVALID_FIELD
| NVME_DNR
;
3747 if ((n
->temperature
>= n
->features
.temp_thresh_hi
) ||
3748 (n
->temperature
<= n
->features
.temp_thresh_low
)) {
3749 nvme_smart_event(n
, NVME_AER_INFO_SMART_TEMP_THRESH
);
3753 case NVME_ERROR_RECOVERY
:
3754 if (nsid
== NVME_NSID_BROADCAST
) {
3755 for (i
= 1; i
<= n
->num_namespaces
; i
++) {
3762 if (NVME_ID_NS_NSFEAT_DULBE(ns
->id_ns
.nsfeat
)) {
3763 ns
->features
.err_rec
= dw11
;
3771 if (NVME_ID_NS_NSFEAT_DULBE(ns
->id_ns
.nsfeat
)) {
3772 ns
->features
.err_rec
= dw11
;
3775 case NVME_VOLATILE_WRITE_CACHE
:
3776 for (i
= 1; i
<= n
->num_namespaces
; i
++) {
3782 if (!(dw11
& 0x1) && blk_enable_write_cache(ns
->blkconf
.blk
)) {
3783 blk_flush(ns
->blkconf
.blk
);
3786 blk_set_enable_write_cache(ns
->blkconf
.blk
, dw11
& 1);
3791 case NVME_NUMBER_OF_QUEUES
:
3792 if (n
->qs_created
) {
3793 return NVME_CMD_SEQ_ERROR
| NVME_DNR
;
3797 * NVMe v1.3, Section 5.21.1.7: 0xffff is not an allowed value for NCQR
3800 if ((dw11
& 0xffff) == 0xffff || ((dw11
>> 16) & 0xffff) == 0xffff) {
3801 return NVME_INVALID_FIELD
| NVME_DNR
;
3804 trace_pci_nvme_setfeat_numq((dw11
& 0xFFFF) + 1,
3805 ((dw11
>> 16) & 0xFFFF) + 1,
3806 n
->params
.max_ioqpairs
,
3807 n
->params
.max_ioqpairs
);
3808 req
->cqe
.result
= cpu_to_le32((n
->params
.max_ioqpairs
- 1) |
3809 ((n
->params
.max_ioqpairs
- 1) << 16));
3811 case NVME_ASYNCHRONOUS_EVENT_CONF
:
3812 n
->features
.async_config
= dw11
;
3814 case NVME_TIMESTAMP
:
3815 return nvme_set_feature_timestamp(n
, req
);
3816 case NVME_COMMAND_SET_PROFILE
:
3818 trace_pci_nvme_err_invalid_iocsci(dw11
& 0x1ff);
3819 return NVME_CMD_SET_CMB_REJECTED
| NVME_DNR
;
3823 return NVME_FEAT_NOT_CHANGEABLE
| NVME_DNR
;
3825 return NVME_SUCCESS
;
3828 static uint16_t nvme_aer(NvmeCtrl
*n
, NvmeRequest
*req
)
3830 trace_pci_nvme_aer(nvme_cid(req
));
3832 if (n
->outstanding_aers
> n
->params
.aerl
) {
3833 trace_pci_nvme_aer_aerl_exceeded();
3834 return NVME_AER_LIMIT_EXCEEDED
;
3837 n
->aer_reqs
[n
->outstanding_aers
] = req
;
3838 n
->outstanding_aers
++;
3840 if (!QTAILQ_EMPTY(&n
->aer_queue
)) {
3841 nvme_process_aers(n
);
3844 return NVME_NO_COMPLETE
;
3847 static uint16_t nvme_admin_cmd(NvmeCtrl
*n
, NvmeRequest
*req
)
3849 trace_pci_nvme_admin_cmd(nvme_cid(req
), nvme_sqid(req
), req
->cmd
.opcode
,
3850 nvme_adm_opc_str(req
->cmd
.opcode
));
3852 if (!(nvme_cse_acs
[req
->cmd
.opcode
] & NVME_CMD_EFF_CSUPP
)) {
3853 trace_pci_nvme_err_invalid_admin_opc(req
->cmd
.opcode
);
3854 return NVME_INVALID_OPCODE
| NVME_DNR
;
3857 switch (req
->cmd
.opcode
) {
3858 case NVME_ADM_CMD_DELETE_SQ
:
3859 return nvme_del_sq(n
, req
);
3860 case NVME_ADM_CMD_CREATE_SQ
:
3861 return nvme_create_sq(n
, req
);
3862 case NVME_ADM_CMD_GET_LOG_PAGE
:
3863 return nvme_get_log(n
, req
);
3864 case NVME_ADM_CMD_DELETE_CQ
:
3865 return nvme_del_cq(n
, req
);
3866 case NVME_ADM_CMD_CREATE_CQ
:
3867 return nvme_create_cq(n
, req
);
3868 case NVME_ADM_CMD_IDENTIFY
:
3869 return nvme_identify(n
, req
);
3870 case NVME_ADM_CMD_ABORT
:
3871 return nvme_abort(n
, req
);
3872 case NVME_ADM_CMD_SET_FEATURES
:
3873 return nvme_set_feature(n
, req
);
3874 case NVME_ADM_CMD_GET_FEATURES
:
3875 return nvme_get_feature(n
, req
);
3876 case NVME_ADM_CMD_ASYNC_EV_REQ
:
3877 return nvme_aer(n
, req
);
3882 return NVME_INVALID_OPCODE
| NVME_DNR
;
3885 static void nvme_process_sq(void *opaque
)
3887 NvmeSQueue
*sq
= opaque
;
3888 NvmeCtrl
*n
= sq
->ctrl
;
3889 NvmeCQueue
*cq
= n
->cq
[sq
->cqid
];
3896 while (!(nvme_sq_empty(sq
) || QTAILQ_EMPTY(&sq
->req_list
))) {
3897 addr
= sq
->dma_addr
+ sq
->head
* n
->sqe_size
;
3898 if (nvme_addr_read(n
, addr
, (void *)&cmd
, sizeof(cmd
))) {
3899 trace_pci_nvme_err_addr_read(addr
);
3900 trace_pci_nvme_err_cfs();
3901 n
->bar
.csts
= NVME_CSTS_FAILED
;
3904 nvme_inc_sq_head(sq
);
3906 req
= QTAILQ_FIRST(&sq
->req_list
);
3907 QTAILQ_REMOVE(&sq
->req_list
, req
, entry
);
3908 QTAILQ_INSERT_TAIL(&sq
->out_req_list
, req
, entry
);
3909 nvme_req_clear(req
);
3910 req
->cqe
.cid
= cmd
.cid
;
3911 memcpy(&req
->cmd
, &cmd
, sizeof(NvmeCmd
));
3913 status
= sq
->sqid
? nvme_io_cmd(n
, req
) :
3914 nvme_admin_cmd(n
, req
);
3915 if (status
!= NVME_NO_COMPLETE
) {
3916 req
->status
= status
;
3917 nvme_enqueue_req_completion(cq
, req
);
3922 static void nvme_ctrl_reset(NvmeCtrl
*n
)
3927 for (i
= 1; i
<= n
->num_namespaces
; i
++) {
3936 for (i
= 0; i
< n
->params
.max_ioqpairs
+ 1; i
++) {
3937 if (n
->sq
[i
] != NULL
) {
3938 nvme_free_sq(n
->sq
[i
], n
);
3941 for (i
= 0; i
< n
->params
.max_ioqpairs
+ 1; i
++) {
3942 if (n
->cq
[i
] != NULL
) {
3943 nvme_free_cq(n
->cq
[i
], n
);
3947 while (!QTAILQ_EMPTY(&n
->aer_queue
)) {
3948 NvmeAsyncEvent
*event
= QTAILQ_FIRST(&n
->aer_queue
);
3949 QTAILQ_REMOVE(&n
->aer_queue
, event
, entry
);
3954 n
->outstanding_aers
= 0;
3955 n
->qs_created
= false;
3960 static void nvme_ctrl_shutdown(NvmeCtrl
*n
)
3966 memory_region_msync(&n
->pmr
.dev
->mr
, 0, n
->pmr
.dev
->size
);
3969 for (i
= 1; i
<= n
->num_namespaces
; i
++) {
3975 nvme_ns_shutdown(ns
);
3979 static void nvme_select_ns_iocs(NvmeCtrl
*n
)
3984 for (i
= 1; i
<= n
->num_namespaces
; i
++) {
3989 ns
->iocs
= nvme_cse_iocs_none
;
3992 if (NVME_CC_CSS(n
->bar
.cc
) != NVME_CC_CSS_ADMIN_ONLY
) {
3993 ns
->iocs
= nvme_cse_iocs_nvm
;
3996 case NVME_CSI_ZONED
:
3997 if (NVME_CC_CSS(n
->bar
.cc
) == NVME_CC_CSS_CSI
) {
3998 ns
->iocs
= nvme_cse_iocs_zoned
;
3999 } else if (NVME_CC_CSS(n
->bar
.cc
) == NVME_CC_CSS_NVM
) {
4000 ns
->iocs
= nvme_cse_iocs_nvm
;
4007 static int nvme_start_ctrl(NvmeCtrl
*n
)
4009 uint32_t page_bits
= NVME_CC_MPS(n
->bar
.cc
) + 12;
4010 uint32_t page_size
= 1 << page_bits
;
4012 if (unlikely(n
->cq
[0])) {
4013 trace_pci_nvme_err_startfail_cq();
4016 if (unlikely(n
->sq
[0])) {
4017 trace_pci_nvme_err_startfail_sq();
4020 if (unlikely(!n
->bar
.asq
)) {
4021 trace_pci_nvme_err_startfail_nbarasq();
4024 if (unlikely(!n
->bar
.acq
)) {
4025 trace_pci_nvme_err_startfail_nbaracq();
4028 if (unlikely(n
->bar
.asq
& (page_size
- 1))) {
4029 trace_pci_nvme_err_startfail_asq_misaligned(n
->bar
.asq
);
4032 if (unlikely(n
->bar
.acq
& (page_size
- 1))) {
4033 trace_pci_nvme_err_startfail_acq_misaligned(n
->bar
.acq
);
4036 if (unlikely(!(NVME_CAP_CSS(n
->bar
.cap
) & (1 << NVME_CC_CSS(n
->bar
.cc
))))) {
4037 trace_pci_nvme_err_startfail_css(NVME_CC_CSS(n
->bar
.cc
));
4040 if (unlikely(NVME_CC_MPS(n
->bar
.cc
) <
4041 NVME_CAP_MPSMIN(n
->bar
.cap
))) {
4042 trace_pci_nvme_err_startfail_page_too_small(
4043 NVME_CC_MPS(n
->bar
.cc
),
4044 NVME_CAP_MPSMIN(n
->bar
.cap
));
4047 if (unlikely(NVME_CC_MPS(n
->bar
.cc
) >
4048 NVME_CAP_MPSMAX(n
->bar
.cap
))) {
4049 trace_pci_nvme_err_startfail_page_too_large(
4050 NVME_CC_MPS(n
->bar
.cc
),
4051 NVME_CAP_MPSMAX(n
->bar
.cap
));
4054 if (unlikely(NVME_CC_IOCQES(n
->bar
.cc
) <
4055 NVME_CTRL_CQES_MIN(n
->id_ctrl
.cqes
))) {
4056 trace_pci_nvme_err_startfail_cqent_too_small(
4057 NVME_CC_IOCQES(n
->bar
.cc
),
4058 NVME_CTRL_CQES_MIN(n
->bar
.cap
));
4061 if (unlikely(NVME_CC_IOCQES(n
->bar
.cc
) >
4062 NVME_CTRL_CQES_MAX(n
->id_ctrl
.cqes
))) {
4063 trace_pci_nvme_err_startfail_cqent_too_large(
4064 NVME_CC_IOCQES(n
->bar
.cc
),
4065 NVME_CTRL_CQES_MAX(n
->bar
.cap
));
4068 if (unlikely(NVME_CC_IOSQES(n
->bar
.cc
) <
4069 NVME_CTRL_SQES_MIN(n
->id_ctrl
.sqes
))) {
4070 trace_pci_nvme_err_startfail_sqent_too_small(
4071 NVME_CC_IOSQES(n
->bar
.cc
),
4072 NVME_CTRL_SQES_MIN(n
->bar
.cap
));
4075 if (unlikely(NVME_CC_IOSQES(n
->bar
.cc
) >
4076 NVME_CTRL_SQES_MAX(n
->id_ctrl
.sqes
))) {
4077 trace_pci_nvme_err_startfail_sqent_too_large(
4078 NVME_CC_IOSQES(n
->bar
.cc
),
4079 NVME_CTRL_SQES_MAX(n
->bar
.cap
));
4082 if (unlikely(!NVME_AQA_ASQS(n
->bar
.aqa
))) {
4083 trace_pci_nvme_err_startfail_asqent_sz_zero();
4086 if (unlikely(!NVME_AQA_ACQS(n
->bar
.aqa
))) {
4087 trace_pci_nvme_err_startfail_acqent_sz_zero();
4091 n
->page_bits
= page_bits
;
4092 n
->page_size
= page_size
;
4093 n
->max_prp_ents
= n
->page_size
/ sizeof(uint64_t);
4094 n
->cqe_size
= 1 << NVME_CC_IOCQES(n
->bar
.cc
);
4095 n
->sqe_size
= 1 << NVME_CC_IOSQES(n
->bar
.cc
);
4096 nvme_init_cq(&n
->admin_cq
, n
, n
->bar
.acq
, 0, 0,
4097 NVME_AQA_ACQS(n
->bar
.aqa
) + 1, 1);
4098 nvme_init_sq(&n
->admin_sq
, n
, n
->bar
.asq
, 0, 0,
4099 NVME_AQA_ASQS(n
->bar
.aqa
) + 1);
4101 nvme_set_timestamp(n
, 0ULL);
4103 QTAILQ_INIT(&n
->aer_queue
);
4105 nvme_select_ns_iocs(n
);
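/*
 * CC.MPS is a power-of-two exponent relative to 4 KiB, hence
 * "page_bits = NVME_CC_MPS(cc) + 12" in nvme_start_ctrl(); MPS = 0 selects
 * 4 KiB pages, MPS = 1 selects 8 KiB, and so on.
 */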
4110 static void nvme_cmb_enable_regs(NvmeCtrl
*n
)
4112 NVME_CMBLOC_SET_CDPCILS(n
->bar
.cmbloc
, 1);
4113 NVME_CMBLOC_SET_CDPMLS(n
->bar
.cmbloc
, 1);
4114 NVME_CMBLOC_SET_BIR(n
->bar
.cmbloc
, NVME_CMB_BIR
);
4116 NVME_CMBSZ_SET_SQS(n
->bar
.cmbsz
, 1);
4117 NVME_CMBSZ_SET_CQS(n
->bar
.cmbsz
, 0);
4118 NVME_CMBSZ_SET_LISTS(n
->bar
.cmbsz
, 1);
4119 NVME_CMBSZ_SET_RDS(n
->bar
.cmbsz
, 1);
4120 NVME_CMBSZ_SET_WDS(n
->bar
.cmbsz
, 1);
4121 NVME_CMBSZ_SET_SZU(n
->bar
.cmbsz
, 2); /* MBs */
4122 NVME_CMBSZ_SET_SZ(n
->bar
.cmbsz
, n
->params
.cmb_size_mb
);
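/*
 * CMBSZ.SZU = 2 selects a 1 MiB size unit, so CMBSZ.SZ above is programmed
 * directly with the cmb_size_mb parameter.
 */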
4125 static void nvme_write_bar(NvmeCtrl
*n
, hwaddr offset
, uint64_t data
,
4128 if (unlikely(offset
& (sizeof(uint32_t) - 1))) {
4129 NVME_GUEST_ERR(pci_nvme_ub_mmiowr_misaligned32
,
4130 "MMIO write not 32-bit aligned,"
4131 " offset=0x%"PRIx64
"", offset
);
4132 /* should be ignored, fall through for now */
4135 if (unlikely(size
< sizeof(uint32_t))) {
4136 NVME_GUEST_ERR(pci_nvme_ub_mmiowr_toosmall
,
4137 "MMIO write smaller than 32-bits,"
4138 " offset=0x%"PRIx64
", size=%u",
4140 /* should be ignored, fall through for now */
4144 case 0xc: /* INTMS */
4145 if (unlikely(msix_enabled(&(n
->parent_obj
)))) {
4146 NVME_GUEST_ERR(pci_nvme_ub_mmiowr_intmask_with_msix
,
4147 "undefined access to interrupt mask set"
4148 " when MSI-X is enabled");
4149 /* should be ignored, fall through for now */
4151 n
->bar
.intms
|= data
& 0xffffffff;
4152 n
->bar
.intmc
= n
->bar
.intms
;
4153 trace_pci_nvme_mmio_intm_set(data
& 0xffffffff, n
->bar
.intmc
);
4156 case 0x10: /* INTMC */
4157 if (unlikely(msix_enabled(&(n
->parent_obj
)))) {
4158 NVME_GUEST_ERR(pci_nvme_ub_mmiowr_intmask_with_msix
,
4159 "undefined access to interrupt mask clr"
4160 " when MSI-X is enabled");
4161 /* should be ignored, fall through for now */
4163 n
->bar
.intms
&= ~(data
& 0xffffffff);
4164 n
->bar
.intmc
= n
->bar
.intms
;
4165 trace_pci_nvme_mmio_intm_clr(data
& 0xffffffff, n
->bar
.intmc
);
4169 trace_pci_nvme_mmio_cfg(data
& 0xffffffff);
4170 /* Windows first sends data, then sends enable bit */
4171 if (!NVME_CC_EN(data
) && !NVME_CC_EN(n
->bar
.cc
) &&
4172 !NVME_CC_SHN(data
) && !NVME_CC_SHN(n
->bar
.cc
))
4177 if (NVME_CC_EN(data
) && !NVME_CC_EN(n
->bar
.cc
)) {
4179 if (unlikely(nvme_start_ctrl(n
))) {
4180 trace_pci_nvme_err_startfail();
4181 n
->bar
.csts
= NVME_CSTS_FAILED
;
4183 trace_pci_nvme_mmio_start_success();
4184 n
->bar
.csts
= NVME_CSTS_READY
;
4186 } else if (!NVME_CC_EN(data
) && NVME_CC_EN(n
->bar
.cc
)) {
4187 trace_pci_nvme_mmio_stopped();
4189 n
->bar
.csts
&= ~NVME_CSTS_READY
;
4191 if (NVME_CC_SHN(data
) && !(NVME_CC_SHN(n
->bar
.cc
))) {
4192 trace_pci_nvme_mmio_shutdown_set();
4193 nvme_ctrl_shutdown(n
);
4195 n
->bar
.csts
|= NVME_CSTS_SHST_COMPLETE
;
4196 } else if (!NVME_CC_SHN(data
) && NVME_CC_SHN(n
->bar
.cc
)) {
4197 trace_pci_nvme_mmio_shutdown_cleared();
4198 n
->bar
.csts
&= ~NVME_CSTS_SHST_COMPLETE
;
4202 case 0x1C: /* CSTS */
4203 if (data
& (1 << 4)) {
4204 NVME_GUEST_ERR(pci_nvme_ub_mmiowr_ssreset_w1c_unsupported
,
4205 "attempted to W1C CSTS.NSSRO"
4206 " but CAP.NSSRS is zero (not supported)");
4207 } else if (data
!= 0) {
4208 NVME_GUEST_ERR(pci_nvme_ub_mmiowr_ro_csts
,
4209 "attempted to set a read only bit"
4210 " of controller status");
4213 case 0x20: /* NSSR */
4214 if (data
== 0x4E564D65) {
4215 trace_pci_nvme_ub_mmiowr_ssreset_unsupported();
4217 /* The spec says that writes of other values have no effect */
4221 case 0x24: /* AQA */
4222 n
->bar
.aqa
= data
& 0xffffffff;
4223 trace_pci_nvme_mmio_aqattr(data
& 0xffffffff);
4225 case 0x28: /* ASQ */
4226 n
->bar
.asq
= size
== 8 ? data
:
4227 (n
->bar
.asq
& ~0xffffffffULL
) | (data
& 0xffffffff);
4228 trace_pci_nvme_mmio_asqaddr(data
);
4230 case 0x2c: /* ASQ hi */
4231 n
->bar
.asq
= (n
->bar
.asq
& 0xffffffff) | (data
<< 32);
4232 trace_pci_nvme_mmio_asqaddr_hi(data
, n
->bar
.asq
);
4234 case 0x30: /* ACQ */
4235 trace_pci_nvme_mmio_acqaddr(data
);
4236 n
->bar
.acq
= size
== 8 ? data
:
4237 (n
->bar
.acq
& ~0xffffffffULL
) | (data
& 0xffffffff);
4239 case 0x34: /* ACQ hi */
4240 n
->bar
.acq
= (n
->bar
.acq
& 0xffffffff) | (data
<< 32);
4241 trace_pci_nvme_mmio_acqaddr_hi(data
, n
->bar
.acq
);
4243 case 0x38: /* CMBLOC */
4244 NVME_GUEST_ERR(pci_nvme_ub_mmiowr_cmbloc_reserved
,
4245 "invalid write to reserved CMBLOC"
4246 " when CMBSZ is zero, ignored");
4248 case 0x3C: /* CMBSZ */
4249 NVME_GUEST_ERR(pci_nvme_ub_mmiowr_cmbsz_readonly
,
4250 "invalid write to read only CMBSZ, ignored");
4252 case 0x50: /* CMBMSC */
4253 if (!NVME_CAP_CMBS(n
->bar
.cap
)) {
4257 n
->bar
.cmbmsc
= size
== 8 ? data
:
4258 (n
->bar
.cmbmsc
& ~0xffffffff) | (data
& 0xffffffff);
4259 n
->cmb
.cmse
= false;
4261 if (NVME_CMBMSC_CRE(data
)) {
4262 nvme_cmb_enable_regs(n
);
4264 if (NVME_CMBMSC_CMSE(data
)) {
4265 hwaddr cba
= NVME_CMBMSC_CBA(data
) << CMBMSC_CBA_SHIFT
;
4266 if (cba
+ int128_get64(n
->cmb
.mem
.size
) < cba
) {
4267 NVME_CMBSTS_SET_CBAI(n
->bar
.cmbsts
, 1);
4280 case 0x54: /* CMBMSC hi */
4281 n
->bar
.cmbmsc
= (n
->bar
.cmbmsc
& 0xffffffff) | (data
<< 32);
4284 case 0xE00: /* PMRCAP */
4285 NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrcap_readonly
,
4286 "invalid write to PMRCAP register, ignored");
4288 case 0xE04: /* PMRCTL */
4289 n
->bar
.pmrctl
= data
;
4290 if (NVME_PMRCTL_EN(data
)) {
4291 memory_region_set_enabled(&n
->pmr
.dev
->mr
, true);
4294 memory_region_set_enabled(&n
->pmr
.dev
->mr
, false);
4295 NVME_PMRSTS_SET_NRDY(n
->bar
.pmrsts
, 1);
4296 n
->pmr
.cmse
= false;
4299 case 0xE08: /* PMRSTS */
4300 NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrsts_readonly
,
4301 "invalid write to PMRSTS register, ignored");
4303 case 0xE0C: /* PMREBS */
4304 NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrebs_readonly
,
4305 "invalid write to PMREBS register, ignored");
4307 case 0xE10: /* PMRSWTP */
4308 NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrswtp_readonly
,
4309 "invalid write to PMRSWTP register, ignored");
4311 case 0xE14: /* PMRMSCL */
4312 if (!NVME_CAP_PMRS(n
->bar
.cap
)) {
4316 n
->bar
.pmrmsc
= (n
->bar
.pmrmsc
& ~0xffffffff) | (data
& 0xffffffff);
4317 n
->pmr
.cmse
= false;
4319 if (NVME_PMRMSC_CMSE(n
->bar
.pmrmsc
)) {
4320 hwaddr cba
= NVME_PMRMSC_CBA(n
->bar
.pmrmsc
) << PMRMSC_CBA_SHIFT
;
4321 if (cba
+ int128_get64(n
->pmr
.dev
->mr
.size
) < cba
) {
4322 NVME_PMRSTS_SET_CBAI(n
->bar
.pmrsts
, 1);
4331 case 0xE18: /* PMRMSCU */
4332 if (!NVME_CAP_PMRS(n
->bar
.cap
)) {
4336 n
->bar
.pmrmsc
= (n
->bar
.pmrmsc
& 0xffffffff) | (data
<< 32);
4339 NVME_GUEST_ERR(pci_nvme_ub_mmiowr_invalid
,
4340 "invalid MMIO write,"
4341 " offset=0x%"PRIx64
", data=%"PRIx64
"",
4347 static uint64_t nvme_mmio_read(void *opaque
, hwaddr addr
, unsigned size
)
4349 NvmeCtrl
*n
= (NvmeCtrl
*)opaque
;
4350 uint8_t *ptr
= (uint8_t *)&n
->bar
;
4353 trace_pci_nvme_mmio_read(addr
, size
);
4355 if (unlikely(addr
& (sizeof(uint32_t) - 1))) {
4356 NVME_GUEST_ERR(pci_nvme_ub_mmiord_misaligned32
,
4357 "MMIO read not 32-bit aligned,"
4358 " offset=0x%"PRIx64
"", addr
);
4359 /* should RAZ, fall through for now */
4360 } else if (unlikely(size
< sizeof(uint32_t))) {
4361 NVME_GUEST_ERR(pci_nvme_ub_mmiord_toosmall
,
4362 "MMIO read smaller than 32-bits,"
4363 " offset=0x%"PRIx64
"", addr
);
4364 /* should RAZ, fall through for now */
4367 if (addr
< sizeof(n
->bar
)) {
4369 * When PMRWBM bit 1 is set then read from
4370 * from PMRSTS should ensure prior writes
4371 * made it to persistent media
4373 if (addr
== 0xE08 &&
4374 (NVME_PMRCAP_PMRWBM(n
->bar
.pmrcap
) & 0x02)) {
4375 memory_region_msync(&n
->pmr
.dev
->mr
, 0, n
->pmr
.dev
->size
);
4377 memcpy(&val
, ptr
+ addr
, size
);
4379 NVME_GUEST_ERR(pci_nvme_ub_mmiord_invalid_ofs
,
4380 "MMIO read beyond last register,"
4381 " offset=0x%"PRIx64
", returning 0", addr
);
4387 static void nvme_process_db(NvmeCtrl
*n
, hwaddr addr
, int val
)
4391 if (unlikely(addr
& ((1 << 2) - 1))) {
4392 NVME_GUEST_ERR(pci_nvme_ub_db_wr_misaligned
,
4393 "doorbell write not 32-bit aligned,"
4394 " offset=0x%"PRIx64
", ignoring", addr
);
4398 if (((addr
- 0x1000) >> 2) & 1) {
4399 /* Completion queue doorbell write */
4401 uint16_t new_head
= val
& 0xffff;
4405 qid
= (addr
- (0x1000 + (1 << 2))) >> 3;
4406 if (unlikely(nvme_check_cqid(n
, qid
))) {
4407 NVME_GUEST_ERR(pci_nvme_ub_db_wr_invalid_cq
,
4408 "completion queue doorbell write"
4409 " for nonexistent queue,"
4410 " sqid=%"PRIu32
", ignoring", qid
);
4413 * NVM Express v1.3d, Section 4.1 state: "If host software writes
4414 * an invalid value to the Submission Queue Tail Doorbell or
4415 * Completion Queue Head Doorbell regiter and an Asynchronous Event
4416 * Request command is outstanding, then an asynchronous event is
4417 * posted to the Admin Completion Queue with a status code of
4418 * Invalid Doorbell Write Value."
4420 * Also note that the spec includes the "Invalid Doorbell Register"
4421 * status code, but nowhere does it specify when to use it.
4422 * However, it seems reasonable to use it here in a similar
4425 if (n
->outstanding_aers
) {
4426 nvme_enqueue_event(n
, NVME_AER_TYPE_ERROR
,
4427 NVME_AER_INFO_ERR_INVALID_DB_REGISTER
,
4428 NVME_LOG_ERROR_INFO
);
4435 if (unlikely(new_head
>= cq
->size
)) {
4436 NVME_GUEST_ERR(pci_nvme_ub_db_wr_invalid_cqhead
,
4437 "completion queue doorbell write value"
4438 " beyond queue size, sqid=%"PRIu32
","
4439 " new_head=%"PRIu16
", ignoring",
4442 if (n
->outstanding_aers
) {
4443 nvme_enqueue_event(n
, NVME_AER_TYPE_ERROR
,
4444 NVME_AER_INFO_ERR_INVALID_DB_VALUE
,
4445 NVME_LOG_ERROR_INFO
);
4451 trace_pci_nvme_mmio_doorbell_cq(cq
->cqid
, new_head
);
4453 start_sqs
= nvme_cq_full(cq
) ? 1 : 0;
4454 cq
->head
= new_head
;
4457 QTAILQ_FOREACH(sq
, &cq
->sq_list
, entry
) {
4458 timer_mod(sq
->timer
, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL
) + 500);
4460 timer_mod(cq
->timer
, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL
) + 500);
4463 if (cq
->tail
== cq
->head
) {
4464 nvme_irq_deassert(n
, cq
);
4467 /* Submission queue doorbell write */
4469 uint16_t new_tail
= val
& 0xffff;
4472 qid
= (addr
- 0x1000) >> 3;
4473 if (unlikely(nvme_check_sqid(n
, qid
))) {
4474 NVME_GUEST_ERR(pci_nvme_ub_db_wr_invalid_sq
,
4475 "submission queue doorbell write"
4476 " for nonexistent queue,"
4477 " sqid=%"PRIu32
", ignoring", qid
);
4479 if (n
->outstanding_aers
) {
4480 nvme_enqueue_event(n
, NVME_AER_TYPE_ERROR
,
4481 NVME_AER_INFO_ERR_INVALID_DB_REGISTER
,
4482 NVME_LOG_ERROR_INFO
);
4489 if (unlikely(new_tail
>= sq
->size
)) {
4490 NVME_GUEST_ERR(pci_nvme_ub_db_wr_invalid_sqtail
,
4491 "submission queue doorbell write value"
4492 " beyond queue size, sqid=%"PRIu32
","
4493 " new_tail=%"PRIu16
", ignoring",
4496 if (n
->outstanding_aers
) {
4497 nvme_enqueue_event(n
, NVME_AER_TYPE_ERROR
,
4498 NVME_AER_INFO_ERR_INVALID_DB_VALUE
,
4499 NVME_LOG_ERROR_INFO
);
4505 trace_pci_nvme_mmio_doorbell_sq(sq
->sqid
, new_tail
);
4507 sq
->tail
= new_tail
;
4508 timer_mod(sq
->timer
, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL
) + 500);
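/*
 * Doorbell decoding in nvme_process_db() above: doorbells start at BAR0
 * offset 0x1000 with a 4-byte stride (i.e. CAP.DSTRD = 0), SQ tail then CQ
 * head per queue pair. For example, 0x1000 is the admin SQ tail doorbell,
 * 0x1004 the admin CQ head doorbell, and 0x1008/0x100c the doorbells of I/O
 * queue pair 1.
 */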
4512 static void nvme_mmio_write(void *opaque
, hwaddr addr
, uint64_t data
,
4515 NvmeCtrl
*n
= (NvmeCtrl
*)opaque
;
4517 trace_pci_nvme_mmio_write(addr
, data
, size
);
4519 if (addr
< sizeof(n
->bar
)) {
4520 nvme_write_bar(n
, addr
, data
, size
);
4522 nvme_process_db(n
, addr
, data
);
4526 static const MemoryRegionOps nvme_mmio_ops
= {
4527 .read
= nvme_mmio_read
,
4528 .write
= nvme_mmio_write
,
4529 .endianness
= DEVICE_LITTLE_ENDIAN
,
4531 .min_access_size
= 2,
4532 .max_access_size
= 8,
4536 static void nvme_cmb_write(void *opaque
, hwaddr addr
, uint64_t data
,
4539 NvmeCtrl
*n
= (NvmeCtrl
*)opaque
;
4540 stn_le_p(&n
->cmb
.buf
[addr
], size
, data
);
4543 static uint64_t nvme_cmb_read(void *opaque
, hwaddr addr
, unsigned size
)
4545 NvmeCtrl
*n
= (NvmeCtrl
*)opaque
;
4546 return ldn_le_p(&n
->cmb
.buf
[addr
], size
);
4549 static const MemoryRegionOps nvme_cmb_ops
= {
4550 .read
= nvme_cmb_read
,
4551 .write
= nvme_cmb_write
,
4552 .endianness
= DEVICE_LITTLE_ENDIAN
,
4554 .min_access_size
= 1,
4555 .max_access_size
= 8,
4559 static void nvme_check_constraints(NvmeCtrl
*n
, Error
**errp
)
4561 NvmeParams
*params
= &n
->params
;
4563 if (params
->num_queues
) {
4564 warn_report("num_queues is deprecated; please use max_ioqpairs "
4567 params
->max_ioqpairs
= params
->num_queues
- 1;
4571 warn_report("drive property is deprecated; "
4572 "please use an nvme-ns device instead");
4575 if (params
->max_ioqpairs
< 1 ||
4576 params
->max_ioqpairs
> NVME_MAX_IOQPAIRS
) {
4577 error_setg(errp
, "max_ioqpairs must be between 1 and %d",
4582 if (params
->msix_qsize
< 1 ||
4583 params
->msix_qsize
> PCI_MSIX_FLAGS_QSIZE
+ 1) {
4584 error_setg(errp
, "msix_qsize must be between 1 and %d",
4585 PCI_MSIX_FLAGS_QSIZE
+ 1);
4589 if (!params
->serial
) {
4590 error_setg(errp
, "serial property not set");
4595 if (host_memory_backend_is_mapped(n
->pmr
.dev
)) {
4596 error_setg(errp
, "can't use already busy memdev: %s",
4597 object_get_canonical_path_component(OBJECT(n
->pmr
.dev
)));
4601 if (!is_power_of_2(n
->pmr
.dev
->size
)) {
4602 error_setg(errp
, "pmr backend size needs to be power of 2 in size");
4606 host_memory_backend_set_mapped(n
->pmr
.dev
, true);
4609 if (n
->params
.zasl
> n
->params
.mdts
) {
4610 error_setg(errp
, "zoned.zasl (Zone Append Size Limit) must be less "
4611 "than or equal to mdts (Maximum Data Transfer Size)");
4616 static void nvme_init_state(NvmeCtrl
*n
)
4618 n
->num_namespaces
= NVME_MAX_NAMESPACES
;
4619 /* add one to max_ioqpairs to account for the admin queue pair */
4620 n
->reg_size
= pow2ceil(sizeof(NvmeBar
) +
4621 2 * (n
->params
.max_ioqpairs
+ 1) * NVME_DB_SIZE
);
4622 n
->sq
= g_new0(NvmeSQueue
*, n
->params
.max_ioqpairs
+ 1);
4623 n
->cq
= g_new0(NvmeCQueue
*, n
->params
.max_ioqpairs
+ 1);
4624 n
->temperature
= NVME_TEMPERATURE
;
4625 n
->features
.temp_thresh_hi
= NVME_TEMPERATURE_WARNING
;
4626 n
->starttime_ms
= qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL
);
4627 n
->aer_reqs
= g_new0(NvmeRequest
*, n
->params
.aerl
+ 1);
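/*
 * The register region sized in nvme_init_state() covers the fixed NvmeBar
 * registers plus one submission queue tail and one completion queue head
 * doorbell (NVME_DB_SIZE bytes each) per queue pair, including the admin
 * pair, rounded up to a power of two.
 */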
4630 int nvme_register_namespace(NvmeCtrl
*n
, NvmeNamespace
*ns
, Error
**errp
)
4632 uint32_t nsid
= nvme_nsid(ns
);
4634 if (nsid
> NVME_MAX_NAMESPACES
) {
4635 error_setg(errp
, "invalid namespace id (must be between 0 and %d)",
4636 NVME_MAX_NAMESPACES
);
4641 for (int i
= 1; i
<= n
->num_namespaces
; i
++) {
4642 if (!nvme_ns(n
, i
)) {
4643 nsid
= ns
->params
.nsid
= i
;
4649 error_setg(errp
, "no free namespace id");
4653 if (n
->namespaces
[nsid
- 1]) {
4654 error_setg(errp
, "namespace id '%d' is already in use", nsid
);
4659 trace_pci_nvme_register_namespace(nsid
);
4661 n
->namespaces
[nsid
- 1] = ns
;
4663 n
->dmrsl
= MIN_NON_ZERO(n
->dmrsl
,
4664 BDRV_REQUEST_MAX_BYTES
/ nvme_l2b(ns
, 1));
4669 static void nvme_init_cmb(NvmeCtrl
*n
, PCIDevice
*pci_dev
)
4671 uint64_t cmb_size
= n
->params
.cmb_size_mb
* MiB
;
4673 n
->cmb
.buf
= g_malloc0(cmb_size
);
4674 memory_region_init_io(&n
->cmb
.mem
, OBJECT(n
), &nvme_cmb_ops
, n
,
4675 "nvme-cmb", cmb_size
);
4676 pci_register_bar(pci_dev
, NVME_CMB_BIR
,
4677 PCI_BASE_ADDRESS_SPACE_MEMORY
|
4678 PCI_BASE_ADDRESS_MEM_TYPE_64
|
4679 PCI_BASE_ADDRESS_MEM_PREFETCH
, &n
->cmb
.mem
);
4681 NVME_CAP_SET_CMBS(n
->bar
.cap
, 1);
4683 if (n
->params
.legacy_cmb
) {
4684 nvme_cmb_enable_regs(n
);
4689 static void nvme_init_pmr(NvmeCtrl
*n
, PCIDevice
*pci_dev
)
4691 NVME_PMRCAP_SET_RDS(n
->bar
.pmrcap
, 1);
4692 NVME_PMRCAP_SET_WDS(n
->bar
.pmrcap
, 1);
4693 NVME_PMRCAP_SET_BIR(n
->bar
.pmrcap
, NVME_PMR_BIR
);
4694 /* Turn on bit 1 support */
4695 NVME_PMRCAP_SET_PMRWBM(n
->bar
.pmrcap
, 0x02);
4696 NVME_PMRCAP_SET_CMSS(n
->bar
.pmrcap
, 1);
4698 pci_register_bar(pci_dev
, NVME_PMRCAP_BIR(n
->bar
.pmrcap
),
4699 PCI_BASE_ADDRESS_SPACE_MEMORY
|
4700 PCI_BASE_ADDRESS_MEM_TYPE_64
|
4701 PCI_BASE_ADDRESS_MEM_PREFETCH
, &n
->pmr
.dev
->mr
);
4703 memory_region_set_enabled(&n
->pmr
.dev
->mr
, false);
4706 static int nvme_init_pci(NvmeCtrl
*n
, PCIDevice
*pci_dev
, Error
**errp
)
4708 uint8_t *pci_conf
= pci_dev
->config
;
4709 uint64_t bar_size
, msix_table_size
, msix_pba_size
;
4710 unsigned msix_table_offset
, msix_pba_offset
;
4715 pci_conf
[PCI_INTERRUPT_PIN
] = 1;
4716 pci_config_set_prog_interface(pci_conf
, 0x2);
4718 if (n
->params
.use_intel_id
) {
4719 pci_config_set_vendor_id(pci_conf
, PCI_VENDOR_ID_INTEL
);
4720 pci_config_set_device_id(pci_conf
, 0x5845);
4722 pci_config_set_vendor_id(pci_conf
, PCI_VENDOR_ID_REDHAT
);
4723 pci_config_set_device_id(pci_conf
, PCI_DEVICE_ID_REDHAT_NVME
);
4726 pci_config_set_class(pci_conf
, PCI_CLASS_STORAGE_EXPRESS
);
4727 pcie_endpoint_cap_init(pci_dev
, 0x80);
4729 bar_size
= QEMU_ALIGN_UP(n
->reg_size
, 4 * KiB
);
4730 msix_table_offset
= bar_size
;
4731 msix_table_size
= PCI_MSIX_ENTRY_SIZE
* n
->params
.msix_qsize
;
4733 bar_size
+= msix_table_size
;
4734 bar_size
= QEMU_ALIGN_UP(bar_size
, 4 * KiB
);
4735 msix_pba_offset
= bar_size
;
4736 msix_pba_size
= QEMU_ALIGN_UP(n
->params
.msix_qsize
, 64) / 8;
4738 bar_size
+= msix_pba_size
;
4739 bar_size
= pow2ceil(bar_size
);
    memory_region_init(&n->bar0, OBJECT(n), "nvme-bar0", bar_size);
    memory_region_init_io(&n->iomem, OBJECT(n), &nvme_mmio_ops, n, "nvme",
                          n->reg_size);
    memory_region_add_subregion(&n->bar0, 0, &n->iomem);

    pci_register_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY |
                     PCI_BASE_ADDRESS_MEM_TYPE_64, &n->bar0);

    ret = msix_init(pci_dev, n->params.msix_qsize,
                    &n->bar0, 0, msix_table_offset,
                    &n->bar0, 0, msix_pba_offset, 0, &err);
    if (ret < 0) {
        if (ret == -ENOTSUP) {
            warn_report_err(err);
        } else {
            error_propagate(errp, err);
            return ret;
        }
    }

    if (n->params.cmb_size_mb) {
        nvme_init_cmb(n, pci_dev);
    }

    if (n->pmr.dev) {
        nvme_init_pmr(n, pci_dev);
    }

    return 0;
}

static void nvme_init_subnqn(NvmeCtrl *n)
{
    NvmeSubsystem *subsys = n->subsys;
    NvmeIdCtrl *id = &n->id_ctrl;

    if (!subsys) {
        snprintf((char *)id->subnqn, sizeof(id->subnqn),
                 "nqn.2019-08.org.qemu:%s", n->params.serial);
    } else {
        pstrcpy((char *)id->subnqn, sizeof(id->subnqn), (char*)subsys->subnqn);
    }
}

static void nvme_init_ctrl(NvmeCtrl *n, PCIDevice *pci_dev)
{
    NvmeIdCtrl *id = &n->id_ctrl;
    uint8_t *pci_conf = pci_dev->config;

    id->vid = cpu_to_le16(pci_get_word(pci_conf + PCI_VENDOR_ID));
    id->ssvid = cpu_to_le16(pci_get_word(pci_conf + PCI_SUBSYSTEM_VENDOR_ID));
    strpadcpy((char *)id->mn, sizeof(id->mn), "QEMU NVMe Ctrl", ' ');
    strpadcpy((char *)id->fr, sizeof(id->fr), "1.0", ' ');
    strpadcpy((char *)id->sn, sizeof(id->sn), n->params.serial, ' ');

    id->cntlid = cpu_to_le16(n->cntlid);

    if (n->params.use_intel_id) {
        id->ieee[0] = 0xb3;
        id->ieee[1] = 0x02;
        id->ieee[2] = 0x00;
    } else {
        id->ieee[0] = 0x00;
        id->ieee[1] = 0x54;
        id->ieee[2] = 0x52;
    }

    id->mdts = n->params.mdts;
    id->ver = cpu_to_le32(NVME_SPEC_VER);
    id->oacs = cpu_to_le16(0);
    id->cntrltype = 0x1;

    /*
     * Because the controller always completes the Abort command immediately,
     * there can never be more than one concurrently executing Abort command,
     * so this value is never used for anything. Note that there can easily be
     * many Abort commands in the queues, but they are not considered
     * "executing" until processed by nvme_abort.
     *
     * The specification recommends a value of 3 for Abort Command Limit (four
     * concurrently outstanding Abort commands), so let's use that though it is
     * inconsequential.
     */
    id->acl = 3;
    id->aerl = n->params.aerl;
    id->frmw = (NVME_NUM_FW_SLOTS << 1) | NVME_FRMW_SLOT1_RO;
    id->lpa = NVME_LPA_NS_SMART | NVME_LPA_CSE | NVME_LPA_EXTENDED;

    /* recommended default value (~70 C) */
    id->wctemp = cpu_to_le16(NVME_TEMPERATURE_WARNING);
    id->cctemp = cpu_to_le16(NVME_TEMPERATURE_CRITICAL);

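    /*
     * SQES/CQES encode the maximum (upper nibble) and required minimum (lower
     * nibble) queue entry sizes as powers of two: 2^6 = 64-byte submission
     * queue entries and 2^4 = 16-byte completion queue entries.
     */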
    id->sqes = (0x6 << 4) | 0x6;
    id->cqes = (0x4 << 4) | 0x4;
    id->nn = cpu_to_le32(n->num_namespaces);
    id->oncs = cpu_to_le16(NVME_ONCS_WRITE_ZEROES | NVME_ONCS_TIMESTAMP |
                           NVME_ONCS_FEATURES | NVME_ONCS_DSM |
                           NVME_ONCS_COMPARE | NVME_ONCS_COPY);

    /*
     * NOTE: If this device ever supports a command set that does NOT use 0x0
     * as a Flush-equivalent operation, support for the broadcast NSID in Flush
     * should probably be removed.
     *
     * See comment in nvme_io_cmd.
     */
    id->vwc = NVME_VWC_NSID_BROADCAST_SUPPORT | NVME_VWC_PRESENT;

    id->ocfs = cpu_to_le16(NVME_OCFS_COPY_FORMAT_0);
    id->sgls = cpu_to_le32(NVME_CTRL_SGLS_SUPPORT_NO_ALIGN |
                           NVME_CTRL_SGLS_BITBUCKET);

    nvme_init_subnqn(n);

    id->psd[0].mp = cpu_to_le16(0x9c4);
    id->psd[0].enlat = cpu_to_le32(0x10);
    id->psd[0].exlat = cpu_to_le32(0x4);

    if (n->subsys) {
        id->cmic |= NVME_CMIC_MULTI_CTRL;
    }

    NVME_CAP_SET_MQES(n->bar.cap, 0x7ff);
    NVME_CAP_SET_CQR(n->bar.cap, 1);
    NVME_CAP_SET_TO(n->bar.cap, 0xf);
    NVME_CAP_SET_CSS(n->bar.cap, NVME_CAP_CSS_NVM);
    NVME_CAP_SET_CSS(n->bar.cap, NVME_CAP_CSS_CSI_SUPP);
    NVME_CAP_SET_CSS(n->bar.cap, NVME_CAP_CSS_ADMIN_ONLY);
    NVME_CAP_SET_MPSMAX(n->bar.cap, 4);
    NVME_CAP_SET_CMBS(n->bar.cap, n->params.cmb_size_mb ? 1 : 0);
    NVME_CAP_SET_PMRS(n->bar.cap, n->pmr.dev ? 1 : 0);

    n->bar.vs = NVME_SPEC_VER;
    n->bar.intmc = n->bar.intms = 0;
}

static int nvme_init_subsys(NvmeCtrl *n, Error **errp)
{
    int cntlid;

    if (!n->subsys) {
        return 0;
    }

    cntlid = nvme_subsys_register_ctrl(n, errp);
    if (cntlid < 0) {
        return -1;
    }

    n->cntlid = cntlid;

    return 0;
}

static void nvme_realize(PCIDevice *pci_dev, Error **errp)
{
    NvmeCtrl *n = NVME(pci_dev);
    NvmeNamespace *ns;
    Error *local_err = NULL;

    nvme_check_constraints(n, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    qbus_create_inplace(&n->bus, sizeof(NvmeBus), TYPE_NVME_BUS,
                        &pci_dev->qdev, n->parent_obj.qdev.id);

    nvme_init_state(n);
    if (nvme_init_pci(n, pci_dev, errp)) {
        return;
    }

    if (nvme_init_subsys(n, errp)) {
        error_propagate(errp, local_err);
        return;
    }
    nvme_init_ctrl(n, pci_dev);

    /* setup a namespace if the controller drive property was given */
    if (n->namespace.blkconf.blk) {
        ns = &n->namespace;
        ns->params.nsid = 1;

        if (nvme_ns_setup(ns, errp)) {
            return;
        }

        if (nvme_register_namespace(n, ns, errp)) {
            return;
        }
    }
}

static void nvme_exit(PCIDevice *pci_dev)
{
    NvmeCtrl *n = NVME(pci_dev);
    NvmeNamespace *ns;
    int i;

    for (i = 1; i <= n->num_namespaces; i++) {
        ns = nvme_ns(n, i);
        if (!ns) {
            continue;
        }

        nvme_ns_cleanup(ns);
    }

    g_free(n->aer_reqs);

    if (n->params.cmb_size_mb) {
        g_free(n->cmb.buf);
    }

    if (n->pmr.dev) {
        host_memory_backend_set_mapped(n->pmr.dev, false);
    }
    msix_uninit_exclusive_bar(pci_dev);
}

static Property nvme_props[] = {
    DEFINE_BLOCK_PROPERTIES(NvmeCtrl, namespace.blkconf),
    DEFINE_PROP_LINK("pmrdev", NvmeCtrl, pmr.dev, TYPE_MEMORY_BACKEND,
                     HostMemoryBackend *),
    DEFINE_PROP_LINK("subsys", NvmeCtrl, subsys, TYPE_NVME_SUBSYS,
                     NvmeSubsystem *),
    DEFINE_PROP_STRING("serial", NvmeCtrl, params.serial),
    DEFINE_PROP_UINT32("cmb_size_mb", NvmeCtrl, params.cmb_size_mb, 0),
    DEFINE_PROP_UINT32("num_queues", NvmeCtrl, params.num_queues, 0),
    DEFINE_PROP_UINT32("max_ioqpairs", NvmeCtrl, params.max_ioqpairs, 64),
    DEFINE_PROP_UINT16("msix_qsize", NvmeCtrl, params.msix_qsize, 65),
    DEFINE_PROP_UINT8("aerl", NvmeCtrl, params.aerl, 3),
    DEFINE_PROP_UINT32("aer_max_queued", NvmeCtrl, params.aer_max_queued, 64),
    DEFINE_PROP_UINT8("mdts", NvmeCtrl, params.mdts, 7),
    DEFINE_PROP_BOOL("use-intel-id", NvmeCtrl, params.use_intel_id, false),
    DEFINE_PROP_BOOL("legacy-cmb", NvmeCtrl, params.legacy_cmb, false),
    DEFINE_PROP_UINT8("zoned.zasl", NvmeCtrl, params.zasl, 0),
    DEFINE_PROP_END_OF_LIST(),
};

static void nvme_get_smart_warning(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    NvmeCtrl *n = NVME(obj);
    uint8_t value = n->smart_critical_warning;

    visit_type_uint8(v, name, &value, errp);
}

static void nvme_set_smart_warning(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    NvmeCtrl *n = NVME(obj);
    uint8_t value, old_value, cap = 0, index, event;

    if (!visit_type_uint8(v, name, &value, errp)) {
        return;
    }

    cap = NVME_SMART_SPARE | NVME_SMART_TEMPERATURE | NVME_SMART_RELIABILITY
          | NVME_SMART_MEDIA_READ_ONLY | NVME_SMART_FAILED_VOLATILE_MEDIA;
    if (NVME_CAP_PMRS(n->bar.cap)) {
        cap |= NVME_SMART_PMR_UNRELIABLE;
    }

    if ((value & cap) != value) {
        error_setg(errp, "unsupported smart critical warning bits: 0x%x",
                   value & ~cap);
        return;
    }

    old_value = n->smart_critical_warning;
    n->smart_critical_warning = value;

    /* only inject new bits of smart critical warning */
    for (index = 0; index < NVME_SMART_WARN_MAX; index++) {
        event = 1 << index;
        if (value & ~old_value & event)
            nvme_smart_event(n, event);
    }
}

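/*
 * The smart_critical_warning QOM property backed by these accessors is meant
 * to be manipulated at run time to inject SMART events. For example, assuming
 * a controller created with id=nvme0, a temperature warning (bit 1 of the
 * critical warning byte) could be injected from the monitor with:
 *
 *   (qemu) qom-set /machine/peripheral/nvme0 smart_critical_warning 2
 */
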
static const VMStateDescription nvme_vmstate = {
    .name = "nvme",
    .unmigratable = 1,
};

static void nvme_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);
    PCIDeviceClass *pc = PCI_DEVICE_CLASS(oc);

    pc->realize = nvme_realize;
    pc->exit = nvme_exit;
    pc->class_id = PCI_CLASS_STORAGE_EXPRESS;

    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
    dc->desc = "Non-Volatile Memory Express";
    device_class_set_props(dc, nvme_props);
    dc->vmsd = &nvme_vmstate;
}

static void nvme_instance_init(Object *obj)
{
    NvmeCtrl *n = NVME(obj);

    if (n->namespace.blkconf.blk) {
        device_add_bootindex_property(obj, &n->namespace.blkconf.bootindex,
                                      "bootindex", "/namespace@1,0",
                                      DEVICE(obj));
    }

    object_property_add(obj, "smart_critical_warning", "uint8",
                        nvme_get_smart_warning,
                        nvme_set_smart_warning, NULL, NULL);
}

static const TypeInfo nvme_info = {
    .name = TYPE_NVME,
    .parent = TYPE_PCI_DEVICE,
    .instance_size = sizeof(NvmeCtrl),
    .instance_init = nvme_instance_init,
    .class_init = nvme_class_init,
    .interfaces = (InterfaceInfo[]) {
        { INTERFACE_PCIE_DEVICE },
        { }
    },
};

static const TypeInfo nvme_bus_info = {
    .name = TYPE_NVME_BUS,
    .parent = TYPE_BUS,
    .instance_size = sizeof(NvmeBus),
};

static void nvme_register_types(void)
{
    type_register_static(&nvme_info);
    type_register_static(&nvme_bus_info);
}

type_init(nvme_register_types)