/*
 * QEMU NVM Express Controller
 *
 * Copyright (c) 2012, Intel Corporation
 *
 * Written by Keith Busch <keith.busch@intel.com>
 *
 * This code is licensed under the GNU GPL v2 or later.
 *
 * Reference Specs: http://www.nvmexpress.org, 1.4, 1.3, 1.2, 1.1, 1.0e
 *
 *   https://nvmexpress.org/developers/nvme-specification/
 *
 * Notes on coding style
 * ---------------------
 * While QEMU coding style prefers lowercase hexadecimals in constants, the
 * NVMe subsystem uses the format from the NVMe specifications in the comments
 * (i.e. 'h' suffix instead of '0x' prefix).
 *
 * Usage
 * -----
 * See docs/system/nvme.rst for extensive documentation.
 *
 * Add options:
 *      -drive file=<file>,if=none,id=<drive_id>
 *      -device nvme-subsys,id=<subsys_id>,nqn=<nqn_id>
 *      -device nvme,serial=<serial>,id=<bus_name>, \
 *              cmb_size_mb=<cmb_size_mb[optional]>, \
 *              [pmrdev=<mem_backend_file_id>,] \
 *              max_ioqpairs=<N[optional]>, \
 *              aerl=<N[optional]>,aer_max_queued=<N[optional]>, \
 *              mdts=<N[optional]>,vsl=<N[optional]>, \
 *              zoned.zasl=<N[optional]>, \
 *              zoned.auto_transition=<on|off[optional]>, \
 *              sriov_max_vfs=<N[optional]> \
 *              sriov_vq_flexible=<N[optional]> \
 *              sriov_vi_flexible=<N[optional]> \
 *              sriov_max_vi_per_vf=<N[optional]> \
 *              sriov_max_vq_per_vf=<N[optional]> \
 *              subsys=<subsys_id>
 *      -device nvme-ns,drive=<drive_id>,bus=<bus_name>,nsid=<nsid>,\
 *              zoned=<true|false[optional]>, \
 *              subsys=<subsys_id>,shared=<true|false[optional]>, \
 *              detached=<true|false[optional]>, \
 *              zoned.zone_size=<N[optional]>, \
 *              zoned.zone_capacity=<N[optional]>, \
 *              zoned.descr_ext_size=<N[optional]>, \
 *              zoned.max_active=<N[optional]>, \
 *              zoned.max_open=<N[optional]>, \
 *              zoned.cross_read=<true|false[optional]>
 *
 * Note cmb_size_mb denotes size of CMB in MB. CMB is assumed to be at
 * offset 0 in BAR2 and supports only WDS, RDS and SQS for now. By default, the
 * device will use the "v1.4 CMB scheme" - use the `legacy-cmb` parameter to
 * always enable the CMBLOC and CMBSZ registers (v1.3 behavior).
 *
 * Enabling pmr emulation can be achieved by pointing to memory-backend-file.
 * For example:
 * -object memory-backend-file,id=<mem_id>,share=on,mem-path=<file_path>, \
 *  size=<size> .... -device nvme,...,pmrdev=<mem_id>
 *
 * The PMR will use BAR 4/5 exclusively.
 *
 * To place controller(s) and namespace(s) in a subsystem, provide the
 * nvme-subsys device as above.
 *
 * nvme subsystem device parameters
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 * - `nqn`
 *   This parameter provides the `<nqn_id>` part of the string
 *   `nqn.2019-08.org.qemu:<nqn_id>` which will be reported in the SUBNQN field
 *   of subsystem controllers. Note that `<nqn_id>` should be unique per
 *   subsystem, but this is not enforced by QEMU. If not specified, it will
 *   default to the value of the `id` parameter (`<subsys_id>`).
 *
 * nvme device parameters
 * ~~~~~~~~~~~~~~~~~~~~~~
 * - `subsys`
 *   Specifying this parameter attaches the controller to the subsystem and
 *   the SUBNQN field in the controller will report the NQN of the subsystem
 *   device. This also enables the multi-controller capability represented in
 *   the Identify Controller data structure in CMIC (Controller Multi-path I/O
 *   and Namespace Sharing Capabilities).
 *
 * - `aerl`
 *   The Asynchronous Event Request Limit (AERL). Indicates the maximum number
 *   of concurrently outstanding Asynchronous Event Request commands supported
 *   by the controller. This is a 0's based value.
 *
 * - `aer_max_queued`
 *   This is the maximum number of events that the device will enqueue for
 *   completion when there are no outstanding AERs. When the maximum number of
 *   enqueued events is reached, subsequent events will be dropped.
 *
 * - `mdts`
 *   Indicates the maximum data transfer size for a command that transfers data
 *   between host-accessible memory and the controller. The value is specified
 *   as a power of two (2^n) and is in units of the minimum memory page size
 *   (CAP.MPSMIN). The default value is 7 (i.e. 512 KiB).
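 *
 *   As a worked example (assuming the default 4 KiB minimum memory page
 *   size), mdts=7 limits a single command to 2^7 * 4 KiB = 512 KiB of data.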
 *
 * - `vsl`
 *   Indicates the maximum data size limit for the Verify command. Like `mdts`,
 *   this value is specified as a power of two (2^n) and is in units of the
 *   minimum memory page size (CAP.MPSMIN). The default value is 7 (i.e. 512
 *   KiB).
 *
 * - `zoned.zasl`
 *   Indicates the maximum data transfer size for the Zone Append command. Like
 *   `mdts`, the value is specified as a power of two (2^n) and is in units of
 *   the minimum memory page size (CAP.MPSMIN). The default value is 0 (i.e.
 *   defaulting to the value of `mdts`).
 *
 * - `zoned.auto_transition`
 *   Indicates if zones in zone state implicitly opened can be automatically
 *   transitioned to zone state closed for resource management purposes.
 *   Defaults to 'on'.
 *
 * - `sriov_max_vfs`
 *   Indicates the maximum number of PCIe virtual functions supported
 *   by the controller. The default value is 0. Specifying a non-zero value
 *   enables reporting of both SR-IOV and ARI capabilities by the NVMe device.
 *   Virtual function controllers will not report SR-IOV capability.
 *
 *   NOTE: Single Root I/O Virtualization support is experimental.
 *   All the related parameters may be subject to change.
 *
 * - `sriov_vq_flexible`
 *   Indicates the total number of flexible queue resources assignable to all
 *   the secondary controllers. Implicitly sets the number of primary
 *   controller's private resources to `(max_ioqpairs - sriov_vq_flexible)`.
 *
 * - `sriov_vi_flexible`
 *   Indicates the total number of flexible interrupt resources assignable to
 *   all the secondary controllers. Implicitly sets the number of primary
 *   controller's private resources to `(msix_qsize - sriov_vi_flexible)`.
 *
 * - `sriov_max_vi_per_vf`
 *   Indicates the maximum number of virtual interrupt resources assignable
 *   to a secondary controller. The default 0 resolves to
 *   `(sriov_vi_flexible / sriov_max_vfs)`.
 *
 * - `sriov_max_vq_per_vf`
 *   Indicates the maximum number of virtual queue resources assignable to
 *   a secondary controller. The default 0 resolves to
 *   `(sriov_vq_flexible / sriov_max_vfs)`.
 *
 * nvme namespace device parameters
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 * - `shared`
 *   When the parent nvme device (as defined explicitly by the 'bus' parameter
 *   or implicitly by the most recently defined NvmeBus) is linked to an
 *   nvme-subsys device, the namespace will be attached to all controllers in
 *   the subsystem. If set to 'off' (the default), the namespace will remain a
 *   private namespace and may only be attached to a single controller at a
 *   time.
 *
 * - `detached`
 *   This parameter is only valid together with the `subsys` parameter. If left
 *   at the default value (`false/off`), the namespace will be attached to all
 *   controllers in the NVMe subsystem at boot-up. If set to `true/on`, the
 *   namespace will be available in the subsystem but not attached to any
 *   controllers.
 *
 * Setting `zoned` to true selects Zoned Command Set at the namespace.
 * In this case, the following namespace properties are available to configure
 * zoned operation:
 *     zoned.zone_size=<zone size in bytes, default: 128MiB>
 *         The number may be followed by K, M, G as in kilo-, mega- or giga-.
 *
 *     zoned.zone_capacity=<zone capacity in bytes, default: zone size>
 *         The value 0 (default) forces zone capacity to be the same as zone
 *         size. The value of this property may not exceed zone size.
 *
 *     zoned.descr_ext_size=<zone descriptor extension size, default 0>
 *         This value needs to be specified in 64B units. If it is zero,
 *         namespace(s) will not support zone descriptor extensions.
 *
 *     zoned.max_active=<Maximum Active Resources (zones), default: 0>
 *         The default value means there is no limit to the number of
 *         concurrently active zones.
 *
 *     zoned.max_open=<Maximum Open Resources (zones), default: 0>
 *         The default value means there is no limit to the number of
 *         concurrently open zones.
 *
 *     zoned.cross_read=<enable RAZB, default: false>
 *         Setting this property to true enables Read Across Zone Boundaries.
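 *
 * As an illustrative (not exhaustive) zoned configuration, the parameters
 * above could be combined as follows; the file name, ids and sizes are
 * placeholders chosen for the example:
 *
 *      -drive file=zns.img,if=none,id=nvm-1
 *      -device nvme,serial=deadbeef,id=nvme0
 *      -device nvme-ns,drive=nvm-1,bus=nvme0,nsid=1,zoned=true, \
 *              zoned.zone_size=64M,zoned.zone_capacity=48M, \
 *              zoned.max_active=16,zoned.max_open=8,zoned.cross_read=true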
 */

#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/error-report.h"
#include "qemu/log.h"
#include "qemu/units.h"
#include "qemu/range.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "sysemu/sysemu.h"
#include "sysemu/block-backend.h"
#include "sysemu/hostmem.h"
#include "hw/pci/msix.h"
#include "hw/pci/pcie_sriov.h"
#include "migration/vmstate.h"

#include "nvme.h"
#include "dif.h"
#include "trace.h"

#define NVME_MAX_IOQPAIRS 0xffff
#define NVME_DB_SIZE 4
#define NVME_SPEC_VER 0x00010400
#define NVME_CMB_BIR 2
#define NVME_PMR_BIR 4
#define NVME_TEMPERATURE 0x143
#define NVME_TEMPERATURE_WARNING 0x157
#define NVME_TEMPERATURE_CRITICAL 0x175
#define NVME_NUM_FW_SLOTS 1
#define NVME_DEFAULT_MAX_ZA_SIZE (128 * KiB)
#define NVME_MAX_VFS 127
#define NVME_VF_RES_GRANULARITY 1
#define NVME_VF_OFFSET 0x1
#define NVME_VF_STRIDE 1

#define NVME_GUEST_ERR(trace, fmt, ...) \
    do { \
        (trace_##trace)(__VA_ARGS__); \
        qemu_log_mask(LOG_GUEST_ERROR, #trace \
                      " in %s: " fmt "\n", __func__, ## __VA_ARGS__); \
    } while (0)
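
/*
 * For reference, NVME_GUEST_ERR pairs a trace event with a LOG_GUEST_ERROR
 * message. A typical invocation, mirroring its use further down in this
 * file, looks like:
 *
 *     NVME_GUEST_ERR(pci_nvme_ub_too_many_mappings,
 *                    "number of mappings exceed 1024");
 */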

static const bool nvme_feature_support[NVME_FID_MAX] = {
    [NVME_ARBITRATION]              = true,
    [NVME_POWER_MANAGEMENT]         = true,
    [NVME_TEMPERATURE_THRESHOLD]    = true,
    [NVME_ERROR_RECOVERY]           = true,
    [NVME_VOLATILE_WRITE_CACHE]     = true,
    [NVME_NUMBER_OF_QUEUES]         = true,
    [NVME_INTERRUPT_COALESCING]     = true,
    [NVME_INTERRUPT_VECTOR_CONF]    = true,
    [NVME_WRITE_ATOMICITY]          = true,
    [NVME_ASYNCHRONOUS_EVENT_CONF]  = true,
    [NVME_TIMESTAMP]                = true,
    [NVME_HOST_BEHAVIOR_SUPPORT]    = true,
    [NVME_COMMAND_SET_PROFILE]      = true,
    [NVME_FDP_MODE]                 = true,
    [NVME_FDP_EVENTS]               = true,
};

static const uint32_t nvme_feature_cap[NVME_FID_MAX] = {
    [NVME_TEMPERATURE_THRESHOLD]    = NVME_FEAT_CAP_CHANGE,
    [NVME_ERROR_RECOVERY]           = NVME_FEAT_CAP_CHANGE | NVME_FEAT_CAP_NS,
    [NVME_VOLATILE_WRITE_CACHE]     = NVME_FEAT_CAP_CHANGE,
    [NVME_NUMBER_OF_QUEUES]         = NVME_FEAT_CAP_CHANGE,
    [NVME_ASYNCHRONOUS_EVENT_CONF]  = NVME_FEAT_CAP_CHANGE,
    [NVME_TIMESTAMP]                = NVME_FEAT_CAP_CHANGE,
    [NVME_HOST_BEHAVIOR_SUPPORT]    = NVME_FEAT_CAP_CHANGE,
    [NVME_COMMAND_SET_PROFILE]      = NVME_FEAT_CAP_CHANGE,
    [NVME_FDP_MODE]                 = NVME_FEAT_CAP_CHANGE,
    [NVME_FDP_EVENTS]               = NVME_FEAT_CAP_CHANGE | NVME_FEAT_CAP_NS,
};

static const uint32_t nvme_cse_acs[256] = {
    [NVME_ADM_CMD_DELETE_SQ]        = NVME_CMD_EFF_CSUPP,
    [NVME_ADM_CMD_CREATE_SQ]        = NVME_CMD_EFF_CSUPP,
    [NVME_ADM_CMD_GET_LOG_PAGE]     = NVME_CMD_EFF_CSUPP,
    [NVME_ADM_CMD_DELETE_CQ]        = NVME_CMD_EFF_CSUPP,
    [NVME_ADM_CMD_CREATE_CQ]        = NVME_CMD_EFF_CSUPP,
    [NVME_ADM_CMD_IDENTIFY]         = NVME_CMD_EFF_CSUPP,
    [NVME_ADM_CMD_ABORT]            = NVME_CMD_EFF_CSUPP,
    [NVME_ADM_CMD_SET_FEATURES]     = NVME_CMD_EFF_CSUPP,
    [NVME_ADM_CMD_GET_FEATURES]     = NVME_CMD_EFF_CSUPP,
    [NVME_ADM_CMD_ASYNC_EV_REQ]     = NVME_CMD_EFF_CSUPP,
    [NVME_ADM_CMD_NS_ATTACHMENT]    = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_NIC,
    [NVME_ADM_CMD_VIRT_MNGMT]       = NVME_CMD_EFF_CSUPP,
    [NVME_ADM_CMD_DBBUF_CONFIG]     = NVME_CMD_EFF_CSUPP,
    [NVME_ADM_CMD_FORMAT_NVM]       = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
    [NVME_ADM_CMD_DIRECTIVE_RECV]   = NVME_CMD_EFF_CSUPP,
    [NVME_ADM_CMD_DIRECTIVE_SEND]   = NVME_CMD_EFF_CSUPP,
};

static const uint32_t nvme_cse_iocs_none[256];

static const uint32_t nvme_cse_iocs_nvm[256] = {
    [NVME_CMD_FLUSH]                = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
    [NVME_CMD_WRITE_ZEROES]         = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
    [NVME_CMD_WRITE]                = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
    [NVME_CMD_READ]                 = NVME_CMD_EFF_CSUPP,
    [NVME_CMD_DSM]                  = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
    [NVME_CMD_VERIFY]               = NVME_CMD_EFF_CSUPP,
    [NVME_CMD_COPY]                 = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
    [NVME_CMD_COMPARE]              = NVME_CMD_EFF_CSUPP,
    [NVME_CMD_IO_MGMT_RECV]         = NVME_CMD_EFF_CSUPP,
    [NVME_CMD_IO_MGMT_SEND]         = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
};

static const uint32_t nvme_cse_iocs_zoned[256] = {
    [NVME_CMD_FLUSH]                = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
    [NVME_CMD_WRITE_ZEROES]         = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
    [NVME_CMD_WRITE]                = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
    [NVME_CMD_READ]                 = NVME_CMD_EFF_CSUPP,
    [NVME_CMD_DSM]                  = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
    [NVME_CMD_VERIFY]               = NVME_CMD_EFF_CSUPP,
    [NVME_CMD_COPY]                 = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
    [NVME_CMD_COMPARE]              = NVME_CMD_EFF_CSUPP,
    [NVME_CMD_ZONE_APPEND]          = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
    [NVME_CMD_ZONE_MGMT_SEND]       = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
    [NVME_CMD_ZONE_MGMT_RECV]       = NVME_CMD_EFF_CSUPP,
};

static void nvme_process_sq(void *opaque);
static void nvme_ctrl_reset(NvmeCtrl *n, NvmeResetType rst);
static inline uint64_t nvme_get_timestamp(const NvmeCtrl *n);

static uint16_t nvme_sqid(NvmeRequest *req)
{
    return le16_to_cpu(req->sq->sqid);
}

static inline uint16_t nvme_make_pid(NvmeNamespace *ns, uint16_t rg,
                                     uint16_t ph)
{
    uint16_t rgif = ns->endgrp->fdp.rgif;

    if (!rgif) {
        return ph;
    }

    return (rg << (16 - rgif)) | ph;
}

static inline bool nvme_ph_valid(NvmeNamespace *ns, uint16_t ph)
{
    return ph < ns->fdp.nphs;
}

static inline bool nvme_rg_valid(NvmeEnduranceGroup *endgrp, uint16_t rg)
{
    return rg < endgrp->fdp.nrg;
}

static inline uint16_t nvme_pid2ph(NvmeNamespace *ns, uint16_t pid)
{
    uint16_t rgif = ns->endgrp->fdp.rgif;

    if (!rgif) {
        return pid;
    }

    return pid & ((1 << (15 - rgif)) - 1);
}

static inline uint16_t nvme_pid2rg(NvmeNamespace *ns, uint16_t pid)
{
    uint16_t rgif = ns->endgrp->fdp.rgif;

    if (!rgif) {
        return 0;
    }

    return pid >> (16 - rgif);
}

static inline bool nvme_parse_pid(NvmeNamespace *ns, uint16_t pid,
                                  uint16_t *ph, uint16_t *rg)
{
    *rg = nvme_pid2rg(ns, pid);
    *ph = nvme_pid2ph(ns, pid);

    return nvme_ph_valid(ns, *ph) && nvme_rg_valid(ns->endgrp, *rg);
}
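
/*
 * Illustrative example (simply restating the arithmetic in the helpers
 * above): with rgif = 4, nvme_make_pid() places the reclaim group in bits
 * 15:12 of the placement identifier (rg << 12) and the placement handle in
 * the low bits, while nvme_pid2ph() masks with (1 << 11) - 1 and
 * nvme_pid2rg() shifts right by 12 to recover the two fields.
 */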

static void nvme_assign_zone_state(NvmeNamespace *ns, NvmeZone *zone,
                                   NvmeZoneState state)
{
    if (QTAILQ_IN_USE(zone, entry)) {
        switch (nvme_get_zone_state(zone)) {
        case NVME_ZONE_STATE_EXPLICITLY_OPEN:
            QTAILQ_REMOVE(&ns->exp_open_zones, zone, entry);
            break;
        case NVME_ZONE_STATE_IMPLICITLY_OPEN:
            QTAILQ_REMOVE(&ns->imp_open_zones, zone, entry);
            break;
        case NVME_ZONE_STATE_CLOSED:
            QTAILQ_REMOVE(&ns->closed_zones, zone, entry);
            break;
        case NVME_ZONE_STATE_FULL:
            QTAILQ_REMOVE(&ns->full_zones, zone, entry);
        default:
            ;
        }
    }

    nvme_set_zone_state(zone, state);

    switch (state) {
    case NVME_ZONE_STATE_EXPLICITLY_OPEN:
        QTAILQ_INSERT_TAIL(&ns->exp_open_zones, zone, entry);
        break;
    case NVME_ZONE_STATE_IMPLICITLY_OPEN:
        QTAILQ_INSERT_TAIL(&ns->imp_open_zones, zone, entry);
        break;
    case NVME_ZONE_STATE_CLOSED:
        QTAILQ_INSERT_TAIL(&ns->closed_zones, zone, entry);
        break;
    case NVME_ZONE_STATE_FULL:
        QTAILQ_INSERT_TAIL(&ns->full_zones, zone, entry);
    case NVME_ZONE_STATE_READ_ONLY:
        break;
    default:
        zone->d.za = 0;
    }
}

static uint16_t nvme_zns_check_resources(NvmeNamespace *ns, uint32_t act,
                                         uint32_t opn, uint32_t zrwa)
{
    if (ns->params.max_active_zones != 0 &&
        ns->nr_active_zones + act > ns->params.max_active_zones) {
        trace_pci_nvme_err_insuff_active_res(ns->params.max_active_zones);
        return NVME_ZONE_TOO_MANY_ACTIVE | NVME_DNR;
    }

    if (ns->params.max_open_zones != 0 &&
        ns->nr_open_zones + opn > ns->params.max_open_zones) {
        trace_pci_nvme_err_insuff_open_res(ns->params.max_open_zones);
        return NVME_ZONE_TOO_MANY_OPEN | NVME_DNR;
    }

    if (zrwa > ns->zns.numzrwa) {
        return NVME_NOZRWA | NVME_DNR;
    }

    return NVME_SUCCESS;
}

/*
 * Check if we can open a zone without exceeding open/active limits.
 * AOR stands for "Active and Open Resources" (see TP 4053 section 2.5).
 */
static uint16_t nvme_aor_check(NvmeNamespace *ns, uint32_t act, uint32_t opn)
{
    return nvme_zns_check_resources(ns, act, opn, 0);
}

static NvmeFdpEvent *nvme_fdp_alloc_event(NvmeCtrl *n, NvmeFdpEventBuffer *ebuf)
{
    NvmeFdpEvent *ret = NULL;
    bool is_full = ebuf->next == ebuf->start && ebuf->nelems;

    ret = &ebuf->events[ebuf->next++];
    if (unlikely(ebuf->next == NVME_FDP_MAX_EVENTS)) {
        ebuf->next = 0;
    }
    if (unlikely(is_full)) {
        ebuf->start = ebuf->next;
    } else {
        ebuf->nelems++;
    }

    memset(ret, 0, sizeof(NvmeFdpEvent));
    ret->timestamp = nvme_get_timestamp(n);

    return ret;
}

static inline int log_event(NvmeRuHandle *ruh, uint8_t event_type)
{
    return (ruh->event_filter >> nvme_fdp_evf_shifts[event_type]) & 0x1;
}

static bool nvme_update_ruh(NvmeCtrl *n, NvmeNamespace *ns, uint16_t pid)
{
    NvmeEnduranceGroup *endgrp = ns->endgrp;
    NvmeRuHandle *ruh;
    NvmeReclaimUnit *ru;
    NvmeFdpEvent *e = NULL;
    uint16_t ph, rg, ruhid;

    if (!nvme_parse_pid(ns, pid, &ph, &rg)) {
        return false;
    }

    ruhid = ns->fdp.phs[ph];

    ruh = &endgrp->fdp.ruhs[ruhid];
    ru = &ruh->rus[rg];

    if (ru->ruamw) {
        if (log_event(ruh, FDP_EVT_RU_NOT_FULLY_WRITTEN)) {
            e = nvme_fdp_alloc_event(n, &endgrp->fdp.host_events);
            e->type = FDP_EVT_RU_NOT_FULLY_WRITTEN;
            e->flags = FDPEF_PIV | FDPEF_NSIDV | FDPEF_LV;
            e->pid = cpu_to_le16(pid);
            e->nsid = cpu_to_le32(ns->params.nsid);
            e->rgid = cpu_to_le16(rg);
            e->ruhid = cpu_to_le16(ruhid);
        }

        /* log (eventual) GC overhead of prematurely swapping the RU */
        nvme_fdp_stat_inc(&endgrp->fdp.mbmw, nvme_l2b(ns, ru->ruamw));
    }

    ru->ruamw = ruh->ruamw;

    return true;
}

static bool nvme_addr_is_cmb(NvmeCtrl *n, hwaddr addr)
{
    hwaddr hi, lo;

    if (!n->cmb.cmse) {
        return false;
    }

    lo = n->params.legacy_cmb ? n->cmb.mem.addr : n->cmb.cba;
    hi = lo + int128_get64(n->cmb.mem.size);

    return addr >= lo && addr < hi;
}

static inline void *nvme_addr_to_cmb(NvmeCtrl *n, hwaddr addr)
{
    hwaddr base = n->params.legacy_cmb ? n->cmb.mem.addr : n->cmb.cba;

    return &n->cmb.buf[addr - base];
}

static bool nvme_addr_is_pmr(NvmeCtrl *n, hwaddr addr)
{
    hwaddr hi;

    if (!n->pmr.cmse) {
        return false;
    }

    hi = n->pmr.cba + int128_get64(n->pmr.dev->mr.size);

    return addr >= n->pmr.cba && addr < hi;
}

static inline void *nvme_addr_to_pmr(NvmeCtrl *n, hwaddr addr)
{
    return memory_region_get_ram_ptr(&n->pmr.dev->mr) + (addr - n->pmr.cba);
}

static inline bool nvme_addr_is_iomem(NvmeCtrl *n, hwaddr addr)
{
    hwaddr hi, lo;

    /*
     * The purpose of this check is to guard against invalid "local" access to
     * the iomem (i.e. controller registers). Thus, we check against the range
     * covered by the 'bar0' MemoryRegion since that is currently composed of
     * two subregions (the NVMe "MBAR" and the MSI-X table/pba). Note, however,
     * that if the device model is ever changed to allow the CMB to be located
     * in BAR0 as well, then this must be changed.
     */
    lo = n->bar0.addr;
    hi = lo + int128_get64(n->bar0.size);

    return addr >= lo && addr < hi;
}

static int nvme_addr_read(NvmeCtrl *n, hwaddr addr, void *buf, int size)
{
    hwaddr hi = addr + size - 1;

    if (hi < addr) {
        return 1;
    }

    if (n->bar.cmbsz && nvme_addr_is_cmb(n, addr) && nvme_addr_is_cmb(n, hi)) {
        memcpy(buf, nvme_addr_to_cmb(n, addr), size);
        return 0;
    }

    if (nvme_addr_is_pmr(n, addr) && nvme_addr_is_pmr(n, hi)) {
        memcpy(buf, nvme_addr_to_pmr(n, addr), size);
        return 0;
    }

    return pci_dma_read(PCI_DEVICE(n), addr, buf, size);
}

static int nvme_addr_write(NvmeCtrl *n, hwaddr addr, const void *buf, int size)
{
    hwaddr hi = addr + size - 1;

    if (hi < addr) {
        return 1;
    }

    if (n->bar.cmbsz && nvme_addr_is_cmb(n, addr) && nvme_addr_is_cmb(n, hi)) {
        memcpy(nvme_addr_to_cmb(n, addr), buf, size);
        return 0;
    }

    if (nvme_addr_is_pmr(n, addr) && nvme_addr_is_pmr(n, hi)) {
        memcpy(nvme_addr_to_pmr(n, addr), buf, size);
        return 0;
    }

    return pci_dma_write(PCI_DEVICE(n), addr, buf, size);
}

static bool nvme_nsid_valid(NvmeCtrl *n, uint32_t nsid)
{
    return nsid &&
        (nsid == NVME_NSID_BROADCAST || nsid <= NVME_MAX_NAMESPACES);
}

static int nvme_check_sqid(NvmeCtrl *n, uint16_t sqid)
{
    return sqid < n->conf_ioqpairs + 1 && n->sq[sqid] != NULL ? 0 : -1;
}

static int nvme_check_cqid(NvmeCtrl *n, uint16_t cqid)
{
    return cqid < n->conf_ioqpairs + 1 && n->cq[cqid] != NULL ? 0 : -1;
}

static void nvme_inc_cq_tail(NvmeCQueue *cq)
{
    cq->tail++;
    if (cq->tail >= cq->size) {
        cq->tail = 0;
        cq->phase = !cq->phase;
    }
}

static void nvme_inc_sq_head(NvmeSQueue *sq)
{
    sq->head = (sq->head + 1) % sq->size;
}

static uint8_t nvme_cq_full(NvmeCQueue *cq)
{
    return (cq->tail + 1) % cq->size == cq->head;
}

static uint8_t nvme_sq_empty(NvmeSQueue *sq)
{
    return sq->head == sq->tail;
}

static void nvme_irq_check(NvmeCtrl *n)
{
    PCIDevice *pci = PCI_DEVICE(n);
    uint32_t intms = ldl_le_p(&n->bar.intms);

    if (msix_enabled(pci)) {
        return;
    }
    if (~intms & n->irq_status) {
        pci_irq_assert(pci);
    } else {
        pci_irq_deassert(pci);
    }
}

static void nvme_irq_assert(NvmeCtrl *n, NvmeCQueue *cq)
{
    PCIDevice *pci = PCI_DEVICE(n);

    if (cq->irq_enabled) {
        if (msix_enabled(pci)) {
            trace_pci_nvme_irq_msix(cq->vector);
            msix_notify(pci, cq->vector);
        } else {
            trace_pci_nvme_irq_pin();
            assert(cq->vector < 32);
            n->irq_status |= 1 << cq->vector;
            nvme_irq_check(n);
        }
    } else {
        trace_pci_nvme_irq_masked();
    }
}

static void nvme_irq_deassert(NvmeCtrl *n, NvmeCQueue *cq)
{
    if (cq->irq_enabled) {
        if (msix_enabled(PCI_DEVICE(n))) {
            return;
        } else {
            assert(cq->vector < 32);
            if (!n->cq_pending) {
                n->irq_status &= ~(1 << cq->vector);
            }
            nvme_irq_check(n);
        }
    }
}

static void nvme_req_clear(NvmeRequest *req)
{
    req->ns = NULL;
    req->opaque = NULL;
    req->aiocb = NULL;
    memset(&req->cqe, 0x0, sizeof(req->cqe));
    req->status = NVME_SUCCESS;
}

static inline void nvme_sg_init(NvmeCtrl *n, NvmeSg *sg, bool dma)
{
    if (dma) {
        pci_dma_sglist_init(&sg->qsg, PCI_DEVICE(n), 0);
        sg->flags = NVME_SG_DMA;
    } else {
        qemu_iovec_init(&sg->iov, 0);
    }

    sg->flags |= NVME_SG_ALLOC;
}

static inline void nvme_sg_unmap(NvmeSg *sg)
{
    if (!(sg->flags & NVME_SG_ALLOC)) {
        return;
    }

    if (sg->flags & NVME_SG_DMA) {
        qemu_sglist_destroy(&sg->qsg);
    } else {
        qemu_iovec_destroy(&sg->iov);
    }

    memset(sg, 0x0, sizeof(*sg));
}

/*
 * When metadata is transferred as extended LBAs, the DPTR mapped into `sg`
 * holds both data and metadata. This function splits the data and metadata
 * into two separate QSG/IOVs.
 */
737 static void nvme_sg_split(NvmeSg
*sg
, NvmeNamespace
*ns
, NvmeSg
*data
,
741 uint32_t trans_len
, count
= ns
->lbasz
;
743 bool dma
= sg
->flags
& NVME_SG_DMA
;
745 size_t sg_len
= dma
? sg
->qsg
.size
: sg
->iov
.size
;
748 assert(sg
->flags
& NVME_SG_ALLOC
);
751 sge_len
= dma
? sg
->qsg
.sg
[sg_idx
].len
: sg
->iov
.iov
[sg_idx
].iov_len
;
753 trans_len
= MIN(sg_len
, count
);
754 trans_len
= MIN(trans_len
, sge_len
- offset
);
758 qemu_sglist_add(&dst
->qsg
, sg
->qsg
.sg
[sg_idx
].base
+ offset
,
761 qemu_iovec_add(&dst
->iov
,
762 sg
->iov
.iov
[sg_idx
].iov_base
+ offset
,
772 dst
= (dst
== data
) ? mdata
: data
;
773 count
= (dst
== data
) ? ns
->lbasz
: ns
->lbaf
.ms
;
776 if (sge_len
== offset
) {
783 static uint16_t nvme_map_addr_cmb(NvmeCtrl
*n
, QEMUIOVector
*iov
, hwaddr addr
,
790 trace_pci_nvme_map_addr_cmb(addr
, len
);
792 if (!nvme_addr_is_cmb(n
, addr
) || !nvme_addr_is_cmb(n
, addr
+ len
- 1)) {
793 return NVME_DATA_TRAS_ERROR
;
796 qemu_iovec_add(iov
, nvme_addr_to_cmb(n
, addr
), len
);
801 static uint16_t nvme_map_addr_pmr(NvmeCtrl
*n
, QEMUIOVector
*iov
, hwaddr addr
,
808 if (!nvme_addr_is_pmr(n
, addr
) || !nvme_addr_is_pmr(n
, addr
+ len
- 1)) {
809 return NVME_DATA_TRAS_ERROR
;
812 qemu_iovec_add(iov
, nvme_addr_to_pmr(n
, addr
), len
);
817 static uint16_t nvme_map_addr(NvmeCtrl
*n
, NvmeSg
*sg
, hwaddr addr
, size_t len
)
819 bool cmb
= false, pmr
= false;
825 trace_pci_nvme_map_addr(addr
, len
);
827 if (nvme_addr_is_iomem(n
, addr
)) {
828 return NVME_DATA_TRAS_ERROR
;
831 if (nvme_addr_is_cmb(n
, addr
)) {
833 } else if (nvme_addr_is_pmr(n
, addr
)) {
838 if (sg
->flags
& NVME_SG_DMA
) {
839 return NVME_INVALID_USE_OF_CMB
| NVME_DNR
;
842 if (sg
->iov
.niov
+ 1 > IOV_MAX
) {
843 goto max_mappings_exceeded
;
847 return nvme_map_addr_cmb(n
, &sg
->iov
, addr
, len
);
849 return nvme_map_addr_pmr(n
, &sg
->iov
, addr
, len
);
853 if (!(sg
->flags
& NVME_SG_DMA
)) {
854 return NVME_INVALID_USE_OF_CMB
| NVME_DNR
;
857 if (sg
->qsg
.nsg
+ 1 > IOV_MAX
) {
858 goto max_mappings_exceeded
;
861 qemu_sglist_add(&sg
->qsg
, addr
, len
);
865 max_mappings_exceeded
:
866 NVME_GUEST_ERR(pci_nvme_ub_too_many_mappings
,
867 "number of mappings exceed 1024");
868 return NVME_INTERNAL_DEV_ERROR
| NVME_DNR
;
871 static inline bool nvme_addr_is_dma(NvmeCtrl
*n
, hwaddr addr
)
873 return !(nvme_addr_is_cmb(n
, addr
) || nvme_addr_is_pmr(n
, addr
));
876 static uint16_t nvme_map_prp(NvmeCtrl
*n
, NvmeSg
*sg
, uint64_t prp1
,
877 uint64_t prp2
, uint32_t len
)
879 hwaddr trans_len
= n
->page_size
- (prp1
% n
->page_size
);
880 trans_len
= MIN(len
, trans_len
);
881 int num_prps
= (len
>> n
->page_bits
) + 1;
885 trace_pci_nvme_map_prp(trans_len
, len
, prp1
, prp2
, num_prps
);
887 nvme_sg_init(n
, sg
, nvme_addr_is_dma(n
, prp1
));
889 status
= nvme_map_addr(n
, sg
, prp1
, trans_len
);
896 if (len
> n
->page_size
) {
897 uint64_t prp_list
[n
->max_prp_ents
];
898 uint32_t nents
, prp_trans
;
902 * The first PRP list entry, pointed to by PRP2 may contain offset.
903 * Hence, we need to calculate the number of entries in based on
906 nents
= (n
->page_size
- (prp2
& (n
->page_size
- 1))) >> 3;
907 prp_trans
= MIN(n
->max_prp_ents
, nents
) * sizeof(uint64_t);
908 ret
= nvme_addr_read(n
, prp2
, (void *)prp_list
, prp_trans
);
910 trace_pci_nvme_err_addr_read(prp2
);
911 status
= NVME_DATA_TRAS_ERROR
;
915 uint64_t prp_ent
= le64_to_cpu(prp_list
[i
]);
917 if (i
== nents
- 1 && len
> n
->page_size
) {
918 if (unlikely(prp_ent
& (n
->page_size
- 1))) {
919 trace_pci_nvme_err_invalid_prplist_ent(prp_ent
);
920 status
= NVME_INVALID_PRP_OFFSET
| NVME_DNR
;
925 nents
= (len
+ n
->page_size
- 1) >> n
->page_bits
;
926 nents
= MIN(nents
, n
->max_prp_ents
);
927 prp_trans
= nents
* sizeof(uint64_t);
928 ret
= nvme_addr_read(n
, prp_ent
, (void *)prp_list
,
931 trace_pci_nvme_err_addr_read(prp_ent
);
932 status
= NVME_DATA_TRAS_ERROR
;
935 prp_ent
= le64_to_cpu(prp_list
[i
]);
938 if (unlikely(prp_ent
& (n
->page_size
- 1))) {
939 trace_pci_nvme_err_invalid_prplist_ent(prp_ent
);
940 status
= NVME_INVALID_PRP_OFFSET
| NVME_DNR
;
944 trans_len
= MIN(len
, n
->page_size
);
945 status
= nvme_map_addr(n
, sg
, prp_ent
, trans_len
);
954 if (unlikely(prp2
& (n
->page_size
- 1))) {
955 trace_pci_nvme_err_invalid_prp2_align(prp2
);
956 status
= NVME_INVALID_PRP_OFFSET
| NVME_DNR
;
959 status
= nvme_map_addr(n
, sg
, prp2
, len
);
974 * Map 'nsgld' data descriptors from 'segment'. The function will subtract the
975 * number of bytes mapped in len.
977 static uint16_t nvme_map_sgl_data(NvmeCtrl
*n
, NvmeSg
*sg
,
978 NvmeSglDescriptor
*segment
, uint64_t nsgld
,
979 size_t *len
, NvmeCmd
*cmd
)
981 dma_addr_t addr
, trans_len
;
985 for (int i
= 0; i
< nsgld
; i
++) {
986 uint8_t type
= NVME_SGL_TYPE(segment
[i
].type
);
989 case NVME_SGL_DESCR_TYPE_DATA_BLOCK
:
991 case NVME_SGL_DESCR_TYPE_SEGMENT
:
992 case NVME_SGL_DESCR_TYPE_LAST_SEGMENT
:
993 return NVME_INVALID_NUM_SGL_DESCRS
| NVME_DNR
;
995 return NVME_SGL_DESCR_TYPE_INVALID
| NVME_DNR
;
998 dlen
= le32_to_cpu(segment
[i
].len
);
1006 * All data has been mapped, but the SGL contains additional
1007 * segments and/or descriptors. The controller might accept
1008 * ignoring the rest of the SGL.
1010 uint32_t sgls
= le32_to_cpu(n
->id_ctrl
.sgls
);
1011 if (sgls
& NVME_CTRL_SGLS_EXCESS_LENGTH
) {
1015 trace_pci_nvme_err_invalid_sgl_excess_length(dlen
);
1016 return NVME_DATA_SGL_LEN_INVALID
| NVME_DNR
;
1019 trans_len
= MIN(*len
, dlen
);
1021 addr
= le64_to_cpu(segment
[i
].addr
);
1023 if (UINT64_MAX
- addr
< dlen
) {
1024 return NVME_DATA_SGL_LEN_INVALID
| NVME_DNR
;
1027 status
= nvme_map_addr(n
, sg
, addr
, trans_len
);
1035 return NVME_SUCCESS
;
1038 static uint16_t nvme_map_sgl(NvmeCtrl
*n
, NvmeSg
*sg
, NvmeSglDescriptor sgl
,
1039 size_t len
, NvmeCmd
*cmd
)
1042 * Read the segment in chunks of 256 descriptors (one 4k page) to avoid
1043 * dynamically allocating a potentially huge SGL. The spec allows the SGL
1044 * to be larger (as in number of bytes required to describe the SGL
1045 * descriptors and segment chain) than the command transfer size, so it is
1046 * not bounded by MDTS.
1048 const int SEG_CHUNK_SIZE
= 256;
1050 NvmeSglDescriptor segment
[SEG_CHUNK_SIZE
], *sgld
, *last_sgld
;
1058 addr
= le64_to_cpu(sgl
.addr
);
1060 trace_pci_nvme_map_sgl(NVME_SGL_TYPE(sgl
.type
), len
);
1062 nvme_sg_init(n
, sg
, nvme_addr_is_dma(n
, addr
));
1065 * If the entire transfer can be described with a single data block it can
1066 * be mapped directly.
1068 if (NVME_SGL_TYPE(sgl
.type
) == NVME_SGL_DESCR_TYPE_DATA_BLOCK
) {
1069 status
= nvme_map_sgl_data(n
, sg
, sgld
, 1, &len
, cmd
);
1078 switch (NVME_SGL_TYPE(sgld
->type
)) {
1079 case NVME_SGL_DESCR_TYPE_SEGMENT
:
1080 case NVME_SGL_DESCR_TYPE_LAST_SEGMENT
:
1083 return NVME_INVALID_SGL_SEG_DESCR
| NVME_DNR
;
1086 seg_len
= le32_to_cpu(sgld
->len
);
1088 /* check the length of the (Last) Segment descriptor */
1089 if (!seg_len
|| seg_len
& 0xf) {
1090 return NVME_INVALID_SGL_SEG_DESCR
| NVME_DNR
;
1093 if (UINT64_MAX
- addr
< seg_len
) {
1094 return NVME_DATA_SGL_LEN_INVALID
| NVME_DNR
;
1097 nsgld
= seg_len
/ sizeof(NvmeSglDescriptor
);
1099 while (nsgld
> SEG_CHUNK_SIZE
) {
1100 if (nvme_addr_read(n
, addr
, segment
, sizeof(segment
))) {
1101 trace_pci_nvme_err_addr_read(addr
);
1102 status
= NVME_DATA_TRAS_ERROR
;
1106 status
= nvme_map_sgl_data(n
, sg
, segment
, SEG_CHUNK_SIZE
,
1112 nsgld
-= SEG_CHUNK_SIZE
;
1113 addr
+= SEG_CHUNK_SIZE
* sizeof(NvmeSglDescriptor
);
1116 ret
= nvme_addr_read(n
, addr
, segment
, nsgld
*
1117 sizeof(NvmeSglDescriptor
));
1119 trace_pci_nvme_err_addr_read(addr
);
1120 status
= NVME_DATA_TRAS_ERROR
;
1124 last_sgld
= &segment
[nsgld
- 1];
1127 * If the segment ends with a Data Block, then we are done.
1129 if (NVME_SGL_TYPE(last_sgld
->type
) == NVME_SGL_DESCR_TYPE_DATA_BLOCK
) {
1130 status
= nvme_map_sgl_data(n
, sg
, segment
, nsgld
, &len
, cmd
);
1139 * If the last descriptor was not a Data Block, then the current
1140 * segment must not be a Last Segment.
1142 if (NVME_SGL_TYPE(sgld
->type
) == NVME_SGL_DESCR_TYPE_LAST_SEGMENT
) {
1143 status
= NVME_INVALID_SGL_SEG_DESCR
| NVME_DNR
;
1148 addr
= le64_to_cpu(sgld
->addr
);
1151 * Do not map the last descriptor; it will be a Segment or Last Segment
1152 * descriptor and is handled by the next iteration.
1154 status
= nvme_map_sgl_data(n
, sg
, segment
, nsgld
- 1, &len
, cmd
);
1161 /* if there is any residual left in len, the SGL was too short */
1163 status
= NVME_DATA_SGL_LEN_INVALID
| NVME_DNR
;
1167 return NVME_SUCCESS
;
1174 uint16_t nvme_map_dptr(NvmeCtrl
*n
, NvmeSg
*sg
, size_t len
,
1177 uint64_t prp1
, prp2
;
1179 switch (NVME_CMD_FLAGS_PSDT(cmd
->flags
)) {
1181 prp1
= le64_to_cpu(cmd
->dptr
.prp1
);
1182 prp2
= le64_to_cpu(cmd
->dptr
.prp2
);
1184 return nvme_map_prp(n
, sg
, prp1
, prp2
, len
);
1185 case NVME_PSDT_SGL_MPTR_CONTIGUOUS
:
1186 case NVME_PSDT_SGL_MPTR_SGL
:
1187 return nvme_map_sgl(n
, sg
, cmd
->dptr
.sgl
, len
, cmd
);
1189 return NVME_INVALID_FIELD
;
1193 static uint16_t nvme_map_mptr(NvmeCtrl
*n
, NvmeSg
*sg
, size_t len
,
1196 int psdt
= NVME_CMD_FLAGS_PSDT(cmd
->flags
);
1197 hwaddr mptr
= le64_to_cpu(cmd
->mptr
);
1200 if (psdt
== NVME_PSDT_SGL_MPTR_SGL
) {
1201 NvmeSglDescriptor sgl
;
1203 if (nvme_addr_read(n
, mptr
, &sgl
, sizeof(sgl
))) {
1204 return NVME_DATA_TRAS_ERROR
;
1207 status
= nvme_map_sgl(n
, sg
, sgl
, len
, cmd
);
1208 if (status
&& (status
& 0x7ff) == NVME_DATA_SGL_LEN_INVALID
) {
1209 status
= NVME_MD_SGL_LEN_INVALID
| NVME_DNR
;
1215 nvme_sg_init(n
, sg
, nvme_addr_is_dma(n
, mptr
));
1216 status
= nvme_map_addr(n
, sg
, mptr
, len
);
1224 static uint16_t nvme_map_data(NvmeCtrl
*n
, uint32_t nlb
, NvmeRequest
*req
)
1226 NvmeNamespace
*ns
= req
->ns
;
1227 NvmeRwCmd
*rw
= (NvmeRwCmd
*)&req
->cmd
;
1228 bool pi
= !!NVME_ID_NS_DPS_TYPE(ns
->id_ns
.dps
);
1229 bool pract
= !!(le16_to_cpu(rw
->control
) & NVME_RW_PRINFO_PRACT
);
1230 size_t len
= nvme_l2b(ns
, nlb
);
1233 if (nvme_ns_ext(ns
) &&
1234 !(pi
&& pract
&& ns
->lbaf
.ms
== nvme_pi_tuple_size(ns
))) {
1237 len
+= nvme_m2b(ns
, nlb
);
1239 status
= nvme_map_dptr(n
, &sg
, len
, &req
->cmd
);
1244 nvme_sg_init(n
, &req
->sg
, sg
.flags
& NVME_SG_DMA
);
1245 nvme_sg_split(&sg
, ns
, &req
->sg
, NULL
);
1248 return NVME_SUCCESS
;
1251 return nvme_map_dptr(n
, &req
->sg
, len
, &req
->cmd
);
1254 static uint16_t nvme_map_mdata(NvmeCtrl
*n
, uint32_t nlb
, NvmeRequest
*req
)
1256 NvmeNamespace
*ns
= req
->ns
;
1257 size_t len
= nvme_m2b(ns
, nlb
);
1260 if (nvme_ns_ext(ns
)) {
1263 len
+= nvme_l2b(ns
, nlb
);
1265 status
= nvme_map_dptr(n
, &sg
, len
, &req
->cmd
);
1270 nvme_sg_init(n
, &req
->sg
, sg
.flags
& NVME_SG_DMA
);
1271 nvme_sg_split(&sg
, ns
, NULL
, &req
->sg
);
1274 return NVME_SUCCESS
;
1277 return nvme_map_mptr(n
, &req
->sg
, len
, &req
->cmd
);
1280 static uint16_t nvme_tx_interleaved(NvmeCtrl
*n
, NvmeSg
*sg
, uint8_t *ptr
,
1281 uint32_t len
, uint32_t bytes
,
1282 int32_t skip_bytes
, int64_t offset
,
1283 NvmeTxDirection dir
)
1286 uint32_t trans_len
, count
= bytes
;
1287 bool dma
= sg
->flags
& NVME_SG_DMA
;
1292 assert(sg
->flags
& NVME_SG_ALLOC
);
1295 sge_len
= dma
? sg
->qsg
.sg
[sg_idx
].len
: sg
->iov
.iov
[sg_idx
].iov_len
;
1297 if (sge_len
- offset
< 0) {
1303 if (sge_len
== offset
) {
1309 trans_len
= MIN(len
, count
);
1310 trans_len
= MIN(trans_len
, sge_len
- offset
);
1313 addr
= sg
->qsg
.sg
[sg_idx
].base
+ offset
;
1315 addr
= (hwaddr
)(uintptr_t)sg
->iov
.iov
[sg_idx
].iov_base
+ offset
;
1318 if (dir
== NVME_TX_DIRECTION_TO_DEVICE
) {
1319 ret
= nvme_addr_read(n
, addr
, ptr
, trans_len
);
1321 ret
= nvme_addr_write(n
, addr
, ptr
, trans_len
);
1325 return NVME_DATA_TRAS_ERROR
;
1331 offset
+= trans_len
;
1335 offset
+= skip_bytes
;
1339 return NVME_SUCCESS
;
1342 static uint16_t nvme_tx(NvmeCtrl
*n
, NvmeSg
*sg
, void *ptr
, uint32_t len
,
1343 NvmeTxDirection dir
)
1345 assert(sg
->flags
& NVME_SG_ALLOC
);
1347 if (sg
->flags
& NVME_SG_DMA
) {
1348 const MemTxAttrs attrs
= MEMTXATTRS_UNSPECIFIED
;
1349 dma_addr_t residual
;
1351 if (dir
== NVME_TX_DIRECTION_TO_DEVICE
) {
1352 dma_buf_write(ptr
, len
, &residual
, &sg
->qsg
, attrs
);
1354 dma_buf_read(ptr
, len
, &residual
, &sg
->qsg
, attrs
);
1357 if (unlikely(residual
)) {
1358 trace_pci_nvme_err_invalid_dma();
1359 return NVME_INVALID_FIELD
| NVME_DNR
;
1364 if (dir
== NVME_TX_DIRECTION_TO_DEVICE
) {
1365 bytes
= qemu_iovec_to_buf(&sg
->iov
, 0, ptr
, len
);
1367 bytes
= qemu_iovec_from_buf(&sg
->iov
, 0, ptr
, len
);
1370 if (unlikely(bytes
!= len
)) {
1371 trace_pci_nvme_err_invalid_dma();
1372 return NVME_INVALID_FIELD
| NVME_DNR
;
1376 return NVME_SUCCESS
;
1379 static inline uint16_t nvme_c2h(NvmeCtrl
*n
, void *ptr
, uint32_t len
,
1384 status
= nvme_map_dptr(n
, &req
->sg
, len
, &req
->cmd
);
1389 return nvme_tx(n
, &req
->sg
, ptr
, len
, NVME_TX_DIRECTION_FROM_DEVICE
);
1392 static inline uint16_t nvme_h2c(NvmeCtrl
*n
, void *ptr
, uint32_t len
,
1397 status
= nvme_map_dptr(n
, &req
->sg
, len
, &req
->cmd
);
1402 return nvme_tx(n
, &req
->sg
, ptr
, len
, NVME_TX_DIRECTION_TO_DEVICE
);
1405 uint16_t nvme_bounce_data(NvmeCtrl
*n
, void *ptr
, uint32_t len
,
1406 NvmeTxDirection dir
, NvmeRequest
*req
)
1408 NvmeNamespace
*ns
= req
->ns
;
1409 NvmeRwCmd
*rw
= (NvmeRwCmd
*)&req
->cmd
;
1410 bool pi
= !!NVME_ID_NS_DPS_TYPE(ns
->id_ns
.dps
);
1411 bool pract
= !!(le16_to_cpu(rw
->control
) & NVME_RW_PRINFO_PRACT
);
1413 if (nvme_ns_ext(ns
) &&
1414 !(pi
&& pract
&& ns
->lbaf
.ms
== nvme_pi_tuple_size(ns
))) {
1415 return nvme_tx_interleaved(n
, &req
->sg
, ptr
, len
, ns
->lbasz
,
1416 ns
->lbaf
.ms
, 0, dir
);
1419 return nvme_tx(n
, &req
->sg
, ptr
, len
, dir
);
1422 uint16_t nvme_bounce_mdata(NvmeCtrl
*n
, void *ptr
, uint32_t len
,
1423 NvmeTxDirection dir
, NvmeRequest
*req
)
1425 NvmeNamespace
*ns
= req
->ns
;
1428 if (nvme_ns_ext(ns
)) {
1429 return nvme_tx_interleaved(n
, &req
->sg
, ptr
, len
, ns
->lbaf
.ms
,
1430 ns
->lbasz
, ns
->lbasz
, dir
);
1433 nvme_sg_unmap(&req
->sg
);
1435 status
= nvme_map_mptr(n
, &req
->sg
, len
, &req
->cmd
);
1440 return nvme_tx(n
, &req
->sg
, ptr
, len
, dir
);
1443 static inline void nvme_blk_read(BlockBackend
*blk
, int64_t offset
,
1444 uint32_t align
, BlockCompletionFunc
*cb
,
1447 assert(req
->sg
.flags
& NVME_SG_ALLOC
);
1449 if (req
->sg
.flags
& NVME_SG_DMA
) {
1450 req
->aiocb
= dma_blk_read(blk
, &req
->sg
.qsg
, offset
, align
, cb
, req
);
1452 req
->aiocb
= blk_aio_preadv(blk
, offset
, &req
->sg
.iov
, 0, cb
, req
);
1456 static inline void nvme_blk_write(BlockBackend
*blk
, int64_t offset
,
1457 uint32_t align
, BlockCompletionFunc
*cb
,
1460 assert(req
->sg
.flags
& NVME_SG_ALLOC
);
1462 if (req
->sg
.flags
& NVME_SG_DMA
) {
1463 req
->aiocb
= dma_blk_write(blk
, &req
->sg
.qsg
, offset
, align
, cb
, req
);
1465 req
->aiocb
= blk_aio_pwritev(blk
, offset
, &req
->sg
.iov
, 0, cb
, req
);
1469 static void nvme_update_cq_eventidx(const NvmeCQueue
*cq
)
1471 uint32_t v
= cpu_to_le32(cq
->head
);
1473 trace_pci_nvme_update_cq_eventidx(cq
->cqid
, cq
->head
);
1475 pci_dma_write(PCI_DEVICE(cq
->ctrl
), cq
->ei_addr
, &v
, sizeof(v
));
1478 static void nvme_update_cq_head(NvmeCQueue
*cq
)
1482 pci_dma_read(PCI_DEVICE(cq
->ctrl
), cq
->db_addr
, &v
, sizeof(v
));
1484 cq
->head
= le32_to_cpu(v
);
1486 trace_pci_nvme_update_cq_head(cq
->cqid
, cq
->head
);
1489 static void nvme_post_cqes(void *opaque
)
1491 NvmeCQueue
*cq
= opaque
;
1492 NvmeCtrl
*n
= cq
->ctrl
;
1493 NvmeRequest
*req
, *next
;
1494 bool pending
= cq
->head
!= cq
->tail
;
1497 QTAILQ_FOREACH_SAFE(req
, &cq
->req_list
, entry
, next
) {
1501 if (n
->dbbuf_enabled
) {
1502 nvme_update_cq_eventidx(cq
);
1503 nvme_update_cq_head(cq
);
1506 if (nvme_cq_full(cq
)) {
1511 req
->cqe
.status
= cpu_to_le16((req
->status
<< 1) | cq
->phase
);
1512 req
->cqe
.sq_id
= cpu_to_le16(sq
->sqid
);
1513 req
->cqe
.sq_head
= cpu_to_le16(sq
->head
);
1514 addr
= cq
->dma_addr
+ cq
->tail
* n
->cqe_size
;
1515 ret
= pci_dma_write(PCI_DEVICE(n
), addr
, (void *)&req
->cqe
,
1518 trace_pci_nvme_err_addr_write(addr
);
1519 trace_pci_nvme_err_cfs();
1520 stl_le_p(&n
->bar
.csts
, NVME_CSTS_FAILED
);
1523 QTAILQ_REMOVE(&cq
->req_list
, req
, entry
);
1524 nvme_inc_cq_tail(cq
);
1525 nvme_sg_unmap(&req
->sg
);
1526 QTAILQ_INSERT_TAIL(&sq
->req_list
, req
, entry
);
1528 if (cq
->tail
!= cq
->head
) {
1529 if (cq
->irq_enabled
&& !pending
) {
1533 nvme_irq_assert(n
, cq
);
1537 static void nvme_enqueue_req_completion(NvmeCQueue
*cq
, NvmeRequest
*req
)
1539 assert(cq
->cqid
== req
->sq
->cqid
);
1540 trace_pci_nvme_enqueue_req_completion(nvme_cid(req
), cq
->cqid
,
1541 le32_to_cpu(req
->cqe
.result
),
1542 le32_to_cpu(req
->cqe
.dw1
),
1546 trace_pci_nvme_err_req_status(nvme_cid(req
), nvme_nsid(req
->ns
),
1547 req
->status
, req
->cmd
.opcode
);
1550 QTAILQ_REMOVE(&req
->sq
->out_req_list
, req
, entry
);
1551 QTAILQ_INSERT_TAIL(&cq
->req_list
, req
, entry
);
1553 qemu_bh_schedule(cq
->bh
);
1556 static void nvme_process_aers(void *opaque
)
1558 NvmeCtrl
*n
= opaque
;
1559 NvmeAsyncEvent
*event
, *next
;
1561 trace_pci_nvme_process_aers(n
->aer_queued
);
1563 QTAILQ_FOREACH_SAFE(event
, &n
->aer_queue
, entry
, next
) {
1565 NvmeAerResult
*result
;
1567 /* can't post cqe if there is nothing to complete */
1568 if (!n
->outstanding_aers
) {
1569 trace_pci_nvme_no_outstanding_aers();
1573 /* ignore if masked (cqe posted, but event not cleared) */
1574 if (n
->aer_mask
& (1 << event
->result
.event_type
)) {
1575 trace_pci_nvme_aer_masked(event
->result
.event_type
, n
->aer_mask
);
1579 QTAILQ_REMOVE(&n
->aer_queue
, event
, entry
);
1582 n
->aer_mask
|= 1 << event
->result
.event_type
;
1583 n
->outstanding_aers
--;
1585 req
= n
->aer_reqs
[n
->outstanding_aers
];
1587 result
= (NvmeAerResult
*) &req
->cqe
.result
;
1588 result
->event_type
= event
->result
.event_type
;
1589 result
->event_info
= event
->result
.event_info
;
1590 result
->log_page
= event
->result
.log_page
;
1593 trace_pci_nvme_aer_post_cqe(result
->event_type
, result
->event_info
,
1596 nvme_enqueue_req_completion(&n
->admin_cq
, req
);
1600 static void nvme_enqueue_event(NvmeCtrl
*n
, uint8_t event_type
,
1601 uint8_t event_info
, uint8_t log_page
)
1603 NvmeAsyncEvent
*event
;
1605 trace_pci_nvme_enqueue_event(event_type
, event_info
, log_page
);
1607 if (n
->aer_queued
== n
->params
.aer_max_queued
) {
1608 trace_pci_nvme_enqueue_event_noqueue(n
->aer_queued
);
1612 event
= g_new(NvmeAsyncEvent
, 1);
1613 event
->result
= (NvmeAerResult
) {
1614 .event_type
= event_type
,
1615 .event_info
= event_info
,
1616 .log_page
= log_page
,
1619 QTAILQ_INSERT_TAIL(&n
->aer_queue
, event
, entry
);
1622 nvme_process_aers(n
);
1625 static void nvme_smart_event(NvmeCtrl
*n
, uint8_t event
)
1629 /* Ref SPEC <Asynchronous Event Information 0x2013 SMART / Health Status> */
1630 if (!(NVME_AEC_SMART(n
->features
.async_config
) & event
)) {
1635 case NVME_SMART_SPARE
:
1636 aer_info
= NVME_AER_INFO_SMART_SPARE_THRESH
;
1638 case NVME_SMART_TEMPERATURE
:
1639 aer_info
= NVME_AER_INFO_SMART_TEMP_THRESH
;
1641 case NVME_SMART_RELIABILITY
:
1642 case NVME_SMART_MEDIA_READ_ONLY
:
1643 case NVME_SMART_FAILED_VOLATILE_MEDIA
:
1644 case NVME_SMART_PMR_UNRELIABLE
:
1645 aer_info
= NVME_AER_INFO_SMART_RELIABILITY
;
1651 nvme_enqueue_event(n
, NVME_AER_TYPE_SMART
, aer_info
, NVME_LOG_SMART_INFO
);
1654 static void nvme_clear_events(NvmeCtrl
*n
, uint8_t event_type
)
1656 n
->aer_mask
&= ~(1 << event_type
);
1657 if (!QTAILQ_EMPTY(&n
->aer_queue
)) {
1658 nvme_process_aers(n
);
1662 static inline uint16_t nvme_check_mdts(NvmeCtrl
*n
, size_t len
)
1664 uint8_t mdts
= n
->params
.mdts
;
1666 if (mdts
&& len
> n
->page_size
<< mdts
) {
1667 trace_pci_nvme_err_mdts(len
);
1668 return NVME_INVALID_FIELD
| NVME_DNR
;
1671 return NVME_SUCCESS
;
1674 static inline uint16_t nvme_check_bounds(NvmeNamespace
*ns
, uint64_t slba
,
1677 uint64_t nsze
= le64_to_cpu(ns
->id_ns
.nsze
);
1679 if (unlikely(UINT64_MAX
- slba
< nlb
|| slba
+ nlb
> nsze
)) {
1680 trace_pci_nvme_err_invalid_lba_range(slba
, nlb
, nsze
);
1681 return NVME_LBA_RANGE
| NVME_DNR
;
1684 return NVME_SUCCESS
;
1687 static int nvme_block_status_all(NvmeNamespace
*ns
, uint64_t slba
,
1688 uint32_t nlb
, int flags
)
1690 BlockDriverState
*bs
= blk_bs(ns
->blkconf
.blk
);
1692 int64_t pnum
= 0, bytes
= nvme_l2b(ns
, nlb
);
1693 int64_t offset
= nvme_l2b(ns
, slba
);
1697 * `pnum` holds the number of bytes after offset that shares the same
1698 * allocation status as the byte at offset. If `pnum` is different from
1699 * `bytes`, we should check the allocation status of the next range and
1700 * continue this until all bytes have been checked.
1705 ret
= bdrv_block_status(bs
, offset
, bytes
, &pnum
, NULL
, NULL
);
1711 trace_pci_nvme_block_status(offset
, bytes
, pnum
, ret
,
1712 !!(ret
& BDRV_BLOCK_ZERO
));
1714 if (!(ret
& flags
)) {
1719 } while (pnum
!= bytes
);
1724 static uint16_t nvme_check_dulbe(NvmeNamespace
*ns
, uint64_t slba
,
1730 ret
= nvme_block_status_all(ns
, slba
, nlb
, BDRV_BLOCK_DATA
);
1733 error_setg_errno(&err
, -ret
, "unable to get block status");
1734 error_report_err(err
);
1736 return NVME_INTERNAL_DEV_ERROR
;
1742 return NVME_SUCCESS
;
1745 static void nvme_aio_err(NvmeRequest
*req
, int ret
)
1747 uint16_t status
= NVME_SUCCESS
;
1748 Error
*local_err
= NULL
;
1750 switch (req
->cmd
.opcode
) {
1752 status
= NVME_UNRECOVERED_READ
;
1754 case NVME_CMD_FLUSH
:
1755 case NVME_CMD_WRITE
:
1756 case NVME_CMD_WRITE_ZEROES
:
1757 case NVME_CMD_ZONE_APPEND
:
1759 status
= NVME_WRITE_FAULT
;
1762 status
= NVME_INTERNAL_DEV_ERROR
;
1766 trace_pci_nvme_err_aio(nvme_cid(req
), strerror(-ret
), status
);
1768 error_setg_errno(&local_err
, -ret
, "aio failed");
1769 error_report_err(local_err
);
1772 * Set the command status code to the first encountered error but allow a
1773 * subsequent Internal Device Error to trump it.
1775 if (req
->status
&& status
!= NVME_INTERNAL_DEV_ERROR
) {
1779 req
->status
= status
;
1782 static inline uint32_t nvme_zone_idx(NvmeNamespace
*ns
, uint64_t slba
)
1784 return ns
->zone_size_log2
> 0 ? slba
>> ns
->zone_size_log2
:
1785 slba
/ ns
->zone_size
;
1788 static inline NvmeZone
*nvme_get_zone_by_slba(NvmeNamespace
*ns
, uint64_t slba
)
1790 uint32_t zone_idx
= nvme_zone_idx(ns
, slba
);
1792 if (zone_idx
>= ns
->num_zones
) {
1796 return &ns
->zone_array
[zone_idx
];
1799 static uint16_t nvme_check_zone_state_for_write(NvmeZone
*zone
)
1801 uint64_t zslba
= zone
->d
.zslba
;
1803 switch (nvme_get_zone_state(zone
)) {
1804 case NVME_ZONE_STATE_EMPTY
:
1805 case NVME_ZONE_STATE_IMPLICITLY_OPEN
:
1806 case NVME_ZONE_STATE_EXPLICITLY_OPEN
:
1807 case NVME_ZONE_STATE_CLOSED
:
1808 return NVME_SUCCESS
;
1809 case NVME_ZONE_STATE_FULL
:
1810 trace_pci_nvme_err_zone_is_full(zslba
);
1811 return NVME_ZONE_FULL
;
1812 case NVME_ZONE_STATE_OFFLINE
:
1813 trace_pci_nvme_err_zone_is_offline(zslba
);
1814 return NVME_ZONE_OFFLINE
;
1815 case NVME_ZONE_STATE_READ_ONLY
:
1816 trace_pci_nvme_err_zone_is_read_only(zslba
);
1817 return NVME_ZONE_READ_ONLY
;
1822 return NVME_INTERNAL_DEV_ERROR
;
1825 static uint16_t nvme_check_zone_write(NvmeNamespace
*ns
, NvmeZone
*zone
,
1826 uint64_t slba
, uint32_t nlb
)
1828 uint64_t zcap
= nvme_zone_wr_boundary(zone
);
1831 status
= nvme_check_zone_state_for_write(zone
);
1836 if (zone
->d
.za
& NVME_ZA_ZRWA_VALID
) {
1837 uint64_t ezrwa
= zone
->w_ptr
+ 2 * ns
->zns
.zrwas
;
1839 if (slba
< zone
->w_ptr
|| slba
+ nlb
> ezrwa
) {
1840 trace_pci_nvme_err_zone_invalid_write(slba
, zone
->w_ptr
);
1841 return NVME_ZONE_INVALID_WRITE
;
1844 if (unlikely(slba
!= zone
->w_ptr
)) {
1845 trace_pci_nvme_err_write_not_at_wp(slba
, zone
->d
.zslba
,
1847 return NVME_ZONE_INVALID_WRITE
;
1851 if (unlikely((slba
+ nlb
) > zcap
)) {
1852 trace_pci_nvme_err_zone_boundary(slba
, nlb
, zcap
);
1853 return NVME_ZONE_BOUNDARY_ERROR
;
1856 return NVME_SUCCESS
;
1859 static uint16_t nvme_check_zone_state_for_read(NvmeZone
*zone
)
1861 switch (nvme_get_zone_state(zone
)) {
1862 case NVME_ZONE_STATE_EMPTY
:
1863 case NVME_ZONE_STATE_IMPLICITLY_OPEN
:
1864 case NVME_ZONE_STATE_EXPLICITLY_OPEN
:
1865 case NVME_ZONE_STATE_FULL
:
1866 case NVME_ZONE_STATE_CLOSED
:
1867 case NVME_ZONE_STATE_READ_ONLY
:
1868 return NVME_SUCCESS
;
1869 case NVME_ZONE_STATE_OFFLINE
:
1870 trace_pci_nvme_err_zone_is_offline(zone
->d
.zslba
);
1871 return NVME_ZONE_OFFLINE
;
1876 return NVME_INTERNAL_DEV_ERROR
;
1879 static uint16_t nvme_check_zone_read(NvmeNamespace
*ns
, uint64_t slba
,
1883 uint64_t bndry
, end
;
1886 zone
= nvme_get_zone_by_slba(ns
, slba
);
1889 bndry
= nvme_zone_rd_boundary(ns
, zone
);
1892 status
= nvme_check_zone_state_for_read(zone
);
1895 } else if (unlikely(end
> bndry
)) {
1896 if (!ns
->params
.cross_zone_read
) {
1897 status
= NVME_ZONE_BOUNDARY_ERROR
;
1900 * Read across zone boundary - check that all subsequent
1901 * zones that are being read have an appropriate state.
1905 status
= nvme_check_zone_state_for_read(zone
);
1909 } while (end
> nvme_zone_rd_boundary(ns
, zone
));
1916 static uint16_t nvme_zrm_finish(NvmeNamespace
*ns
, NvmeZone
*zone
)
1918 switch (nvme_get_zone_state(zone
)) {
1919 case NVME_ZONE_STATE_FULL
:
1920 return NVME_SUCCESS
;
1922 case NVME_ZONE_STATE_IMPLICITLY_OPEN
:
1923 case NVME_ZONE_STATE_EXPLICITLY_OPEN
:
1924 nvme_aor_dec_open(ns
);
1926 case NVME_ZONE_STATE_CLOSED
:
1927 nvme_aor_dec_active(ns
);
1929 if (zone
->d
.za
& NVME_ZA_ZRWA_VALID
) {
1930 zone
->d
.za
&= ~NVME_ZA_ZRWA_VALID
;
1931 if (ns
->params
.numzrwa
) {
1937 case NVME_ZONE_STATE_EMPTY
:
1938 nvme_assign_zone_state(ns
, zone
, NVME_ZONE_STATE_FULL
);
1939 return NVME_SUCCESS
;
1942 return NVME_ZONE_INVAL_TRANSITION
;
1946 static uint16_t nvme_zrm_close(NvmeNamespace
*ns
, NvmeZone
*zone
)
1948 switch (nvme_get_zone_state(zone
)) {
1949 case NVME_ZONE_STATE_EXPLICITLY_OPEN
:
1950 case NVME_ZONE_STATE_IMPLICITLY_OPEN
:
1951 nvme_aor_dec_open(ns
);
1952 nvme_assign_zone_state(ns
, zone
, NVME_ZONE_STATE_CLOSED
);
1954 case NVME_ZONE_STATE_CLOSED
:
1955 return NVME_SUCCESS
;
1958 return NVME_ZONE_INVAL_TRANSITION
;
1962 static uint16_t nvme_zrm_reset(NvmeNamespace
*ns
, NvmeZone
*zone
)
1964 switch (nvme_get_zone_state(zone
)) {
1965 case NVME_ZONE_STATE_EXPLICITLY_OPEN
:
1966 case NVME_ZONE_STATE_IMPLICITLY_OPEN
:
1967 nvme_aor_dec_open(ns
);
1969 case NVME_ZONE_STATE_CLOSED
:
1970 nvme_aor_dec_active(ns
);
1972 if (zone
->d
.za
& NVME_ZA_ZRWA_VALID
) {
1973 if (ns
->params
.numzrwa
) {
1979 case NVME_ZONE_STATE_FULL
:
1980 zone
->w_ptr
= zone
->d
.zslba
;
1981 zone
->d
.wp
= zone
->w_ptr
;
1982 nvme_assign_zone_state(ns
, zone
, NVME_ZONE_STATE_EMPTY
);
1984 case NVME_ZONE_STATE_EMPTY
:
1985 return NVME_SUCCESS
;
1988 return NVME_ZONE_INVAL_TRANSITION
;
1992 static void nvme_zrm_auto_transition_zone(NvmeNamespace
*ns
)
1996 if (ns
->params
.max_open_zones
&&
1997 ns
->nr_open_zones
== ns
->params
.max_open_zones
) {
1998 zone
= QTAILQ_FIRST(&ns
->imp_open_zones
);
2001 * Automatically close this implicitly open zone.
2003 QTAILQ_REMOVE(&ns
->imp_open_zones
, zone
, entry
);
2004 nvme_zrm_close(ns
, zone
);
2010 NVME_ZRM_AUTO
= 1 << 0,
2011 NVME_ZRM_ZRWA
= 1 << 1,
2014 static uint16_t nvme_zrm_open_flags(NvmeCtrl
*n
, NvmeNamespace
*ns
,
2015 NvmeZone
*zone
, int flags
)
2020 switch (nvme_get_zone_state(zone
)) {
2021 case NVME_ZONE_STATE_EMPTY
:
2026 case NVME_ZONE_STATE_CLOSED
:
2027 if (n
->params
.auto_transition_zones
) {
2028 nvme_zrm_auto_transition_zone(ns
);
2030 status
= nvme_zns_check_resources(ns
, act
, 1,
2031 (flags
& NVME_ZRM_ZRWA
) ? 1 : 0);
2037 nvme_aor_inc_active(ns
);
2040 nvme_aor_inc_open(ns
);
2042 if (flags
& NVME_ZRM_AUTO
) {
2043 nvme_assign_zone_state(ns
, zone
, NVME_ZONE_STATE_IMPLICITLY_OPEN
);
2044 return NVME_SUCCESS
;
2049 case NVME_ZONE_STATE_IMPLICITLY_OPEN
:
2050 if (flags
& NVME_ZRM_AUTO
) {
2051 return NVME_SUCCESS
;
2054 nvme_assign_zone_state(ns
, zone
, NVME_ZONE_STATE_EXPLICITLY_OPEN
);
2058 case NVME_ZONE_STATE_EXPLICITLY_OPEN
:
2059 if (flags
& NVME_ZRM_ZRWA
) {
2062 zone
->d
.za
|= NVME_ZA_ZRWA_VALID
;
2065 return NVME_SUCCESS
;
2068 return NVME_ZONE_INVAL_TRANSITION
;
2072 static inline uint16_t nvme_zrm_auto(NvmeCtrl
*n
, NvmeNamespace
*ns
,
2075 return nvme_zrm_open_flags(n
, ns
, zone
, NVME_ZRM_AUTO
);
2078 static void nvme_advance_zone_wp(NvmeNamespace
*ns
, NvmeZone
*zone
,
2083 if (zone
->d
.wp
== nvme_zone_wr_boundary(zone
)) {
2084 nvme_zrm_finish(ns
, zone
);
2088 static void nvme_zoned_zrwa_implicit_flush(NvmeNamespace
*ns
, NvmeZone
*zone
,
2091 uint16_t nzrwafgs
= DIV_ROUND_UP(nlbc
, ns
->zns
.zrwafg
);
2093 nlbc
= nzrwafgs
* ns
->zns
.zrwafg
;
2095 trace_pci_nvme_zoned_zrwa_implicit_flush(zone
->d
.zslba
, nlbc
);
2097 zone
->w_ptr
+= nlbc
;
2099 nvme_advance_zone_wp(ns
, zone
, nlbc
);
2102 static void nvme_finalize_zoned_write(NvmeNamespace
*ns
, NvmeRequest
*req
)
2104 NvmeRwCmd
*rw
= (NvmeRwCmd
*)&req
->cmd
;
2109 slba
= le64_to_cpu(rw
->slba
);
2110 nlb
= le16_to_cpu(rw
->nlb
) + 1;
2111 zone
= nvme_get_zone_by_slba(ns
, slba
);
2114 if (zone
->d
.za
& NVME_ZA_ZRWA_VALID
) {
2115 uint64_t ezrwa
= zone
->w_ptr
+ ns
->zns
.zrwas
- 1;
2116 uint64_t elba
= slba
+ nlb
- 1;
2119 nvme_zoned_zrwa_implicit_flush(ns
, zone
, elba
- ezrwa
);
2125 nvme_advance_zone_wp(ns
, zone
, nlb
);
2128 static inline bool nvme_is_write(NvmeRequest
*req
)
2130 NvmeRwCmd
*rw
= (NvmeRwCmd
*)&req
->cmd
;
2132 return rw
->opcode
== NVME_CMD_WRITE
||
2133 rw
->opcode
== NVME_CMD_ZONE_APPEND
||
2134 rw
->opcode
== NVME_CMD_WRITE_ZEROES
;
2137 static AioContext
*nvme_get_aio_context(BlockAIOCB
*acb
)
2139 return qemu_get_aio_context();
2142 static void nvme_misc_cb(void *opaque
, int ret
)
2144 NvmeRequest
*req
= opaque
;
2146 trace_pci_nvme_misc_cb(nvme_cid(req
));
2149 nvme_aio_err(req
, ret
);
2152 nvme_enqueue_req_completion(nvme_cq(req
), req
);
2155 void nvme_rw_complete_cb(void *opaque
, int ret
)
2157 NvmeRequest
*req
= opaque
;
2158 NvmeNamespace
*ns
= req
->ns
;
2159 BlockBackend
*blk
= ns
->blkconf
.blk
;
2160 BlockAcctCookie
*acct
= &req
->acct
;
2161 BlockAcctStats
*stats
= blk_get_stats(blk
);
2163 trace_pci_nvme_rw_complete_cb(nvme_cid(req
), blk_name(blk
));
2166 block_acct_failed(stats
, acct
);
2167 nvme_aio_err(req
, ret
);
2169 block_acct_done(stats
, acct
);
2172 if (ns
->params
.zoned
&& nvme_is_write(req
)) {
2173 nvme_finalize_zoned_write(ns
, req
);
2176 nvme_enqueue_req_completion(nvme_cq(req
), req
);
2179 static void nvme_rw_cb(void *opaque
, int ret
)
2181 NvmeRequest
*req
= opaque
;
2182 NvmeNamespace
*ns
= req
->ns
;
2184 BlockBackend
*blk
= ns
->blkconf
.blk
;
2186 trace_pci_nvme_rw_cb(nvme_cid(req
), blk_name(blk
));
2193 NvmeRwCmd
*rw
= (NvmeRwCmd
*)&req
->cmd
;
2194 uint64_t slba
= le64_to_cpu(rw
->slba
);
2195 uint32_t nlb
= (uint32_t)le16_to_cpu(rw
->nlb
) + 1;
2196 uint64_t offset
= nvme_moff(ns
, slba
);
2198 if (req
->cmd
.opcode
== NVME_CMD_WRITE_ZEROES
) {
2199 size_t mlen
= nvme_m2b(ns
, nlb
);
2201 req
->aiocb
= blk_aio_pwrite_zeroes(blk
, offset
, mlen
,
2203 nvme_rw_complete_cb
, req
);
2207 if (nvme_ns_ext(ns
) || req
->cmd
.mptr
) {
2210 nvme_sg_unmap(&req
->sg
);
2211 status
= nvme_map_mdata(nvme_ctrl(req
), nlb
, req
);
2217 if (req
->cmd
.opcode
== NVME_CMD_READ
) {
2218 return nvme_blk_read(blk
, offset
, 1, nvme_rw_complete_cb
, req
);
2221 return nvme_blk_write(blk
, offset
, 1, nvme_rw_complete_cb
, req
);
2226 nvme_rw_complete_cb(req
, ret
);
2229 static void nvme_verify_cb(void *opaque
, int ret
)
2231 NvmeBounceContext
*ctx
= opaque
;
2232 NvmeRequest
*req
= ctx
->req
;
2233 NvmeNamespace
*ns
= req
->ns
;
2234 BlockBackend
*blk
= ns
->blkconf
.blk
;
2235 BlockAcctCookie
*acct
= &req
->acct
;
2236 BlockAcctStats
*stats
= blk_get_stats(blk
);
2237 NvmeRwCmd
*rw
= (NvmeRwCmd
*)&req
->cmd
;
2238 uint64_t slba
= le64_to_cpu(rw
->slba
);
2239 uint8_t prinfo
= NVME_RW_PRINFO(le16_to_cpu(rw
->control
));
2240 uint16_t apptag
= le16_to_cpu(rw
->apptag
);
2241 uint16_t appmask
= le16_to_cpu(rw
->appmask
);
2242 uint64_t reftag
= le32_to_cpu(rw
->reftag
);
2243 uint64_t cdw3
= le32_to_cpu(rw
->cdw3
);
2246 reftag
|= cdw3
<< 32;
2248 trace_pci_nvme_verify_cb(nvme_cid(req
), prinfo
, apptag
, appmask
, reftag
);
2251 block_acct_failed(stats
, acct
);
2252 nvme_aio_err(req
, ret
);
2256 block_acct_done(stats
, acct
);
2258 if (NVME_ID_NS_DPS_TYPE(ns
->id_ns
.dps
)) {
2259 status
= nvme_dif_mangle_mdata(ns
, ctx
->mdata
.bounce
,
2260 ctx
->mdata
.iov
.size
, slba
);
2262 req
->status
= status
;
2266 req
->status
= nvme_dif_check(ns
, ctx
->data
.bounce
, ctx
->data
.iov
.size
,
2267 ctx
->mdata
.bounce
, ctx
->mdata
.iov
.size
,
2268 prinfo
, slba
, apptag
, appmask
, &reftag
);
2272 qemu_iovec_destroy(&ctx
->data
.iov
);
2273 g_free(ctx
->data
.bounce
);
2275 qemu_iovec_destroy(&ctx
->mdata
.iov
);
2276 g_free(ctx
->mdata
.bounce
);
2280 nvme_enqueue_req_completion(nvme_cq(req
), req
);
2284 static void nvme_verify_mdata_in_cb(void *opaque
, int ret
)
2286 NvmeBounceContext
*ctx
= opaque
;
2287 NvmeRequest
*req
= ctx
->req
;
2288 NvmeNamespace
*ns
= req
->ns
;
2289 NvmeRwCmd
*rw
= (NvmeRwCmd
*)&req
->cmd
;
2290 uint64_t slba
= le64_to_cpu(rw
->slba
);
2291 uint32_t nlb
= le16_to_cpu(rw
->nlb
) + 1;
2292 size_t mlen
= nvme_m2b(ns
, nlb
);
2293 uint64_t offset
= nvme_moff(ns
, slba
);
2294 BlockBackend
*blk
= ns
->blkconf
.blk
;
2296 trace_pci_nvme_verify_mdata_in_cb(nvme_cid(req
), blk_name(blk
));
2302 ctx
->mdata
.bounce
= g_malloc(mlen
);
2304 qemu_iovec_reset(&ctx
->mdata
.iov
);
2305 qemu_iovec_add(&ctx
->mdata
.iov
, ctx
->mdata
.bounce
, mlen
);
2307 req
->aiocb
= blk_aio_preadv(blk
, offset
, &ctx
->mdata
.iov
, 0,
2308 nvme_verify_cb
, ctx
);
2312 nvme_verify_cb(ctx
, ret
);
2315 struct nvme_compare_ctx
{
2327 static void nvme_compare_mdata_cb(void *opaque
, int ret
)
2329 NvmeRequest
*req
= opaque
;
2330 NvmeNamespace
*ns
= req
->ns
;
2331 NvmeCtrl
*n
= nvme_ctrl(req
);
2332 NvmeRwCmd
*rw
= (NvmeRwCmd
*)&req
->cmd
;
2333 uint8_t prinfo
= NVME_RW_PRINFO(le16_to_cpu(rw
->control
));
2334 uint16_t apptag
= le16_to_cpu(rw
->apptag
);
2335 uint16_t appmask
= le16_to_cpu(rw
->appmask
);
2336 uint64_t reftag
= le32_to_cpu(rw
->reftag
);
2337 uint64_t cdw3
= le32_to_cpu(rw
->cdw3
);
2338 struct nvme_compare_ctx
*ctx
= req
->opaque
;
2339 g_autofree
uint8_t *buf
= NULL
;
2340 BlockBackend
*blk
= ns
->blkconf
.blk
;
2341 BlockAcctCookie
*acct
= &req
->acct
;
2342 BlockAcctStats
*stats
= blk_get_stats(blk
);
2343 uint16_t status
= NVME_SUCCESS
;
2345 reftag
|= cdw3
<< 32;
2347 trace_pci_nvme_compare_mdata_cb(nvme_cid(req
));
2350 block_acct_failed(stats
, acct
);
2351 nvme_aio_err(req
, ret
);
2355 buf
= g_malloc(ctx
->mdata
.iov
.size
);
2357 status
= nvme_bounce_mdata(n
, buf
, ctx
->mdata
.iov
.size
,
2358 NVME_TX_DIRECTION_TO_DEVICE
, req
);
2360 req
->status
= status
;
2364 if (NVME_ID_NS_DPS_TYPE(ns
->id_ns
.dps
)) {
2365 uint64_t slba
= le64_to_cpu(rw
->slba
);
2367 uint8_t *mbufp
= ctx
->mdata
.bounce
;
2368 uint8_t *end
= mbufp
+ ctx
->mdata
.iov
.size
;
2371 status
= nvme_dif_check(ns
, ctx
->data
.bounce
, ctx
->data
.iov
.size
,
2372 ctx
->mdata
.bounce
, ctx
->mdata
.iov
.size
, prinfo
,
2373 slba
, apptag
, appmask
, &reftag
);
2375 req
->status
= status
;
2380 * When formatted with protection information, do not compare the DIF
2383 if (!(ns
->id_ns
.dps
& NVME_ID_NS_DPS_FIRST_EIGHT
)) {
2384 pil
= ns
->lbaf
.ms
- nvme_pi_tuple_size(ns
);
2387 for (bufp
= buf
; mbufp
< end
; bufp
+= ns
->lbaf
.ms
, mbufp
+= ns
->lbaf
.ms
) {
2388 if (memcmp(bufp
+ pil
, mbufp
+ pil
, ns
->lbaf
.ms
- pil
)) {
2389 req
->status
= NVME_CMP_FAILURE
| NVME_DNR
;
2397 if (memcmp(buf
, ctx
->mdata
.bounce
, ctx
->mdata
.iov
.size
)) {
2398 req
->status
= NVME_CMP_FAILURE
| NVME_DNR
;
2402 block_acct_done(stats
, acct
);
2405 qemu_iovec_destroy(&ctx
->data
.iov
);
2406 g_free(ctx
->data
.bounce
);
2408 qemu_iovec_destroy(&ctx
->mdata
.iov
);
2409 g_free(ctx
->mdata
.bounce
);
2413 nvme_enqueue_req_completion(nvme_cq(req
), req
);
2416 static void nvme_compare_data_cb(void *opaque
, int ret
)
2418 NvmeRequest
*req
= opaque
;
2419 NvmeCtrl
*n
= nvme_ctrl(req
);
2420 NvmeNamespace
*ns
= req
->ns
;
2421 BlockBackend
*blk
= ns
->blkconf
.blk
;
2422 BlockAcctCookie
*acct
= &req
->acct
;
2423 BlockAcctStats
*stats
= blk_get_stats(blk
);
2425 struct nvme_compare_ctx
*ctx
= req
->opaque
;
2426 g_autofree
uint8_t *buf
= NULL
;
2429 trace_pci_nvme_compare_data_cb(nvme_cid(req
));
2432 block_acct_failed(stats
, acct
);
2433 nvme_aio_err(req
, ret
);
2437 buf
= g_malloc(ctx
->data
.iov
.size
);
2439 status
= nvme_bounce_data(n
, buf
, ctx
->data
.iov
.size
,
2440 NVME_TX_DIRECTION_TO_DEVICE
, req
);
2442 req
->status
= status
;
2446 if (memcmp(buf
, ctx
->data
.bounce
, ctx
->data
.iov
.size
)) {
2447 req
->status
= NVME_CMP_FAILURE
| NVME_DNR
;
2452 NvmeRwCmd
*rw
= (NvmeRwCmd
*)&req
->cmd
;
2453 uint64_t slba
= le64_to_cpu(rw
->slba
);
2454 uint32_t nlb
= le16_to_cpu(rw
->nlb
) + 1;
2455 size_t mlen
= nvme_m2b(ns
, nlb
);
2456 uint64_t offset
= nvme_moff(ns
, slba
);
2458 ctx
->mdata
.bounce
= g_malloc(mlen
);
2460 qemu_iovec_init(&ctx
->mdata
.iov
, 1);
2461 qemu_iovec_add(&ctx
->mdata
.iov
, ctx
->mdata
.bounce
, mlen
);
2463 req
->aiocb
= blk_aio_preadv(blk
, offset
, &ctx
->mdata
.iov
, 0,
2464 nvme_compare_mdata_cb
, req
);
2468 block_acct_done(stats
, acct
);
2471 qemu_iovec_destroy(&ctx
->data
.iov
);
2472 g_free(ctx
->data
.bounce
);
2475 nvme_enqueue_req_completion(nvme_cq(req
), req
);
2478 typedef struct NvmeDSMAIOCB
{
2484 NvmeDsmRange
*range
;
static void nvme_dsm_cancel(BlockAIOCB *aiocb)
{
    NvmeDSMAIOCB *iocb = container_of(aiocb, NvmeDSMAIOCB, common);

    /* break nvme_dsm_cb loop */
    iocb->idx = iocb->nr;
    iocb->ret = -ECANCELED;

    if (iocb->aiocb) {
        blk_aio_cancel_async(iocb->aiocb);
        iocb->aiocb = NULL;
    } else {
        /*
         * We only reach this if nvme_dsm_cancel() has already been called or
         * the command ran to completion.
         */
        assert(iocb->idx == iocb->nr);
    }
}

static const AIOCBInfo nvme_dsm_aiocb_info = {
    .aiocb_size   = sizeof(NvmeDSMAIOCB),
    .cancel_async = nvme_dsm_cancel,
};
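/*
 * The DSM deallocate path is an iterative AIO state machine: nvme_dsm_cb()
 * picks up the next range, bounds-checks it and issues the discard;
 * nvme_dsm_md_cb() then zeroes the corresponding metadata area, but only if
 * the discarded blocks actually read back as zeroes. Cancellation forces idx
 * to nr so the loop terminates on the next callback.
 */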
2514 static void nvme_dsm_cb(void *opaque
, int ret
);
2516 static void nvme_dsm_md_cb(void *opaque
, int ret
)
2518 NvmeDSMAIOCB
*iocb
= opaque
;
2519 NvmeRequest
*req
= iocb
->req
;
2520 NvmeNamespace
*ns
= req
->ns
;
2521 NvmeDsmRange
*range
;
2525 if (ret
< 0 || iocb
->ret
< 0 || !ns
->lbaf
.ms
) {
2529 range
= &iocb
->range
[iocb
->idx
- 1];
2530 slba
= le64_to_cpu(range
->slba
);
2531 nlb
= le32_to_cpu(range
->nlb
);
2534 * Check that all block were discarded (zeroed); otherwise we do not zero
2538 ret
= nvme_block_status_all(ns
, slba
, nlb
, BDRV_BLOCK_ZERO
);
2544 nvme_dsm_cb(iocb
, 0);
2548 iocb
->aiocb
= blk_aio_pwrite_zeroes(ns
->blkconf
.blk
, nvme_moff(ns
, slba
),
2549 nvme_m2b(ns
, nlb
), BDRV_REQ_MAY_UNMAP
,
2554 nvme_dsm_cb(iocb
, ret
);
2557 static void nvme_dsm_cb(void *opaque
, int ret
)
2559 NvmeDSMAIOCB
*iocb
= opaque
;
2560 NvmeRequest
*req
= iocb
->req
;
2561 NvmeCtrl
*n
= nvme_ctrl(req
);
2562 NvmeNamespace
*ns
= req
->ns
;
2563 NvmeDsmRange
*range
;
2567 if (iocb
->ret
< 0) {
2569 } else if (ret
< 0) {
2575 if (iocb
->idx
== iocb
->nr
) {
2579 range
= &iocb
->range
[iocb
->idx
++];
2580 slba
= le64_to_cpu(range
->slba
);
2581 nlb
= le32_to_cpu(range
->nlb
);
2583 trace_pci_nvme_dsm_deallocate(slba
, nlb
);
2585 if (nlb
> n
->dmrsl
) {
2586 trace_pci_nvme_dsm_single_range_limit_exceeded(nlb
, n
->dmrsl
);
2590 if (nvme_check_bounds(ns
, slba
, nlb
)) {
2591 trace_pci_nvme_err_invalid_lba_range(slba
, nlb
,
2596 iocb
->aiocb
= blk_aio_pdiscard(ns
->blkconf
.blk
, nvme_l2b(ns
, slba
),
2598 nvme_dsm_md_cb
, iocb
);
2603 iocb
->common
.cb(iocb
->common
.opaque
, iocb
->ret
);
2604 qemu_aio_unref(iocb
);
static uint16_t nvme_dsm(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeNamespace *ns = req->ns;
    NvmeDsmCmd *dsm = (NvmeDsmCmd *) &req->cmd;
    uint32_t attr = le32_to_cpu(dsm->attributes);
    uint32_t nr = (le32_to_cpu(dsm->nr) & 0xff) + 1;
    uint16_t status = NVME_SUCCESS;

    trace_pci_nvme_dsm(nr, attr);

    if (attr & NVME_DSMGMT_AD) {
        NvmeDSMAIOCB *iocb = blk_aio_get(&nvme_dsm_aiocb_info, ns->blkconf.blk,
                                         nvme_misc_cb, req);

        iocb->req = req;
        iocb->ret = 0;
        iocb->range = g_new(NvmeDsmRange, nr);
        iocb->nr = nr;
        iocb->idx = 0;

        status = nvme_h2c(n, (uint8_t *)iocb->range, sizeof(NvmeDsmRange) * nr,
                          req);
        if (status) {
            g_free(iocb->range);
            qemu_aio_unref(iocb);

            return status;
        }

        req->aiocb = &iocb->common;
        nvme_dsm_cb(iocb, 0);

        return NVME_NO_COMPLETE;
    }

    return status;
}
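/*
 * Verify command entry point. The data (and, in the callbacks above, the
 * metadata) is read into bounce buffers and checked; nothing is written
 * back. The maximum Verify transfer size is bounded by the vsl device
 * parameter (page_size << vsl).
 */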
2645 static uint16_t nvme_verify(NvmeCtrl
*n
, NvmeRequest
*req
)
2647 NvmeRwCmd
*rw
= (NvmeRwCmd
*)&req
->cmd
;
2648 NvmeNamespace
*ns
= req
->ns
;
2649 BlockBackend
*blk
= ns
->blkconf
.blk
;
2650 uint64_t slba
= le64_to_cpu(rw
->slba
);
2651 uint32_t nlb
= le16_to_cpu(rw
->nlb
) + 1;
2652 size_t len
= nvme_l2b(ns
, nlb
);
2653 int64_t offset
= nvme_l2b(ns
, slba
);
2654 uint8_t prinfo
= NVME_RW_PRINFO(le16_to_cpu(rw
->control
));
2655 uint32_t reftag
= le32_to_cpu(rw
->reftag
);
2656 NvmeBounceContext
*ctx
= NULL
;
2659 trace_pci_nvme_verify(nvme_cid(req
), nvme_nsid(ns
), slba
, nlb
);
2661 if (NVME_ID_NS_DPS_TYPE(ns
->id_ns
.dps
)) {
2662 status
= nvme_check_prinfo(ns
, prinfo
, slba
, reftag
);
2667 if (prinfo
& NVME_PRINFO_PRACT
) {
2668 return NVME_INVALID_PROT_INFO
| NVME_DNR
;
2672 if (len
> n
->page_size
<< n
->params
.vsl
) {
2673 return NVME_INVALID_FIELD
| NVME_DNR
;
2676 status
= nvme_check_bounds(ns
, slba
, nlb
);
2681 if (NVME_ERR_REC_DULBE(ns
->features
.err_rec
)) {
2682 status
= nvme_check_dulbe(ns
, slba
, nlb
);
2688 ctx
= g_new0(NvmeBounceContext
, 1);
2691 ctx
->data
.bounce
= g_malloc(len
);
2693 qemu_iovec_init(&ctx
->data
.iov
, 1);
2694 qemu_iovec_add(&ctx
->data
.iov
, ctx
->data
.bounce
, len
);
2696 block_acct_start(blk_get_stats(blk
), &req
->acct
, ctx
->data
.iov
.size
,
2699 req
->aiocb
= blk_aio_preadv(ns
->blkconf
.blk
, offset
, &ctx
->data
.iov
, 0,
2700 nvme_verify_mdata_in_cb
, ctx
);
2701 return NVME_NO_COMPLETE
;
2704 typedef struct NvmeCopyAIOCB
{
2711 unsigned int format
;
2718 BlockAcctCookie read
;
2719 BlockAcctCookie write
;
static void nvme_copy_cancel(BlockAIOCB *aiocb)
{
    NvmeCopyAIOCB *iocb = container_of(aiocb, NvmeCopyAIOCB, common);

    iocb->ret = -ECANCELED;

    if (iocb->aiocb) {
        blk_aio_cancel_async(iocb->aiocb);
        iocb->aiocb = NULL;
    }
}

static const AIOCBInfo nvme_copy_aiocb_info = {
    .aiocb_size   = sizeof(NvmeCopyAIOCB),
    .cancel_async = nvme_copy_cancel,
};
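/*
 * Copy command. Source ranges are processed one at a time through a chain of
 * callbacks: nvme_do_copy() reads the source data, nvme_copy_in_cb() reads
 * the source metadata, nvme_copy_in_completed_cb() runs the protection
 * checks and writes the data to the destination, nvme_copy_out_cb() writes
 * the destination metadata and nvme_copy_out_completed_cb() advances the
 * zone write pointer (for zoned namespaces) before moving to the next range.
 * nvme_copy_done() below reports the number of completed ranges in the CQE
 * result if the command did not process all of them.
 */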
static void nvme_copy_done(NvmeCopyAIOCB *iocb)
{
    NvmeRequest *req = iocb->req;
    NvmeNamespace *ns = req->ns;
    BlockAcctStats *stats = blk_get_stats(ns->blkconf.blk);

    if (iocb->idx != iocb->nr) {
        req->cqe.result = cpu_to_le32(iocb->idx);
    }

    qemu_iovec_destroy(&iocb->iov);
    g_free(iocb->bounce);

    if (iocb->ret < 0) {
        block_acct_failed(stats, &iocb->acct.read);
        block_acct_failed(stats, &iocb->acct.write);
    } else {
        block_acct_done(stats, &iocb->acct.read);
        block_acct_done(stats, &iocb->acct.write);
    }

    iocb->common.cb(iocb->common.opaque, iocb->ret);
    qemu_aio_unref(iocb);
}
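/*
 * The nvme_copy_source_range_parse_*() helpers below decode the two source
 * range descriptor formats (Copy formats 0h and 1h). NLB fields are 0's
 * based and converted to 1's based here; format 1h carries an extended
 * (64-bit) reference tag that is assembled byte by byte from the descriptor.
 */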
2770 static void nvme_do_copy(NvmeCopyAIOCB
*iocb
);
2772 static void nvme_copy_source_range_parse_format0(void *ranges
, int idx
,
2773 uint64_t *slba
, uint32_t *nlb
,
2778 NvmeCopySourceRangeFormat0
*_ranges
= ranges
;
2781 *slba
= le64_to_cpu(_ranges
[idx
].slba
);
2785 *nlb
= le16_to_cpu(_ranges
[idx
].nlb
) + 1;
2789 *apptag
= le16_to_cpu(_ranges
[idx
].apptag
);
2793 *appmask
= le16_to_cpu(_ranges
[idx
].appmask
);
2797 *reftag
= le32_to_cpu(_ranges
[idx
].reftag
);
2801 static void nvme_copy_source_range_parse_format1(void *ranges
, int idx
,
2802 uint64_t *slba
, uint32_t *nlb
,
2807 NvmeCopySourceRangeFormat1
*_ranges
= ranges
;
2810 *slba
= le64_to_cpu(_ranges
[idx
].slba
);
2814 *nlb
= le16_to_cpu(_ranges
[idx
].nlb
) + 1;
2818 *apptag
= le16_to_cpu(_ranges
[idx
].apptag
);
2822 *appmask
= le16_to_cpu(_ranges
[idx
].appmask
);
2828 *reftag
|= (uint64_t)_ranges
[idx
].sr
[4] << 40;
2829 *reftag
|= (uint64_t)_ranges
[idx
].sr
[5] << 32;
2830 *reftag
|= (uint64_t)_ranges
[idx
].sr
[6] << 24;
2831 *reftag
|= (uint64_t)_ranges
[idx
].sr
[7] << 16;
2832 *reftag
|= (uint64_t)_ranges
[idx
].sr
[8] << 8;
2833 *reftag
|= (uint64_t)_ranges
[idx
].sr
[9];
2837 static void nvme_copy_source_range_parse(void *ranges
, int idx
, uint8_t format
,
2838 uint64_t *slba
, uint32_t *nlb
,
2839 uint16_t *apptag
, uint16_t *appmask
,
2843 case NVME_COPY_FORMAT_0
:
2844 nvme_copy_source_range_parse_format0(ranges
, idx
, slba
, nlb
, apptag
,
2848 case NVME_COPY_FORMAT_1
:
2849 nvme_copy_source_range_parse_format1(ranges
, idx
, slba
, nlb
, apptag
,
static inline uint16_t nvme_check_copy_mcl(NvmeNamespace *ns,
                                           NvmeCopyAIOCB *iocb, uint16_t nr)
{
    uint32_t copy_len = 0;

    for (int idx = 0; idx < nr; idx++) {
        uint32_t nlb;

        nvme_copy_source_range_parse(iocb->ranges, idx, iocb->format, NULL,
                                     &nlb, NULL, NULL, NULL);
        copy_len += nlb + 1;
    }

    if (copy_len > ns->id_ns.mcl) {
        return NVME_CMD_SIZE_LIMIT | NVME_DNR;
    }

    return NVME_SUCCESS;
}
2877 static void nvme_copy_out_completed_cb(void *opaque
, int ret
)
2879 NvmeCopyAIOCB
*iocb
= opaque
;
2880 NvmeRequest
*req
= iocb
->req
;
2881 NvmeNamespace
*ns
= req
->ns
;
2884 nvme_copy_source_range_parse(iocb
->ranges
, iocb
->idx
, iocb
->format
, NULL
,
2885 &nlb
, NULL
, NULL
, NULL
);
2890 } else if (iocb
->ret
< 0) {
2894 if (ns
->params
.zoned
) {
2895 nvme_advance_zone_wp(ns
, iocb
->zone
, nlb
);
2904 static void nvme_copy_out_cb(void *opaque
, int ret
)
2906 NvmeCopyAIOCB
*iocb
= opaque
;
2907 NvmeRequest
*req
= iocb
->req
;
2908 NvmeNamespace
*ns
= req
->ns
;
2913 if (ret
< 0 || iocb
->ret
< 0 || !ns
->lbaf
.ms
) {
2917 nvme_copy_source_range_parse(iocb
->ranges
, iocb
->idx
, iocb
->format
, NULL
,
2918 &nlb
, NULL
, NULL
, NULL
);
2920 mlen
= nvme_m2b(ns
, nlb
);
2921 mbounce
= iocb
->bounce
+ nvme_l2b(ns
, nlb
);
2923 qemu_iovec_reset(&iocb
->iov
);
2924 qemu_iovec_add(&iocb
->iov
, mbounce
, mlen
);
2926 iocb
->aiocb
= blk_aio_pwritev(ns
->blkconf
.blk
, nvme_moff(ns
, iocb
->slba
),
2927 &iocb
->iov
, 0, nvme_copy_out_completed_cb
,
2933 nvme_copy_out_completed_cb(iocb
, ret
);
2936 static void nvme_copy_in_completed_cb(void *opaque
, int ret
)
2938 NvmeCopyAIOCB
*iocb
= opaque
;
2939 NvmeRequest
*req
= iocb
->req
;
2940 NvmeNamespace
*ns
= req
->ns
;
2943 uint16_t apptag
, appmask
;
2951 } else if (iocb
->ret
< 0) {
2955 nvme_copy_source_range_parse(iocb
->ranges
, iocb
->idx
, iocb
->format
, &slba
,
2956 &nlb
, &apptag
, &appmask
, &reftag
);
2957 len
= nvme_l2b(ns
, nlb
);
2959 trace_pci_nvme_copy_out(iocb
->slba
, nlb
);
2961 if (NVME_ID_NS_DPS_TYPE(ns
->id_ns
.dps
)) {
2962 NvmeCopyCmd
*copy
= (NvmeCopyCmd
*)&req
->cmd
;
2964 uint16_t prinfor
= ((copy
->control
[0] >> 4) & 0xf);
2965 uint16_t prinfow
= ((copy
->control
[2] >> 2) & 0xf);
2967 size_t mlen
= nvme_m2b(ns
, nlb
);
2968 uint8_t *mbounce
= iocb
->bounce
+ nvme_l2b(ns
, nlb
);
2970 status
= nvme_dif_mangle_mdata(ns
, mbounce
, mlen
, slba
);
2974 status
= nvme_dif_check(ns
, iocb
->bounce
, len
, mbounce
, mlen
, prinfor
,
2975 slba
, apptag
, appmask
, &reftag
);
2980 apptag
= le16_to_cpu(copy
->apptag
);
2981 appmask
= le16_to_cpu(copy
->appmask
);
2983 if (prinfow
& NVME_PRINFO_PRACT
) {
2984 status
= nvme_check_prinfo(ns
, prinfow
, iocb
->slba
, iocb
->reftag
);
2989 nvme_dif_pract_generate_dif(ns
, iocb
->bounce
, len
, mbounce
, mlen
,
2990 apptag
, &iocb
->reftag
);
2992 status
= nvme_dif_check(ns
, iocb
->bounce
, len
, mbounce
, mlen
,
2993 prinfow
, iocb
->slba
, apptag
, appmask
,
3001 status
= nvme_check_bounds(ns
, iocb
->slba
, nlb
);
3006 if (ns
->params
.zoned
) {
3007 status
= nvme_check_zone_write(ns
, iocb
->zone
, iocb
->slba
, nlb
);
3012 if (!(iocb
->zone
->d
.za
& NVME_ZA_ZRWA_VALID
)) {
3013 iocb
->zone
->w_ptr
+= nlb
;
3017 qemu_iovec_reset(&iocb
->iov
);
3018 qemu_iovec_add(&iocb
->iov
, iocb
->bounce
, len
);
3020 iocb
->aiocb
= blk_aio_pwritev(ns
->blkconf
.blk
, nvme_l2b(ns
, iocb
->slba
),
3021 &iocb
->iov
, 0, nvme_copy_out_cb
, iocb
);
3026 req
->status
= status
;
3032 static void nvme_copy_in_cb(void *opaque
, int ret
)
3034 NvmeCopyAIOCB
*iocb
= opaque
;
3035 NvmeRequest
*req
= iocb
->req
;
3036 NvmeNamespace
*ns
= req
->ns
;
3040 if (ret
< 0 || iocb
->ret
< 0 || !ns
->lbaf
.ms
) {
3044 nvme_copy_source_range_parse(iocb
->ranges
, iocb
->idx
, iocb
->format
, &slba
,
3045 &nlb
, NULL
, NULL
, NULL
);
3047 qemu_iovec_reset(&iocb
->iov
);
3048 qemu_iovec_add(&iocb
->iov
, iocb
->bounce
+ nvme_l2b(ns
, nlb
),
3051 iocb
->aiocb
= blk_aio_preadv(ns
->blkconf
.blk
, nvme_moff(ns
, slba
),
3052 &iocb
->iov
, 0, nvme_copy_in_completed_cb
,
3057 nvme_copy_in_completed_cb(iocb
, ret
);
3060 static void nvme_do_copy(NvmeCopyAIOCB
*iocb
)
3062 NvmeRequest
*req
= iocb
->req
;
3063 NvmeNamespace
*ns
= req
->ns
;
3069 if (iocb
->ret
< 0) {
3073 if (iocb
->idx
== iocb
->nr
) {
3077 nvme_copy_source_range_parse(iocb
->ranges
, iocb
->idx
, iocb
->format
, &slba
,
3078 &nlb
, NULL
, NULL
, NULL
);
3079 len
= nvme_l2b(ns
, nlb
);
3081 trace_pci_nvme_copy_source_range(slba
, nlb
);
3083 if (nlb
> le16_to_cpu(ns
->id_ns
.mssrl
)) {
3084 status
= NVME_CMD_SIZE_LIMIT
| NVME_DNR
;
3088 status
= nvme_check_bounds(ns
, slba
, nlb
);
3093 if (NVME_ERR_REC_DULBE(ns
->features
.err_rec
)) {
3094 status
= nvme_check_dulbe(ns
, slba
, nlb
);
3100 if (ns
->params
.zoned
) {
3101 status
= nvme_check_zone_read(ns
, slba
, nlb
);
3107 qemu_iovec_reset(&iocb
->iov
);
3108 qemu_iovec_add(&iocb
->iov
, iocb
->bounce
, len
);
3110 iocb
->aiocb
= blk_aio_preadv(ns
->blkconf
.blk
, nvme_l2b(ns
, slba
),
3111 &iocb
->iov
, 0, nvme_copy_in_cb
, iocb
);
3115 req
->status
= status
;
3118 nvme_copy_done(iocb
);
3121 static uint16_t nvme_copy(NvmeCtrl
*n
, NvmeRequest
*req
)
3123 NvmeNamespace
*ns
= req
->ns
;
3124 NvmeCopyCmd
*copy
= (NvmeCopyCmd
*)&req
->cmd
;
3125 NvmeCopyAIOCB
*iocb
= blk_aio_get(&nvme_copy_aiocb_info
, ns
->blkconf
.blk
,
3127 uint16_t nr
= copy
->nr
+ 1;
3128 uint8_t format
= copy
->control
[0] & 0xf;
3129 uint16_t prinfor
= ((copy
->control
[0] >> 4) & 0xf);
3130 uint16_t prinfow
= ((copy
->control
[2] >> 2) & 0xf);
3131 size_t len
= sizeof(NvmeCopySourceRangeFormat0
);
3135 trace_pci_nvme_copy(nvme_cid(req
), nvme_nsid(ns
), nr
, format
);
3137 iocb
->ranges
= NULL
;
3140 if (NVME_ID_NS_DPS_TYPE(ns
->id_ns
.dps
) &&
3141 ((prinfor
& NVME_PRINFO_PRACT
) != (prinfow
& NVME_PRINFO_PRACT
))) {
3142 status
= NVME_INVALID_FIELD
| NVME_DNR
;
3146 if (!(n
->id_ctrl
.ocfs
& (1 << format
))) {
3147 trace_pci_nvme_err_copy_invalid_format(format
);
3148 status
= NVME_INVALID_FIELD
| NVME_DNR
;
3152 if (nr
> ns
->id_ns
.msrc
+ 1) {
3153 status
= NVME_CMD_SIZE_LIMIT
| NVME_DNR
;
3157 if ((ns
->pif
== 0x0 && format
!= 0x0) ||
3158 (ns
->pif
!= 0x0 && format
!= 0x1)) {
3159 status
= NVME_INVALID_FORMAT
| NVME_DNR
;
3164 len
= sizeof(NvmeCopySourceRangeFormat1
);
3167 iocb
->format
= format
;
3168 iocb
->ranges
= g_malloc_n(nr
, len
);
3169 status
= nvme_h2c(n
, (uint8_t *)iocb
->ranges
, len
* nr
, req
);
3174 iocb
->slba
= le64_to_cpu(copy
->sdlba
);
3176 if (ns
->params
.zoned
) {
3177 iocb
->zone
= nvme_get_zone_by_slba(ns
, iocb
->slba
);
3179 status
= NVME_LBA_RANGE
| NVME_DNR
;
3183 status
= nvme_zrm_auto(n
, ns
, iocb
->zone
);
3189 status
= nvme_check_copy_mcl(ns
, iocb
, nr
);
3198 iocb
->reftag
= le32_to_cpu(copy
->reftag
);
3199 iocb
->reftag
|= (uint64_t)le32_to_cpu(copy
->cdw3
) << 32;
3200 iocb
->bounce
= g_malloc_n(le16_to_cpu(ns
->id_ns
.mssrl
),
3201 ns
->lbasz
+ ns
->lbaf
.ms
);
3203 qemu_iovec_init(&iocb
->iov
, 1);
3205 block_acct_start(blk_get_stats(ns
->blkconf
.blk
), &iocb
->acct
.read
, 0,
3207 block_acct_start(blk_get_stats(ns
->blkconf
.blk
), &iocb
->acct
.write
, 0,
3210 req
->aiocb
= &iocb
->common
;
3213 return NVME_NO_COMPLETE
;
3216 g_free(iocb
->ranges
);
3217 qemu_aio_unref(iocb
);
3221 static uint16_t nvme_compare(NvmeCtrl
*n
, NvmeRequest
*req
)
3223 NvmeRwCmd
*rw
= (NvmeRwCmd
*)&req
->cmd
;
3224 NvmeNamespace
*ns
= req
->ns
;
3225 BlockBackend
*blk
= ns
->blkconf
.blk
;
3226 uint64_t slba
= le64_to_cpu(rw
->slba
);
3227 uint32_t nlb
= le16_to_cpu(rw
->nlb
) + 1;
3228 uint8_t prinfo
= NVME_RW_PRINFO(le16_to_cpu(rw
->control
));
3229 size_t data_len
= nvme_l2b(ns
, nlb
);
3230 size_t len
= data_len
;
3231 int64_t offset
= nvme_l2b(ns
, slba
);
3232 struct nvme_compare_ctx
*ctx
= NULL
;
3235 trace_pci_nvme_compare(nvme_cid(req
), nvme_nsid(ns
), slba
, nlb
);
3237 if (NVME_ID_NS_DPS_TYPE(ns
->id_ns
.dps
) && (prinfo
& NVME_PRINFO_PRACT
)) {
3238 return NVME_INVALID_PROT_INFO
| NVME_DNR
;
3241 if (nvme_ns_ext(ns
)) {
3242 len
+= nvme_m2b(ns
, nlb
);
3245 status
= nvme_check_mdts(n
, len
);
3250 status
= nvme_check_bounds(ns
, slba
, nlb
);
3255 if (NVME_ERR_REC_DULBE(ns
->features
.err_rec
)) {
3256 status
= nvme_check_dulbe(ns
, slba
, nlb
);
3262 status
= nvme_map_dptr(n
, &req
->sg
, len
, &req
->cmd
);
3267 ctx
= g_new(struct nvme_compare_ctx
, 1);
3268 ctx
->data
.bounce
= g_malloc(data_len
);
3272 qemu_iovec_init(&ctx
->data
.iov
, 1);
3273 qemu_iovec_add(&ctx
->data
.iov
, ctx
->data
.bounce
, data_len
);
3275 block_acct_start(blk_get_stats(blk
), &req
->acct
, data_len
,
3277 req
->aiocb
= blk_aio_preadv(blk
, offset
, &ctx
->data
.iov
, 0,
3278 nvme_compare_data_cb
, req
);
3280 return NVME_NO_COMPLETE
;
3283 typedef struct NvmeFlushAIOCB
{
static void nvme_flush_cancel(BlockAIOCB *acb)
{
    NvmeFlushAIOCB *iocb = container_of(acb, NvmeFlushAIOCB, common);

    iocb->ret = -ECANCELED;

    if (iocb->aiocb) {
        blk_aio_cancel_async(iocb->aiocb);
        iocb->aiocb = NULL;
    }
}

static const AIOCBInfo nvme_flush_aiocb_info = {
    .aiocb_size = sizeof(NvmeFlushAIOCB),
    .cancel_async = nvme_flush_cancel,
    .get_aio_context = nvme_get_aio_context,
};
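/*
 * Flush. A Flush with NSID FFFFFFFFh is handled as a broadcast:
 * nvme_do_flush() walks all active namespaces and nvme_flush_ns_cb() issues
 * one blk_aio_flush() per namespace, re-entering nvme_do_flush() until the
 * list is exhausted.
 */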
3312 static void nvme_do_flush(NvmeFlushAIOCB
*iocb
);
3314 static void nvme_flush_ns_cb(void *opaque
, int ret
)
3316 NvmeFlushAIOCB
*iocb
= opaque
;
3317 NvmeNamespace
*ns
= iocb
->ns
;
3322 } else if (iocb
->ret
< 0) {
3327 trace_pci_nvme_flush_ns(iocb
->nsid
);
3330 iocb
->aiocb
= blk_aio_flush(ns
->blkconf
.blk
, nvme_flush_ns_cb
, iocb
);
3335 nvme_do_flush(iocb
);
3338 static void nvme_do_flush(NvmeFlushAIOCB
*iocb
)
3340 NvmeRequest
*req
= iocb
->req
;
3341 NvmeCtrl
*n
= nvme_ctrl(req
);
3344 if (iocb
->ret
< 0) {
3348 if (iocb
->broadcast
) {
3349 for (i
= iocb
->nsid
+ 1; i
<= NVME_MAX_NAMESPACES
; i
++) {
3350 iocb
->ns
= nvme_ns(n
, i
);
3362 nvme_flush_ns_cb(iocb
, 0);
3366 iocb
->common
.cb(iocb
->common
.opaque
, iocb
->ret
);
3367 qemu_aio_unref(iocb
);
3370 static uint16_t nvme_flush(NvmeCtrl
*n
, NvmeRequest
*req
)
3372 NvmeFlushAIOCB
*iocb
;
3373 uint32_t nsid
= le32_to_cpu(req
->cmd
.nsid
);
3376 iocb
= qemu_aio_get(&nvme_flush_aiocb_info
, NULL
, nvme_misc_cb
, req
);
3382 iocb
->broadcast
= (nsid
== NVME_NSID_BROADCAST
);
3384 if (!iocb
->broadcast
) {
3385 if (!nvme_nsid_valid(n
, nsid
)) {
3386 status
= NVME_INVALID_NSID
| NVME_DNR
;
3390 iocb
->ns
= nvme_ns(n
, nsid
);
3392 status
= NVME_INVALID_FIELD
| NVME_DNR
;
3399 req
->aiocb
= &iocb
->common
;
3400 nvme_do_flush(iocb
);
3402 return NVME_NO_COMPLETE
;
3405 qemu_aio_unref(iocb
);
3410 static uint16_t nvme_read(NvmeCtrl
*n
, NvmeRequest
*req
)
3412 NvmeRwCmd
*rw
= (NvmeRwCmd
*)&req
->cmd
;
3413 NvmeNamespace
*ns
= req
->ns
;
3414 uint64_t slba
= le64_to_cpu(rw
->slba
);
3415 uint32_t nlb
= (uint32_t)le16_to_cpu(rw
->nlb
) + 1;
3416 uint8_t prinfo
= NVME_RW_PRINFO(le16_to_cpu(rw
->control
));
3417 uint64_t data_size
= nvme_l2b(ns
, nlb
);
3418 uint64_t mapped_size
= data_size
;
3419 uint64_t data_offset
;
3420 BlockBackend
*blk
= ns
->blkconf
.blk
;
3423 if (nvme_ns_ext(ns
)) {
3424 mapped_size
+= nvme_m2b(ns
, nlb
);
3426 if (NVME_ID_NS_DPS_TYPE(ns
->id_ns
.dps
)) {
3427 bool pract
= prinfo
& NVME_PRINFO_PRACT
;
3429 if (pract
&& ns
->lbaf
.ms
== nvme_pi_tuple_size(ns
)) {
3430 mapped_size
= data_size
;
3435 trace_pci_nvme_read(nvme_cid(req
), nvme_nsid(ns
), nlb
, mapped_size
, slba
);
3437 status
= nvme_check_mdts(n
, mapped_size
);
3442 status
= nvme_check_bounds(ns
, slba
, nlb
);
3447 if (ns
->params
.zoned
) {
3448 status
= nvme_check_zone_read(ns
, slba
, nlb
);
3450 trace_pci_nvme_err_zone_read_not_ok(slba
, nlb
, status
);
3455 if (NVME_ERR_REC_DULBE(ns
->features
.err_rec
)) {
3456 status
= nvme_check_dulbe(ns
, slba
, nlb
);
3462 if (NVME_ID_NS_DPS_TYPE(ns
->id_ns
.dps
)) {
3463 return nvme_dif_rw(n
, req
);
3466 status
= nvme_map_data(n
, nlb
, req
);
3471 data_offset
= nvme_l2b(ns
, slba
);
3473 block_acct_start(blk_get_stats(blk
), &req
->acct
, data_size
,
3475 nvme_blk_read(blk
, data_offset
, BDRV_SECTOR_SIZE
, nvme_rw_cb
, req
);
3476 return NVME_NO_COMPLETE
;
3479 block_acct_invalid(blk_get_stats(blk
), BLOCK_ACCT_READ
);
3480 return status
| NVME_DNR
;
3483 static void nvme_do_write_fdp(NvmeCtrl
*n
, NvmeRequest
*req
, uint64_t slba
,
3486 NvmeNamespace
*ns
= req
->ns
;
3487 NvmeRwCmd
*rw
= (NvmeRwCmd
*)&req
->cmd
;
3488 uint64_t data_size
= nvme_l2b(ns
, nlb
);
3489 uint32_t dw12
= le32_to_cpu(req
->cmd
.cdw12
);
3490 uint8_t dtype
= (dw12
>> 20) & 0xf;
3491 uint16_t pid
= le16_to_cpu(rw
->dspec
);
3492 uint16_t ph
, rg
, ruhid
;
3493 NvmeReclaimUnit
*ru
;
3495 if (dtype
!= NVME_DIRECTIVE_DATA_PLACEMENT
||
3496 !nvme_parse_pid(ns
, pid
, &ph
, &rg
)) {
3501 ruhid
= ns
->fdp
.phs
[ph
];
3502 ru
= &ns
->endgrp
->fdp
.ruhs
[ruhid
].rus
[rg
];
3504 nvme_fdp_stat_inc(&ns
->endgrp
->fdp
.hbmw
, data_size
);
3505 nvme_fdp_stat_inc(&ns
->endgrp
->fdp
.mbmw
, data_size
);
3508 if (nlb
< ru
->ruamw
) {
3514 nvme_update_ruh(n
, ns
, pid
);
3518 static uint16_t nvme_do_write(NvmeCtrl
*n
, NvmeRequest
*req
, bool append
,
3521 NvmeRwCmd
*rw
= (NvmeRwCmd
*)&req
->cmd
;
3522 NvmeNamespace
*ns
= req
->ns
;
3523 uint64_t slba
= le64_to_cpu(rw
->slba
);
3524 uint32_t nlb
= (uint32_t)le16_to_cpu(rw
->nlb
) + 1;
3525 uint16_t ctrl
= le16_to_cpu(rw
->control
);
3526 uint8_t prinfo
= NVME_RW_PRINFO(ctrl
);
3527 uint64_t data_size
= nvme_l2b(ns
, nlb
);
3528 uint64_t mapped_size
= data_size
;
3529 uint64_t data_offset
;
3531 NvmeZonedResult
*res
= (NvmeZonedResult
*)&req
->cqe
;
3532 BlockBackend
*blk
= ns
->blkconf
.blk
;
3535 if (nvme_ns_ext(ns
)) {
3536 mapped_size
+= nvme_m2b(ns
, nlb
);
3538 if (NVME_ID_NS_DPS_TYPE(ns
->id_ns
.dps
)) {
3539 bool pract
= prinfo
& NVME_PRINFO_PRACT
;
3541 if (pract
&& ns
->lbaf
.ms
== nvme_pi_tuple_size(ns
)) {
3542 mapped_size
-= nvme_m2b(ns
, nlb
);
3547 trace_pci_nvme_write(nvme_cid(req
), nvme_io_opc_str(rw
->opcode
),
3548 nvme_nsid(ns
), nlb
, mapped_size
, slba
);
3551 status
= nvme_check_mdts(n
, mapped_size
);
3557 status
= nvme_check_bounds(ns
, slba
, nlb
);
3562 if (ns
->params
.zoned
) {
3563 zone
= nvme_get_zone_by_slba(ns
, slba
);
3567 bool piremap
= !!(ctrl
& NVME_RW_PIREMAP
);
3569 if (unlikely(zone
->d
.za
& NVME_ZA_ZRWA_VALID
)) {
3570 return NVME_INVALID_ZONE_OP
| NVME_DNR
;
3573 if (unlikely(slba
!= zone
->d
.zslba
)) {
3574 trace_pci_nvme_err_append_not_at_start(slba
, zone
->d
.zslba
);
3575 status
= NVME_INVALID_FIELD
;
3579 if (n
->params
.zasl
&&
3580 data_size
> (uint64_t)n
->page_size
<< n
->params
.zasl
) {
3581 trace_pci_nvme_err_zasl(data_size
);
3582 return NVME_INVALID_FIELD
| NVME_DNR
;
3586 rw
->slba
= cpu_to_le64(slba
);
3587 res
->slba
= cpu_to_le64(slba
);
3589 switch (NVME_ID_NS_DPS_TYPE(ns
->id_ns
.dps
)) {
3590 case NVME_ID_NS_DPS_TYPE_1
:
3592 return NVME_INVALID_PROT_INFO
| NVME_DNR
;
3597 case NVME_ID_NS_DPS_TYPE_2
:
3599 uint32_t reftag
= le32_to_cpu(rw
->reftag
);
3600 rw
->reftag
= cpu_to_le32(reftag
+ (slba
- zone
->d
.zslba
));
3605 case NVME_ID_NS_DPS_TYPE_3
:
3607 return NVME_INVALID_PROT_INFO
| NVME_DNR
;
3614 status
= nvme_check_zone_write(ns
, zone
, slba
, nlb
);
3619 status
= nvme_zrm_auto(n
, ns
, zone
);
3624 if (!(zone
->d
.za
& NVME_ZA_ZRWA_VALID
)) {
3627 } else if (ns
->endgrp
&& ns
->endgrp
->fdp
.enabled
) {
3628 nvme_do_write_fdp(n
, req
, slba
, nlb
);
3631 data_offset
= nvme_l2b(ns
, slba
);
3633 if (NVME_ID_NS_DPS_TYPE(ns
->id_ns
.dps
)) {
3634 return nvme_dif_rw(n
, req
);
3638 status
= nvme_map_data(n
, nlb
, req
);
3643 block_acct_start(blk_get_stats(blk
), &req
->acct
, data_size
,
3645 nvme_blk_write(blk
, data_offset
, BDRV_SECTOR_SIZE
, nvme_rw_cb
, req
);
3647 req
->aiocb
= blk_aio_pwrite_zeroes(blk
, data_offset
, data_size
,
3648 BDRV_REQ_MAY_UNMAP
, nvme_rw_cb
,
3652 return NVME_NO_COMPLETE
;
3655 block_acct_invalid(blk_get_stats(blk
), BLOCK_ACCT_WRITE
);
3656 return status
| NVME_DNR
;
static inline uint16_t nvme_write(NvmeCtrl *n, NvmeRequest *req)
{
    return nvme_do_write(n, req, false, false);
}

static inline uint16_t nvme_write_zeroes(NvmeCtrl *n, NvmeRequest *req)
{
    return nvme_do_write(n, req, false, true);
}

static inline uint16_t nvme_zone_append(NvmeCtrl *n, NvmeRequest *req)
{
    return nvme_do_write(n, req, true, false);
}
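/*
 * Zone management helpers. nvme_get_mgmt_zone_slba_idx() extracts the target
 * SLBA from CDW10/CDW11 of a Zone Management command and translates it into
 * an index into the per-namespace zone array.
 */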
static uint16_t nvme_get_mgmt_zone_slba_idx(NvmeNamespace *ns, NvmeCmd *c,
                                            uint64_t *slba, uint32_t *zone_idx)
{
    uint32_t dw10 = le32_to_cpu(c->cdw10);
    uint32_t dw11 = le32_to_cpu(c->cdw11);

    if (!ns->params.zoned) {
        trace_pci_nvme_err_invalid_opc(c->opcode);
        return NVME_INVALID_OPCODE | NVME_DNR;
    }

    *slba = ((uint64_t)dw11) << 32 | dw10;
    if (unlikely(*slba >= ns->id_ns.nsze)) {
        trace_pci_nvme_err_invalid_lba_range(*slba, 0, ns->id_ns.nsze);
        *slba = 0;
        return NVME_LBA_RANGE | NVME_DNR;
    }

    *zone_idx = nvme_zone_idx(ns, *slba);
    assert(*zone_idx < ns->num_zones);

    return NVME_SUCCESS;
}
typedef uint16_t (*op_handler_t)(NvmeNamespace *, NvmeZone *, NvmeZoneState,
                                 NvmeRequest *);

enum NvmeZoneProcessingMask {
    NVME_PROC_CURRENT_ZONE    = 0,
    NVME_PROC_OPENED_ZONES    = 1 << 0,
    NVME_PROC_CLOSED_ZONES    = 1 << 1,
    NVME_PROC_READ_ONLY_ZONES = 1 << 2,
    NVME_PROC_FULL_ZONES      = 1 << 3,
};
static uint16_t nvme_open_zone(NvmeNamespace *ns, NvmeZone *zone,
                               NvmeZoneState state, NvmeRequest *req)
{
    NvmeZoneSendCmd *cmd = (NvmeZoneSendCmd *)&req->cmd;
    int flags = 0;

    if (cmd->zsflags & NVME_ZSFLAG_ZRWA_ALLOC) {
        uint16_t ozcs = le16_to_cpu(ns->id_ns_zoned->ozcs);

        if (!(ozcs & NVME_ID_NS_ZONED_OZCS_ZRWASUP)) {
            return NVME_INVALID_ZONE_OP | NVME_DNR;
        }

        if (zone->w_ptr % ns->zns.zrwafg) {
            return NVME_NOZRWA | NVME_DNR;
        }

        flags = NVME_ZRM_ZRWA;
    }

    return nvme_zrm_open_flags(nvme_ctrl(req), ns, zone, flags);
}

static uint16_t nvme_close_zone(NvmeNamespace *ns, NvmeZone *zone,
                                NvmeZoneState state, NvmeRequest *req)
{
    return nvme_zrm_close(ns, zone);
}

static uint16_t nvme_finish_zone(NvmeNamespace *ns, NvmeZone *zone,
                                 NvmeZoneState state, NvmeRequest *req)
{
    return nvme_zrm_finish(ns, zone);
}
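/*
 * Per-action handlers for Zone Management Send. Each handler uses the
 * op_handler_t signature so that nvme_do_zone_op() can apply it either to a
 * single zone or, when the Select All bit is set, to every zone that matches
 * the requested processing mask.
 */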
3744 static uint16_t nvme_offline_zone(NvmeNamespace
*ns
, NvmeZone
*zone
,
3745 NvmeZoneState state
, NvmeRequest
*req
)
3748 case NVME_ZONE_STATE_READ_ONLY
:
3749 nvme_assign_zone_state(ns
, zone
, NVME_ZONE_STATE_OFFLINE
);
3751 case NVME_ZONE_STATE_OFFLINE
:
3752 return NVME_SUCCESS
;
3754 return NVME_ZONE_INVAL_TRANSITION
;
3758 static uint16_t nvme_set_zd_ext(NvmeNamespace
*ns
, NvmeZone
*zone
)
3761 uint8_t state
= nvme_get_zone_state(zone
);
3763 if (state
== NVME_ZONE_STATE_EMPTY
) {
3764 status
= nvme_aor_check(ns
, 1, 0);
3768 nvme_aor_inc_active(ns
);
3769 zone
->d
.za
|= NVME_ZA_ZD_EXT_VALID
;
3770 nvme_assign_zone_state(ns
, zone
, NVME_ZONE_STATE_CLOSED
);
3771 return NVME_SUCCESS
;
3774 return NVME_ZONE_INVAL_TRANSITION
;
3777 static uint16_t nvme_bulk_proc_zone(NvmeNamespace
*ns
, NvmeZone
*zone
,
3778 enum NvmeZoneProcessingMask proc_mask
,
3779 op_handler_t op_hndlr
, NvmeRequest
*req
)
3781 uint16_t status
= NVME_SUCCESS
;
3782 NvmeZoneState zs
= nvme_get_zone_state(zone
);
3786 case NVME_ZONE_STATE_IMPLICITLY_OPEN
:
3787 case NVME_ZONE_STATE_EXPLICITLY_OPEN
:
3788 proc_zone
= proc_mask
& NVME_PROC_OPENED_ZONES
;
3790 case NVME_ZONE_STATE_CLOSED
:
3791 proc_zone
= proc_mask
& NVME_PROC_CLOSED_ZONES
;
3793 case NVME_ZONE_STATE_READ_ONLY
:
3794 proc_zone
= proc_mask
& NVME_PROC_READ_ONLY_ZONES
;
3796 case NVME_ZONE_STATE_FULL
:
3797 proc_zone
= proc_mask
& NVME_PROC_FULL_ZONES
;
3804 status
= op_hndlr(ns
, zone
, zs
, req
);
3810 static uint16_t nvme_do_zone_op(NvmeNamespace
*ns
, NvmeZone
*zone
,
3811 enum NvmeZoneProcessingMask proc_mask
,
3812 op_handler_t op_hndlr
, NvmeRequest
*req
)
3815 uint16_t status
= NVME_SUCCESS
;
3819 status
= op_hndlr(ns
, zone
, nvme_get_zone_state(zone
), req
);
3821 if (proc_mask
& NVME_PROC_CLOSED_ZONES
) {
3822 QTAILQ_FOREACH_SAFE(zone
, &ns
->closed_zones
, entry
, next
) {
3823 status
= nvme_bulk_proc_zone(ns
, zone
, proc_mask
, op_hndlr
,
3825 if (status
&& status
!= NVME_NO_COMPLETE
) {
3830 if (proc_mask
& NVME_PROC_OPENED_ZONES
) {
3831 QTAILQ_FOREACH_SAFE(zone
, &ns
->imp_open_zones
, entry
, next
) {
3832 status
= nvme_bulk_proc_zone(ns
, zone
, proc_mask
, op_hndlr
,
3834 if (status
&& status
!= NVME_NO_COMPLETE
) {
3839 QTAILQ_FOREACH_SAFE(zone
, &ns
->exp_open_zones
, entry
, next
) {
3840 status
= nvme_bulk_proc_zone(ns
, zone
, proc_mask
, op_hndlr
,
3842 if (status
&& status
!= NVME_NO_COMPLETE
) {
3847 if (proc_mask
& NVME_PROC_FULL_ZONES
) {
3848 QTAILQ_FOREACH_SAFE(zone
, &ns
->full_zones
, entry
, next
) {
3849 status
= nvme_bulk_proc_zone(ns
, zone
, proc_mask
, op_hndlr
,
3851 if (status
&& status
!= NVME_NO_COMPLETE
) {
3857 if (proc_mask
& NVME_PROC_READ_ONLY_ZONES
) {
3858 for (i
= 0; i
< ns
->num_zones
; i
++, zone
++) {
3859 status
= nvme_bulk_proc_zone(ns
, zone
, proc_mask
, op_hndlr
,
3861 if (status
&& status
!= NVME_NO_COMPLETE
) {
3872 typedef struct NvmeZoneResetAIOCB
{
3881 } NvmeZoneResetAIOCB
;
3883 static void nvme_zone_reset_cancel(BlockAIOCB
*aiocb
)
3885 NvmeZoneResetAIOCB
*iocb
= container_of(aiocb
, NvmeZoneResetAIOCB
, common
);
3886 NvmeRequest
*req
= iocb
->req
;
3887 NvmeNamespace
*ns
= req
->ns
;
3889 iocb
->idx
= ns
->num_zones
;
3891 iocb
->ret
= -ECANCELED
;
3894 blk_aio_cancel_async(iocb
->aiocb
);
3899 static const AIOCBInfo nvme_zone_reset_aiocb_info
= {
3900 .aiocb_size
= sizeof(NvmeZoneResetAIOCB
),
3901 .cancel_async
= nvme_zone_reset_cancel
,
3904 static void nvme_zone_reset_cb(void *opaque
, int ret
);
3906 static void nvme_zone_reset_epilogue_cb(void *opaque
, int ret
)
3908 NvmeZoneResetAIOCB
*iocb
= opaque
;
3909 NvmeRequest
*req
= iocb
->req
;
3910 NvmeNamespace
*ns
= req
->ns
;
3914 if (ret
< 0 || iocb
->ret
< 0 || !ns
->lbaf
.ms
) {
3918 moff
= nvme_moff(ns
, iocb
->zone
->d
.zslba
);
3919 count
= nvme_m2b(ns
, ns
->zone_size
);
3921 iocb
->aiocb
= blk_aio_pwrite_zeroes(ns
->blkconf
.blk
, moff
, count
,
3923 nvme_zone_reset_cb
, iocb
);
3927 nvme_zone_reset_cb(iocb
, ret
);
3930 static void nvme_zone_reset_cb(void *opaque
, int ret
)
3932 NvmeZoneResetAIOCB
*iocb
= opaque
;
3933 NvmeRequest
*req
= iocb
->req
;
3934 NvmeNamespace
*ns
= req
->ns
;
3936 if (iocb
->ret
< 0) {
3938 } else if (ret
< 0) {
3944 nvme_zrm_reset(ns
, iocb
->zone
);
3951 while (iocb
->idx
< ns
->num_zones
) {
3952 NvmeZone
*zone
= &ns
->zone_array
[iocb
->idx
++];
3954 switch (nvme_get_zone_state(zone
)) {
3955 case NVME_ZONE_STATE_EMPTY
:
3962 case NVME_ZONE_STATE_EXPLICITLY_OPEN
:
3963 case NVME_ZONE_STATE_IMPLICITLY_OPEN
:
3964 case NVME_ZONE_STATE_CLOSED
:
3965 case NVME_ZONE_STATE_FULL
:
3973 trace_pci_nvme_zns_zone_reset(zone
->d
.zslba
);
3975 iocb
->aiocb
= blk_aio_pwrite_zeroes(ns
->blkconf
.blk
,
3976 nvme_l2b(ns
, zone
->d
.zslba
),
3977 nvme_l2b(ns
, ns
->zone_size
),
3979 nvme_zone_reset_epilogue_cb
,
3987 iocb
->common
.cb(iocb
->common
.opaque
, iocb
->ret
);
3988 qemu_aio_unref(iocb
);
3991 static uint16_t nvme_zone_mgmt_send_zrwa_flush(NvmeCtrl
*n
, NvmeZone
*zone
,
3992 uint64_t elba
, NvmeRequest
*req
)
3994 NvmeNamespace
*ns
= req
->ns
;
3995 uint16_t ozcs
= le16_to_cpu(ns
->id_ns_zoned
->ozcs
);
3996 uint64_t wp
= zone
->d
.wp
;
3997 uint32_t nlb
= elba
- wp
+ 1;
4001 if (!(ozcs
& NVME_ID_NS_ZONED_OZCS_ZRWASUP
)) {
4002 return NVME_INVALID_ZONE_OP
| NVME_DNR
;
4005 if (!(zone
->d
.za
& NVME_ZA_ZRWA_VALID
)) {
4006 return NVME_INVALID_FIELD
| NVME_DNR
;
4009 if (elba
< wp
|| elba
> wp
+ ns
->zns
.zrwas
) {
4010 return NVME_ZONE_BOUNDARY_ERROR
| NVME_DNR
;
4013 if (nlb
% ns
->zns
.zrwafg
) {
4014 return NVME_INVALID_FIELD
| NVME_DNR
;
4017 status
= nvme_zrm_auto(n
, ns
, zone
);
4024 nvme_advance_zone_wp(ns
, zone
, nlb
);
4026 return NVME_SUCCESS
;
4029 static uint16_t nvme_zone_mgmt_send(NvmeCtrl
*n
, NvmeRequest
*req
)
4031 NvmeZoneSendCmd
*cmd
= (NvmeZoneSendCmd
*)&req
->cmd
;
4032 NvmeNamespace
*ns
= req
->ns
;
4034 NvmeZoneResetAIOCB
*iocb
;
4037 uint32_t zone_idx
= 0;
4039 uint8_t action
= cmd
->zsa
;
4041 enum NvmeZoneProcessingMask proc_mask
= NVME_PROC_CURRENT_ZONE
;
4043 all
= cmd
->zsflags
& NVME_ZSFLAG_SELECT_ALL
;
4045 req
->status
= NVME_SUCCESS
;
4048 status
= nvme_get_mgmt_zone_slba_idx(ns
, &req
->cmd
, &slba
, &zone_idx
);
4054 zone
= &ns
->zone_array
[zone_idx
];
4055 if (slba
!= zone
->d
.zslba
&& action
!= NVME_ZONE_ACTION_ZRWA_FLUSH
) {
4056 trace_pci_nvme_err_unaligned_zone_cmd(action
, slba
, zone
->d
.zslba
);
4057 return NVME_INVALID_FIELD
| NVME_DNR
;
4062 case NVME_ZONE_ACTION_OPEN
:
4064 proc_mask
= NVME_PROC_CLOSED_ZONES
;
4066 trace_pci_nvme_open_zone(slba
, zone_idx
, all
);
4067 status
= nvme_do_zone_op(ns
, zone
, proc_mask
, nvme_open_zone
, req
);
4070 case NVME_ZONE_ACTION_CLOSE
:
4072 proc_mask
= NVME_PROC_OPENED_ZONES
;
4074 trace_pci_nvme_close_zone(slba
, zone_idx
, all
);
4075 status
= nvme_do_zone_op(ns
, zone
, proc_mask
, nvme_close_zone
, req
);
4078 case NVME_ZONE_ACTION_FINISH
:
4080 proc_mask
= NVME_PROC_OPENED_ZONES
| NVME_PROC_CLOSED_ZONES
;
4082 trace_pci_nvme_finish_zone(slba
, zone_idx
, all
);
4083 status
= nvme_do_zone_op(ns
, zone
, proc_mask
, nvme_finish_zone
, req
);
4086 case NVME_ZONE_ACTION_RESET
:
4087 trace_pci_nvme_reset_zone(slba
, zone_idx
, all
);
4089 iocb
= blk_aio_get(&nvme_zone_reset_aiocb_info
, ns
->blkconf
.blk
,
4095 iocb
->idx
= zone_idx
;
4098 req
->aiocb
= &iocb
->common
;
4099 nvme_zone_reset_cb(iocb
, 0);
4101 return NVME_NO_COMPLETE
;
4103 case NVME_ZONE_ACTION_OFFLINE
:
4105 proc_mask
= NVME_PROC_READ_ONLY_ZONES
;
4107 trace_pci_nvme_offline_zone(slba
, zone_idx
, all
);
4108 status
= nvme_do_zone_op(ns
, zone
, proc_mask
, nvme_offline_zone
, req
);
4111 case NVME_ZONE_ACTION_SET_ZD_EXT
:
4112 trace_pci_nvme_set_descriptor_extension(slba
, zone_idx
);
4113 if (all
|| !ns
->params
.zd_extension_size
) {
4114 return NVME_INVALID_FIELD
| NVME_DNR
;
4116 zd_ext
= nvme_get_zd_extension(ns
, zone_idx
);
4117 status
= nvme_h2c(n
, zd_ext
, ns
->params
.zd_extension_size
, req
);
4119 trace_pci_nvme_err_zd_extension_map_error(zone_idx
);
4123 status
= nvme_set_zd_ext(ns
, zone
);
4124 if (status
== NVME_SUCCESS
) {
4125 trace_pci_nvme_zd_extension_set(zone_idx
);
4130 case NVME_ZONE_ACTION_ZRWA_FLUSH
:
4132 return NVME_INVALID_FIELD
| NVME_DNR
;
4135 return nvme_zone_mgmt_send_zrwa_flush(n
, zone
, slba
, req
);
4138 trace_pci_nvme_err_invalid_mgmt_action(action
);
4139 status
= NVME_INVALID_FIELD
;
4142 if (status
== NVME_ZONE_INVAL_TRANSITION
) {
4143 trace_pci_nvme_err_invalid_zone_state_transition(action
, slba
,
static bool nvme_zone_matches_filter(uint32_t zafs, NvmeZone *zl)
{
    NvmeZoneState zs = nvme_get_zone_state(zl);

    switch (zafs) {
    case NVME_ZONE_REPORT_ALL:
        return true;
    case NVME_ZONE_REPORT_EMPTY:
        return zs == NVME_ZONE_STATE_EMPTY;
    case NVME_ZONE_REPORT_IMPLICITLY_OPEN:
        return zs == NVME_ZONE_STATE_IMPLICITLY_OPEN;
    case NVME_ZONE_REPORT_EXPLICITLY_OPEN:
        return zs == NVME_ZONE_STATE_EXPLICITLY_OPEN;
    case NVME_ZONE_REPORT_CLOSED:
        return zs == NVME_ZONE_STATE_CLOSED;
    case NVME_ZONE_REPORT_FULL:
        return zs == NVME_ZONE_STATE_FULL;
    case NVME_ZONE_REPORT_READ_ONLY:
        return zs == NVME_ZONE_STATE_READ_ONLY;
    case NVME_ZONE_REPORT_OFFLINE:
        return zs == NVME_ZONE_STATE_OFFLINE;
    default:
        return false;
    }
}
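/*
 * Zone Management Receive (Report Zones). The response is an
 * NvmeZoneReportHeader followed by one descriptor per reported zone; for the
 * extended report each descriptor is followed by the zone descriptor
 * extension. With the Partial Report bit set the header counts only the
 * zones that fit in the buffer, otherwise it counts all matching zones.
 */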
4179 static uint16_t nvme_zone_mgmt_recv(NvmeCtrl
*n
, NvmeRequest
*req
)
4181 NvmeCmd
*cmd
= (NvmeCmd
*)&req
->cmd
;
4182 NvmeNamespace
*ns
= req
->ns
;
4183 /* cdw12 is zero-based number of dwords to return. Convert to bytes */
4184 uint32_t data_size
= (le32_to_cpu(cmd
->cdw12
) + 1) << 2;
4185 uint32_t dw13
= le32_to_cpu(cmd
->cdw13
);
4186 uint32_t zone_idx
, zra
, zrasf
, partial
;
4187 uint64_t max_zones
, nr_zones
= 0;
4192 NvmeZoneReportHeader
*header
;
4194 size_t zone_entry_sz
;
4197 req
->status
= NVME_SUCCESS
;
4199 status
= nvme_get_mgmt_zone_slba_idx(ns
, cmd
, &slba
, &zone_idx
);
4205 if (zra
!= NVME_ZONE_REPORT
&& zra
!= NVME_ZONE_REPORT_EXTENDED
) {
4206 return NVME_INVALID_FIELD
| NVME_DNR
;
4208 if (zra
== NVME_ZONE_REPORT_EXTENDED
&& !ns
->params
.zd_extension_size
) {
4209 return NVME_INVALID_FIELD
| NVME_DNR
;
4212 zrasf
= (dw13
>> 8) & 0xff;
4213 if (zrasf
> NVME_ZONE_REPORT_OFFLINE
) {
4214 return NVME_INVALID_FIELD
| NVME_DNR
;
4217 if (data_size
< sizeof(NvmeZoneReportHeader
)) {
4218 return NVME_INVALID_FIELD
| NVME_DNR
;
4221 status
= nvme_check_mdts(n
, data_size
);
4226 partial
= (dw13
>> 16) & 0x01;
4228 zone_entry_sz
= sizeof(NvmeZoneDescr
);
4229 if (zra
== NVME_ZONE_REPORT_EXTENDED
) {
4230 zone_entry_sz
+= ns
->params
.zd_extension_size
;
4233 max_zones
= (data_size
- sizeof(NvmeZoneReportHeader
)) / zone_entry_sz
;
4234 buf
= g_malloc0(data_size
);
4236 zone
= &ns
->zone_array
[zone_idx
];
4237 for (i
= zone_idx
; i
< ns
->num_zones
; i
++) {
4238 if (partial
&& nr_zones
>= max_zones
) {
4241 if (nvme_zone_matches_filter(zrasf
, zone
++)) {
4246 header
->nr_zones
= cpu_to_le64(nr_zones
);
4248 buf_p
= buf
+ sizeof(NvmeZoneReportHeader
);
4249 for (; zone_idx
< ns
->num_zones
&& max_zones
> 0; zone_idx
++) {
4250 zone
= &ns
->zone_array
[zone_idx
];
4251 if (nvme_zone_matches_filter(zrasf
, zone
)) {
4253 buf_p
+= sizeof(NvmeZoneDescr
);
4257 z
->zcap
= cpu_to_le64(zone
->d
.zcap
);
4258 z
->zslba
= cpu_to_le64(zone
->d
.zslba
);
4261 if (nvme_wp_is_valid(zone
)) {
4262 z
->wp
= cpu_to_le64(zone
->d
.wp
);
4264 z
->wp
= cpu_to_le64(~0ULL);
4267 if (zra
== NVME_ZONE_REPORT_EXTENDED
) {
4268 if (zone
->d
.za
& NVME_ZA_ZD_EXT_VALID
) {
4269 memcpy(buf_p
, nvme_get_zd_extension(ns
, zone_idx
),
4270 ns
->params
.zd_extension_size
);
4272 buf_p
+= ns
->params
.zd_extension_size
;
4279 status
= nvme_c2h(n
, (uint8_t *)buf
, data_size
, req
);
4286 static uint16_t nvme_io_mgmt_recv_ruhs(NvmeCtrl
*n
, NvmeRequest
*req
,
4289 NvmeNamespace
*ns
= req
->ns
;
4290 NvmeEnduranceGroup
*endgrp
;
4292 NvmeRuhStatusDescr
*ruhsd
;
4293 unsigned int nruhsd
;
4294 uint16_t rg
, ph
, *ruhid
;
4296 g_autofree
uint8_t *buf
= NULL
;
4299 return NVME_INVALID_FIELD
| NVME_DNR
;
4302 if (ns
->params
.nsid
== 0 || ns
->params
.nsid
== 0xffffffff) {
4303 return NVME_INVALID_NSID
| NVME_DNR
;
4306 if (!n
->subsys
->endgrp
.fdp
.enabled
) {
4307 return NVME_FDP_DISABLED
| NVME_DNR
;
4310 endgrp
= ns
->endgrp
;
4312 nruhsd
= ns
->fdp
.nphs
* endgrp
->fdp
.nrg
;
4313 trans_len
= sizeof(NvmeRuhStatus
) + nruhsd
* sizeof(NvmeRuhStatusDescr
);
4314 buf
= g_malloc(trans_len
);
4316 trans_len
= MIN(trans_len
, len
);
4318 hdr
= (NvmeRuhStatus
*)buf
;
4319 ruhsd
= (NvmeRuhStatusDescr
*)(buf
+ sizeof(NvmeRuhStatus
));
4321 hdr
->nruhsd
= cpu_to_le16(nruhsd
);
4323 ruhid
= ns
->fdp
.phs
;
4325 for (ph
= 0; ph
< ns
->fdp
.nphs
; ph
++, ruhid
++) {
4326 NvmeRuHandle
*ruh
= &endgrp
->fdp
.ruhs
[*ruhid
];
4328 for (rg
= 0; rg
< endgrp
->fdp
.nrg
; rg
++, ruhsd
++) {
4329 uint16_t pid
= nvme_make_pid(ns
, rg
, ph
);
4331 ruhsd
->pid
= cpu_to_le16(pid
);
4332 ruhsd
->ruhid
= *ruhid
;
4334 ruhsd
->ruamw
= cpu_to_le64(ruh
->rus
[rg
].ruamw
);
4338 return nvme_c2h(n
, buf
, trans_len
, req
);
static uint16_t nvme_io_mgmt_recv(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeCmd *cmd = &req->cmd;
    uint32_t cdw10 = le32_to_cpu(cmd->cdw10);
    uint32_t numd = le32_to_cpu(cmd->cdw11);
    uint8_t mo = (cdw10 & 0xff);
    size_t len = (numd + 1) << 2;

    switch (mo) {
    case NVME_IOMR_MO_NOP:
        return 0;
    case NVME_IOMR_MO_RUH_STATUS:
        return nvme_io_mgmt_recv_ruhs(n, req, len);
    default:
        return NVME_INVALID_FIELD | NVME_DNR;
    }
}
4359 static uint16_t nvme_io_mgmt_send_ruh_update(NvmeCtrl
*n
, NvmeRequest
*req
)
4361 NvmeCmd
*cmd
= &req
->cmd
;
4362 NvmeNamespace
*ns
= req
->ns
;
4363 uint32_t cdw10
= le32_to_cpu(cmd
->cdw10
);
4364 uint16_t ret
= NVME_SUCCESS
;
4365 uint32_t npid
= (cdw10
>> 1) + 1;
4367 g_autofree
uint16_t *pids
= NULL
;
4368 uint32_t maxnpid
= n
->subsys
->endgrp
.fdp
.nrg
* n
->subsys
->endgrp
.fdp
.nruh
;
4370 if (unlikely(npid
>= MIN(NVME_FDP_MAXPIDS
, maxnpid
))) {
4371 return NVME_INVALID_FIELD
| NVME_DNR
;
4374 pids
= g_new(uint16_t, npid
);
4376 ret
= nvme_h2c(n
, pids
, npid
* sizeof(uint16_t), req
);
4381 for (; i
< npid
; i
++) {
4382 if (!nvme_update_ruh(n
, ns
, pids
[i
])) {
4383 return NVME_INVALID_FIELD
| NVME_DNR
;
static uint16_t nvme_io_mgmt_send(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeCmd *cmd = &req->cmd;
    uint32_t cdw10 = le32_to_cpu(cmd->cdw10);
    uint8_t mo = (cdw10 & 0xff);

    switch (mo) {
    case NVME_IOMS_MO_NOP:
        return 0;
    case NVME_IOMS_MO_RUH_UPDATE:
        return nvme_io_mgmt_send_ruh_update(n, req);
    default:
        return NVME_INVALID_FIELD | NVME_DNR;
    }
}
4406 static uint16_t nvme_io_cmd(NvmeCtrl
*n
, NvmeRequest
*req
)
4409 uint32_t nsid
= le32_to_cpu(req
->cmd
.nsid
);
4411 trace_pci_nvme_io_cmd(nvme_cid(req
), nsid
, nvme_sqid(req
),
4412 req
->cmd
.opcode
, nvme_io_opc_str(req
->cmd
.opcode
));
4414 if (!nvme_nsid_valid(n
, nsid
)) {
4415 return NVME_INVALID_NSID
| NVME_DNR
;
4419 * In the base NVM command set, Flush may apply to all namespaces
4420 * (indicated by NSID being set to FFFFFFFFh). But if that feature is used
4421 * along with TP 4056 (Namespace Types), it may be pretty screwed up.
4423 * If NSID is indeed set to FFFFFFFFh, we simply cannot associate the
4424 * opcode with a specific command since we cannot determine a unique I/O
4425 * command set. Opcode 0h could have any other meaning than something
4426 * equivalent to flushing and say it DOES have completely different
4427 * semantics in some other command set - does an NSID of FFFFFFFFh then
4428 * mean "for all namespaces, apply whatever command set specific command
4429 * that uses the 0h opcode?" Or does it mean "for all namespaces, apply
4430 * whatever command that uses the 0h opcode if, and only if, it allows NSID
4433 * Anyway (and luckily), for now, we do not care about this since the
4434 * device only supports namespace types that includes the NVM Flush command
4435 * (NVM and Zoned), so always do an NVM Flush.
4437 if (req
->cmd
.opcode
== NVME_CMD_FLUSH
) {
4438 return nvme_flush(n
, req
);
4441 ns
= nvme_ns(n
, nsid
);
4442 if (unlikely(!ns
)) {
4443 return NVME_INVALID_FIELD
| NVME_DNR
;
4446 if (!(ns
->iocs
[req
->cmd
.opcode
] & NVME_CMD_EFF_CSUPP
)) {
4447 trace_pci_nvme_err_invalid_opc(req
->cmd
.opcode
);
4448 return NVME_INVALID_OPCODE
| NVME_DNR
;
4455 if (NVME_CMD_FLAGS_FUSE(req
->cmd
.flags
)) {
4456 return NVME_INVALID_FIELD
;
4461 switch (req
->cmd
.opcode
) {
4462 case NVME_CMD_WRITE_ZEROES
:
4463 return nvme_write_zeroes(n
, req
);
4464 case NVME_CMD_ZONE_APPEND
:
4465 return nvme_zone_append(n
, req
);
4466 case NVME_CMD_WRITE
:
4467 return nvme_write(n
, req
);
4469 return nvme_read(n
, req
);
4470 case NVME_CMD_COMPARE
:
4471 return nvme_compare(n
, req
);
4473 return nvme_dsm(n
, req
);
4474 case NVME_CMD_VERIFY
:
4475 return nvme_verify(n
, req
);
4477 return nvme_copy(n
, req
);
4478 case NVME_CMD_ZONE_MGMT_SEND
:
4479 return nvme_zone_mgmt_send(n
, req
);
4480 case NVME_CMD_ZONE_MGMT_RECV
:
4481 return nvme_zone_mgmt_recv(n
, req
);
4482 case NVME_CMD_IO_MGMT_RECV
:
4483 return nvme_io_mgmt_recv(n
, req
);
4484 case NVME_CMD_IO_MGMT_SEND
:
4485 return nvme_io_mgmt_send(n
, req
);
4490 return NVME_INVALID_OPCODE
| NVME_DNR
;
static void nvme_cq_notifier(EventNotifier *e)
{
    NvmeCQueue *cq = container_of(e, NvmeCQueue, notifier);
    NvmeCtrl *n = cq->ctrl;

    if (!event_notifier_test_and_clear(e)) {
        return;
    }

    nvme_update_cq_head(cq);

    if (cq->tail == cq->head) {
        if (cq->irq_enabled) {
            n->cq_pending--;
        }

        nvme_irq_deassert(n, cq);
    }

    qemu_bh_schedule(cq->bh);
}

static int nvme_init_cq_ioeventfd(NvmeCQueue *cq)
{
    NvmeCtrl *n = cq->ctrl;
    uint16_t offset = (cq->cqid << 3) + (1 << 2);
    int ret;

    ret = event_notifier_init(&cq->notifier, 0);
    if (ret < 0) {
        return ret;
    }

    event_notifier_set_handler(&cq->notifier, nvme_cq_notifier);
    memory_region_add_eventfd(&n->iomem,
                              0x1000 + offset, 4, false, 0, &cq->notifier);

    return 0;
}
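/*
 * The submission queue side mirrors the completion queue ioeventfd setup
 * above: nvme_sq_notifier() kicks nvme_process_sq() when the guest writes
 * the SQ tail doorbell, and nvme_init_sq_ioeventfd() wires the notifier to
 * the doorbell offset in BAR0 (0x1000 + (sqid << 3)).
 */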
4533 static void nvme_sq_notifier(EventNotifier
*e
)
4535 NvmeSQueue
*sq
= container_of(e
, NvmeSQueue
, notifier
);
4537 if (!event_notifier_test_and_clear(e
)) {
4541 nvme_process_sq(sq
);
4544 static int nvme_init_sq_ioeventfd(NvmeSQueue
*sq
)
4546 NvmeCtrl
*n
= sq
->ctrl
;
4547 uint16_t offset
= sq
->sqid
<< 3;
4550 ret
= event_notifier_init(&sq
->notifier
, 0);
4555 event_notifier_set_handler(&sq
->notifier
, nvme_sq_notifier
);
4556 memory_region_add_eventfd(&n
->iomem
,
4557 0x1000 + offset
, 4, false, 0, &sq
->notifier
);
4562 static void nvme_free_sq(NvmeSQueue
*sq
, NvmeCtrl
*n
)
4564 uint16_t offset
= sq
->sqid
<< 3;
4566 n
->sq
[sq
->sqid
] = NULL
;
4567 qemu_bh_delete(sq
->bh
);
4568 if (sq
->ioeventfd_enabled
) {
4569 memory_region_del_eventfd(&n
->iomem
,
4570 0x1000 + offset
, 4, false, 0, &sq
->notifier
);
4571 event_notifier_set_handler(&sq
->notifier
, NULL
);
4572 event_notifier_cleanup(&sq
->notifier
);
4580 static uint16_t nvme_del_sq(NvmeCtrl
*n
, NvmeRequest
*req
)
4582 NvmeDeleteQ
*c
= (NvmeDeleteQ
*)&req
->cmd
;
4583 NvmeRequest
*r
, *next
;
4586 uint16_t qid
= le16_to_cpu(c
->qid
);
4588 if (unlikely(!qid
|| nvme_check_sqid(n
, qid
))) {
4589 trace_pci_nvme_err_invalid_del_sq(qid
);
4590 return NVME_INVALID_QID
| NVME_DNR
;
4593 trace_pci_nvme_del_sq(qid
);
4596 while (!QTAILQ_EMPTY(&sq
->out_req_list
)) {
4597 r
= QTAILQ_FIRST(&sq
->out_req_list
);
4599 blk_aio_cancel(r
->aiocb
);
4602 assert(QTAILQ_EMPTY(&sq
->out_req_list
));
4604 if (!nvme_check_cqid(n
, sq
->cqid
)) {
4605 cq
= n
->cq
[sq
->cqid
];
4606 QTAILQ_REMOVE(&cq
->sq_list
, sq
, entry
);
4609 QTAILQ_FOREACH_SAFE(r
, &cq
->req_list
, entry
, next
) {
4611 QTAILQ_REMOVE(&cq
->req_list
, r
, entry
);
4612 QTAILQ_INSERT_TAIL(&sq
->req_list
, r
, entry
);
4617 nvme_free_sq(sq
, n
);
4618 return NVME_SUCCESS
;
4621 static void nvme_init_sq(NvmeSQueue
*sq
, NvmeCtrl
*n
, uint64_t dma_addr
,
4622 uint16_t sqid
, uint16_t cqid
, uint16_t size
)
4628 sq
->dma_addr
= dma_addr
;
4632 sq
->head
= sq
->tail
= 0;
4633 sq
->io_req
= g_new0(NvmeRequest
, sq
->size
);
4635 QTAILQ_INIT(&sq
->req_list
);
4636 QTAILQ_INIT(&sq
->out_req_list
);
4637 for (i
= 0; i
< sq
->size
; i
++) {
4638 sq
->io_req
[i
].sq
= sq
;
4639 QTAILQ_INSERT_TAIL(&(sq
->req_list
), &sq
->io_req
[i
], entry
);
4642 sq
->bh
= qemu_bh_new_guarded(nvme_process_sq
, sq
,
4643 &DEVICE(sq
->ctrl
)->mem_reentrancy_guard
);
4645 if (n
->dbbuf_enabled
) {
4646 sq
->db_addr
= n
->dbbuf_dbs
+ (sqid
<< 3);
4647 sq
->ei_addr
= n
->dbbuf_eis
+ (sqid
<< 3);
4649 if (n
->params
.ioeventfd
&& sq
->sqid
!= 0) {
4650 if (!nvme_init_sq_ioeventfd(sq
)) {
4651 sq
->ioeventfd_enabled
= true;
4656 assert(n
->cq
[cqid
]);
4658 QTAILQ_INSERT_TAIL(&(cq
->sq_list
), sq
, entry
);
4662 static uint16_t nvme_create_sq(NvmeCtrl
*n
, NvmeRequest
*req
)
4665 NvmeCreateSq
*c
= (NvmeCreateSq
*)&req
->cmd
;
4667 uint16_t cqid
= le16_to_cpu(c
->cqid
);
4668 uint16_t sqid
= le16_to_cpu(c
->sqid
);
4669 uint16_t qsize
= le16_to_cpu(c
->qsize
);
4670 uint16_t qflags
= le16_to_cpu(c
->sq_flags
);
4671 uint64_t prp1
= le64_to_cpu(c
->prp1
);
4673 trace_pci_nvme_create_sq(prp1
, sqid
, cqid
, qsize
, qflags
);
4675 if (unlikely(!cqid
|| nvme_check_cqid(n
, cqid
))) {
4676 trace_pci_nvme_err_invalid_create_sq_cqid(cqid
);
4677 return NVME_INVALID_CQID
| NVME_DNR
;
4679 if (unlikely(!sqid
|| sqid
> n
->conf_ioqpairs
|| n
->sq
[sqid
] != NULL
)) {
4680 trace_pci_nvme_err_invalid_create_sq_sqid(sqid
);
4681 return NVME_INVALID_QID
| NVME_DNR
;
4683 if (unlikely(!qsize
|| qsize
> NVME_CAP_MQES(ldq_le_p(&n
->bar
.cap
)))) {
4684 trace_pci_nvme_err_invalid_create_sq_size(qsize
);
4685 return NVME_MAX_QSIZE_EXCEEDED
| NVME_DNR
;
4687 if (unlikely(prp1
& (n
->page_size
- 1))) {
4688 trace_pci_nvme_err_invalid_create_sq_addr(prp1
);
4689 return NVME_INVALID_PRP_OFFSET
| NVME_DNR
;
4691 if (unlikely(!(NVME_SQ_FLAGS_PC(qflags
)))) {
4692 trace_pci_nvme_err_invalid_create_sq_qflags(NVME_SQ_FLAGS_PC(qflags
));
4693 return NVME_INVALID_FIELD
| NVME_DNR
;
4695 sq
= g_malloc0(sizeof(*sq
));
4696 nvme_init_sq(sq
, n
, prp1
, sqid
, cqid
, qsize
+ 1);
4697 return NVME_SUCCESS
;
struct nvme_stats {
    uint64_t units_read;
    uint64_t units_written;
    uint64_t read_commands;
    uint64_t write_commands;
};

static void nvme_set_blk_stats(NvmeNamespace *ns, struct nvme_stats *stats)
{
    BlockAcctStats *s = blk_get_stats(ns->blkconf.blk);

    stats->units_read += s->nr_bytes[BLOCK_ACCT_READ];
    stats->units_written += s->nr_bytes[BLOCK_ACCT_WRITE];
    stats->read_commands += s->nr_ops[BLOCK_ACCT_READ];
    stats->write_commands += s->nr_ops[BLOCK_ACCT_WRITE];
}
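/*
 * Log page helpers. The SMART / Health log and the endurance group log both
 * accumulate block accounting statistics across namespaces via
 * nvme_set_blk_stats(); data units are reported in thousands of 512-byte
 * units, as defined by the specification.
 */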
4717 static uint16_t nvme_smart_info(NvmeCtrl
*n
, uint8_t rae
, uint32_t buf_len
,
4718 uint64_t off
, NvmeRequest
*req
)
4720 uint32_t nsid
= le32_to_cpu(req
->cmd
.nsid
);
4721 struct nvme_stats stats
= { 0 };
4722 NvmeSmartLog smart
= { 0 };
4726 uint64_t u_read
, u_written
;
4728 if (off
>= sizeof(smart
)) {
4729 return NVME_INVALID_FIELD
| NVME_DNR
;
4732 if (nsid
!= 0xffffffff) {
4733 ns
= nvme_ns(n
, nsid
);
4735 return NVME_INVALID_NSID
| NVME_DNR
;
4737 nvme_set_blk_stats(ns
, &stats
);
4741 for (i
= 1; i
<= NVME_MAX_NAMESPACES
; i
++) {
4746 nvme_set_blk_stats(ns
, &stats
);
4750 trans_len
= MIN(sizeof(smart
) - off
, buf_len
);
4751 smart
.critical_warning
= n
->smart_critical_warning
;
4753 u_read
= DIV_ROUND_UP(stats
.units_read
>> BDRV_SECTOR_BITS
, 1000);
4754 u_written
= DIV_ROUND_UP(stats
.units_written
>> BDRV_SECTOR_BITS
, 1000);
4756 smart
.data_units_read
[0] = cpu_to_le64(u_read
);
4757 smart
.data_units_written
[0] = cpu_to_le64(u_written
);
4758 smart
.host_read_commands
[0] = cpu_to_le64(stats
.read_commands
);
4759 smart
.host_write_commands
[0] = cpu_to_le64(stats
.write_commands
);
4761 smart
.temperature
= cpu_to_le16(n
->temperature
);
4763 if ((n
->temperature
>= n
->features
.temp_thresh_hi
) ||
4764 (n
->temperature
<= n
->features
.temp_thresh_low
)) {
4765 smart
.critical_warning
|= NVME_SMART_TEMPERATURE
;
4768 current_ms
= qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL
);
4769 smart
.power_on_hours
[0] =
4770 cpu_to_le64((((current_ms
- n
->starttime_ms
) / 1000) / 60) / 60);
4773 nvme_clear_events(n
, NVME_AER_TYPE_SMART
);
4776 return nvme_c2h(n
, (uint8_t *) &smart
+ off
, trans_len
, req
);
4779 static uint16_t nvme_endgrp_info(NvmeCtrl
*n
, uint8_t rae
, uint32_t buf_len
,
4780 uint64_t off
, NvmeRequest
*req
)
4782 uint32_t dw11
= le32_to_cpu(req
->cmd
.cdw11
);
4783 uint16_t endgrpid
= (dw11
>> 16) & 0xffff;
4784 struct nvme_stats stats
= {};
4785 NvmeEndGrpLog info
= {};
4788 if (!n
->subsys
|| endgrpid
!= 0x1) {
4789 return NVME_INVALID_FIELD
| NVME_DNR
;
4792 if (off
>= sizeof(info
)) {
4793 return NVME_INVALID_FIELD
| NVME_DNR
;
4796 for (i
= 1; i
<= NVME_MAX_NAMESPACES
; i
++) {
4797 NvmeNamespace
*ns
= nvme_subsys_ns(n
->subsys
, i
);
4802 nvme_set_blk_stats(ns
, &stats
);
4805 info
.data_units_read
[0] =
4806 cpu_to_le64(DIV_ROUND_UP(stats
.units_read
/ 1000000000, 1000000000));
4807 info
.data_units_written
[0] =
4808 cpu_to_le64(DIV_ROUND_UP(stats
.units_written
/ 1000000000, 1000000000));
4809 info
.media_units_written
[0] =
4810 cpu_to_le64(DIV_ROUND_UP(stats
.units_written
/ 1000000000, 1000000000));
4812 info
.host_read_commands
[0] = cpu_to_le64(stats
.read_commands
);
4813 info
.host_write_commands
[0] = cpu_to_le64(stats
.write_commands
);
4815 buf_len
= MIN(sizeof(info
) - off
, buf_len
);
4817 return nvme_c2h(n
, (uint8_t *)&info
+ off
, buf_len
, req
);
4821 static uint16_t nvme_fw_log_info(NvmeCtrl
*n
, uint32_t buf_len
, uint64_t off
,
4825 NvmeFwSlotInfoLog fw_log
= {
4829 if (off
>= sizeof(fw_log
)) {
4830 return NVME_INVALID_FIELD
| NVME_DNR
;
4833 strpadcpy((char *)&fw_log
.frs1
, sizeof(fw_log
.frs1
), "1.0", ' ');
4834 trans_len
= MIN(sizeof(fw_log
) - off
, buf_len
);
4836 return nvme_c2h(n
, (uint8_t *) &fw_log
+ off
, trans_len
, req
);
4839 static uint16_t nvme_error_info(NvmeCtrl
*n
, uint8_t rae
, uint32_t buf_len
,
4840 uint64_t off
, NvmeRequest
*req
)
4843 NvmeErrorLog errlog
;
4845 if (off
>= sizeof(errlog
)) {
4846 return NVME_INVALID_FIELD
| NVME_DNR
;
4850 nvme_clear_events(n
, NVME_AER_TYPE_ERROR
);
4853 memset(&errlog
, 0x0, sizeof(errlog
));
4854 trans_len
= MIN(sizeof(errlog
) - off
, buf_len
);
4856 return nvme_c2h(n
, (uint8_t *)&errlog
, trans_len
, req
);
4859 static uint16_t nvme_changed_nslist(NvmeCtrl
*n
, uint8_t rae
, uint32_t buf_len
,
4860 uint64_t off
, NvmeRequest
*req
)
4862 uint32_t nslist
[1024];
4867 if (off
>= sizeof(nslist
)) {
4868 trace_pci_nvme_err_invalid_log_page_offset(off
, sizeof(nslist
));
4869 return NVME_INVALID_FIELD
| NVME_DNR
;
4872 memset(nslist
, 0x0, sizeof(nslist
));
4873 trans_len
= MIN(sizeof(nslist
) - off
, buf_len
);
4875 while ((nsid
= find_first_bit(n
->changed_nsids
, NVME_CHANGED_NSID_SIZE
)) !=
4876 NVME_CHANGED_NSID_SIZE
) {
4878 * If more than 1024 namespaces, the first entry in the log page should
4879 * be set to FFFFFFFFh and the others to 0 as spec.
4881 if (i
== ARRAY_SIZE(nslist
)) {
4882 memset(nslist
, 0x0, sizeof(nslist
));
4883 nslist
[0] = 0xffffffff;
4888 clear_bit(nsid
, n
->changed_nsids
);
4892 * Remove all the remaining list entries in case returns directly due to
4893 * more than 1024 namespaces.
4895 if (nslist
[0] == 0xffffffff) {
4896 bitmap_zero(n
->changed_nsids
, NVME_CHANGED_NSID_SIZE
);
4900 nvme_clear_events(n
, NVME_AER_TYPE_NOTICE
);
4903 return nvme_c2h(n
, ((uint8_t *)nslist
) + off
, trans_len
, req
);
4906 static uint16_t nvme_cmd_effects(NvmeCtrl
*n
, uint8_t csi
, uint32_t buf_len
,
4907 uint64_t off
, NvmeRequest
*req
)
4909 NvmeEffectsLog log
= {};
4910 const uint32_t *src_iocs
= NULL
;
4913 if (off
>= sizeof(log
)) {
4914 trace_pci_nvme_err_invalid_log_page_offset(off
, sizeof(log
));
4915 return NVME_INVALID_FIELD
| NVME_DNR
;
4918 switch (NVME_CC_CSS(ldl_le_p(&n
->bar
.cc
))) {
4919 case NVME_CC_CSS_NVM
:
4920 src_iocs
= nvme_cse_iocs_nvm
;
4922 case NVME_CC_CSS_ADMIN_ONLY
:
4924 case NVME_CC_CSS_CSI
:
4927 src_iocs
= nvme_cse_iocs_nvm
;
4929 case NVME_CSI_ZONED
:
4930 src_iocs
= nvme_cse_iocs_zoned
;
4935 memcpy(log
.acs
, nvme_cse_acs
, sizeof(nvme_cse_acs
));
4938 memcpy(log
.iocs
, src_iocs
, sizeof(log
.iocs
));
4941 trans_len
= MIN(sizeof(log
) - off
, buf_len
);
4943 return nvme_c2h(n
, ((uint8_t *)&log
) + off
, trans_len
, req
);
static size_t sizeof_fdp_conf_descr(size_t nruh, size_t vss)
{
    size_t entry_siz = sizeof(NvmeFdpDescrHdr) + nruh * sizeof(NvmeRuhDescr)
                       + vss;

    return ROUND_UP(entry_siz, 8);
}
static uint16_t nvme_fdp_confs(NvmeCtrl *n, uint32_t endgrpid, uint32_t buf_len,
                               uint64_t off, NvmeRequest *req)
{
    uint32_t log_size, trans_len;
    g_autofree uint8_t *buf = NULL;
    NvmeFdpDescrHdr *hdr;
    NvmeRuhDescr *ruhd;
    NvmeEnduranceGroup *endgrp;
    NvmeFdpConfsHdr *log;
    size_t nruh, fdp_descr_size;
    int i;

    if (endgrpid != 1 || !n->subsys) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    endgrp = &n->subsys->endgrp;

    if (endgrp->fdp.enabled) {
        nruh = endgrp->fdp.nruh;
    } else {
        nruh = 1;
    }

    fdp_descr_size = sizeof_fdp_conf_descr(nruh, FDPVSS);
    log_size = sizeof(NvmeFdpConfsHdr) + fdp_descr_size;

    if (off >= log_size) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    trans_len = MIN(log_size - off, buf_len);

    buf = g_malloc0(log_size);
    log = (NvmeFdpConfsHdr *)buf;
    hdr = (NvmeFdpDescrHdr *)(log + 1);
    ruhd = (NvmeRuhDescr *)(buf + sizeof(*log) + sizeof(*hdr));

    log->num_confs = cpu_to_le16(0);
    log->size = cpu_to_le32(log_size);

    hdr->descr_size = cpu_to_le16(fdp_descr_size);
    if (endgrp->fdp.enabled) {
        hdr->fdpa = FIELD_DP8(hdr->fdpa, FDPA, VALID, 1);
        hdr->fdpa = FIELD_DP8(hdr->fdpa, FDPA, RGIF, endgrp->fdp.rgif);
        hdr->nrg = cpu_to_le16(endgrp->fdp.nrg);
        hdr->nruh = cpu_to_le16(endgrp->fdp.nruh);
        hdr->maxpids = cpu_to_le16(NVME_FDP_MAXPIDS - 1);
        hdr->nnss = cpu_to_le32(NVME_MAX_NAMESPACES);
        hdr->runs = cpu_to_le64(endgrp->fdp.runs);

        for (i = 0; i < nruh; i++, ruhd++) {
            ruhd->ruht = NVME_RUHT_INITIALLY_ISOLATED;
        }
    } else {
        /* 1 bit for RUH in PIF -> 2 RUHs max. */
        hdr->nrg = cpu_to_le16(1);
        hdr->nruh = cpu_to_le16(1);
        hdr->maxpids = cpu_to_le16(NVME_FDP_MAXPIDS - 1);
        hdr->nnss = cpu_to_le32(1);
        hdr->runs = cpu_to_le64(96 * MiB);

        ruhd->ruht = NVME_RUHT_INITIALLY_ISOLATED;
    }

    return nvme_c2h(n, (uint8_t *)buf + off, trans_len, req);
}
static uint16_t nvme_fdp_ruh_usage(NvmeCtrl *n, uint32_t endgrpid,
                                   uint32_t dw10, uint32_t dw12,
                                   uint32_t buf_len, uint64_t off,
                                   NvmeRequest *req)
{
    NvmeRuHandle *ruh;
    NvmeRuhuLog *hdr;
    NvmeRuhuDescr *ruhud;
    NvmeEnduranceGroup *endgrp;
    g_autofree uint8_t *buf = NULL;
    uint32_t log_size, trans_len;
    uint16_t i;

    if (endgrpid != 1 || !n->subsys) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    endgrp = &n->subsys->endgrp;

    if (!endgrp->fdp.enabled) {
        return NVME_FDP_DISABLED | NVME_DNR;
    }

    log_size = sizeof(NvmeRuhuLog) + endgrp->fdp.nruh * sizeof(NvmeRuhuDescr);

    if (off >= log_size) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    trans_len = MIN(log_size - off, buf_len);

    buf = g_malloc0(log_size);
    hdr = (NvmeRuhuLog *)buf;
    ruhud = (NvmeRuhuDescr *)(hdr + 1);

    ruh = endgrp->fdp.ruhs;
    hdr->nruh = cpu_to_le16(endgrp->fdp.nruh);

    for (i = 0; i < endgrp->fdp.nruh; i++, ruhud++, ruh++) {
        ruhud->ruha = ruh->ruha;
    }

    return nvme_c2h(n, (uint8_t *)buf + off, trans_len, req);
}
static uint16_t nvme_fdp_stats(NvmeCtrl *n, uint32_t endgrpid, uint32_t buf_len,
                               uint64_t off, NvmeRequest *req)
{
    NvmeEnduranceGroup *endgrp;
    NvmeFdpStatsLog log = {};
    uint32_t trans_len;

    if (off >= sizeof(NvmeFdpStatsLog)) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    if (endgrpid != 1 || !n->subsys) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    if (!n->subsys->endgrp.fdp.enabled) {
        return NVME_FDP_DISABLED | NVME_DNR;
    }

    endgrp = &n->subsys->endgrp;

    trans_len = MIN(sizeof(log) - off, buf_len);

    /* spec value is 128 bit, we only use 64 bit */
    log.hbmw[0] = cpu_to_le64(endgrp->fdp.hbmw);
    log.mbmw[0] = cpu_to_le64(endgrp->fdp.mbmw);
    log.mbe[0] = cpu_to_le64(endgrp->fdp.mbe);

    return nvme_c2h(n, (uint8_t *)&log + off, trans_len, req);
}
static uint16_t nvme_fdp_events(NvmeCtrl *n, uint32_t endgrpid,
                                uint32_t buf_len, uint64_t off,
                                NvmeRequest *req)
{
    NvmeEnduranceGroup *endgrp;
    NvmeCmd *cmd = &req->cmd;
    bool host_events = (cmd->cdw10 >> 8) & 0x1;
    uint32_t log_size, trans_len;
    NvmeFdpEventBuffer *ebuf;
    g_autofree NvmeFdpEventsLog *elog = NULL;
    NvmeFdpEvent *event;

    if (endgrpid != 1 || !n->subsys) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    endgrp = &n->subsys->endgrp;

    if (!endgrp->fdp.enabled) {
        return NVME_FDP_DISABLED | NVME_DNR;
    }

    if (host_events) {
        ebuf = &endgrp->fdp.host_events;
    } else {
        ebuf = &endgrp->fdp.ctrl_events;
    }

    log_size = sizeof(NvmeFdpEventsLog) + ebuf->nelems * sizeof(NvmeFdpEvent);
    trans_len = MIN(log_size - off, buf_len);
    elog = g_malloc0(log_size);
    elog->num_events = cpu_to_le32(ebuf->nelems);
    event = (NvmeFdpEvent *)(elog + 1);

    if (ebuf->nelems && ebuf->start == ebuf->next) {
        unsigned int nelems = (NVME_FDP_MAX_EVENTS - ebuf->start);
        /* wrap over, copy [start;NVME_FDP_MAX_EVENTS[ and [0; next[ */
        memcpy(event, &ebuf->events[ebuf->start],
               sizeof(NvmeFdpEvent) * nelems);
        memcpy(event + nelems, ebuf->events,
               sizeof(NvmeFdpEvent) * ebuf->next);
    } else if (ebuf->start < ebuf->next) {
        memcpy(event, &ebuf->events[ebuf->start],
               sizeof(NvmeFdpEvent) * (ebuf->next - ebuf->start));
    }

    return nvme_c2h(n, (uint8_t *)elog + off, trans_len, req);
}
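
/*
 * Get Log Page: NUMDL/NUMDU together form a 0's based count of dwords to
 * transfer (so len = (NUMD + 1) * 4), and LPOL/LPOU form a 64-bit byte
 * offset into the log page, which must be dword aligned.
 */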
static uint16_t nvme_get_log(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeCmd *cmd = &req->cmd;

    uint32_t dw10 = le32_to_cpu(cmd->cdw10);
    uint32_t dw11 = le32_to_cpu(cmd->cdw11);
    uint32_t dw12 = le32_to_cpu(cmd->cdw12);
    uint32_t dw13 = le32_to_cpu(cmd->cdw13);
    uint8_t  lid = dw10 & 0xff;
    uint8_t  lsp = (dw10 >> 8) & 0xf;
    uint8_t  rae = (dw10 >> 15) & 0x1;
    uint8_t  csi = le32_to_cpu(cmd->cdw14) >> 24;
    uint32_t numdl, numdu, lspi;
    uint64_t off, lpol, lpou;
    size_t   len;
    uint16_t status;

    numdl = (dw10 >> 16);
    numdu = (dw11 & 0xffff);
    lspi = (dw11 >> 16);
    lpol = dw12;
    lpou = dw13;

    len = (((numdu << 16) | numdl) + 1) << 2;
    off = (lpou << 32ULL) | lpol;

    if (off & 0x3) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    trace_pci_nvme_get_log(nvme_cid(req), lid, lsp, rae, len, off);

    status = nvme_check_mdts(n, len);
    if (status) {
        return status;
    }

    switch (lid) {
    case NVME_LOG_ERROR_INFO:
        return nvme_error_info(n, rae, len, off, req);
    case NVME_LOG_SMART_INFO:
        return nvme_smart_info(n, rae, len, off, req);
    case NVME_LOG_FW_SLOT_INFO:
        return nvme_fw_log_info(n, len, off, req);
    case NVME_LOG_CHANGED_NSLIST:
        return nvme_changed_nslist(n, rae, len, off, req);
    case NVME_LOG_CMD_EFFECTS:
        return nvme_cmd_effects(n, csi, len, off, req);
    case NVME_LOG_ENDGRP:
        return nvme_endgrp_info(n, rae, len, off, req);
    case NVME_LOG_FDP_CONFS:
        return nvme_fdp_confs(n, lspi, len, off, req);
    case NVME_LOG_FDP_RUH_USAGE:
        return nvme_fdp_ruh_usage(n, lspi, dw10, dw12, len, off, req);
    case NVME_LOG_FDP_STATS:
        return nvme_fdp_stats(n, lspi, len, off, req);
    case NVME_LOG_FDP_EVENTS:
        return nvme_fdp_events(n, lspi, len, off, req);
    default:
        trace_pci_nvme_err_invalid_log_page(nvme_cid(req), lid);
        return NVME_INVALID_FIELD | NVME_DNR;
    }
}
static void nvme_free_cq(NvmeCQueue *cq, NvmeCtrl *n)
{
    PCIDevice *pci = PCI_DEVICE(n);
    uint16_t offset = (cq->cqid << 3) + (1 << 2);

    n->cq[cq->cqid] = NULL;
    qemu_bh_delete(cq->bh);
    if (cq->ioeventfd_enabled) {
        memory_region_del_eventfd(&n->iomem,
                                  0x1000 + offset, 4, false, 0, &cq->notifier);
        event_notifier_set_handler(&cq->notifier, NULL);
        event_notifier_cleanup(&cq->notifier);
    }
    if (msix_enabled(pci)) {
        msix_vector_unuse(pci, cq->vector);
    }
}
static uint16_t nvme_del_cq(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeDeleteQ *c = (NvmeDeleteQ *)&req->cmd;
    NvmeCQueue *cq;
    uint16_t qid = le16_to_cpu(c->qid);

    if (unlikely(!qid || nvme_check_cqid(n, qid))) {
        trace_pci_nvme_err_invalid_del_cq_cqid(qid);
        return NVME_INVALID_CQID | NVME_DNR;
    }

    cq = n->cq[qid];
    if (unlikely(!QTAILQ_EMPTY(&cq->sq_list))) {
        trace_pci_nvme_err_invalid_del_cq_notempty(qid);
        return NVME_INVALID_QUEUE_DEL;
    }

    if (cq->irq_enabled && cq->tail != cq->head) {
        n->cq_pending--;
    }

    nvme_irq_deassert(n, cq);
    trace_pci_nvme_del_cq(qid);
    nvme_free_cq(cq, n);
    return NVME_SUCCESS;
}
static void nvme_init_cq(NvmeCQueue *cq, NvmeCtrl *n, uint64_t dma_addr,
                         uint16_t cqid, uint16_t vector, uint16_t size,
                         uint16_t irq_enabled)
{
    PCIDevice *pci = PCI_DEVICE(n);

    if (msix_enabled(pci)) {
        msix_vector_use(pci, vector);
    }

    cq->ctrl = n;
    cq->cqid = cqid;
    cq->size = size;
    cq->dma_addr = dma_addr;
    cq->irq_enabled = irq_enabled;
    cq->vector = vector;
    cq->head = cq->tail = 0;
    QTAILQ_INIT(&cq->req_list);
    QTAILQ_INIT(&cq->sq_list);
    if (n->dbbuf_enabled) {
        cq->db_addr = n->dbbuf_dbs + (cqid << 3) + (1 << 2);
        cq->ei_addr = n->dbbuf_eis + (cqid << 3) + (1 << 2);

        if (n->params.ioeventfd && cqid != 0) {
            if (!nvme_init_cq_ioeventfd(cq)) {
                cq->ioeventfd_enabled = true;
            }
        }
    }

    cq->bh = qemu_bh_new_guarded(nvme_post_cqes, cq,
                                 &DEVICE(cq->ctrl)->mem_reentrancy_guard);
}
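
/*
 * With CAP.DSTRD cleared to zero the doorbell (and shadow doorbell) stride
 * is 4 bytes per register: the submission queue y tail doorbell lives at
 * offset (2 * y) * 4 = y << 3 and the matching completion queue head
 * doorbell at (2 * y + 1) * 4 = (y << 3) + 4, which is where the
 * (cqid << 3) + (1 << 2) expressions above and in nvme_dbbuf_config()
 * come from.
 */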
static uint16_t nvme_create_cq(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeCQueue *cq;
    NvmeCreateCq *c = (NvmeCreateCq *)&req->cmd;
    uint16_t cqid = le16_to_cpu(c->cqid);
    uint16_t vector = le16_to_cpu(c->irq_vector);
    uint16_t qsize = le16_to_cpu(c->qsize);
    uint16_t qflags = le16_to_cpu(c->cq_flags);
    uint64_t prp1 = le64_to_cpu(c->prp1);

    trace_pci_nvme_create_cq(prp1, cqid, vector, qsize, qflags,
                             NVME_CQ_FLAGS_IEN(qflags) != 0);

    if (unlikely(!cqid || cqid > n->conf_ioqpairs || n->cq[cqid] != NULL)) {
        trace_pci_nvme_err_invalid_create_cq_cqid(cqid);
        return NVME_INVALID_QID | NVME_DNR;
    }
    if (unlikely(!qsize || qsize > NVME_CAP_MQES(ldq_le_p(&n->bar.cap)))) {
        trace_pci_nvme_err_invalid_create_cq_size(qsize);
        return NVME_MAX_QSIZE_EXCEEDED | NVME_DNR;
    }
    if (unlikely(prp1 & (n->page_size - 1))) {
        trace_pci_nvme_err_invalid_create_cq_addr(prp1);
        return NVME_INVALID_PRP_OFFSET | NVME_DNR;
    }
    if (unlikely(!msix_enabled(PCI_DEVICE(n)) && vector)) {
        trace_pci_nvme_err_invalid_create_cq_vector(vector);
        return NVME_INVALID_IRQ_VECTOR | NVME_DNR;
    }
    if (unlikely(vector >= n->conf_msix_qsize)) {
        trace_pci_nvme_err_invalid_create_cq_vector(vector);
        return NVME_INVALID_IRQ_VECTOR | NVME_DNR;
    }
    if (unlikely(!(NVME_CQ_FLAGS_PC(qflags)))) {
        trace_pci_nvme_err_invalid_create_cq_qflags(NVME_CQ_FLAGS_PC(qflags));
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    cq = g_malloc0(sizeof(*cq));
    nvme_init_cq(cq, n, prp1, cqid, vector, qsize + 1,
                 NVME_CQ_FLAGS_IEN(qflags));

    /*
     * It is only required to set qs_created when creating a completion queue;
     * creating a submission queue without a matching completion queue will
     * fail.
     */
    n->qs_created = true;
    return NVME_SUCCESS;
}
static uint16_t nvme_rpt_empty_id_struct(NvmeCtrl *n, NvmeRequest *req)
{
    uint8_t id[NVME_IDENTIFY_DATA_SIZE] = {};

    return nvme_c2h(n, id, sizeof(id), req);
}
static uint16_t nvme_identify_ctrl(NvmeCtrl *n, NvmeRequest *req)
{
    trace_pci_nvme_identify_ctrl();

    return nvme_c2h(n, (uint8_t *)&n->id_ctrl, sizeof(n->id_ctrl), req);
}
static uint16_t nvme_identify_ctrl_csi(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
    uint8_t id[NVME_IDENTIFY_DATA_SIZE] = {};
    NvmeIdCtrlNvm *id_nvm = (NvmeIdCtrlNvm *)&id;

    trace_pci_nvme_identify_ctrl_csi(c->csi);

    switch (c->csi) {
    case NVME_CSI_NVM:
        id_nvm->vsl = n->params.vsl;
        id_nvm->dmrsl = cpu_to_le32(n->dmrsl);
        break;

    case NVME_CSI_ZONED:
        ((NvmeIdCtrlZoned *)&id)->zasl = n->params.zasl;
        break;

    default:
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    return nvme_c2h(n, id, sizeof(id), req);
}
static uint16_t nvme_identify_ns(NvmeCtrl *n, NvmeRequest *req, bool active)
{
    NvmeNamespace *ns;
    NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
    uint32_t nsid = le32_to_cpu(c->nsid);

    trace_pci_nvme_identify_ns(nsid);

    if (!nvme_nsid_valid(n, nsid) || nsid == NVME_NSID_BROADCAST) {
        return NVME_INVALID_NSID | NVME_DNR;
    }

    ns = nvme_ns(n, nsid);
    if (unlikely(!ns)) {
        if (!active) {
            ns = nvme_subsys_ns(n->subsys, nsid);
            if (!ns) {
                return nvme_rpt_empty_id_struct(n, req);
            }
        } else {
            return nvme_rpt_empty_id_struct(n, req);
        }
    }

    if (active || ns->csi == NVME_CSI_NVM) {
        return nvme_c2h(n, (uint8_t *)&ns->id_ns, sizeof(NvmeIdNs), req);
    }

    return NVME_INVALID_CMD_SET | NVME_DNR;
}
static uint16_t nvme_identify_ctrl_list(NvmeCtrl *n, NvmeRequest *req,
                                        bool attached)
{
    NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
    uint32_t nsid = le32_to_cpu(c->nsid);
    uint16_t min_id = le16_to_cpu(c->ctrlid);
    uint16_t list[NVME_CONTROLLER_LIST_SIZE] = {};
    uint16_t *ids = &list[1];
    NvmeNamespace *ns;
    NvmeCtrl *ctrl;
    int cntlid, nr_ids = 0;

    trace_pci_nvme_identify_ctrl_list(c->cns, min_id);

    if (!n->subsys) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    if (attached) {
        if (nsid == NVME_NSID_BROADCAST) {
            return NVME_INVALID_FIELD | NVME_DNR;
        }

        ns = nvme_subsys_ns(n->subsys, nsid);
        if (!ns) {
            return NVME_INVALID_FIELD | NVME_DNR;
        }
    }

    for (cntlid = min_id; cntlid < ARRAY_SIZE(n->subsys->ctrls); cntlid++) {
        ctrl = nvme_subsys_ctrl(n->subsys, cntlid);
        if (!ctrl) {
            continue;
        }

        if (attached && !nvme_ns(ctrl, nsid)) {
            continue;
        }

        ids[nr_ids++] = cntlid;
    }

    list[0] = nr_ids;

    return nvme_c2h(n, (uint8_t *)list, sizeof(list), req);
}
static uint16_t nvme_identify_pri_ctrl_cap(NvmeCtrl *n, NvmeRequest *req)
{
    trace_pci_nvme_identify_pri_ctrl_cap(le16_to_cpu(n->pri_ctrl_cap.cntlid));

    return nvme_c2h(n, (uint8_t *)&n->pri_ctrl_cap,
                    sizeof(NvmePriCtrlCap), req);
}
static uint16_t nvme_identify_sec_ctrl_list(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
    uint16_t pri_ctrl_id = le16_to_cpu(n->pri_ctrl_cap.cntlid);
    uint16_t min_id = le16_to_cpu(c->ctrlid);
    uint8_t num_sec_ctrl = n->sec_ctrl_list.numcntl;
    NvmeSecCtrlList list = {0};
    uint8_t i;

    for (i = 0; i < num_sec_ctrl; i++) {
        if (n->sec_ctrl_list.sec[i].scid >= min_id) {
            list.numcntl = num_sec_ctrl - i;
            memcpy(&list.sec, n->sec_ctrl_list.sec + i,
                   list.numcntl * sizeof(NvmeSecCtrlEntry));
            break;
        }
    }

    trace_pci_nvme_identify_sec_ctrl_list(pri_ctrl_id, list.numcntl);

    return nvme_c2h(n, (uint8_t *)&list, sizeof(list), req);
}
static uint16_t nvme_identify_ns_csi(NvmeCtrl *n, NvmeRequest *req,
                                     bool active)
{
    NvmeNamespace *ns;
    NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
    uint32_t nsid = le32_to_cpu(c->nsid);

    trace_pci_nvme_identify_ns_csi(nsid, c->csi);

    if (!nvme_nsid_valid(n, nsid) || nsid == NVME_NSID_BROADCAST) {
        return NVME_INVALID_NSID | NVME_DNR;
    }

    ns = nvme_ns(n, nsid);
    if (unlikely(!ns)) {
        if (!active) {
            ns = nvme_subsys_ns(n->subsys, nsid);
            if (!ns) {
                return nvme_rpt_empty_id_struct(n, req);
            }
        } else {
            return nvme_rpt_empty_id_struct(n, req);
        }
    }

    if (c->csi == NVME_CSI_NVM) {
        return nvme_c2h(n, (uint8_t *)&ns->id_ns_nvm, sizeof(NvmeIdNsNvm),
                        req);
    } else if (c->csi == NVME_CSI_ZONED && ns->csi == NVME_CSI_ZONED) {
        return nvme_c2h(n, (uint8_t *)ns->id_ns_zoned, sizeof(NvmeIdNsZoned),
                        req);
    }

    return NVME_INVALID_FIELD | NVME_DNR;
}
static uint16_t nvme_identify_nslist(NvmeCtrl *n, NvmeRequest *req,
                                     bool active)
{
    NvmeNamespace *ns;
    NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
    uint32_t min_nsid = le32_to_cpu(c->nsid);
    uint8_t list[NVME_IDENTIFY_DATA_SIZE] = {};
    static const int data_len = sizeof(list);
    uint32_t *list_ptr = (uint32_t *)list;
    int i, j = 0;

    trace_pci_nvme_identify_nslist(min_nsid);

    /*
     * Both FFFFFFFFh (NVME_NSID_BROADCAST) and FFFFFFFEh are invalid values
     * since the Active Namespace ID List should return namespaces with ids
     * *higher* than the NSID specified in the command. This is also specified
     * in the spec (NVM Express v1.3d, Section 5.15.4).
     */
    if (min_nsid >= NVME_NSID_BROADCAST - 1) {
        return NVME_INVALID_NSID | NVME_DNR;
    }

    for (i = 1; i <= NVME_MAX_NAMESPACES; i++) {
        ns = nvme_ns(n, i);
        if (!ns) {
            if (!active) {
                ns = nvme_subsys_ns(n->subsys, i);
                if (!ns) {
                    continue;
                }
            } else {
                continue;
            }
        }

        if (ns->params.nsid <= min_nsid) {
            continue;
        }

        list_ptr[j++] = cpu_to_le32(ns->params.nsid);
        if (j == data_len / sizeof(uint32_t)) {
            break;
        }
    }

    return nvme_c2h(n, list, data_len, req);
}
static uint16_t nvme_identify_nslist_csi(NvmeCtrl *n, NvmeRequest *req,
                                         bool active)
{
    NvmeNamespace *ns;
    NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
    uint32_t min_nsid = le32_to_cpu(c->nsid);
    uint8_t list[NVME_IDENTIFY_DATA_SIZE] = {};
    static const int data_len = sizeof(list);
    uint32_t *list_ptr = (uint32_t *)list;
    int i, j = 0;

    trace_pci_nvme_identify_nslist_csi(min_nsid, c->csi);

    /*
     * Same as in nvme_identify_nslist(), FFFFFFFFh/FFFFFFFEh are invalid.
     */
    if (min_nsid >= NVME_NSID_BROADCAST - 1) {
        return NVME_INVALID_NSID | NVME_DNR;
    }

    if (c->csi != NVME_CSI_NVM && c->csi != NVME_CSI_ZONED) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    for (i = 1; i <= NVME_MAX_NAMESPACES; i++) {
        ns = nvme_ns(n, i);
        if (!ns) {
            if (!active) {
                ns = nvme_subsys_ns(n->subsys, i);
                if (!ns) {
                    continue;
                }
            } else {
                continue;
            }
        }

        if (ns->params.nsid <= min_nsid || c->csi != ns->csi) {
            continue;
        }

        list_ptr[j++] = cpu_to_le32(ns->params.nsid);
        if (j == data_len / sizeof(uint32_t)) {
            break;
        }
    }

    return nvme_c2h(n, list, data_len, req);
}
static uint16_t nvme_identify_ns_descr_list(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeNamespace *ns;
    NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
    uint32_t nsid = le32_to_cpu(c->nsid);
    uint8_t list[NVME_IDENTIFY_DATA_SIZE] = {};
    uint8_t *pos = list;
    struct {
        NvmeIdNsDescr hdr;
        uint8_t v[NVME_NIDL_UUID];
    } QEMU_PACKED uuid = {};
    struct {
        NvmeIdNsDescr hdr;
        uint64_t v;
    } QEMU_PACKED eui64 = {};
    struct {
        NvmeIdNsDescr hdr;
        uint8_t v;
    } QEMU_PACKED csi = {};

    trace_pci_nvme_identify_ns_descr_list(nsid);

    if (!nvme_nsid_valid(n, nsid) || nsid == NVME_NSID_BROADCAST) {
        return NVME_INVALID_NSID | NVME_DNR;
    }

    ns = nvme_ns(n, nsid);
    if (unlikely(!ns)) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    if (!qemu_uuid_is_null(&ns->params.uuid)) {
        uuid.hdr.nidt = NVME_NIDT_UUID;
        uuid.hdr.nidl = NVME_NIDL_UUID;
        memcpy(uuid.v, ns->params.uuid.data, NVME_NIDL_UUID);
        memcpy(pos, &uuid, sizeof(uuid));
        pos += sizeof(uuid);
    }

    if (ns->params.eui64) {
        eui64.hdr.nidt = NVME_NIDT_EUI64;
        eui64.hdr.nidl = NVME_NIDL_EUI64;
        eui64.v = cpu_to_be64(ns->params.eui64);
        memcpy(pos, &eui64, sizeof(eui64));
        pos += sizeof(eui64);
    }

    csi.hdr.nidt = NVME_NIDT_CSI;
    csi.hdr.nidl = NVME_NIDL_CSI;
    csi.v = ns->csi;
    memcpy(pos, &csi, sizeof(csi));

    return nvme_c2h(n, list, sizeof(list), req);
}
static uint16_t nvme_identify_cmd_set(NvmeCtrl *n, NvmeRequest *req)
{
    uint8_t list[NVME_IDENTIFY_DATA_SIZE] = {};
    static const int data_len = sizeof(list);

    trace_pci_nvme_identify_cmd_set();

    NVME_SET_CSI(*list, NVME_CSI_NVM);
    NVME_SET_CSI(*list, NVME_CSI_ZONED);

    return nvme_c2h(n, list, data_len, req);
}
static uint16_t nvme_identify(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeIdentify *c = (NvmeIdentify *)&req->cmd;

    trace_pci_nvme_identify(nvme_cid(req), c->cns, le16_to_cpu(c->ctrlid),
                            c->csi);

    switch (c->cns) {
    case NVME_ID_CNS_NS:
        return nvme_identify_ns(n, req, true);
    case NVME_ID_CNS_NS_PRESENT:
        return nvme_identify_ns(n, req, false);
    case NVME_ID_CNS_NS_ATTACHED_CTRL_LIST:
        return nvme_identify_ctrl_list(n, req, true);
    case NVME_ID_CNS_CTRL_LIST:
        return nvme_identify_ctrl_list(n, req, false);
    case NVME_ID_CNS_PRIMARY_CTRL_CAP:
        return nvme_identify_pri_ctrl_cap(n, req);
    case NVME_ID_CNS_SECONDARY_CTRL_LIST:
        return nvme_identify_sec_ctrl_list(n, req);
    case NVME_ID_CNS_CS_NS:
        return nvme_identify_ns_csi(n, req, true);
    case NVME_ID_CNS_CS_NS_PRESENT:
        return nvme_identify_ns_csi(n, req, false);
    case NVME_ID_CNS_CTRL:
        return nvme_identify_ctrl(n, req);
    case NVME_ID_CNS_CS_CTRL:
        return nvme_identify_ctrl_csi(n, req);
    case NVME_ID_CNS_NS_ACTIVE_LIST:
        return nvme_identify_nslist(n, req, true);
    case NVME_ID_CNS_NS_PRESENT_LIST:
        return nvme_identify_nslist(n, req, false);
    case NVME_ID_CNS_CS_NS_ACTIVE_LIST:
        return nvme_identify_nslist_csi(n, req, true);
    case NVME_ID_CNS_CS_NS_PRESENT_LIST:
        return nvme_identify_nslist_csi(n, req, false);
    case NVME_ID_CNS_NS_DESCR_LIST:
        return nvme_identify_ns_descr_list(n, req);
    case NVME_ID_CNS_IO_COMMAND_SET:
        return nvme_identify_cmd_set(n, req);
    default:
        trace_pci_nvme_err_invalid_identify_cns(le32_to_cpu(c->cns));
        return NVME_INVALID_FIELD | NVME_DNR;
    }
}
static uint16_t nvme_abort(NvmeCtrl *n, NvmeRequest *req)
{
    uint16_t sqid = le32_to_cpu(req->cmd.cdw10) & 0xffff;

    req->cqe.result = 1;
    if (nvme_check_sqid(n, sqid)) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    return NVME_SUCCESS;
}
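
/*
 * Abort is advisory here: completion queue entry dword 0 bit 0 is set to 1,
 * which tells the host that the specified command was not aborted.
 */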
static inline void nvme_set_timestamp(NvmeCtrl *n, uint64_t ts)
{
    trace_pci_nvme_setfeat_timestamp(ts);

    n->host_timestamp = le64_to_cpu(ts);
    n->timestamp_set_qemu_clock_ms = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
}
static inline uint64_t nvme_get_timestamp(const NvmeCtrl *n)
{
    uint64_t current_time = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
    uint64_t elapsed_time = current_time - n->timestamp_set_qemu_clock_ms;

    union nvme_timestamp {
        struct {
            uint64_t timestamp:48;
            uint64_t sync:1;
            uint64_t origin:3;
            uint64_t rsvd1:12;
        };
        uint64_t all;
    };

    union nvme_timestamp ts;
    ts.all = 0;
    ts.timestamp = n->host_timestamp + elapsed_time;

    /* If the host timestamp is non-zero, set the timestamp origin */
    ts.origin = n->host_timestamp ? 0x01 : 0x00;

    trace_pci_nvme_getfeat_timestamp(ts.all);

    return cpu_to_le64(ts.all);
}
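
/*
 * The Timestamp feature value is a 48-bit millisecond counter in bits 47:0
 * plus a synch bit and a 3-bit origin field; origin 01b indicates the value
 * is the host-programmed timestamp plus elapsed time, origin 000b that a
 * timestamp was never set.
 */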
static uint16_t nvme_get_feature_timestamp(NvmeCtrl *n, NvmeRequest *req)
{
    uint64_t timestamp = nvme_get_timestamp(n);

    return nvme_c2h(n, (uint8_t *)&timestamp, sizeof(timestamp), req);
}
static int nvme_get_feature_fdp(NvmeCtrl *n, uint32_t endgrpid,
                                uint32_t *result)
{
    *result = 0;

    if (!n->subsys || !n->subsys->endgrp.fdp.enabled) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    *result = FIELD_DP16(0, FEAT_FDP, FDPE, 1);
    *result = FIELD_DP16(*result, FEAT_FDP, CONF_NDX, 0);

    return NVME_SUCCESS;
}
static uint16_t nvme_get_feature_fdp_events(NvmeCtrl *n, NvmeNamespace *ns,
                                            NvmeRequest *req, uint32_t *result)
{
    NvmeCmd *cmd = &req->cmd;
    uint32_t cdw11 = le32_to_cpu(cmd->cdw11);
    uint16_t ph = cdw11 & 0xffff;
    uint8_t noet = (cdw11 >> 16) & 0xff;
    uint16_t ruhid, ret;
    uint32_t nentries = 0;
    uint8_t s_events_ndx = 0;
    size_t s_events_siz = sizeof(NvmeFdpEventDescr) * noet;
    g_autofree NvmeFdpEventDescr *s_events = g_malloc0(s_events_siz);
    NvmeRuHandle *ruh;
    NvmeFdpEventDescr *s_event;

    if (!n->subsys || !n->subsys->endgrp.fdp.enabled) {
        return NVME_FDP_DISABLED | NVME_DNR;
    }

    if (!nvme_ph_valid(ns, ph)) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    ruhid = ns->fdp.phs[ph];
    ruh = &n->subsys->endgrp.fdp.ruhs[ruhid];

    if (unlikely(noet == 0)) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    for (uint8_t event_type = 0; event_type < FDP_EVT_MAX; event_type++) {
        uint8_t shift = nvme_fdp_evf_shifts[event_type];
        if (!shift && event_type) {
            /*
             * only first entry (event_type == 0) has a shift value of 0
             * other entries are simply unpopulated.
             */
            continue;
        }

        nentries++;

        s_event = &s_events[s_events_ndx];
        s_event->evt = event_type;
        s_event->evta = (ruh->event_filter >> shift) & 0x1;

        /* break if all `noet` entries are filled */
        if ((++s_events_ndx) == noet) {
            break;
        }
    }

    ret = nvme_c2h(n, s_events, s_events_siz, req);
    if (ret) {
        return ret;
    }

    *result = nentries;
    return NVME_SUCCESS;
}
static uint16_t nvme_get_feature(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeCmd *cmd = &req->cmd;
    uint32_t dw10 = le32_to_cpu(cmd->cdw10);
    uint32_t dw11 = le32_to_cpu(cmd->cdw11);
    uint32_t nsid = le32_to_cpu(cmd->nsid);
    uint32_t result = 0;
    uint8_t fid = NVME_GETSETFEAT_FID(dw10);
    NvmeGetFeatureSelect sel = NVME_GETFEAT_SELECT(dw10);
    uint16_t iv;
    NvmeNamespace *ns;
    int i;
    uint16_t endgrpid = 0, ret = NVME_SUCCESS;

    static const uint32_t nvme_feature_default[NVME_FID_MAX] = {
        [NVME_ARBITRATION] = NVME_ARB_AB_NOLIMIT,
    };

    trace_pci_nvme_getfeat(nvme_cid(req), nsid, fid, sel, dw11);

    if (!nvme_feature_support[fid]) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    if (nvme_feature_cap[fid] & NVME_FEAT_CAP_NS) {
        if (!nvme_nsid_valid(n, nsid) || nsid == NVME_NSID_BROADCAST) {
            /*
             * The Reservation Notification Mask and Reservation Persistence
             * features require a status code of Invalid Field in Command when
             * NSID is FFFFFFFFh. Since the device does not support those
             * features we can always return Invalid Namespace or Format as we
             * should do for all other features.
             */
            return NVME_INVALID_NSID | NVME_DNR;
        }

        if (!nvme_ns(n, nsid)) {
            return NVME_INVALID_FIELD | NVME_DNR;
        }
    }

    switch (sel) {
    case NVME_GETFEAT_SELECT_CURRENT:
        break;
    case NVME_GETFEAT_SELECT_SAVED:
        /* no features are saveable by the controller; fallthrough */
    case NVME_GETFEAT_SELECT_DEFAULT:
        goto defaults;
    case NVME_GETFEAT_SELECT_CAP:
        result = nvme_feature_cap[fid];
        goto out;
    }

    switch (fid) {
    case NVME_TEMPERATURE_THRESHOLD:
        result = 0;

        /*
         * The controller only implements the Composite Temperature sensor, so
         * return 0 for all other sensors.
         */
        if (NVME_TEMP_TMPSEL(dw11) != NVME_TEMP_TMPSEL_COMPOSITE) {
            goto out;
        }

        switch (NVME_TEMP_THSEL(dw11)) {
        case NVME_TEMP_THSEL_OVER:
            result = n->features.temp_thresh_hi;
            goto out;
        case NVME_TEMP_THSEL_UNDER:
            result = n->features.temp_thresh_low;
            goto out;
        }

        return NVME_INVALID_FIELD | NVME_DNR;
    case NVME_ERROR_RECOVERY:
        if (!nvme_nsid_valid(n, nsid)) {
            return NVME_INVALID_NSID | NVME_DNR;
        }

        ns = nvme_ns(n, nsid);
        if (unlikely(!ns)) {
            return NVME_INVALID_FIELD | NVME_DNR;
        }

        result = ns->features.err_rec;
        goto out;
    case NVME_VOLATILE_WRITE_CACHE:
        result = 0;
        for (i = 1; i <= NVME_MAX_NAMESPACES; i++) {
            ns = nvme_ns(n, i);
            if (!ns) {
                continue;
            }

            result = blk_enable_write_cache(ns->blkconf.blk);
            if (result) {
                break;
            }
        }
        trace_pci_nvme_getfeat_vwcache(result ? "enabled" : "disabled");
        goto out;
    case NVME_ASYNCHRONOUS_EVENT_CONF:
        result = n->features.async_config;
        goto out;
    case NVME_TIMESTAMP:
        return nvme_get_feature_timestamp(n, req);
    case NVME_HOST_BEHAVIOR_SUPPORT:
        return nvme_c2h(n, (uint8_t *)&n->features.hbs,
                        sizeof(n->features.hbs), req);
    case NVME_FDP_MODE:
        endgrpid = dw11 & 0xff;

        if (endgrpid != 0x1) {
            return NVME_INVALID_FIELD | NVME_DNR;
        }

        ret = nvme_get_feature_fdp(n, endgrpid, &result);
        if (ret) {
            return ret;
        }
        goto out;
    case NVME_FDP_EVENTS:
        if (!nvme_nsid_valid(n, nsid)) {
            return NVME_INVALID_NSID | NVME_DNR;
        }

        ns = nvme_ns(n, nsid);
        if (unlikely(!ns)) {
            return NVME_INVALID_FIELD | NVME_DNR;
        }

        ret = nvme_get_feature_fdp_events(n, ns, req, &result);
        if (ret) {
            return ret;
        }
        goto out;
    default:
        break;
    }

defaults:
    switch (fid) {
    case NVME_TEMPERATURE_THRESHOLD:
        result = 0;

        if (NVME_TEMP_TMPSEL(dw11) != NVME_TEMP_TMPSEL_COMPOSITE) {
            break;
        }

        if (NVME_TEMP_THSEL(dw11) == NVME_TEMP_THSEL_OVER) {
            result = NVME_TEMPERATURE_WARNING;
        }

        break;
    case NVME_NUMBER_OF_QUEUES:
        result = (n->conf_ioqpairs - 1) | ((n->conf_ioqpairs - 1) << 16);
        trace_pci_nvme_getfeat_numq(result);
        break;
    case NVME_INTERRUPT_VECTOR_CONF:
        iv = dw11 & 0xffff;
        if (iv >= n->conf_ioqpairs + 1) {
            return NVME_INVALID_FIELD | NVME_DNR;
        }

        result = iv;
        if (iv == n->admin_cq.vector) {
            result |= NVME_INTVC_NOCOALESCING;
        }
        break;
    case NVME_FDP_MODE:
        endgrpid = dw11 & 0xff;

        if (endgrpid != 0x1) {
            return NVME_INVALID_FIELD | NVME_DNR;
        }

        ret = nvme_get_feature_fdp(n, endgrpid, &result);
        if (ret) {
            return ret;
        }
        break;
    default:
        result = nvme_feature_default[fid];
        break;
    }

out:
    req->cqe.result = cpu_to_le32(result);
    return ret;
}
static uint16_t nvme_set_feature_timestamp(NvmeCtrl *n, NvmeRequest *req)
{
    uint16_t ret;
    uint64_t timestamp;

    ret = nvme_h2c(n, (uint8_t *)&timestamp, sizeof(timestamp), req);
    if (ret) {
        return ret;
    }

    nvme_set_timestamp(n, timestamp);

    return NVME_SUCCESS;
}
static uint16_t nvme_set_feature_fdp_events(NvmeCtrl *n, NvmeNamespace *ns,
                                            NvmeRequest *req)
{
    NvmeCmd *cmd = &req->cmd;
    uint32_t cdw11 = le32_to_cpu(cmd->cdw11);
    uint16_t ph = cdw11 & 0xffff;
    uint8_t noet = (cdw11 >> 16) & 0xff;
    uint16_t ret, ruhid;
    uint8_t enable = le32_to_cpu(cmd->cdw12) & 0x1;
    uint8_t event_mask = 0;
    unsigned int i;
    g_autofree uint8_t *events = g_malloc0(noet);
    NvmeRuHandle *ruh = NULL;

    if (!n->subsys || !n->subsys->endgrp.fdp.enabled) {
        return NVME_FDP_DISABLED | NVME_DNR;
    }

    if (!nvme_ph_valid(ns, ph)) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    ruhid = ns->fdp.phs[ph];
    ruh = &n->subsys->endgrp.fdp.ruhs[ruhid];

    ret = nvme_h2c(n, events, noet, req);
    if (ret) {
        return ret;
    }

    for (i = 0; i < noet; i++) {
        event_mask |= (1 << nvme_fdp_evf_shifts[events[i]]);
    }

    if (enable) {
        ruh->event_filter |= event_mask;
    } else {
        ruh->event_filter = ruh->event_filter & ~event_mask;
    }

    return NVME_SUCCESS;
}
static uint16_t nvme_set_feature(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeNamespace *ns = NULL;

    NvmeCmd *cmd = &req->cmd;
    uint32_t dw10 = le32_to_cpu(cmd->cdw10);
    uint32_t dw11 = le32_to_cpu(cmd->cdw11);
    uint32_t nsid = le32_to_cpu(cmd->nsid);
    uint8_t fid = NVME_GETSETFEAT_FID(dw10);
    uint8_t save = NVME_SETFEAT_SAVE(dw10);
    uint16_t status;
    int i;

    trace_pci_nvme_setfeat(nvme_cid(req), nsid, fid, save, dw11);

    if (save && !(nvme_feature_cap[fid] & NVME_FEAT_CAP_SAVE)) {
        return NVME_FID_NOT_SAVEABLE | NVME_DNR;
    }

    if (!nvme_feature_support[fid]) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    if (nvme_feature_cap[fid] & NVME_FEAT_CAP_NS) {
        if (nsid != NVME_NSID_BROADCAST) {
            if (!nvme_nsid_valid(n, nsid)) {
                return NVME_INVALID_NSID | NVME_DNR;
            }

            ns = nvme_ns(n, nsid);
            if (unlikely(!ns)) {
                return NVME_INVALID_FIELD | NVME_DNR;
            }
        }
    } else if (nsid && nsid != NVME_NSID_BROADCAST) {
        if (!nvme_nsid_valid(n, nsid)) {
            return NVME_INVALID_NSID | NVME_DNR;
        }

        return NVME_FEAT_NOT_NS_SPEC | NVME_DNR;
    }

    if (!(nvme_feature_cap[fid] & NVME_FEAT_CAP_CHANGE)) {
        return NVME_FEAT_NOT_CHANGEABLE | NVME_DNR;
    }

    switch (fid) {
    case NVME_TEMPERATURE_THRESHOLD:
        if (NVME_TEMP_TMPSEL(dw11) != NVME_TEMP_TMPSEL_COMPOSITE) {
            break;
        }

        switch (NVME_TEMP_THSEL(dw11)) {
        case NVME_TEMP_THSEL_OVER:
            n->features.temp_thresh_hi = NVME_TEMP_TMPTH(dw11);
            break;
        case NVME_TEMP_THSEL_UNDER:
            n->features.temp_thresh_low = NVME_TEMP_TMPTH(dw11);
            break;
        default:
            return NVME_INVALID_FIELD | NVME_DNR;
        }

        if ((n->temperature >= n->features.temp_thresh_hi) ||
            (n->temperature <= n->features.temp_thresh_low)) {
            nvme_smart_event(n, NVME_SMART_TEMPERATURE);
        }

        break;
    case NVME_ERROR_RECOVERY:
        if (nsid == NVME_NSID_BROADCAST) {
            for (i = 1; i <= NVME_MAX_NAMESPACES; i++) {
                ns = nvme_ns(n, i);
                if (!ns) {
                    continue;
                }

                if (NVME_ID_NS_NSFEAT_DULBE(ns->id_ns.nsfeat)) {
                    ns->features.err_rec = dw11;
                }
            }

            break;
        }

        if (NVME_ID_NS_NSFEAT_DULBE(ns->id_ns.nsfeat)) {
            ns->features.err_rec = dw11;
        }
        break;
    case NVME_VOLATILE_WRITE_CACHE:
        for (i = 1; i <= NVME_MAX_NAMESPACES; i++) {
            ns = nvme_ns(n, i);
            if (!ns) {
                continue;
            }

            if (!(dw11 & 0x1) && blk_enable_write_cache(ns->blkconf.blk)) {
                blk_flush(ns->blkconf.blk);
            }

            blk_set_enable_write_cache(ns->blkconf.blk, dw11 & 1);
        }

        break;
    case NVME_NUMBER_OF_QUEUES:
        if (n->qs_created) {
            return NVME_CMD_SEQ_ERROR | NVME_DNR;
        }

        /*
         * NVMe v1.3, Section 5.21.1.7: FFFFh is not an allowed value for NCQR
         * and NSQR.
         */
        if ((dw11 & 0xffff) == 0xffff || ((dw11 >> 16) & 0xffff) == 0xffff) {
            return NVME_INVALID_FIELD | NVME_DNR;
        }

        trace_pci_nvme_setfeat_numq((dw11 & 0xffff) + 1,
                                    ((dw11 >> 16) & 0xffff) + 1,
                                    n->conf_ioqpairs,
                                    n->conf_ioqpairs);
        req->cqe.result = cpu_to_le32((n->conf_ioqpairs - 1) |
                                      ((n->conf_ioqpairs - 1) << 16));
        break;
    case NVME_ASYNCHRONOUS_EVENT_CONF:
        n->features.async_config = dw11;
        break;
    case NVME_TIMESTAMP:
        return nvme_set_feature_timestamp(n, req);
    case NVME_HOST_BEHAVIOR_SUPPORT:
        status = nvme_h2c(n, (uint8_t *)&n->features.hbs,
                          sizeof(n->features.hbs), req);
        if (status) {
            return status;
        }

        for (i = 1; i <= NVME_MAX_NAMESPACES; i++) {
            ns = nvme_ns(n, i);
            if (!ns) {
                continue;
            }

            ns->id_ns.nlbaf = ns->nlbaf - 1;
            if (!n->features.hbs.lbafee) {
                ns->id_ns.nlbaf = MIN(ns->id_ns.nlbaf, 15);
            }
        }

        break;
    case NVME_COMMAND_SET_PROFILE:
        if (dw11 & 0x1ff) {
            trace_pci_nvme_err_invalid_iocsci(dw11 & 0x1ff);
            return NVME_CMD_SET_CMB_REJECTED | NVME_DNR;
        }
        break;
    case NVME_FDP_MODE:
        /* spec: abort with cmd seq err if there are namespaces in the endgrp */
        return NVME_CMD_SEQ_ERROR | NVME_DNR;
    case NVME_FDP_EVENTS:
        return nvme_set_feature_fdp_events(n, ns, req);
    default:
        return NVME_FEAT_NOT_CHANGEABLE | NVME_DNR;
    }
    return NVME_SUCCESS;
}
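
/*
 * Number of Queues is reported and accepted as 0's based values: a returned
 * result of 0x00030003, for example, advertises 4 I/O completion and 4 I/O
 * submission queues. The controller always reports conf_ioqpairs regardless
 * of how many queues the host asked for.
 */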
static uint16_t nvme_aer(NvmeCtrl *n, NvmeRequest *req)
{
    trace_pci_nvme_aer(nvme_cid(req));

    if (n->outstanding_aers > n->params.aerl) {
        trace_pci_nvme_aer_aerl_exceeded();
        return NVME_AER_LIMIT_EXCEEDED;
    }

    n->aer_reqs[n->outstanding_aers] = req;
    n->outstanding_aers++;

    if (!QTAILQ_EMPTY(&n->aer_queue)) {
        nvme_process_aers(n);
    }

    return NVME_NO_COMPLETE;
}
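
/*
 * AERL is a 0's based value, so up to aerl + 1 Asynchronous Event Request
 * commands may be outstanding before the controller fails a new one with
 * NVME_AER_LIMIT_EXCEEDED.
 */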
static void nvme_update_dmrsl(NvmeCtrl *n)
{
    int nsid;

    for (nsid = 1; nsid <= NVME_MAX_NAMESPACES; nsid++) {
        NvmeNamespace *ns = nvme_ns(n, nsid);
        if (!ns) {
            continue;
        }

        n->dmrsl = MIN_NON_ZERO(n->dmrsl,
                                BDRV_REQUEST_MAX_BYTES / nvme_l2b(ns, 1));
    }
}
static void nvme_select_iocs_ns(NvmeCtrl *n, NvmeNamespace *ns)
{
    uint32_t cc = ldl_le_p(&n->bar.cc);

    ns->iocs = nvme_cse_iocs_none;
    switch (ns->csi) {
    case NVME_CSI_NVM:
        if (NVME_CC_CSS(cc) != NVME_CC_CSS_ADMIN_ONLY) {
            ns->iocs = nvme_cse_iocs_nvm;
        }
        break;
    case NVME_CSI_ZONED:
        if (NVME_CC_CSS(cc) == NVME_CC_CSS_CSI) {
            ns->iocs = nvme_cse_iocs_zoned;
        } else if (NVME_CC_CSS(cc) == NVME_CC_CSS_NVM) {
            ns->iocs = nvme_cse_iocs_nvm;
        }
        break;
    }
}
static uint16_t nvme_ns_attachment(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeNamespace *ns;
    NvmeCtrl *ctrl;
    uint16_t list[NVME_CONTROLLER_LIST_SIZE] = {};
    uint32_t nsid = le32_to_cpu(req->cmd.nsid);
    uint32_t dw10 = le32_to_cpu(req->cmd.cdw10);
    uint8_t sel = dw10 & 0xf;
    uint16_t *nr_ids = &list[0];
    uint16_t *ids = &list[1];
    uint16_t ret;
    int i;

    trace_pci_nvme_ns_attachment(nvme_cid(req), dw10 & 0xf);

    if (!nvme_nsid_valid(n, nsid)) {
        return NVME_INVALID_NSID | NVME_DNR;
    }

    ns = nvme_subsys_ns(n->subsys, nsid);
    if (!ns) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    ret = nvme_h2c(n, (uint8_t *)list, 4096, req);
    if (ret) {
        return ret;
    }

    if (!*nr_ids) {
        return NVME_NS_CTRL_LIST_INVALID | NVME_DNR;
    }

    *nr_ids = MIN(*nr_ids, NVME_CONTROLLER_LIST_SIZE - 1);
    for (i = 0; i < *nr_ids; i++) {
        ctrl = nvme_subsys_ctrl(n->subsys, ids[i]);
        if (!ctrl) {
            return NVME_NS_CTRL_LIST_INVALID | NVME_DNR;
        }

        switch (sel) {
        case NVME_NS_ATTACHMENT_ATTACH:
            if (nvme_ns(ctrl, nsid)) {
                return NVME_NS_ALREADY_ATTACHED | NVME_DNR;
            }

            if (ns->attached && !ns->params.shared) {
                return NVME_NS_PRIVATE | NVME_DNR;
            }

            nvme_attach_ns(ctrl, ns);
            nvme_select_iocs_ns(ctrl, ns);

            break;

        case NVME_NS_ATTACHMENT_DETACH:
            if (!nvme_ns(ctrl, nsid)) {
                return NVME_NS_NOT_ATTACHED | NVME_DNR;
            }

            ctrl->namespaces[nsid] = NULL;
            ns->attached--;

            nvme_update_dmrsl(ctrl);

            break;

        default:
            return NVME_INVALID_FIELD | NVME_DNR;
        }

        /*
         * Add namespace id to the changed namespace id list for event clearing
         * via Get Log Page command.
         */
        if (!test_and_set_bit(nsid, ctrl->changed_nsids)) {
            nvme_enqueue_event(ctrl, NVME_AER_TYPE_NOTICE,
                               NVME_AER_INFO_NOTICE_NS_ATTR_CHANGED,
                               NVME_LOG_CHANGED_NSLIST);
        }
    }

    return NVME_SUCCESS;
}
typedef struct NvmeFormatAIOCB {
    BlockAIOCB common;
    BlockAIOCB *aiocb;
    NvmeRequest *req;
    int ret;

    NvmeNamespace *ns;
    uint32_t nsid;
    bool broadcast;
    int64_t offset;

    uint8_t lbaf;
    uint8_t mset;
    uint8_t pi;
    uint8_t pil;
} NvmeFormatAIOCB;
static void nvme_format_cancel(BlockAIOCB *aiocb)
{
    NvmeFormatAIOCB *iocb = container_of(aiocb, NvmeFormatAIOCB, common);

    iocb->ret = -ECANCELED;

    blk_aio_cancel_async(iocb->aiocb);
}
static const AIOCBInfo nvme_format_aiocb_info = {
    .aiocb_size = sizeof(NvmeFormatAIOCB),
    .cancel_async = nvme_format_cancel,
    .get_aio_context = nvme_get_aio_context,
};
static void nvme_format_set(NvmeNamespace *ns, uint8_t lbaf, uint8_t mset,
                            uint8_t pi, uint8_t pil)
{
    uint8_t lbafl = lbaf & 0xf;
    uint8_t lbafu = lbaf >> 4;

    trace_pci_nvme_format_set(ns->params.nsid, lbaf, mset, pi, pil);

    ns->id_ns.dps = (pil << 3) | pi;
    ns->id_ns.flbas = (lbafu << 5) | (mset << 4) | lbafl;

    nvme_ns_init_format(ns);
}
static void nvme_do_format(NvmeFormatAIOCB *iocb);
static void nvme_format_ns_cb(void *opaque, int ret)
{
    NvmeFormatAIOCB *iocb = opaque;
    NvmeNamespace *ns = iocb->ns;
    int bytes;

    if (iocb->ret < 0) {
        goto done;
    } else if (ret < 0) {
        iocb->ret = ret;
        goto done;
    }

    if (iocb->offset < ns->size) {
        bytes = MIN(BDRV_REQUEST_MAX_BYTES, ns->size - iocb->offset);

        iocb->aiocb = blk_aio_pwrite_zeroes(ns->blkconf.blk, iocb->offset,
                                            bytes, BDRV_REQ_MAY_UNMAP,
                                            nvme_format_ns_cb, iocb);

        iocb->offset += bytes;
        return;
    }

    nvme_format_set(ns, iocb->lbaf, iocb->mset, iocb->pi, iocb->pil);
    iocb->ns = NULL;

done:
    nvme_do_format(iocb);
}
*ns
, uint8_t lbaf
, uint8_t pi
)
6522 if (ns
->params
.zoned
) {
6523 return NVME_INVALID_FORMAT
| NVME_DNR
;
6526 if (lbaf
> ns
->id_ns
.nlbaf
) {
6527 return NVME_INVALID_FORMAT
| NVME_DNR
;
6530 if (pi
&& (ns
->id_ns
.lbaf
[lbaf
].ms
< nvme_pi_tuple_size(ns
))) {
6531 return NVME_INVALID_FORMAT
| NVME_DNR
;
6534 if (pi
&& pi
> NVME_ID_NS_DPS_TYPE_3
) {
6535 return NVME_INVALID_FIELD
| NVME_DNR
;
6538 return NVME_SUCCESS
;
static void nvme_do_format(NvmeFormatAIOCB *iocb)
{
    NvmeRequest *req = iocb->req;
    NvmeCtrl *n = nvme_ctrl(req);
    uint32_t dw10 = le32_to_cpu(req->cmd.cdw10);
    uint8_t lbaf = dw10 & 0xf;
    uint8_t pi = (dw10 >> 5) & 0x7;
    uint16_t status;
    int i;

    if (iocb->ret < 0) {
        goto done;
    }

    if (iocb->broadcast) {
        for (i = iocb->nsid + 1; i <= NVME_MAX_NAMESPACES; i++) {
            iocb->ns = nvme_ns(n, i);
            if (iocb->ns) {
                iocb->nsid = i;
                break;
            }
        }
    }

    if (!iocb->ns) {
        goto done;
    }

    status = nvme_format_check(iocb->ns, lbaf, pi);
    if (status) {
        req->status = status;
        goto done;
    }

    iocb->ns->status = NVME_FORMAT_IN_PROGRESS;
    nvme_format_ns_cb(iocb, 0);
    return;

done:
    iocb->common.cb(iocb->common.opaque, iocb->ret);
    qemu_aio_unref(iocb);
}
static uint16_t nvme_format(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeFormatAIOCB *iocb;
    uint32_t nsid = le32_to_cpu(req->cmd.nsid);
    uint32_t dw10 = le32_to_cpu(req->cmd.cdw10);
    uint8_t lbaf = dw10 & 0xf;
    uint8_t mset = (dw10 >> 4) & 0x1;
    uint8_t pi = (dw10 >> 5) & 0x7;
    uint8_t pil = (dw10 >> 8) & 0x1;
    uint8_t lbafu = (dw10 >> 12) & 0x3;
    uint16_t status;

    iocb = qemu_aio_get(&nvme_format_aiocb_info, NULL, nvme_misc_cb, req);

    iocb->req = req;
    iocb->ret = 0;
    iocb->ns = NULL;
    iocb->nsid = 0;
    iocb->lbaf = lbaf;
    iocb->mset = mset;
    iocb->pi = pi;
    iocb->pil = pil;
    iocb->broadcast = (nsid == NVME_NSID_BROADCAST);
    iocb->offset = 0;

    if (n->features.hbs.lbafee) {
        iocb->lbaf |= lbafu << 4;
    }

    if (!iocb->broadcast) {
        if (!nvme_nsid_valid(n, nsid)) {
            status = NVME_INVALID_NSID | NVME_DNR;
            goto out;
        }

        iocb->ns = nvme_ns(n, nsid);
        if (!iocb->ns) {
            status = NVME_INVALID_FIELD | NVME_DNR;
            goto out;
        }
    }

    req->aiocb = &iocb->common;
    nvme_do_format(iocb);

    return NVME_NO_COMPLETE;

out:
    qemu_aio_unref(iocb);

    return status;
}
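
/*
 * Format NVM with NSID FFFFFFFFh is handled as a broadcast format: the AIOCB
 * walks the attached namespaces one at a time (nvme_do_format() picks the
 * next namespace, nvme_format_ns_cb() zeroes it out in BDRV_REQUEST_MAX_BYTES
 * chunks) and the command only completes once every namespace has been
 * formatted or an error occurs.
 */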
6637 static void nvme_get_virt_res_num(NvmeCtrl
*n
, uint8_t rt
, int *num_total
,
6638 int *num_prim
, int *num_sec
)
6640 *num_total
= le32_to_cpu(rt
?
6641 n
->pri_ctrl_cap
.vifrt
: n
->pri_ctrl_cap
.vqfrt
);
6642 *num_prim
= le16_to_cpu(rt
?
6643 n
->pri_ctrl_cap
.virfap
: n
->pri_ctrl_cap
.vqrfap
);
6644 *num_sec
= le16_to_cpu(rt
? n
->pri_ctrl_cap
.virfa
: n
->pri_ctrl_cap
.vqrfa
);
6647 static uint16_t nvme_assign_virt_res_to_prim(NvmeCtrl
*n
, NvmeRequest
*req
,
6648 uint16_t cntlid
, uint8_t rt
,
6651 int num_total
, num_prim
, num_sec
;
6653 if (cntlid
!= n
->cntlid
) {
6654 return NVME_INVALID_CTRL_ID
| NVME_DNR
;
6657 nvme_get_virt_res_num(n
, rt
, &num_total
, &num_prim
, &num_sec
);
6659 if (nr
> num_total
) {
6660 return NVME_INVALID_NUM_RESOURCES
| NVME_DNR
;
6663 if (nr
> num_total
- num_sec
) {
6664 return NVME_INVALID_RESOURCE_ID
| NVME_DNR
;
6668 n
->next_pri_ctrl_cap
.virfap
= cpu_to_le16(nr
);
6670 n
->next_pri_ctrl_cap
.vqrfap
= cpu_to_le16(nr
);
6673 req
->cqe
.result
= cpu_to_le32(nr
);
6677 static void nvme_update_virt_res(NvmeCtrl
*n
, NvmeSecCtrlEntry
*sctrl
,
6680 int prev_nr
, prev_total
;
6683 prev_nr
= le16_to_cpu(sctrl
->nvi
);
6684 prev_total
= le32_to_cpu(n
->pri_ctrl_cap
.virfa
);
6685 sctrl
->nvi
= cpu_to_le16(nr
);
6686 n
->pri_ctrl_cap
.virfa
= cpu_to_le32(prev_total
+ nr
- prev_nr
);
6688 prev_nr
= le16_to_cpu(sctrl
->nvq
);
6689 prev_total
= le32_to_cpu(n
->pri_ctrl_cap
.vqrfa
);
6690 sctrl
->nvq
= cpu_to_le16(nr
);
6691 n
->pri_ctrl_cap
.vqrfa
= cpu_to_le32(prev_total
+ nr
- prev_nr
);
6695 static uint16_t nvme_assign_virt_res_to_sec(NvmeCtrl
*n
, NvmeRequest
*req
,
6696 uint16_t cntlid
, uint8_t rt
, int nr
)
6698 int num_total
, num_prim
, num_sec
, num_free
, diff
, limit
;
6699 NvmeSecCtrlEntry
*sctrl
;
6701 sctrl
= nvme_sctrl_for_cntlid(n
, cntlid
);
6703 return NVME_INVALID_CTRL_ID
| NVME_DNR
;
6707 return NVME_INVALID_SEC_CTRL_STATE
| NVME_DNR
;
6710 limit
= le16_to_cpu(rt
? n
->pri_ctrl_cap
.vifrsm
: n
->pri_ctrl_cap
.vqfrsm
);
6712 return NVME_INVALID_NUM_RESOURCES
| NVME_DNR
;
6715 nvme_get_virt_res_num(n
, rt
, &num_total
, &num_prim
, &num_sec
);
6716 num_free
= num_total
- num_prim
- num_sec
;
6717 diff
= nr
- le16_to_cpu(rt
? sctrl
->nvi
: sctrl
->nvq
);
6719 if (diff
> num_free
) {
6720 return NVME_INVALID_RESOURCE_ID
| NVME_DNR
;
6723 nvme_update_virt_res(n
, sctrl
, rt
, nr
);
6724 req
->cqe
.result
= cpu_to_le32(nr
);
6729 static uint16_t nvme_virt_set_state(NvmeCtrl
*n
, uint16_t cntlid
, bool online
)
6731 PCIDevice
*pci
= PCI_DEVICE(n
);
6732 NvmeCtrl
*sn
= NULL
;
6733 NvmeSecCtrlEntry
*sctrl
;
6736 sctrl
= nvme_sctrl_for_cntlid(n
, cntlid
);
6738 return NVME_INVALID_CTRL_ID
| NVME_DNR
;
6741 if (!pci_is_vf(pci
)) {
6742 vf_index
= le16_to_cpu(sctrl
->vfn
) - 1;
6743 sn
= NVME(pcie_sriov_get_vf_at_index(pci
, vf_index
));
6747 if (!sctrl
->nvi
|| (le16_to_cpu(sctrl
->nvq
) < 2) || !sn
) {
6748 return NVME_INVALID_SEC_CTRL_STATE
| NVME_DNR
;
6753 nvme_ctrl_reset(sn
, NVME_RESET_FUNCTION
);
6756 nvme_update_virt_res(n
, sctrl
, NVME_VIRT_RES_INTERRUPT
, 0);
6757 nvme_update_virt_res(n
, sctrl
, NVME_VIRT_RES_QUEUE
, 0);
6762 nvme_ctrl_reset(sn
, NVME_RESET_FUNCTION
);
6767 return NVME_SUCCESS
;
6770 static uint16_t nvme_virt_mngmt(NvmeCtrl
*n
, NvmeRequest
*req
)
6772 uint32_t dw10
= le32_to_cpu(req
->cmd
.cdw10
);
6773 uint32_t dw11
= le32_to_cpu(req
->cmd
.cdw11
);
6774 uint8_t act
= dw10
& 0xf;
6775 uint8_t rt
= (dw10
>> 8) & 0x7;
6776 uint16_t cntlid
= (dw10
>> 16) & 0xffff;
6777 int nr
= dw11
& 0xffff;
6779 trace_pci_nvme_virt_mngmt(nvme_cid(req
), act
, cntlid
, rt
? "VI" : "VQ", nr
);
6781 if (rt
!= NVME_VIRT_RES_QUEUE
&& rt
!= NVME_VIRT_RES_INTERRUPT
) {
6782 return NVME_INVALID_RESOURCE_ID
| NVME_DNR
;
6786 case NVME_VIRT_MNGMT_ACTION_SEC_ASSIGN
:
6787 return nvme_assign_virt_res_to_sec(n
, req
, cntlid
, rt
, nr
);
6788 case NVME_VIRT_MNGMT_ACTION_PRM_ALLOC
:
6789 return nvme_assign_virt_res_to_prim(n
, req
, cntlid
, rt
, nr
);
6790 case NVME_VIRT_MNGMT_ACTION_SEC_ONLINE
:
6791 return nvme_virt_set_state(n
, cntlid
, true);
6792 case NVME_VIRT_MNGMT_ACTION_SEC_OFFLINE
:
6793 return nvme_virt_set_state(n
, cntlid
, false);
6795 return NVME_INVALID_FIELD
| NVME_DNR
;
6799 static uint16_t nvme_dbbuf_config(NvmeCtrl
*n
, const NvmeRequest
*req
)
6801 PCIDevice
*pci
= PCI_DEVICE(n
);
6802 uint64_t dbs_addr
= le64_to_cpu(req
->cmd
.dptr
.prp1
);
6803 uint64_t eis_addr
= le64_to_cpu(req
->cmd
.dptr
.prp2
);
6806 /* Address should be page aligned */
6807 if (dbs_addr
& (n
->page_size
- 1) || eis_addr
& (n
->page_size
- 1)) {
6808 return NVME_INVALID_FIELD
| NVME_DNR
;
6811 /* Save shadow buffer base addr for use during queue creation */
6812 n
->dbbuf_dbs
= dbs_addr
;
6813 n
->dbbuf_eis
= eis_addr
;
6814 n
->dbbuf_enabled
= true;
6816 for (i
= 0; i
< n
->params
.max_ioqpairs
+ 1; i
++) {
6817 NvmeSQueue
*sq
= n
->sq
[i
];
6818 NvmeCQueue
*cq
= n
->cq
[i
];
6822 * CAP.DSTRD is 0, so offset of ith sq db_addr is (i<<3)
6823 * nvme_process_db() uses this hard-coded way to calculate
6824 * doorbell offsets. Be consistent with that here.
6826 sq
->db_addr
= dbs_addr
+ (i
<< 3);
6827 sq
->ei_addr
= eis_addr
+ (i
<< 3);
6828 pci_dma_write(pci
, sq
->db_addr
, &sq
->tail
, sizeof(sq
->tail
));
6830 if (n
->params
.ioeventfd
&& sq
->sqid
!= 0) {
6831 if (!nvme_init_sq_ioeventfd(sq
)) {
6832 sq
->ioeventfd_enabled
= true;
6838 /* CAP.DSTRD is 0, so offset of ith cq db_addr is (i<<3)+(1<<2) */
6839 cq
->db_addr
= dbs_addr
+ (i
<< 3) + (1 << 2);
6840 cq
->ei_addr
= eis_addr
+ (i
<< 3) + (1 << 2);
6841 pci_dma_write(pci
, cq
->db_addr
, &cq
->head
, sizeof(cq
->head
));
6843 if (n
->params
.ioeventfd
&& cq
->cqid
!= 0) {
6844 if (!nvme_init_cq_ioeventfd(cq
)) {
6845 cq
->ioeventfd_enabled
= true;
6851 trace_pci_nvme_dbbuf_config(dbs_addr
, eis_addr
);
6853 return NVME_SUCCESS
;
6856 static uint16_t nvme_directive_send(NvmeCtrl
*n
, NvmeRequest
*req
)
6858 return NVME_INVALID_FIELD
| NVME_DNR
;
6861 static uint16_t nvme_directive_receive(NvmeCtrl
*n
, NvmeRequest
*req
)
6864 uint32_t dw10
= le32_to_cpu(req
->cmd
.cdw10
);
6865 uint32_t dw11
= le32_to_cpu(req
->cmd
.cdw11
);
6866 uint32_t nsid
= le32_to_cpu(req
->cmd
.nsid
);
6867 uint8_t doper
, dtype
;
6868 uint32_t numd
, trans_len
;
6869 NvmeDirectiveIdentify id
= {
6870 .supported
= 1 << NVME_DIRECTIVE_IDENTIFY
,
6871 .enabled
= 1 << NVME_DIRECTIVE_IDENTIFY
,
6875 doper
= dw11
& 0xff;
6876 dtype
= (dw11
>> 8) & 0xff;
6878 trans_len
= MIN(sizeof(NvmeDirectiveIdentify
), numd
<< 2);
6880 if (nsid
== NVME_NSID_BROADCAST
|| dtype
!= NVME_DIRECTIVE_IDENTIFY
||
6881 doper
!= NVME_DIRECTIVE_RETURN_PARAMS
) {
6882 return NVME_INVALID_FIELD
| NVME_DNR
;
6885 ns
= nvme_ns(n
, nsid
);
6887 return NVME_INVALID_FIELD
| NVME_DNR
;
6891 case NVME_DIRECTIVE_IDENTIFY
:
6893 case NVME_DIRECTIVE_RETURN_PARAMS
:
6894 if (ns
->endgrp
->fdp
.enabled
) {
6895 id
.supported
|= 1 << NVME_DIRECTIVE_DATA_PLACEMENT
;
6896 id
.enabled
|= 1 << NVME_DIRECTIVE_DATA_PLACEMENT
;
6897 id
.persistent
|= 1 << NVME_DIRECTIVE_DATA_PLACEMENT
;
6900 return nvme_c2h(n
, (uint8_t *)&id
, trans_len
, req
);
6903 return NVME_INVALID_FIELD
| NVME_DNR
;
6907 return NVME_INVALID_FIELD
;
6911 static uint16_t nvme_admin_cmd(NvmeCtrl
*n
, NvmeRequest
*req
)
6913 trace_pci_nvme_admin_cmd(nvme_cid(req
), nvme_sqid(req
), req
->cmd
.opcode
,
6914 nvme_adm_opc_str(req
->cmd
.opcode
));
6916 if (!(nvme_cse_acs
[req
->cmd
.opcode
] & NVME_CMD_EFF_CSUPP
)) {
6917 trace_pci_nvme_err_invalid_admin_opc(req
->cmd
.opcode
);
6918 return NVME_INVALID_OPCODE
| NVME_DNR
;
6921 /* SGLs shall not be used for Admin commands in NVMe over PCIe */
6922 if (NVME_CMD_FLAGS_PSDT(req
->cmd
.flags
) != NVME_PSDT_PRP
) {
6923 return NVME_INVALID_FIELD
| NVME_DNR
;
6926 if (NVME_CMD_FLAGS_FUSE(req
->cmd
.flags
)) {
6927 return NVME_INVALID_FIELD
;
6930 switch (req
->cmd
.opcode
) {
6931 case NVME_ADM_CMD_DELETE_SQ
:
6932 return nvme_del_sq(n
, req
);
6933 case NVME_ADM_CMD_CREATE_SQ
:
6934 return nvme_create_sq(n
, req
);
6935 case NVME_ADM_CMD_GET_LOG_PAGE
:
6936 return nvme_get_log(n
, req
);
6937 case NVME_ADM_CMD_DELETE_CQ
:
6938 return nvme_del_cq(n
, req
);
6939 case NVME_ADM_CMD_CREATE_CQ
:
6940 return nvme_create_cq(n
, req
);
6941 case NVME_ADM_CMD_IDENTIFY
:
6942 return nvme_identify(n
, req
);
6943 case NVME_ADM_CMD_ABORT
:
6944 return nvme_abort(n
, req
);
6945 case NVME_ADM_CMD_SET_FEATURES
:
6946 return nvme_set_feature(n
, req
);
6947 case NVME_ADM_CMD_GET_FEATURES
:
6948 return nvme_get_feature(n
, req
);
6949 case NVME_ADM_CMD_ASYNC_EV_REQ
:
6950 return nvme_aer(n
, req
);
6951 case NVME_ADM_CMD_NS_ATTACHMENT
:
6952 return nvme_ns_attachment(n
, req
);
6953 case NVME_ADM_CMD_VIRT_MNGMT
:
6954 return nvme_virt_mngmt(n
, req
);
6955 case NVME_ADM_CMD_DBBUF_CONFIG
:
6956 return nvme_dbbuf_config(n
, req
);
6957 case NVME_ADM_CMD_FORMAT_NVM
:
6958 return nvme_format(n
, req
);
6959 case NVME_ADM_CMD_DIRECTIVE_SEND
:
6960 return nvme_directive_send(n
, req
);
6961 case NVME_ADM_CMD_DIRECTIVE_RECV
:
6962 return nvme_directive_receive(n
, req
);
6967 return NVME_INVALID_OPCODE
| NVME_DNR
;
6970 static void nvme_update_sq_eventidx(const NvmeSQueue
*sq
)
6972 uint32_t v
= cpu_to_le32(sq
->tail
);
6974 trace_pci_nvme_update_sq_eventidx(sq
->sqid
, sq
->tail
);
6976 pci_dma_write(PCI_DEVICE(sq
->ctrl
), sq
->ei_addr
, &v
, sizeof(v
));
6979 static void nvme_update_sq_tail(NvmeSQueue
*sq
)
6983 pci_dma_read(PCI_DEVICE(sq
->ctrl
), sq
->db_addr
, &v
, sizeof(v
));
6985 sq
->tail
= le32_to_cpu(v
);
6987 trace_pci_nvme_update_sq_tail(sq
->sqid
, sq
->tail
);
6990 static void nvme_process_sq(void *opaque
)
6992 NvmeSQueue
*sq
= opaque
;
6993 NvmeCtrl
*n
= sq
->ctrl
;
6994 NvmeCQueue
*cq
= n
->cq
[sq
->cqid
];
7001 if (n
->dbbuf_enabled
) {
7002 nvme_update_sq_tail(sq
);
7005 while (!(nvme_sq_empty(sq
) || QTAILQ_EMPTY(&sq
->req_list
))) {
7006 addr
= sq
->dma_addr
+ sq
->head
* n
->sqe_size
;
7007 if (nvme_addr_read(n
, addr
, (void *)&cmd
, sizeof(cmd
))) {
7008 trace_pci_nvme_err_addr_read(addr
);
7009 trace_pci_nvme_err_cfs();
7010 stl_le_p(&n
->bar
.csts
, NVME_CSTS_FAILED
);
7013 nvme_inc_sq_head(sq
);
7015 req
= QTAILQ_FIRST(&sq
->req_list
);
7016 QTAILQ_REMOVE(&sq
->req_list
, req
, entry
);
7017 QTAILQ_INSERT_TAIL(&sq
->out_req_list
, req
, entry
);
7018 nvme_req_clear(req
);
7019 req
->cqe
.cid
= cmd
.cid
;
7020 memcpy(&req
->cmd
, &cmd
, sizeof(NvmeCmd
));
7022 status
= sq
->sqid
? nvme_io_cmd(n
, req
) :
7023 nvme_admin_cmd(n
, req
);
7024 if (status
!= NVME_NO_COMPLETE
) {
7025 req
->status
= status
;
7026 nvme_enqueue_req_completion(cq
, req
);
7029 if (n
->dbbuf_enabled
) {
7030 nvme_update_sq_eventidx(sq
);
7031 nvme_update_sq_tail(sq
);
7036 static void nvme_update_msixcap_ts(PCIDevice
*pci_dev
, uint32_t table_size
)
7040 if (!msix_present(pci_dev
)) {
7044 assert(table_size
> 0 && table_size
<= pci_dev
->msix_entries_nr
);
7046 config
= pci_dev
->config
+ pci_dev
->msix_cap
;
7047 pci_set_word_by_mask(config
+ PCI_MSIX_FLAGS
, PCI_MSIX_FLAGS_QSIZE
,
7051 static void nvme_activate_virt_res(NvmeCtrl
*n
)
7053 PCIDevice
*pci_dev
= PCI_DEVICE(n
);
7054 NvmePriCtrlCap
*cap
= &n
->pri_ctrl_cap
;
7055 NvmeSecCtrlEntry
*sctrl
;
7057 /* -1 to account for the admin queue */
7058 if (pci_is_vf(pci_dev
)) {
7059 sctrl
= nvme_sctrl(n
);
7060 cap
->vqprt
= sctrl
->nvq
;
7061 cap
->viprt
= sctrl
->nvi
;
7062 n
->conf_ioqpairs
= sctrl
->nvq
? le16_to_cpu(sctrl
->nvq
) - 1 : 0;
7063 n
->conf_msix_qsize
= sctrl
->nvi
? le16_to_cpu(sctrl
->nvi
) : 1;
7065 cap
->vqrfap
= n
->next_pri_ctrl_cap
.vqrfap
;
7066 cap
->virfap
= n
->next_pri_ctrl_cap
.virfap
;
7067 n
->conf_ioqpairs
= le16_to_cpu(cap
->vqprt
) +
7068 le16_to_cpu(cap
->vqrfap
) - 1;
7069 n
->conf_msix_qsize
= le16_to_cpu(cap
->viprt
) +
7070 le16_to_cpu(cap
->virfap
);
7074 static void nvme_ctrl_reset(NvmeCtrl
*n
, NvmeResetType rst
)
7076 PCIDevice
*pci_dev
= PCI_DEVICE(n
);
7077 NvmeSecCtrlEntry
*sctrl
;
7081 for (i
= 1; i
<= NVME_MAX_NAMESPACES
; i
++) {
7090 for (i
= 0; i
< n
->params
.max_ioqpairs
+ 1; i
++) {
7091 if (n
->sq
[i
] != NULL
) {
7092 nvme_free_sq(n
->sq
[i
], n
);
7095 for (i
= 0; i
< n
->params
.max_ioqpairs
+ 1; i
++) {
7096 if (n
->cq
[i
] != NULL
) {
7097 nvme_free_cq(n
->cq
[i
], n
);
7101 while (!QTAILQ_EMPTY(&n
->aer_queue
)) {
7102 NvmeAsyncEvent
*event
= QTAILQ_FIRST(&n
->aer_queue
);
7103 QTAILQ_REMOVE(&n
->aer_queue
, event
, entry
);
7107 if (n
->params
.sriov_max_vfs
) {
7108 if (!pci_is_vf(pci_dev
)) {
7109 for (i
= 0; i
< n
->sec_ctrl_list
.numcntl
; i
++) {
7110 sctrl
= &n
->sec_ctrl_list
.sec
[i
];
7111 nvme_virt_set_state(n
, le16_to_cpu(sctrl
->scid
), false);
7114 if (rst
!= NVME_RESET_CONTROLLER
) {
7115 pcie_sriov_pf_disable_vfs(pci_dev
);
7119 if (rst
!= NVME_RESET_CONTROLLER
) {
7120 nvme_activate_virt_res(n
);
7126 n
->outstanding_aers
= 0;
7127 n
->qs_created
= false;
7129 nvme_update_msixcap_ts(pci_dev
, n
->conf_msix_qsize
);
7131 if (pci_is_vf(pci_dev
)) {
7132 sctrl
= nvme_sctrl(n
);
7134 stl_le_p(&n
->bar
.csts
, sctrl
->scs
? 0 : NVME_CSTS_FAILED
);
7136 stl_le_p(&n
->bar
.csts
, 0);
7139 stl_le_p(&n
->bar
.intms
, 0);
7140 stl_le_p(&n
->bar
.intmc
, 0);
7141 stl_le_p(&n
->bar
.cc
, 0);
7145 n
->dbbuf_enabled
= false;
7148 static void nvme_ctrl_shutdown(NvmeCtrl
*n
)
7154 memory_region_msync(&n
->pmr
.dev
->mr
, 0, n
->pmr
.dev
->size
);
7157 for (i
= 1; i
<= NVME_MAX_NAMESPACES
; i
++) {
7163 nvme_ns_shutdown(ns
);
7167 static void nvme_select_iocs(NvmeCtrl
*n
)
7172 for (i
= 1; i
<= NVME_MAX_NAMESPACES
; i
++) {
7178 nvme_select_iocs_ns(n
, ns
);
7182 static int nvme_start_ctrl(NvmeCtrl
*n
)
7184 uint64_t cap
= ldq_le_p(&n
->bar
.cap
);
7185 uint32_t cc
= ldl_le_p(&n
->bar
.cc
);
7186 uint32_t aqa
= ldl_le_p(&n
->bar
.aqa
);
7187 uint64_t asq
= ldq_le_p(&n
->bar
.asq
);
7188 uint64_t acq
= ldq_le_p(&n
->bar
.acq
);
7189 uint32_t page_bits
= NVME_CC_MPS(cc
) + 12;
7190 uint32_t page_size
= 1 << page_bits
;
7191 NvmeSecCtrlEntry
*sctrl
= nvme_sctrl(n
);
7193 if (pci_is_vf(PCI_DEVICE(n
)) && !sctrl
->scs
) {
7194 trace_pci_nvme_err_startfail_virt_state(le16_to_cpu(sctrl
->nvi
),
7195 le16_to_cpu(sctrl
->nvq
));
7198 if (unlikely(n
->cq
[0])) {
7199 trace_pci_nvme_err_startfail_cq();
7202 if (unlikely(n
->sq
[0])) {
7203 trace_pci_nvme_err_startfail_sq();
7206 if (unlikely(asq
& (page_size
- 1))) {
7207 trace_pci_nvme_err_startfail_asq_misaligned(asq
);
7210 if (unlikely(acq
& (page_size
- 1))) {
7211 trace_pci_nvme_err_startfail_acq_misaligned(acq
);
7214 if (unlikely(!(NVME_CAP_CSS(cap
) & (1 << NVME_CC_CSS(cc
))))) {
7215 trace_pci_nvme_err_startfail_css(NVME_CC_CSS(cc
));
7218 if (unlikely(NVME_CC_MPS(cc
) < NVME_CAP_MPSMIN(cap
))) {
7219 trace_pci_nvme_err_startfail_page_too_small(
7221 NVME_CAP_MPSMIN(cap
));
7224 if (unlikely(NVME_CC_MPS(cc
) >
7225 NVME_CAP_MPSMAX(cap
))) {
7226 trace_pci_nvme_err_startfail_page_too_large(
7228 NVME_CAP_MPSMAX(cap
));
7231 if (unlikely(NVME_CC_IOCQES(cc
) <
7232 NVME_CTRL_CQES_MIN(n
->id_ctrl
.cqes
))) {
7233 trace_pci_nvme_err_startfail_cqent_too_small(
7235 NVME_CTRL_CQES_MIN(cap
));
7238 if (unlikely(NVME_CC_IOCQES(cc
) >
7239 NVME_CTRL_CQES_MAX(n
->id_ctrl
.cqes
))) {
7240 trace_pci_nvme_err_startfail_cqent_too_large(
7242 NVME_CTRL_CQES_MAX(cap
));
7245 if (unlikely(NVME_CC_IOSQES(cc
) <
7246 NVME_CTRL_SQES_MIN(n
->id_ctrl
.sqes
))) {
7247 trace_pci_nvme_err_startfail_sqent_too_small(
7249 NVME_CTRL_SQES_MIN(cap
));
7252 if (unlikely(NVME_CC_IOSQES(cc
) >
7253 NVME_CTRL_SQES_MAX(n
->id_ctrl
.sqes
))) {
7254 trace_pci_nvme_err_startfail_sqent_too_large(
7256 NVME_CTRL_SQES_MAX(cap
));
7259 if (unlikely(!NVME_AQA_ASQS(aqa
))) {
7260 trace_pci_nvme_err_startfail_asqent_sz_zero();
7263 if (unlikely(!NVME_AQA_ACQS(aqa
))) {
7264 trace_pci_nvme_err_startfail_acqent_sz_zero();
7268 n
->page_bits
= page_bits
;
7269 n
->page_size
= page_size
;
7270 n
->max_prp_ents
= n
->page_size
/ sizeof(uint64_t);
7271 n
->cqe_size
= 1 << NVME_CC_IOCQES(cc
);
7272 n
->sqe_size
= 1 << NVME_CC_IOSQES(cc
);
7273 nvme_init_cq(&n
->admin_cq
, n
, acq
, 0, 0, NVME_AQA_ACQS(aqa
) + 1, 1);
7274 nvme_init_sq(&n
->admin_sq
, n
, asq
, 0, 0, NVME_AQA_ASQS(aqa
) + 1);
7276 nvme_set_timestamp(n
, 0ULL);
7278 nvme_select_iocs(n
);
7283 static void nvme_cmb_enable_regs(NvmeCtrl
*n
)
7285 uint32_t cmbloc
= ldl_le_p(&n
->bar
.cmbloc
);
7286 uint32_t cmbsz
= ldl_le_p(&n
->bar
.cmbsz
);
7288 NVME_CMBLOC_SET_CDPCILS(cmbloc
, 1);
7289 NVME_CMBLOC_SET_CDPMLS(cmbloc
, 1);
7290 NVME_CMBLOC_SET_BIR(cmbloc
, NVME_CMB_BIR
);
7291 stl_le_p(&n
->bar
.cmbloc
, cmbloc
);
7293 NVME_CMBSZ_SET_SQS(cmbsz
, 1);
7294 NVME_CMBSZ_SET_CQS(cmbsz
, 0);
7295 NVME_CMBSZ_SET_LISTS(cmbsz
, 1);
7296 NVME_CMBSZ_SET_RDS(cmbsz
, 1);
7297 NVME_CMBSZ_SET_WDS(cmbsz
, 1);
7298 NVME_CMBSZ_SET_SZU(cmbsz
, 2); /* MBs */
7299 NVME_CMBSZ_SET_SZ(cmbsz
, n
->params
.cmb_size_mb
);
7300 stl_le_p(&n
->bar
.cmbsz
, cmbsz
);
static void nvme_write_bar(NvmeCtrl *n, hwaddr offset, uint64_t data,
                           unsigned size)
{
    PCIDevice *pci = PCI_DEVICE(n);
    uint64_t cap = ldq_le_p(&n->bar.cap);
    uint32_t cc = ldl_le_p(&n->bar.cc);
    uint32_t intms = ldl_le_p(&n->bar.intms);
    uint32_t csts = ldl_le_p(&n->bar.csts);
    uint32_t pmrsts = ldl_le_p(&n->bar.pmrsts);

    if (unlikely(offset & (sizeof(uint32_t) - 1))) {
        NVME_GUEST_ERR(pci_nvme_ub_mmiowr_misaligned32,
                       "MMIO write not 32-bit aligned,"
                       " offset=0x%"PRIx64"", offset);
        /* should be ignored, fall through for now */
    }

    if (unlikely(size < sizeof(uint32_t))) {
        NVME_GUEST_ERR(pci_nvme_ub_mmiowr_toosmall,
                       "MMIO write smaller than 32-bits,"
                       " offset=0x%"PRIx64", size=%u",
                       offset, size);
        /* should be ignored, fall through for now */
    }

    switch (offset) {
    case NVME_REG_INTMS:
        if (unlikely(msix_enabled(pci))) {
            NVME_GUEST_ERR(pci_nvme_ub_mmiowr_intmask_with_msix,
                           "undefined access to interrupt mask set"
                           " when MSI-X is enabled");
            /* should be ignored, fall through for now */
        }
        intms |= data;
        stl_le_p(&n->bar.intms, intms);
        n->bar.intmc = n->bar.intms;
        trace_pci_nvme_mmio_intm_set(data & 0xffffffff, intms);
        nvme_irq_check(n);
        break;
    case NVME_REG_INTMC:
        if (unlikely(msix_enabled(pci))) {
            NVME_GUEST_ERR(pci_nvme_ub_mmiowr_intmask_with_msix,
                           "undefined access to interrupt mask clr"
                           " when MSI-X is enabled");
            /* should be ignored, fall through for now */
        }
        intms &= ~data;
        stl_le_p(&n->bar.intms, intms);
        n->bar.intmc = n->bar.intms;
        trace_pci_nvme_mmio_intm_clr(data & 0xffffffff, intms);
        nvme_irq_check(n);
        break;
    case NVME_REG_CC:
        stl_le_p(&n->bar.cc, data);

        trace_pci_nvme_mmio_cfg(data & 0xffffffff);

        if (NVME_CC_SHN(data) && !(NVME_CC_SHN(cc))) {
            trace_pci_nvme_mmio_shutdown_set();
            nvme_ctrl_shutdown(n);
            csts &= ~(CSTS_SHST_MASK << CSTS_SHST_SHIFT);
            csts |= NVME_CSTS_SHST_COMPLETE;
        } else if (!NVME_CC_SHN(data) && NVME_CC_SHN(cc)) {
            trace_pci_nvme_mmio_shutdown_cleared();
            csts &= ~(CSTS_SHST_MASK << CSTS_SHST_SHIFT);
        }

        if (NVME_CC_EN(data) && !NVME_CC_EN(cc)) {
            if (unlikely(nvme_start_ctrl(n))) {
                trace_pci_nvme_err_startfail();
                csts = NVME_CSTS_FAILED;
            } else {
                trace_pci_nvme_mmio_start_success();
                csts = NVME_CSTS_READY;
            }
        } else if (!NVME_CC_EN(data) && NVME_CC_EN(cc)) {
            trace_pci_nvme_mmio_stopped();
            nvme_ctrl_reset(n, NVME_RESET_CONTROLLER);

            break;
        }

        stl_le_p(&n->bar.csts, csts);

        break;
    case NVME_REG_CSTS:
        if (data & (1 << 4)) {
            NVME_GUEST_ERR(pci_nvme_ub_mmiowr_ssreset_w1c_unsupported,
                           "attempted to W1C CSTS.NSSRO"
                           " but CAP.NSSRS is zero (not supported)");
        } else if (data != 0) {
            NVME_GUEST_ERR(pci_nvme_ub_mmiowr_ro_csts,
                           "attempted to set a read only bit"
                           " of controller status");
        }
        break;
    case NVME_REG_NSSR:
        if (data == 0x4e564d65) {
            trace_pci_nvme_ub_mmiowr_ssreset_unsupported();
        } else {
            /* The spec says that writes of other values have no effect */
            return;
        }
        break;
    case NVME_REG_AQA:
        stl_le_p(&n->bar.aqa, data);
        trace_pci_nvme_mmio_aqattr(data & 0xffffffff);
        break;
    case NVME_REG_ASQ:
        stn_le_p(&n->bar.asq, size, data);
        trace_pci_nvme_mmio_asqaddr(data);
        break;
    case NVME_REG_ASQ + 4:
        stl_le_p((uint8_t *)&n->bar.asq + 4, data);
        trace_pci_nvme_mmio_asqaddr_hi(data, ldq_le_p(&n->bar.asq));
        break;
    case NVME_REG_ACQ:
        trace_pci_nvme_mmio_acqaddr(data);
        stn_le_p(&n->bar.acq, size, data);
        break;
    case NVME_REG_ACQ + 4:
        stl_le_p((uint8_t *)&n->bar.acq + 4, data);
        trace_pci_nvme_mmio_acqaddr_hi(data, ldq_le_p(&n->bar.acq));
        break;
    case NVME_REG_CMBLOC:
        NVME_GUEST_ERR(pci_nvme_ub_mmiowr_cmbloc_reserved,
                       "invalid write to reserved CMBLOC"
                       " when CMBSZ is zero, ignored");
        return;
    case NVME_REG_CMBSZ:
        NVME_GUEST_ERR(pci_nvme_ub_mmiowr_cmbsz_readonly,
                       "invalid write to read only CMBSZ, ignored");
        return;
    case NVME_REG_CMBMSC:
        if (!NVME_CAP_CMBS(cap)) {
            return;
        }

        stn_le_p(&n->bar.cmbmsc, size, data);
        n->cmb.cmse = false;

        if (NVME_CMBMSC_CRE(data)) {
            nvme_cmb_enable_regs(n);

            if (NVME_CMBMSC_CMSE(data)) {
                uint64_t cmbmsc = ldq_le_p(&n->bar.cmbmsc);
                hwaddr cba = NVME_CMBMSC_CBA(cmbmsc) << CMBMSC_CBA_SHIFT;
                if (cba + int128_get64(n->cmb.mem.size) < cba) {
                    uint32_t cmbsts = ldl_le_p(&n->bar.cmbsts);
                    NVME_CMBSTS_SET_CBAI(cmbsts, 1);
                    stl_le_p(&n->bar.cmbsts, cmbsts);
                    return;
                }

                n->cmb.cba = cba;
                n->cmb.cmse = true;
            }
        } else {
            n->bar.cmbsz = 0;
            n->bar.cmbloc = 0;
        }

        return;
    case NVME_REG_CMBMSC + 4:
        stl_le_p((uint8_t *)&n->bar.cmbmsc + 4, data);
        return;

    case NVME_REG_PMRCAP:
        NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrcap_readonly,
                       "invalid write to PMRCAP register, ignored");
        return;
    case NVME_REG_PMRCTL:
        if (!NVME_CAP_PMRS(cap)) {
            return;
        }

        stl_le_p(&n->bar.pmrctl, data);
        if (NVME_PMRCTL_EN(data)) {
            memory_region_set_enabled(&n->pmr.dev->mr, true);
            pmrsts = 0;
        } else {
            memory_region_set_enabled(&n->pmr.dev->mr, false);
            NVME_PMRSTS_SET_NRDY(pmrsts, 1);
            n->pmr.cmse = false;
        }
        stl_le_p(&n->bar.pmrsts, pmrsts);
        return;
    case NVME_REG_PMRSTS:
        NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrsts_readonly,
                       "invalid write to PMRSTS register, ignored");
        return;
    case NVME_REG_PMREBS:
        NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrebs_readonly,
                       "invalid write to PMREBS register, ignored");
        return;
    case NVME_REG_PMRSWTP:
        NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrswtp_readonly,
                       "invalid write to PMRSWTP register, ignored");
        return;
    case NVME_REG_PMRMSCL:
        if (!NVME_CAP_PMRS(cap)) {
            return;
        }

        stl_le_p(&n->bar.pmrmscl, data);
        n->pmr.cmse = false;

        if (NVME_PMRMSCL_CMSE(data)) {
            uint64_t pmrmscu = ldl_le_p(&n->bar.pmrmscu);
            hwaddr cba = pmrmscu << 32 |
                (NVME_PMRMSCL_CBA(data) << PMRMSCL_CBA_SHIFT);
            if (cba + int128_get64(n->pmr.dev->mr.size) < cba) {
                NVME_PMRSTS_SET_CBAI(pmrsts, 1);
                stl_le_p(&n->bar.pmrsts, pmrsts);
                return;
            }

            n->pmr.cmse = true;
            n->pmr.cba = cba;
        }

        return;
    case NVME_REG_PMRMSCU:
        if (!NVME_CAP_PMRS(cap)) {
            return;
        }

        stl_le_p(&n->bar.pmrmscu, data);
        return;
    default:
        NVME_GUEST_ERR(pci_nvme_ub_mmiowr_invalid,
                       "invalid MMIO write,"
                       " offset=0x%"PRIx64", data=%"PRIx64"",
                       offset, data);
        break;
    }
}

static uint64_t nvme_mmio_read(void *opaque, hwaddr addr, unsigned size)
{
    NvmeCtrl *n = (NvmeCtrl *)opaque;
    uint8_t *ptr = (uint8_t *)&n->bar;

    trace_pci_nvme_mmio_read(addr, size);

    if (unlikely(addr & (sizeof(uint32_t) - 1))) {
        NVME_GUEST_ERR(pci_nvme_ub_mmiord_misaligned32,
                       "MMIO read not 32-bit aligned,"
                       " offset=0x%"PRIx64"", addr);
        /* should RAZ, fall through for now */
    } else if (unlikely(size < sizeof(uint32_t))) {
        NVME_GUEST_ERR(pci_nvme_ub_mmiord_toosmall,
                       "MMIO read smaller than 32-bits,"
                       " offset=0x%"PRIx64"", addr);
        /* should RAZ, fall through for now */
    }

    if (addr > sizeof(n->bar) - size) {
        NVME_GUEST_ERR(pci_nvme_ub_mmiord_invalid_ofs,
                       "MMIO read beyond last register,"
                       " offset=0x%"PRIx64", returning 0", addr);

        return 0;
    }

    if (pci_is_vf(PCI_DEVICE(n)) && !nvme_sctrl(n)->scs &&
        addr != NVME_REG_CSTS) {
        trace_pci_nvme_err_ignored_mmio_vf_offline(addr, size);
        return 0;
    }

    /*
     * When PMRWBM bit 1 is set then a read from PMRSTS should ensure that
     * prior writes made it to persistent media.
     */
    if (addr == NVME_REG_PMRSTS &&
        (NVME_PMRCAP_PMRWBM(ldl_le_p(&n->bar.pmrcap)) & 0x02)) {
        memory_region_msync(&n->pmr.dev->mr, 0, n->pmr.dev->size);
    }

    return ldn_le_p(ptr + addr, size);
}

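/*
 * Doorbell writes land at offsets >= 0x1000. Each queue pair occupies 8
 * bytes of doorbell space (4-byte stride): the submission queue tail
 * doorbell for queue y at 0x1000 + 8 * y and the completion queue head
 * doorbell at 0x1000 + 8 * y + 4, which is what the qid arithmetic below
 * decodes.
 */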
static void nvme_process_db(NvmeCtrl *n, hwaddr addr, int val)
{
    PCIDevice *pci = PCI_DEVICE(n);
    uint32_t qid;

    if (unlikely(addr & ((1 << 2) - 1))) {
        NVME_GUEST_ERR(pci_nvme_ub_db_wr_misaligned,
                       "doorbell write not 32-bit aligned,"
                       " offset=0x%"PRIx64", ignoring", addr);
        return;
    }

    if (((addr - 0x1000) >> 2) & 1) {
        /* Completion queue doorbell write */

        uint16_t new_head = val & 0xffff;
        int start_sqs;
        NvmeCQueue *cq;

        qid = (addr - (0x1000 + (1 << 2))) >> 3;
        if (unlikely(nvme_check_cqid(n, qid))) {
            NVME_GUEST_ERR(pci_nvme_ub_db_wr_invalid_cq,
                           "completion queue doorbell write"
                           " for nonexistent queue,"
                           " sqid=%"PRIu32", ignoring", qid);

            /*
             * NVM Express v1.3d, Section 4.1 states: "If host software writes
             * an invalid value to the Submission Queue Tail Doorbell or
             * Completion Queue Head Doorbell register and an Asynchronous
             * Event Request command is outstanding, then an asynchronous
             * event is posted to the Admin Completion Queue with a status
             * code of Invalid Doorbell Write Value."
             *
             * Also note that the spec includes the "Invalid Doorbell Register"
             * status code, but nowhere does it specify when to use it.
             * However, it seems reasonable to use it here in a similar
             * fashion.
             */
            if (n->outstanding_aers) {
                nvme_enqueue_event(n, NVME_AER_TYPE_ERROR,
                                   NVME_AER_INFO_ERR_INVALID_DB_REGISTER,
                                   NVME_LOG_ERROR_INFO);
            }

            return;
        }

        cq = n->cq[qid];
        if (unlikely(new_head >= cq->size)) {
            NVME_GUEST_ERR(pci_nvme_ub_db_wr_invalid_cqhead,
                           "completion queue doorbell write value"
                           " beyond queue size, sqid=%"PRIu32","
                           " new_head=%"PRIu16", ignoring",
                           qid, new_head);

            if (n->outstanding_aers) {
                nvme_enqueue_event(n, NVME_AER_TYPE_ERROR,
                                   NVME_AER_INFO_ERR_INVALID_DB_VALUE,
                                   NVME_LOG_ERROR_INFO);
            }

            return;
        }

        trace_pci_nvme_mmio_doorbell_cq(cq->cqid, new_head);

        start_sqs = nvme_cq_full(cq) ? 1 : 0;
        cq->head = new_head;
        if (!qid && n->dbbuf_enabled) {
            pci_dma_write(pci, cq->db_addr, &cq->head, sizeof(cq->head));
        }
        if (start_sqs) {
            NvmeSQueue *sq;
            QTAILQ_FOREACH(sq, &cq->sq_list, entry) {
                qemu_bh_schedule(sq->bh);
            }
            qemu_bh_schedule(cq->bh);
        }

        if (cq->tail == cq->head) {
            if (cq->irq_enabled) {
                n->cq_pending--;
            }

            nvme_irq_deassert(n, cq);
        }
    } else {
        /* Submission queue doorbell write */

        uint16_t new_tail = val & 0xffff;
        NvmeSQueue *sq;

        qid = (addr - 0x1000) >> 3;
        if (unlikely(nvme_check_sqid(n, qid))) {
            NVME_GUEST_ERR(pci_nvme_ub_db_wr_invalid_sq,
                           "submission queue doorbell write"
                           " for nonexistent queue,"
                           " sqid=%"PRIu32", ignoring", qid);

            if (n->outstanding_aers) {
                nvme_enqueue_event(n, NVME_AER_TYPE_ERROR,
                                   NVME_AER_INFO_ERR_INVALID_DB_REGISTER,
                                   NVME_LOG_ERROR_INFO);
            }

            return;
        }

        sq = n->sq[qid];
        if (unlikely(new_tail >= sq->size)) {
            NVME_GUEST_ERR(pci_nvme_ub_db_wr_invalid_sqtail,
                           "submission queue doorbell write value"
                           " beyond queue size, sqid=%"PRIu32","
                           " new_tail=%"PRIu16", ignoring",
                           qid, new_tail);

            if (n->outstanding_aers) {
                nvme_enqueue_event(n, NVME_AER_TYPE_ERROR,
                                   NVME_AER_INFO_ERR_INVALID_DB_VALUE,
                                   NVME_LOG_ERROR_INFO);
            }

            return;
        }

        trace_pci_nvme_mmio_doorbell_sq(sq->sqid, new_tail);

        sq->tail = new_tail;
        if (!qid && n->dbbuf_enabled) {
            /*
             * The spec states "the host shall also update the controller's
             * corresponding doorbell property to match the value of that entry
             * in the Shadow Doorbell buffer."
             *
             * Since this context is currently a VM trap, we can safely enforce
             * the requirement from the device side in case the host is
             * misbehaving.
             *
             * Note, we shouldn't have to do this, but various drivers
             * including ones that run on Linux, are not updating Admin Queues,
             * so we can't trust reading it for an appropriate sq tail.
             */
            pci_dma_write(pci, sq->db_addr, &sq->tail, sizeof(sq->tail));
        }

        qemu_bh_schedule(sq->bh);
    }
}

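/*
 * Dispatch MMIO writes: writes within the register page go to
 * nvme_write_bar(); anything past it is treated as a doorbell write.
 */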
static void nvme_mmio_write(void *opaque, hwaddr addr, uint64_t data,
                            unsigned size)
{
    NvmeCtrl *n = (NvmeCtrl *)opaque;

    trace_pci_nvme_mmio_write(addr, data, size);

    if (pci_is_vf(PCI_DEVICE(n)) && !nvme_sctrl(n)->scs &&
        addr != NVME_REG_CSTS) {
        trace_pci_nvme_err_ignored_mmio_vf_offline(addr, size);
        return;
    }

    if (addr < sizeof(n->bar)) {
        nvme_write_bar(n, addr, data, size);
    } else {
        nvme_process_db(n, addr, data);
    }
}

static const MemoryRegionOps nvme_mmio_ops = {
    .read = nvme_mmio_read,
    .write = nvme_mmio_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 2,
        .max_access_size = 8,
    },
};

static void nvme_cmb_write(void *opaque, hwaddr addr, uint64_t data,
                           unsigned size)
{
    NvmeCtrl *n = (NvmeCtrl *)opaque;
    stn_le_p(&n->cmb.buf[addr], size, data);
}

static uint64_t nvme_cmb_read(void *opaque, hwaddr addr, unsigned size)
{
    NvmeCtrl *n = (NvmeCtrl *)opaque;
    return ldn_le_p(&n->cmb.buf[addr], size);
}

static const MemoryRegionOps nvme_cmb_ops = {
    .read = nvme_cmb_read,
    .write = nvme_cmb_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};

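/*
 * Validate the device properties at realize time; returns false and sets
 * errp on the first violated constraint. The SR-IOV constraints only apply
 * when sriov_max_vfs is non-zero.
 */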
static bool nvme_check_params(NvmeCtrl *n, Error **errp)
{
    NvmeParams *params = &n->params;

    if (params->num_queues) {
        warn_report("num_queues is deprecated; please use max_ioqpairs "
                    "instead");

        params->max_ioqpairs = params->num_queues - 1;
    }

    if (n->namespace.blkconf.blk && n->subsys) {
        error_setg(errp, "subsystem support is unavailable with legacy "
                   "namespace ('drive' property)");
        return false;
    }

    if (params->max_ioqpairs < 1 ||
        params->max_ioqpairs > NVME_MAX_IOQPAIRS) {
        error_setg(errp, "max_ioqpairs must be between 1 and %d",
                   NVME_MAX_IOQPAIRS);
        return false;
    }

    if (params->msix_qsize < 1 ||
        params->msix_qsize > PCI_MSIX_FLAGS_QSIZE + 1) {
        error_setg(errp, "msix_qsize must be between 1 and %d",
                   PCI_MSIX_FLAGS_QSIZE + 1);
        return false;
    }

    if (!params->serial) {
        error_setg(errp, "serial property not set");
        return false;
    }

    if (n->pmr.dev) {
        if (host_memory_backend_is_mapped(n->pmr.dev)) {
            error_setg(errp, "can't use already busy memdev: %s",
                       object_get_canonical_path_component(OBJECT(n->pmr.dev)));
            return false;
        }

        if (!is_power_of_2(n->pmr.dev->size)) {
            error_setg(errp, "pmr backend size needs to be a power of 2");
            return false;
        }

        host_memory_backend_set_mapped(n->pmr.dev, true);
    }

    if (n->params.zasl > n->params.mdts) {
        error_setg(errp, "zoned.zasl (Zone Append Size Limit) must be less "
                   "than or equal to mdts (Maximum Data Transfer Size)");
        return false;
    }

    if (!n->params.vsl) {
        error_setg(errp, "vsl must be non-zero");
        return false;
    }

    if (params->sriov_max_vfs) {
        if (!n->subsys) {
            error_setg(errp, "subsystem is required for the use of SR-IOV");
            return false;
        }

        if (params->sriov_max_vfs > NVME_MAX_VFS) {
            error_setg(errp, "sriov_max_vfs must be between 0 and %d",
                       NVME_MAX_VFS);
            return false;
        }

        if (params->cmb_size_mb) {
            error_setg(errp, "CMB is not supported with SR-IOV");
            return false;
        }

        if (n->pmr.dev) {
            error_setg(errp, "PMR is not supported with SR-IOV");
            return false;
        }

        if (!params->sriov_vq_flexible || !params->sriov_vi_flexible) {
            error_setg(errp, "both sriov_vq_flexible and sriov_vi_flexible"
                       " must be set for the use of SR-IOV");
            return false;
        }

        if (params->sriov_vq_flexible < params->sriov_max_vfs * 2) {
            error_setg(errp, "sriov_vq_flexible must be greater than or equal"
                       " to %d (sriov_max_vfs * 2)", params->sriov_max_vfs * 2);
            return false;
        }

        if (params->max_ioqpairs < params->sriov_vq_flexible + 2) {
            error_setg(errp, "(max_ioqpairs - sriov_vq_flexible) must be"
                       " greater than or equal to 2");
            return false;
        }

        if (params->sriov_vi_flexible < params->sriov_max_vfs) {
            error_setg(errp, "sriov_vi_flexible must be greater than or equal"
                       " to %d (sriov_max_vfs)", params->sriov_max_vfs);
            return false;
        }

        if (params->msix_qsize < params->sriov_vi_flexible + 1) {
            error_setg(errp, "(msix_qsize - sriov_vi_flexible) must be"
                       " greater than or equal to 1");
            return false;
        }

        if (params->sriov_max_vi_per_vf &&
            (params->sriov_max_vi_per_vf - 1) % NVME_VF_RES_GRANULARITY) {
            error_setg(errp, "sriov_max_vi_per_vf must meet:"
                       " (sriov_max_vi_per_vf - 1) %% %d == 0 and"
                       " sriov_max_vi_per_vf >= 1", NVME_VF_RES_GRANULARITY);
            return false;
        }

        if (params->sriov_max_vq_per_vf &&
            (params->sriov_max_vq_per_vf < 2 ||
             (params->sriov_max_vq_per_vf - 1) % NVME_VF_RES_GRANULARITY)) {
            error_setg(errp, "sriov_max_vq_per_vf must meet:"
                       " (sriov_max_vq_per_vf - 1) %% %d == 0 and"
                       " sriov_max_vq_per_vf >= 2", NVME_VF_RES_GRANULARITY);
            return false;
        }
    }

    return true;
}

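/*
 * Initialize runtime state and the primary/secondary controller capability
 * structures. For a VF, the usable queue and interrupt counts come from its
 * secondary controller entry; for the PF they come from the device
 * parameters, with the flexible resources set aside for assignment to VFs.
 */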
static void nvme_init_state(NvmeCtrl *n)
{
    NvmePriCtrlCap *cap = &n->pri_ctrl_cap;
    NvmeSecCtrlList *list = &n->sec_ctrl_list;
    NvmeSecCtrlEntry *sctrl;
    PCIDevice *pci = PCI_DEVICE(n);
    uint8_t max_vfs;
    int i;

    if (pci_is_vf(pci)) {
        sctrl = nvme_sctrl(n);
        max_vfs = 0;
        n->conf_ioqpairs = sctrl->nvq ? le16_to_cpu(sctrl->nvq) - 1 : 0;
        n->conf_msix_qsize = sctrl->nvi ? le16_to_cpu(sctrl->nvi) : 1;
    } else {
        max_vfs = n->params.sriov_max_vfs;
        n->conf_ioqpairs = n->params.max_ioqpairs;
        n->conf_msix_qsize = n->params.msix_qsize;
    }

    n->sq = g_new0(NvmeSQueue *, n->params.max_ioqpairs + 1);
    n->cq = g_new0(NvmeCQueue *, n->params.max_ioqpairs + 1);
    n->temperature = NVME_TEMPERATURE;
    n->features.temp_thresh_hi = NVME_TEMPERATURE_WARNING;
    n->starttime_ms = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
    n->aer_reqs = g_new0(NvmeRequest *, n->params.aerl + 1);
    QTAILQ_INIT(&n->aer_queue);

    list->numcntl = cpu_to_le16(max_vfs);
    for (i = 0; i < max_vfs; i++) {
        sctrl = &list->sec[i];
        sctrl->pcid = cpu_to_le16(n->cntlid);
        sctrl->vfn = cpu_to_le16(i + 1);
    }

    cap->cntlid = cpu_to_le16(n->cntlid);
    cap->crt = NVME_CRT_VQ | NVME_CRT_VI;

    if (pci_is_vf(pci)) {
        cap->vqprt = cpu_to_le16(1 + n->conf_ioqpairs);
    } else {
        cap->vqprt = cpu_to_le16(1 + n->params.max_ioqpairs -
                                 n->params.sriov_vq_flexible);
        cap->vqfrt = cpu_to_le32(n->params.sriov_vq_flexible);
        cap->vqrfap = cap->vqfrt;
        cap->vqgran = cpu_to_le16(NVME_VF_RES_GRANULARITY);
        cap->vqfrsm = n->params.sriov_max_vq_per_vf ?
                        cpu_to_le16(n->params.sriov_max_vq_per_vf) :
                        cap->vqfrt / MAX(max_vfs, 1);
    }

    if (pci_is_vf(pci)) {
        cap->viprt = cpu_to_le16(n->conf_msix_qsize);
    } else {
        cap->viprt = cpu_to_le16(n->params.msix_qsize -
                                 n->params.sriov_vi_flexible);
        cap->vifrt = cpu_to_le32(n->params.sriov_vi_flexible);
        cap->virfap = cap->vifrt;
        cap->vigran = cpu_to_le16(NVME_VF_RES_GRANULARITY);
        cap->vifrsm = n->params.sriov_max_vi_per_vf ?
                        cpu_to_le16(n->params.sriov_max_vi_per_vf) :
                        cap->vifrt / MAX(max_vfs, 1);
    }
}

static void nvme_init_cmb(NvmeCtrl *n, PCIDevice *pci_dev)
{
    uint64_t cmb_size = n->params.cmb_size_mb * MiB;
    uint64_t cap = ldq_le_p(&n->bar.cap);

    n->cmb.buf = g_malloc0(cmb_size);
    memory_region_init_io(&n->cmb.mem, OBJECT(n), &nvme_cmb_ops, n,
                          "nvme-cmb", cmb_size);
    pci_register_bar(pci_dev, NVME_CMB_BIR,
                     PCI_BASE_ADDRESS_SPACE_MEMORY |
                     PCI_BASE_ADDRESS_MEM_TYPE_64 |
                     PCI_BASE_ADDRESS_MEM_PREFETCH, &n->cmb.mem);

    NVME_CAP_SET_CMBS(cap, 1);
    stq_le_p(&n->bar.cap, cap);

    if (n->params.legacy_cmb) {
        nvme_cmb_enable_regs(n);
        n->cmb.cmse = true;
    }
}

static void nvme_init_pmr(NvmeCtrl *n, PCIDevice *pci_dev)
{
    uint32_t pmrcap = ldl_le_p(&n->bar.pmrcap);

    NVME_PMRCAP_SET_RDS(pmrcap, 1);
    NVME_PMRCAP_SET_WDS(pmrcap, 1);
    NVME_PMRCAP_SET_BIR(pmrcap, NVME_PMR_BIR);
    /* Turn on bit 1 support */
    NVME_PMRCAP_SET_PMRWBM(pmrcap, 0x02);
    NVME_PMRCAP_SET_CMSS(pmrcap, 1);
    stl_le_p(&n->bar.pmrcap, pmrcap);

    pci_register_bar(pci_dev, NVME_PMR_BIR,
                     PCI_BASE_ADDRESS_SPACE_MEMORY |
                     PCI_BASE_ADDRESS_MEM_TYPE_64 |
                     PCI_BASE_ADDRESS_MEM_PREFETCH, &n->pmr.dev->mr);

    memory_region_set_enabled(&n->pmr.dev->mr, false);
}

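/*
 * Compute the BAR0 size: the register page plus two doorbells per queue,
 * followed by the MSI-X table and PBA, each aligned to 4 KiB, with the
 * total rounded up to a power of two.
 */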
static uint64_t nvme_bar_size(unsigned total_queues, unsigned total_irqs,
                              unsigned *msix_table_offset,
                              unsigned *msix_pba_offset)
{
    uint64_t bar_size, msix_table_size, msix_pba_size;

    bar_size = sizeof(NvmeBar) + 2 * total_queues * NVME_DB_SIZE;
    bar_size = QEMU_ALIGN_UP(bar_size, 4 * KiB);

    if (msix_table_offset) {
        *msix_table_offset = bar_size;
    }

    msix_table_size = PCI_MSIX_ENTRY_SIZE * total_irqs;
    bar_size += msix_table_size;
    bar_size = QEMU_ALIGN_UP(bar_size, 4 * KiB);

    if (msix_pba_offset) {
        *msix_pba_offset = bar_size;
    }

    msix_pba_size = QEMU_ALIGN_UP(total_irqs, 64) / 8;
    bar_size += msix_pba_size;

    bar_size = pow2ceil(bar_size);

    return bar_size;
}

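/*
 * The VF BAR is sized for the per-VF resource maxima (vqfrsm and vifrsm),
 * so the largest permitted queue/interrupt assignment fits.
 */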
static void nvme_init_sriov(NvmeCtrl *n, PCIDevice *pci_dev, uint16_t offset)
{
    uint16_t vf_dev_id = n->params.use_intel_id ?
                         PCI_DEVICE_ID_INTEL_NVME : PCI_DEVICE_ID_REDHAT_NVME;
    NvmePriCtrlCap *cap = &n->pri_ctrl_cap;
    uint64_t bar_size = nvme_bar_size(le16_to_cpu(cap->vqfrsm),
                                      le16_to_cpu(cap->vifrsm),
                                      NULL, NULL);

    pcie_sriov_pf_init(pci_dev, offset, "nvme", vf_dev_id,
                       n->params.sriov_max_vfs, n->params.sriov_max_vfs,
                       NVME_VF_OFFSET, NVME_VF_STRIDE);

    pcie_sriov_pf_init_vf_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY |
                              PCI_BASE_ADDRESS_MEM_TYPE_64, bar_size);
}

static int nvme_add_pm_capability(PCIDevice *pci_dev, uint8_t offset)
{
    Error *err = NULL;
    int ret;

    ret = pci_add_capability(pci_dev, PCI_CAP_ID_PM, offset,
                             PCI_PM_SIZEOF, &err);
    if (err) {
        error_report_err(err);
        return ret;
    }

    pci_set_word(pci_dev->config + offset + PCI_PM_PMC,
                 PCI_PM_CAP_VER_1_2);
    pci_set_word(pci_dev->config + offset + PCI_PM_CTRL,
                 PCI_PM_CTRL_NO_SOFT_RESET);
    pci_set_word(pci_dev->wmask + offset + PCI_PM_CTRL,
                 PCI_PM_CTRL_STATE_MASK);

    return 0;
}

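/*
 * PCI-level initialization: config space identifiers, power management and
 * PCIe capabilities, BAR0 (registers + MSI-X), and, when configured, the
 * CMB, PMR and SR-IOV capability.
 */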
static bool nvme_init_pci(NvmeCtrl *n, PCIDevice *pci_dev, Error **errp)
{
    uint8_t *pci_conf = pci_dev->config;
    uint64_t bar_size;
    unsigned msix_table_offset, msix_pba_offset;
    int ret;

    pci_conf[PCI_INTERRUPT_PIN] = 1;
    pci_config_set_prog_interface(pci_conf, 0x2);

    if (n->params.use_intel_id) {
        pci_config_set_vendor_id(pci_conf, PCI_VENDOR_ID_INTEL);
        pci_config_set_device_id(pci_conf, PCI_DEVICE_ID_INTEL_NVME);
    } else {
        pci_config_set_vendor_id(pci_conf, PCI_VENDOR_ID_REDHAT);
        pci_config_set_device_id(pci_conf, PCI_DEVICE_ID_REDHAT_NVME);
    }

    pci_config_set_class(pci_conf, PCI_CLASS_STORAGE_EXPRESS);
    nvme_add_pm_capability(pci_dev, 0x60);
    pcie_endpoint_cap_init(pci_dev, 0x80);
    pcie_cap_flr_init(pci_dev);
    if (n->params.sriov_max_vfs) {
        pcie_ari_init(pci_dev, 0x100, 1);
    }

    /* add one to max_ioqpairs to account for the admin queue pair */
    bar_size = nvme_bar_size(n->params.max_ioqpairs + 1, n->params.msix_qsize,
                             &msix_table_offset, &msix_pba_offset);

    memory_region_init(&n->bar0, OBJECT(n), "nvme-bar0", bar_size);
    memory_region_init_io(&n->iomem, OBJECT(n), &nvme_mmio_ops, n, "nvme",
                          msix_table_offset);
    memory_region_add_subregion(&n->bar0, 0, &n->iomem);

    if (pci_is_vf(pci_dev)) {
        pcie_sriov_vf_register_bar(pci_dev, 0, &n->bar0);
    } else {
        pci_register_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY |
                         PCI_BASE_ADDRESS_MEM_TYPE_64, &n->bar0);
    }

    ret = msix_init(pci_dev, n->params.msix_qsize,
                    &n->bar0, 0, msix_table_offset,
                    &n->bar0, 0, msix_pba_offset, 0, errp);
    if (ret == -ENOTSUP) {
        /* report that msix is not supported, but do not error out */
        warn_report_err(*errp);
        *errp = NULL;
    } else if (ret < 0) {
        /* propagate error to caller */
        return false;
    }

    nvme_update_msixcap_ts(pci_dev, n->conf_msix_qsize);

    if (n->params.cmb_size_mb) {
        nvme_init_cmb(n, pci_dev);
    }

    if (n->pmr.dev) {
        nvme_init_pmr(n, pci_dev);
    }

    if (!pci_is_vf(pci_dev) && n->params.sriov_max_vfs) {
        nvme_init_sriov(n, pci_dev, 0x120);
    }

    return true;
}

static void nvme_init_subnqn(NvmeCtrl *n)
{
    NvmeSubsystem *subsys = n->subsys;
    NvmeIdCtrl *id = &n->id_ctrl;

    if (!subsys) {
        snprintf((char *)id->subnqn, sizeof(id->subnqn),
                 "nqn.2019-08.org.qemu:%s", n->params.serial);
    } else {
        pstrcpy((char *)id->subnqn, sizeof(id->subnqn), (char*)subsys->subnqn);
    }
}

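/*
 * Fill in the Identify Controller data structure and the CAP/VS registers.
 * Values not derived from device parameters are fixed defaults chosen by
 * the emulation (e.g. MQES, the timeout and the single power state
 * descriptor).
 */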
static void nvme_init_ctrl(NvmeCtrl *n, PCIDevice *pci_dev)
{
    NvmeIdCtrl *id = &n->id_ctrl;
    uint8_t *pci_conf = pci_dev->config;
    uint64_t cap = ldq_le_p(&n->bar.cap);
    NvmeSecCtrlEntry *sctrl = nvme_sctrl(n);
    uint32_t ctratt;

    id->vid = cpu_to_le16(pci_get_word(pci_conf + PCI_VENDOR_ID));
    id->ssvid = cpu_to_le16(pci_get_word(pci_conf + PCI_SUBSYSTEM_VENDOR_ID));
    strpadcpy((char *)id->mn, sizeof(id->mn), "QEMU NVMe Ctrl", ' ');
    strpadcpy((char *)id->fr, sizeof(id->fr), QEMU_VERSION, ' ');
    strpadcpy((char *)id->sn, sizeof(id->sn), n->params.serial, ' ');

    id->cntlid = cpu_to_le16(n->cntlid);

    id->oaes = cpu_to_le32(NVME_OAES_NS_ATTR);
    ctratt = NVME_CTRATT_ELBAS;

    if (n->params.use_intel_id) {
        id->ieee[0] = 0xb3;
        id->ieee[1] = 0x02;
        id->ieee[2] = 0x00;
    } else {
        id->ieee[0] = 0x00;
        id->ieee[1] = 0x54;
        id->ieee[2] = 0x52;
    }

    id->mdts = n->params.mdts;
    id->ver = cpu_to_le32(NVME_SPEC_VER);
    id->oacs =
        cpu_to_le16(NVME_OACS_NS_MGMT | NVME_OACS_FORMAT | NVME_OACS_DBBUF |
                    NVME_OACS_DIRECTIVES);
    id->cntrltype = 0x1;

    /*
     * Because the controller always completes the Abort command immediately,
     * there can never be more than one concurrently executing Abort command,
     * so this value is never used for anything. Note that there can easily be
     * many Abort commands in the queues, but they are not considered
     * "executing" until processed by nvme_abort.
     *
     * The specification recommends a value of 3 for Abort Command Limit (four
     * concurrently outstanding Abort commands), so let's use that though it is
     * inconsequential.
     */
    id->acl = 3;
    id->aerl = n->params.aerl;
    id->frmw = (NVME_NUM_FW_SLOTS << 1) | NVME_FRMW_SLOT1_RO;
    id->lpa = NVME_LPA_NS_SMART | NVME_LPA_CSE | NVME_LPA_EXTENDED;

    /* recommended default value (~70 C) */
    id->wctemp = cpu_to_le16(NVME_TEMPERATURE_WARNING);
    id->cctemp = cpu_to_le16(NVME_TEMPERATURE_CRITICAL);

    id->sqes = (0x6 << 4) | 0x6;
    id->cqes = (0x4 << 4) | 0x4;
    id->nn = cpu_to_le32(NVME_MAX_NAMESPACES);
    id->oncs = cpu_to_le16(NVME_ONCS_WRITE_ZEROES | NVME_ONCS_TIMESTAMP |
                           NVME_ONCS_FEATURES | NVME_ONCS_DSM |
                           NVME_ONCS_COMPARE | NVME_ONCS_COPY);

    /*
     * NOTE: If this device ever supports a command set that does NOT use 0x0
     * as a Flush-equivalent operation, support for the broadcast NSID in Flush
     * should probably be removed.
     *
     * See comment in nvme_io_cmd.
     */
    id->vwc = NVME_VWC_NSID_BROADCAST_SUPPORT | NVME_VWC_PRESENT;

    id->ocfs = cpu_to_le16(NVME_OCFS_COPY_FORMAT_0 | NVME_OCFS_COPY_FORMAT_1);
    id->sgls = cpu_to_le32(NVME_CTRL_SGLS_SUPPORT_NO_ALIGN);

    nvme_init_subnqn(n);

    id->psd[0].mp = cpu_to_le16(0x9c4);
    id->psd[0].enlat = cpu_to_le32(0x10);
    id->psd[0].exlat = cpu_to_le32(0x4);

    if (n->subsys) {
        id->cmic |= NVME_CMIC_MULTI_CTRL;
        ctratt |= NVME_CTRATT_ENDGRPS;

        id->endgidmax = cpu_to_le16(0x1);

        if (n->subsys->endgrp.fdp.enabled) {
            ctratt |= NVME_CTRATT_FDPS;
        }
    }

    id->ctratt = cpu_to_le32(ctratt);

    NVME_CAP_SET_MQES(cap, 0x7ff);
    NVME_CAP_SET_CQR(cap, 1);
    NVME_CAP_SET_TO(cap, 0xf);
    NVME_CAP_SET_CSS(cap, NVME_CAP_CSS_NVM);
    NVME_CAP_SET_CSS(cap, NVME_CAP_CSS_CSI_SUPP);
    NVME_CAP_SET_CSS(cap, NVME_CAP_CSS_ADMIN_ONLY);
    NVME_CAP_SET_MPSMAX(cap, 4);
    NVME_CAP_SET_CMBS(cap, n->params.cmb_size_mb ? 1 : 0);
    NVME_CAP_SET_PMRS(cap, n->pmr.dev ? 1 : 0);
    stq_le_p(&n->bar.cap, cap);

    stl_le_p(&n->bar.vs, NVME_SPEC_VER);
    n->bar.intmc = n->bar.intms = 0;

    if (pci_is_vf(pci_dev) && !sctrl->scs) {
        stl_le_p(&n->bar.csts, NVME_CSTS_FAILED);
    }
}

static int nvme_init_subsys(NvmeCtrl *n, Error **errp)
{
    int cntlid;

    if (!n->subsys) {
        return 0;
    }

    cntlid = nvme_subsys_register_ctrl(n, errp);
    if (cntlid < 0) {
        return -1;
    }

    n->cntlid = cntlid;

    return 0;
}

void nvme_attach_ns(NvmeCtrl *n, NvmeNamespace *ns)
{
    uint32_t nsid = ns->params.nsid;
    assert(nsid && nsid <= NVME_MAX_NAMESPACES);

    n->namespaces[nsid] = ns;
    ns->attached++;

    n->dmrsl = MIN_NON_ZERO(n->dmrsl,
                            BDRV_REQUEST_MAX_BYTES / nvme_l2b(ns, 1));
}

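/*
 * qdev realize: VFs inherit parameters and the subsystem from their PF,
 * then parameters are validated and the controller state, PCI resources and
 * identify data are set up. A namespace is created inline only when the
 * legacy 'drive' property is used.
 */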
static void nvme_realize(PCIDevice *pci_dev, Error **errp)
{
    NvmeCtrl *n = NVME(pci_dev);
    DeviceState *dev = DEVICE(pci_dev);
    NvmeNamespace *ns;
    NvmeCtrl *pn = NVME(pcie_sriov_get_pf(pci_dev));

    if (pci_is_vf(pci_dev)) {
        /*
         * VFs derive settings from the parent. The PF's lifespan exceeds
         * that of its VFs, so it's safe to share params.serial.
         */
        memcpy(&n->params, &pn->params, sizeof(NvmeParams));
        n->subsys = pn->subsys;
    }

    if (!nvme_check_params(n, errp)) {
        return;
    }

    qbus_init(&n->bus, sizeof(NvmeBus), TYPE_NVME_BUS, dev, dev->id);

    if (nvme_init_subsys(n, errp)) {
        return;
    }
    nvme_init_state(n);
    if (!nvme_init_pci(n, pci_dev, errp)) {
        return;
    }
    nvme_init_ctrl(n, pci_dev);

    /* setup a namespace if the controller drive property was given */
    if (n->namespace.blkconf.blk) {
        ns = &n->namespace;
        ns->params.nsid = 1;

        if (nvme_ns_setup(ns, errp)) {
            return;
        }

        nvme_attach_ns(n, ns);
    }
}

static void nvme_exit(PCIDevice *pci_dev)
{
    NvmeCtrl *n = NVME(pci_dev);
    int i;

    nvme_ctrl_reset(n, NVME_RESET_FUNCTION);

    for (i = 1; i <= NVME_MAX_NAMESPACES; i++) {
        /* per-namespace teardown elided in this extract */
    }

    if (n->subsys) {
        nvme_subsys_unregister_ctrl(n->subsys, n);
    }

    g_free(n->cq);
    g_free(n->sq);
    g_free(n->aer_reqs);

    if (n->params.cmb_size_mb) {
        g_free(n->cmb.buf);
    }

    if (n->pmr.dev) {
        host_memory_backend_set_mapped(n->pmr.dev, false);
    }

    if (!pci_is_vf(pci_dev) && n->params.sriov_max_vfs) {
        pcie_sriov_pf_exit(pci_dev);
    }

    msix_uninit(pci_dev, &n->bar0, &n->bar0);
    memory_region_del_subregion(&n->bar0, &n->iomem);
}

static Property nvme_props[] = {
    DEFINE_BLOCK_PROPERTIES(NvmeCtrl, namespace.blkconf),
    DEFINE_PROP_LINK("pmrdev", NvmeCtrl, pmr.dev, TYPE_MEMORY_BACKEND,
                     HostMemoryBackend *),
    DEFINE_PROP_LINK("subsys", NvmeCtrl, subsys, TYPE_NVME_SUBSYS,
                     NvmeSubsystem *),
    DEFINE_PROP_STRING("serial", NvmeCtrl, params.serial),
    DEFINE_PROP_UINT32("cmb_size_mb", NvmeCtrl, params.cmb_size_mb, 0),
    DEFINE_PROP_UINT32("num_queues", NvmeCtrl, params.num_queues, 0),
    DEFINE_PROP_UINT32("max_ioqpairs", NvmeCtrl, params.max_ioqpairs, 64),
    DEFINE_PROP_UINT16("msix_qsize", NvmeCtrl, params.msix_qsize, 65),
    DEFINE_PROP_UINT8("aerl", NvmeCtrl, params.aerl, 3),
    DEFINE_PROP_UINT32("aer_max_queued", NvmeCtrl, params.aer_max_queued, 64),
    DEFINE_PROP_UINT8("mdts", NvmeCtrl, params.mdts, 7),
    DEFINE_PROP_UINT8("vsl", NvmeCtrl, params.vsl, 7),
    DEFINE_PROP_BOOL("use-intel-id", NvmeCtrl, params.use_intel_id, false),
    DEFINE_PROP_BOOL("legacy-cmb", NvmeCtrl, params.legacy_cmb, false),
    DEFINE_PROP_BOOL("ioeventfd", NvmeCtrl, params.ioeventfd, false),
    DEFINE_PROP_UINT8("zoned.zasl", NvmeCtrl, params.zasl, 0),
    DEFINE_PROP_BOOL("zoned.auto_transition", NvmeCtrl,
                     params.auto_transition_zones, true),
    DEFINE_PROP_UINT8("sriov_max_vfs", NvmeCtrl, params.sriov_max_vfs, 0),
    DEFINE_PROP_UINT16("sriov_vq_flexible", NvmeCtrl,
                       params.sriov_vq_flexible, 0),
    DEFINE_PROP_UINT16("sriov_vi_flexible", NvmeCtrl,
                       params.sriov_vi_flexible, 0),
    DEFINE_PROP_UINT8("sriov_max_vi_per_vf", NvmeCtrl,
                      params.sriov_max_vi_per_vf, 0),
    DEFINE_PROP_UINT8("sriov_max_vq_per_vf", NvmeCtrl,
                      params.sriov_max_vq_per_vf, 0),
    DEFINE_PROP_END_OF_LIST(),
};

static void nvme_get_smart_warning(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    NvmeCtrl *n = NVME(obj);
    uint8_t value = n->smart_critical_warning;

    visit_type_uint8(v, name, &value, errp);
}

static void nvme_set_smart_warning(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    NvmeCtrl *n = NVME(obj);
    uint8_t value, old_value, cap = 0, index, event;

    if (!visit_type_uint8(v, name, &value, errp)) {
        return;
    }

    cap = NVME_SMART_SPARE | NVME_SMART_TEMPERATURE | NVME_SMART_RELIABILITY
          | NVME_SMART_MEDIA_READ_ONLY | NVME_SMART_FAILED_VOLATILE_MEDIA;
    if (NVME_CAP_PMRS(ldq_le_p(&n->bar.cap))) {
        cap |= NVME_SMART_PMR_UNRELIABLE;
    }

    if ((value & cap) != value) {
        error_setg(errp, "unsupported smart critical warning bits: 0x%x",
                   value & ~cap);
        return;
    }

    old_value = n->smart_critical_warning;
    n->smart_critical_warning = value;

    /* only inject new bits of smart critical warning */
    for (index = 0; index < NVME_SMART_WARN_MAX; index++) {
        event = 1 << index;
        if (value & ~old_value & event)
            nvme_smart_event(n, event);
    }
}

static void nvme_pci_reset(DeviceState *qdev)
{
    PCIDevice *pci_dev = PCI_DEVICE(qdev);
    NvmeCtrl *n = NVME(pci_dev);

    trace_pci_nvme_pci_reset();
    nvme_ctrl_reset(n, NVME_RESET_FUNCTION);
}

static void nvme_sriov_pre_write_ctrl(PCIDevice *dev, uint32_t address,
                                      uint32_t val, int len)
{
    NvmeCtrl *n = NVME(dev);
    NvmeSecCtrlEntry *sctrl;
    uint16_t sriov_cap = dev->exp.sriov_cap;
    uint32_t off = address - sriov_cap;
    int i, num_vfs;

    if (!sriov_cap) {
        return;
    }

    if (range_covers_byte(off, len, PCI_SRIOV_CTRL)) {
        if (!(val & PCI_SRIOV_CTRL_VFE)) {
            num_vfs = pci_get_word(dev->config + sriov_cap + PCI_SRIOV_NUM_VF);
            for (i = 0; i < num_vfs; i++) {
                sctrl = &n->sec_ctrl_list.sec[i];
                nvme_virt_set_state(n, le16_to_cpu(sctrl->scid), false);
            }
        }
    }
}

static void nvme_pci_write_config(PCIDevice *dev, uint32_t address,
                                  uint32_t val, int len)
{
    nvme_sriov_pre_write_ctrl(dev, address, val, len);
    pci_default_write_config(dev, address, val, len);
    pcie_cap_flr_write_config(dev, address, val, len);
}

static const VMStateDescription nvme_vmstate = {
    .name = "nvme",
    .unmigratable = 1,
};

static void nvme_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);
    PCIDeviceClass *pc = PCI_DEVICE_CLASS(oc);

    pc->realize = nvme_realize;
    pc->config_write = nvme_pci_write_config;
    pc->exit = nvme_exit;
    pc->class_id = PCI_CLASS_STORAGE_EXPRESS;
    pc->revision = 2;

    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
    dc->desc = "Non-Volatile Memory Express";
    device_class_set_props(dc, nvme_props);
    dc->vmsd = &nvme_vmstate;
    dc->reset = nvme_pci_reset;
}

static void nvme_instance_init(Object *obj)
{
    NvmeCtrl *n = NVME(obj);

    device_add_bootindex_property(obj, &n->namespace.blkconf.bootindex,
                                  "bootindex", "/namespace@1,0",
                                  DEVICE(obj));

    object_property_add(obj, "smart_critical_warning", "uint8",
                        nvme_get_smart_warning,
                        nvme_set_smart_warning, NULL, NULL);
}

static const TypeInfo nvme_info = {
    .name = TYPE_NVME,
    .parent = TYPE_PCI_DEVICE,
    .instance_size = sizeof(NvmeCtrl),
    .instance_init = nvme_instance_init,
    .class_init = nvme_class_init,
    .interfaces = (InterfaceInfo[]) {
        { INTERFACE_PCIE_DEVICE },
        { }
    },
};

static const TypeInfo nvme_bus_info = {
    .name = TYPE_NVME_BUS,
    .parent = TYPE_BUS,
    .instance_size = sizeof(NvmeBus),
};

static void nvme_register_types(void)
{
    type_register_static(&nvme_info);
    type_register_static(&nvme_bus_info);
}

type_init(nvme_register_types)