/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "nvme_internal.h"

static struct nvme_request *_nvme_ns_cmd_rw(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
        const struct nvme_payload *payload, uint32_t payload_offset, uint32_t md_offset,
        uint64_t lba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn,
        void *cb_arg, uint32_t opc, uint32_t io_flags,
        uint16_t apptag_mask, uint16_t apptag, bool check_sgl);

static bool
spdk_nvme_ns_check_request_length(uint32_t lba_count, uint32_t sectors_per_max_io,
                                  uint32_t sectors_per_stripe, uint32_t qdepth)
{
    uint32_t child_per_io;

    if (sectors_per_stripe > 0) {
        child_per_io = (lba_count + sectors_per_stripe - 1) / sectors_per_stripe;
    } else {
        child_per_io = (lba_count + sectors_per_max_io - 1) / sectors_per_max_io;
    }

    SPDK_DEBUGLOG(SPDK_LOG_NVME, "checking maximum i/o length %d\n", child_per_io);

    return child_per_io >= qdepth;
}

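/*
 * Worked example (illustrative values): a 1024-block request with
 * sectors_per_max_io = 256 and no stripe boundary splits into
 * (1024 + 256 - 1) / 256 = 4 child requests, so the submitting queue pair
 * must have been created with more than 4 io_queue_requests for the
 * submission to be accepted.
 */
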
static void
nvme_cb_complete_child(void *child_arg, const struct spdk_nvme_cpl *cpl)
{
    struct nvme_request *child = child_arg;
    struct nvme_request *parent = child->parent;

    nvme_request_remove_child(parent, child);

    if (spdk_nvme_cpl_is_error(cpl)) {
        memcpy(&parent->parent_status, cpl, sizeof(*cpl));
    }

    if (parent->num_children == 0) {
        nvme_complete_request(parent, &parent->parent_status);
        nvme_free_request(parent);
    }
}

static void
nvme_request_add_child(struct nvme_request *parent, struct nvme_request *child)
{
    assert(parent->num_children != UINT16_MAX);

    if (parent->num_children == 0) {
        /*
         * Defer initialization of the children TAILQ since it falls
         * on a separate cacheline. This ensures we do not touch this
         * cacheline except on request splitting cases, which are
         * relatively rare.
         */
        TAILQ_INIT(&parent->children);
        parent->parent = NULL;
        memset(&parent->parent_status, 0, sizeof(struct spdk_nvme_cpl));
    }

    parent->num_children++;
    TAILQ_INSERT_TAIL(&parent->children, child, child_tailq);
    child->parent = parent;
    child->cb_fn = nvme_cb_complete_child;
    child->cb_arg = child;
}

void
nvme_request_remove_child(struct nvme_request *parent, struct nvme_request *child)
{
    assert(parent != NULL);
    assert(child != NULL);
    assert(child->parent == parent);
    assert(parent->num_children != 0);

    parent->num_children--;
    TAILQ_REMOVE(&parent->children, child, child_tailq);
}

void
nvme_request_free_children(struct nvme_request *req)
{
    struct nvme_request *child, *tmp;

    if (req->num_children == 0) {
        return;
    }

    /* free all child nvme_request */
    TAILQ_FOREACH_SAFE(child, &req->children, child_tailq, tmp) {
        nvme_request_remove_child(req, child);
        nvme_request_free_children(child);
        nvme_free_request(child);
    }
}

static struct nvme_request *
_nvme_add_child_request(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
                        const struct nvme_payload *payload,
                        uint32_t payload_offset, uint32_t md_offset,
                        uint64_t lba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t opc,
                        uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag,
                        struct nvme_request *parent, bool check_sgl)
{
    struct nvme_request *child;

    child = _nvme_ns_cmd_rw(ns, qpair, payload, payload_offset, md_offset, lba, lba_count, cb_fn,
                            cb_arg, opc, io_flags, apptag_mask, apptag, check_sgl);
    if (child == NULL) {
        nvme_request_free_children(parent);
        nvme_free_request(parent);
        return NULL;
    }

    nvme_request_add_child(parent, child);
    return child;
}

static struct nvme_request *
_nvme_ns_cmd_split_request(struct spdk_nvme_ns *ns,
                           struct spdk_nvme_qpair *qpair,
                           const struct nvme_payload *payload,
                           uint32_t payload_offset, uint32_t md_offset,
                           uint64_t lba, uint32_t lba_count,
                           spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t opc,
                           uint32_t io_flags, struct nvme_request *req,
                           uint32_t sectors_per_max_io, uint32_t sector_mask,
                           uint16_t apptag_mask, uint16_t apptag)
{
    uint32_t sector_size;
    uint32_t md_size = ns->md_size;
    uint32_t remaining_lba_count = lba_count;
    struct nvme_request *child;

    sector_size = ns->extended_lba_size;

    if ((io_flags & SPDK_NVME_IO_FLAGS_PRACT) &&
        (ns->flags & SPDK_NVME_NS_EXTENDED_LBA_SUPPORTED) &&
        (ns->flags & SPDK_NVME_NS_DPS_PI_SUPPORTED) &&
        (ns->md_size == 8)) {
        sector_size -= 8;
    }

    while (remaining_lba_count > 0) {
        lba_count = sectors_per_max_io - (lba & sector_mask);
        lba_count = spdk_min(remaining_lba_count, lba_count);

        child = _nvme_add_child_request(ns, qpair, payload, payload_offset, md_offset,
                                        lba, lba_count, cb_fn, cb_arg, opc,
                                        io_flags, apptag_mask, apptag, req, true);
        if (child == NULL) {
            return NULL;
        }

        remaining_lba_count -= lba_count;
        lba += lba_count;
        payload_offset += lba_count * sector_size;
        md_offset += lba_count * md_size;
    }

    return req;
}

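/*
 * Worked example (illustrative values): with sectors_per_max_io = 256 and
 * sector_mask = 255 (a 256-block stripe), a 100-block I/O starting at
 * LBA 200 produces a first child of 256 - (200 & 255) = 56 blocks that ends
 * exactly on the stripe boundary, followed by a second child of the
 * remaining 44 blocks.
 */
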
static void
_nvme_ns_cmd_setup_request(struct spdk_nvme_ns *ns, struct nvme_request *req,
                           uint32_t opc, uint64_t lba, uint32_t lba_count,
                           uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag)
{
    struct spdk_nvme_cmd *cmd;

    cmd = &req->cmd;
    cmd->opc = opc;
    cmd->nsid = ns->id;

    *(uint64_t *)&cmd->cdw10 = lba;

    if (ns->flags & SPDK_NVME_NS_DPS_PI_SUPPORTED) {
        switch (ns->pi_type) {
        case SPDK_NVME_FMT_NVM_PROTECTION_TYPE1:
        case SPDK_NVME_FMT_NVM_PROTECTION_TYPE2:
            cmd->cdw14 = (uint32_t)lba;
            break;
        }
    }

    cmd->cdw12 = lba_count - 1;
    cmd->cdw12 |= io_flags;

    cmd->cdw15 = apptag_mask;
    cmd->cdw15 = (cmd->cdw15 << 16 | apptag);
}

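/*
 * For reference, the encoding above follows the NVMe read/write command
 * layout: CDW10/CDW11 hold the 64-bit starting LBA, CDW12 bits 15:0 hold the
 * zero-based number of logical blocks with the io_flags (FUA, PRINFO, etc.)
 * OR'd into the upper bits, CDW14 carries the initial logical block
 * reference tag used by protection information types 1 and 2, and CDW15
 * packs the application tag mask into bits 31:16 and the application tag
 * into bits 15:0.
 */
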
static struct nvme_request *
_nvme_ns_cmd_split_request_prp(struct spdk_nvme_ns *ns,
                               struct spdk_nvme_qpair *qpair,
                               const struct nvme_payload *payload,
                               uint32_t payload_offset, uint32_t md_offset,
                               uint64_t lba, uint32_t lba_count,
                               spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t opc,
                               uint32_t io_flags, struct nvme_request *req,
                               uint16_t apptag_mask, uint16_t apptag)
{
    spdk_nvme_req_reset_sgl_cb reset_sgl_fn = req->payload.reset_sgl_fn;
    spdk_nvme_req_next_sge_cb next_sge_fn = req->payload.next_sge_fn;
    void *sgl_cb_arg = req->payload.contig_or_cb_arg;
    bool start_valid, end_valid, last_sge, child_equals_parent;
    uint64_t child_lba = lba;
    uint32_t req_current_length = 0;
    uint32_t child_length = 0;
    uint32_t sge_length;
    uint32_t page_size = qpair->ctrlr->page_size;
    uintptr_t address;

    reset_sgl_fn(sgl_cb_arg, payload_offset);
    next_sge_fn(sgl_cb_arg, (void **)&address, &sge_length);
    while (req_current_length < req->payload_size) {

        if (sge_length == 0) {
            continue;
        } else if (req_current_length + sge_length > req->payload_size) {
            sge_length = req->payload_size - req_current_length;
        }

        /*
         * The start of the SGE is invalid if the start address is not page aligned,
         * unless it is the first SGE in the child request.
         */
        start_valid = child_length == 0 || _is_page_aligned(address, page_size);

        /* Boolean for whether this is the last SGE in the parent request. */
        last_sge = (req_current_length + sge_length == req->payload_size);

        /*
         * The end of the SGE is invalid if the end address is not page aligned,
         * unless it is the last SGE in the parent request.
         */
        end_valid = last_sge || _is_page_aligned(address + sge_length, page_size);

        /*
         * This child request equals the parent request, meaning that no splitting
         * was required for the parent request (the one passed into this function).
         * In this case, we do not create a child request at all - we just send
         * the original request as a single request at the end of this function.
         */
        child_equals_parent = (child_length + sge_length == req->payload_size);

        if (start_valid) {
            /*
             * The start of the SGE is valid, so advance the length parameters,
             * to include this SGE with previous SGEs for this child request
             * (if any). If it is not valid, we do not advance the length
             * parameters nor get the next SGE, because we must send what has
             * been collected before this SGE as a child request.
             */
            child_length += sge_length;
            req_current_length += sge_length;
            if (req_current_length < req->payload_size) {
                next_sge_fn(sgl_cb_arg, (void **)&address, &sge_length);
            }
            /*
             * If the next SGE is not page aligned, we will need to create a child
             * request for what we have so far, and then start a new child request for
             * the next SGE.
             */
            start_valid = _is_page_aligned(address, page_size);
        }

        if (start_valid && end_valid && !last_sge) {
            continue;
        }

        /*
         * We need to create a split here. Send what we have accumulated so far as a child
         * request. Checking if child_equals_parent allows us to *not* create a child request
         * when no splitting is required - in that case we will fall-through and just create
         * a single request with no children for the entire I/O.
         */
        if (!child_equals_parent) {
            struct nvme_request *child;
            uint32_t child_lba_count;

            if ((child_length % ns->extended_lba_size) != 0) {
                SPDK_ERRLOG("child_length %u not even multiple of lba_size %u\n",
                            child_length, ns->extended_lba_size);
                return NULL;
            }
            child_lba_count = child_length / ns->extended_lba_size;
            /*
             * Note the last parameter is set to "false" - this tells the recursive
             * call to _nvme_ns_cmd_rw() to not bother with checking for SGL splitting
             * since we have already verified it here.
             */
            child = _nvme_add_child_request(ns, qpair, payload, payload_offset, md_offset,
                                            child_lba, child_lba_count,
                                            cb_fn, cb_arg, opc, io_flags,
                                            apptag_mask, apptag, req, false);
            if (child == NULL) {
                return NULL;
            }
            payload_offset += child_length;
            md_offset += child_lba_count * ns->md_size;
            child_lba += child_lba_count;
            child_length = 0;
        }
    }

    if (child_length == req->payload_size) {
        /* No splitting was required, so setup the whole payload as one request. */
        _nvme_ns_cmd_setup_request(ns, req, opc, lba, lba_count, io_flags, apptag_mask, apptag);
    }

    return req;
}

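/*
 * Background for the splitting above: in an NVMe PRP list only the first
 * entry may begin at a non-zero offset within a memory page; every other
 * entry must be page aligned. A scattered payload whose interior elements
 * are not page aligned therefore cannot be described by a single PRP list
 * and has to be carved into separate child commands at the misaligned
 * boundaries.
 */
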
static struct nvme_request *
_nvme_ns_cmd_split_request_sgl(struct spdk_nvme_ns *ns,
                               struct spdk_nvme_qpair *qpair,
                               const struct nvme_payload *payload,
                               uint32_t payload_offset, uint32_t md_offset,
                               uint64_t lba, uint32_t lba_count,
                               spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t opc,
                               uint32_t io_flags, struct nvme_request *req,
                               uint16_t apptag_mask, uint16_t apptag)
{
    spdk_nvme_req_reset_sgl_cb reset_sgl_fn = req->payload.reset_sgl_fn;
    spdk_nvme_req_next_sge_cb next_sge_fn = req->payload.next_sge_fn;
    void *sgl_cb_arg = req->payload.contig_or_cb_arg;
    uint64_t child_lba = lba;
    uint32_t req_current_length = 0;
    uint32_t child_length = 0;
    uint32_t sge_length;
    uint16_t max_sges, num_sges;
    uintptr_t address;

    max_sges = ns->ctrlr->max_sges;

    reset_sgl_fn(sgl_cb_arg, payload_offset);
    num_sges = 0;

    while (req_current_length < req->payload_size) {
        next_sge_fn(sgl_cb_arg, (void **)&address, &sge_length);

        if (req_current_length + sge_length > req->payload_size) {
            sge_length = req->payload_size - req_current_length;
        }

        child_length += sge_length;
        req_current_length += sge_length;
        num_sges++;

        if (num_sges < max_sges) {
            continue;
        }

        /*
         * We need to create a split here. Send what we have accumulated so far as a child
         * request. Checking if the child equals the full payload allows us to *not*
         * create a child request when no splitting is required - in that case we will
         * fall-through and just create a single request with no children for the entire I/O.
         */
        if (child_length != req->payload_size) {
            struct nvme_request *child;
            uint32_t child_lba_count;

            if ((child_length % ns->extended_lba_size) != 0) {
                SPDK_ERRLOG("child_length %u not even multiple of lba_size %u\n",
                            child_length, ns->extended_lba_size);
                return NULL;
            }
            child_lba_count = child_length / ns->extended_lba_size;
            /*
             * Note the last parameter is set to "false" - this tells the recursive
             * call to _nvme_ns_cmd_rw() to not bother with checking for SGL splitting
             * since we have already verified it here.
             */
            child = _nvme_add_child_request(ns, qpair, payload, payload_offset, md_offset,
                                            child_lba, child_lba_count,
                                            cb_fn, cb_arg, opc, io_flags,
                                            apptag_mask, apptag, req, false);
            if (child == NULL) {
                return NULL;
            }
            payload_offset += child_length;
            md_offset += child_lba_count * ns->md_size;
            child_lba += child_lba_count;
            child_length = 0;
            num_sges = 0;
        }
    }

    if (child_length == req->payload_size) {
        /* No splitting was required, so setup the whole payload as one request. */
        _nvme_ns_cmd_setup_request(ns, req, opc, lba, lba_count, io_flags, apptag_mask, apptag);
    }

    return req;
}

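/*
 * Unlike the PRP case, NVMe SGL descriptors carry no page-alignment
 * requirement, so the loop above splits only when the number of elements
 * for one command reaches the per-request limit recorded in
 * ctrlr->max_sges.
 */
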
static struct nvme_request *
_nvme_ns_cmd_rw(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
                const struct nvme_payload *payload, uint32_t payload_offset, uint32_t md_offset,
                uint64_t lba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t opc,
                uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag, bool check_sgl)
{
    struct nvme_request *req;
    uint32_t sector_size;
    uint32_t sectors_per_max_io;
    uint32_t sectors_per_stripe;

    if (io_flags & 0xFFFF) {
        /* The bottom 16 bits must be empty */
        SPDK_ERRLOG("io_flags 0x%x bottom 16 bits is not empty\n",
                    io_flags);
        return NULL;
    }

    sector_size = ns->extended_lba_size;
    sectors_per_max_io = ns->sectors_per_max_io;
    sectors_per_stripe = ns->sectors_per_stripe;

    if ((io_flags & SPDK_NVME_IO_FLAGS_PRACT) &&
        (ns->flags & SPDK_NVME_NS_EXTENDED_LBA_SUPPORTED) &&
        (ns->flags & SPDK_NVME_NS_DPS_PI_SUPPORTED) &&
        (ns->md_size == 8)) {
        sector_size -= 8;
    }

    req = nvme_allocate_request(qpair, payload, lba_count * sector_size, cb_fn, cb_arg);
    if (req == NULL) {
        return NULL;
    }

    req->payload_offset = payload_offset;
    req->md_offset = md_offset;

    /*
     * Intel DC P3*00 NVMe controllers benefit from driver-assisted striping.
     * If this controller defines a stripe boundary and this I/O spans a stripe
     * boundary, split the request into multiple requests and submit each
     * separately to hardware.
     */
    if (sectors_per_stripe > 0 &&
        (((lba & (sectors_per_stripe - 1)) + lba_count) > sectors_per_stripe)) {

        return _nvme_ns_cmd_split_request(ns, qpair, payload, payload_offset, md_offset, lba, lba_count,
                                          cb_fn, cb_arg, opc,
                                          io_flags, req, sectors_per_stripe, sectors_per_stripe - 1, apptag_mask, apptag);
    } else if (lba_count > sectors_per_max_io) {
        return _nvme_ns_cmd_split_request(ns, qpair, payload, payload_offset, md_offset, lba, lba_count,
                                          cb_fn, cb_arg, opc,
                                          io_flags, req, sectors_per_max_io, 0, apptag_mask, apptag);
    } else if (nvme_payload_type(&req->payload) == NVME_PAYLOAD_TYPE_SGL && check_sgl) {
        if (ns->ctrlr->flags & SPDK_NVME_CTRLR_SGL_SUPPORTED) {
            return _nvme_ns_cmd_split_request_sgl(ns, qpair, payload, payload_offset, md_offset,
                                                  lba, lba_count, cb_fn, cb_arg, opc, io_flags,
                                                  req, apptag_mask, apptag);
        } else {
            return _nvme_ns_cmd_split_request_prp(ns, qpair, payload, payload_offset, md_offset,
                                                  lba, lba_count, cb_fn, cb_arg, opc, io_flags,
                                                  req, apptag_mask, apptag);
        }
    }

    _nvme_ns_cmd_setup_request(ns, req, opc, lba, lba_count, io_flags, apptag_mask, apptag);
    return req;
}

int
spdk_nvme_ns_cmd_compare(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer,
                         uint64_t lba,
                         uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
                         uint32_t io_flags)
{
    struct nvme_request *req;
    struct nvme_payload payload;

    payload = NVME_PAYLOAD_CONTIG(buffer, NULL);

    req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg,
                          SPDK_NVME_OPC_COMPARE,
                          io_flags, 0, 0, false);
    if (req != NULL) {
        return nvme_qpair_submit_request(qpair, req);
    } else if (spdk_nvme_ns_check_request_length(lba_count,
                                                 ns->sectors_per_max_io,
                                                 ns->sectors_per_stripe,
                                                 qpair->ctrlr->opts.io_queue_requests)) {
        return -EINVAL;
    } else {
        return -ENOMEM;
    }
}

int
spdk_nvme_ns_cmd_compare_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
                                 void *buffer, void *metadata,
                                 uint64_t lba,
                                 uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
                                 uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag)
{
    struct nvme_request *req;
    struct nvme_payload payload;

    payload = NVME_PAYLOAD_CONTIG(buffer, metadata);

    req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg,
                          SPDK_NVME_OPC_COMPARE,
                          io_flags,
                          apptag_mask, apptag, true);
    if (req != NULL) {
        return nvme_qpair_submit_request(qpair, req);
    } else if (spdk_nvme_ns_check_request_length(lba_count,
                                                 ns->sectors_per_max_io,
                                                 ns->sectors_per_stripe,
                                                 qpair->ctrlr->opts.io_queue_requests)) {
        return -EINVAL;
    } else {
        return -ENOMEM;
    }
}

int
spdk_nvme_ns_cmd_comparev(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
                          uint64_t lba, uint32_t lba_count,
                          spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
                          spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
                          spdk_nvme_req_next_sge_cb next_sge_fn)
{
    struct nvme_request *req;
    struct nvme_payload payload;

    if (reset_sgl_fn == NULL || next_sge_fn == NULL) {
        return -EINVAL;
    }

    payload = NVME_PAYLOAD_SGL(reset_sgl_fn, next_sge_fn, cb_arg, NULL);

    req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg,
                          SPDK_NVME_OPC_COMPARE,
                          io_flags, 0, 0, true);
    if (req != NULL) {
        return nvme_qpair_submit_request(qpair, req);
    } else if (spdk_nvme_ns_check_request_length(lba_count,
                                                 ns->sectors_per_max_io,
                                                 ns->sectors_per_stripe,
                                                 qpair->ctrlr->opts.io_queue_requests)) {
        return -EINVAL;
    } else {
        return -ENOMEM;
    }
}

int
spdk_nvme_ns_cmd_read(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer,
                      uint64_t lba,
                      uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
                      uint32_t io_flags)
{
    struct nvme_request *req;
    struct nvme_payload payload;

    payload = NVME_PAYLOAD_CONTIG(buffer, NULL);

    req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_READ,
                          io_flags, 0, 0, false);
    if (req != NULL) {
        return nvme_qpair_submit_request(qpair, req);
    } else if (spdk_nvme_ns_check_request_length(lba_count,
                                                 ns->sectors_per_max_io,
                                                 ns->sectors_per_stripe,
                                                 qpair->ctrlr->opts.io_queue_requests)) {
        return -EINVAL;
    } else {
        return -ENOMEM;
    }
}

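/*
 * Illustrative caller-side sketch (not part of the driver, kept under
 * "#if 0" so it is not compiled): submitting a contiguous-buffer read with
 * spdk_nvme_ns_cmd_read() and polling the queue pair until the completion
 * callback fires. The ns/qpair handles and the DMA-able buffer are assumed
 * to have been set up elsewhere.
 */
#if 0
static void
example_read_done(void *arg, const struct spdk_nvme_cpl *cpl)
{
    bool *done = arg;

    if (spdk_nvme_cpl_is_error(cpl)) {
        SPDK_ERRLOG("example read failed\n");
    }
    *done = true;
}

static int
example_read(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
             void *buf, uint64_t lba, uint32_t lba_count)
{
    bool done = false;
    int rc;

    rc = spdk_nvme_ns_cmd_read(ns, qpair, buf, lba, lba_count,
                               example_read_done, &done, 0);
    if (rc != 0) {
        return rc;
    }

    /* Busy-poll for the completion; a real application would fold this
     * into its reactor/poller loop instead. */
    while (!done) {
        spdk_nvme_qpair_process_completions(qpair, 0);
    }

    return 0;
}
#endif
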
int
spdk_nvme_ns_cmd_read_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer,
                              void *metadata,
                              uint64_t lba,
                              uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
                              uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag)
{
    struct nvme_request *req;
    struct nvme_payload payload;

    payload = NVME_PAYLOAD_CONTIG(buffer, metadata);

    req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_READ,
                          io_flags,
                          apptag_mask, apptag, true);
    if (req != NULL) {
        return nvme_qpair_submit_request(qpair, req);
    } else if (spdk_nvme_ns_check_request_length(lba_count,
                                                 ns->sectors_per_max_io,
                                                 ns->sectors_per_stripe,
                                                 qpair->ctrlr->opts.io_queue_requests)) {
        return -EINVAL;
    } else {
        return -ENOMEM;
    }
}

int
spdk_nvme_ns_cmd_readv(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
                       uint64_t lba, uint32_t lba_count,
                       spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
                       spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
                       spdk_nvme_req_next_sge_cb next_sge_fn)
{
    struct nvme_request *req;
    struct nvme_payload payload;

    if (reset_sgl_fn == NULL || next_sge_fn == NULL) {
        return -EINVAL;
    }

    payload = NVME_PAYLOAD_SGL(reset_sgl_fn, next_sge_fn, cb_arg, NULL);

    req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_READ,
                          io_flags, 0, 0, true);
    if (req != NULL) {
        return nvme_qpair_submit_request(qpair, req);
    } else if (spdk_nvme_ns_check_request_length(lba_count,
                                                 ns->sectors_per_max_io,
                                                 ns->sectors_per_stripe,
                                                 qpair->ctrlr->opts.io_queue_requests)) {
        return -EINVAL;
    } else {
        return -ENOMEM;
    }
}

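/*
 * Illustrative caller-side sketch (not part of the driver, kept under
 * "#if 0" so it is not compiled): minimal reset_sgl/next_sge callbacks that
 * walk a caller-owned iovec array, which is the contract the readv/writev
 * entry points expect. The context structure is an assumption made for this
 * example only.
 */
#if 0
struct example_sgl_ctx {
    struct iovec *iov;
    int iovcnt;
    int iovpos;
    uint32_t iov_offset;
};

static void
example_reset_sgl(void *cb_arg, uint32_t offset)
{
    struct example_sgl_ctx *ctx = cb_arg;

    /* Seek to the iovec element that contains byte 'offset' of the payload. */
    ctx->iovpos = 0;
    while (ctx->iovpos < ctx->iovcnt && offset >= ctx->iov[ctx->iovpos].iov_len) {
        offset -= ctx->iov[ctx->iovpos].iov_len;
        ctx->iovpos++;
    }
    ctx->iov_offset = offset;
}

static int
example_next_sge(void *cb_arg, void **address, uint32_t *length)
{
    struct example_sgl_ctx *ctx = cb_arg;
    struct iovec *iov = &ctx->iov[ctx->iovpos];

    /* Hand back the remainder of the current element and advance. */
    *address = (uint8_t *)iov->iov_base + ctx->iov_offset;
    *length = iov->iov_len - ctx->iov_offset;
    ctx->iov_offset = 0;
    ctx->iovpos++;

    return 0;
}
#endif
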
int
spdk_nvme_ns_cmd_readv_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
                               uint64_t lba, uint32_t lba_count,
                               spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
                               spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
                               spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
                               uint16_t apptag_mask, uint16_t apptag)
{
    struct nvme_request *req;
    struct nvme_payload payload;

    if (reset_sgl_fn == NULL || next_sge_fn == NULL) {
        return -EINVAL;
    }

    payload = NVME_PAYLOAD_SGL(reset_sgl_fn, next_sge_fn, cb_arg, metadata);

    req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_READ,
                          io_flags, apptag_mask, apptag, true);
    if (req != NULL) {
        return nvme_qpair_submit_request(qpair, req);
    } else if (spdk_nvme_ns_check_request_length(lba_count,
                                                 ns->sectors_per_max_io,
                                                 ns->sectors_per_stripe,
                                                 qpair->ctrlr->opts.io_queue_requests)) {
        return -EINVAL;
    } else {
        return -ENOMEM;
    }
}

int
spdk_nvme_ns_cmd_write(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
                       void *buffer, uint64_t lba,
                       uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
                       uint32_t io_flags)
{
    struct nvme_request *req;
    struct nvme_payload payload;

    payload = NVME_PAYLOAD_CONTIG(buffer, NULL);

    req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_WRITE,
                          io_flags, 0, 0, true);
    if (req != NULL) {
        return nvme_qpair_submit_request(qpair, req);
    } else if (spdk_nvme_ns_check_request_length(lba_count,
                                                 ns->sectors_per_max_io,
                                                 ns->sectors_per_stripe,
                                                 qpair->ctrlr->opts.io_queue_requests)) {
        return -EINVAL;
    } else {
        return -ENOMEM;
    }
}

int
spdk_nvme_ns_cmd_write_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
                               void *buffer, void *metadata, uint64_t lba,
                               uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
                               uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag)
{
    struct nvme_request *req;
    struct nvme_payload payload;

    payload = NVME_PAYLOAD_CONTIG(buffer, metadata);

    req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_WRITE,
                          io_flags, apptag_mask, apptag, true);
    if (req != NULL) {
        return nvme_qpair_submit_request(qpair, req);
    } else if (spdk_nvme_ns_check_request_length(lba_count,
                                                 ns->sectors_per_max_io,
                                                 ns->sectors_per_stripe,
                                                 qpair->ctrlr->opts.io_queue_requests)) {
        return -EINVAL;
    } else {
        return -ENOMEM;
    }
}

int
spdk_nvme_ns_cmd_writev(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
                        uint64_t lba, uint32_t lba_count,
                        spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
                        spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
                        spdk_nvme_req_next_sge_cb next_sge_fn)
{
    struct nvme_request *req;
    struct nvme_payload payload;

    if (reset_sgl_fn == NULL || next_sge_fn == NULL) {
        return -EINVAL;
    }

    payload = NVME_PAYLOAD_SGL(reset_sgl_fn, next_sge_fn, cb_arg, NULL);

    req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_WRITE,
                          io_flags, 0, 0, true);
    if (req != NULL) {
        return nvme_qpair_submit_request(qpair, req);
    } else if (spdk_nvme_ns_check_request_length(lba_count,
                                                 ns->sectors_per_max_io,
                                                 ns->sectors_per_stripe,
                                                 qpair->ctrlr->opts.io_queue_requests)) {
        return -EINVAL;
    } else {
        return -ENOMEM;
    }
}

int
spdk_nvme_ns_cmd_writev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
                                uint64_t lba, uint32_t lba_count,
                                spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
                                spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
                                spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
                                uint16_t apptag_mask, uint16_t apptag)
{
    struct nvme_request *req;
    struct nvme_payload payload;

    if (reset_sgl_fn == NULL || next_sge_fn == NULL) {
        return -EINVAL;
    }

    payload = NVME_PAYLOAD_SGL(reset_sgl_fn, next_sge_fn, cb_arg, metadata);

    req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_WRITE,
                          io_flags, apptag_mask, apptag, true);
    if (req != NULL) {
        return nvme_qpair_submit_request(qpair, req);
    } else if (spdk_nvme_ns_check_request_length(lba_count,
                                                 ns->sectors_per_max_io,
                                                 ns->sectors_per_stripe,
                                                 qpair->ctrlr->opts.io_queue_requests)) {
        return -EINVAL;
    } else {
        return -ENOMEM;
    }
}

int
spdk_nvme_ns_cmd_write_zeroes(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
                              uint64_t lba, uint32_t lba_count,
                              spdk_nvme_cmd_cb cb_fn, void *cb_arg,
                              uint32_t io_flags)
{
    struct nvme_request *req;
    struct spdk_nvme_cmd *cmd;
    uint64_t *tmp_lba;

    if (lba_count == 0 || lba_count > UINT16_MAX + 1) {
        return -EINVAL;
    }

    req = nvme_allocate_request_null(qpair, cb_fn, cb_arg);
    if (req == NULL) {
        return -ENOMEM;
    }

    cmd = &req->cmd;
    cmd->opc = SPDK_NVME_OPC_WRITE_ZEROES;
    cmd->nsid = ns->id;

    tmp_lba = (uint64_t *)&cmd->cdw10;
    *tmp_lba = lba;
    cmd->cdw12 = lba_count - 1;
    cmd->cdw12 |= io_flags;

    return nvme_qpair_submit_request(qpair, req);
}

int
spdk_nvme_ns_cmd_dataset_management(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
                                    uint32_t type,
                                    const struct spdk_nvme_dsm_range *ranges, uint16_t num_ranges,
                                    spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
    struct nvme_request *req;
    struct spdk_nvme_cmd *cmd;

    if (num_ranges == 0 || num_ranges > SPDK_NVME_DATASET_MANAGEMENT_MAX_RANGES) {
        return -EINVAL;
    }

    if (ranges == NULL) {
        return -EINVAL;
    }

    req = nvme_allocate_request_user_copy(qpair, (void *)ranges,
                                          num_ranges * sizeof(struct spdk_nvme_dsm_range),
                                          cb_fn, cb_arg, true);
    if (req == NULL) {
        return -ENOMEM;
    }

    cmd = &req->cmd;
    cmd->opc = SPDK_NVME_OPC_DATASET_MANAGEMENT;
    cmd->nsid = ns->id;

    cmd->cdw10 = num_ranges - 1;
    cmd->cdw11 = type;

    return nvme_qpair_submit_request(qpair, req);
}

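/*
 * Illustrative caller-side sketch (not part of the driver, kept under
 * "#if 0" so it is not compiled): deallocating (trimming) a single LBA
 * range. spdk_nvme_dsm_range and SPDK_NVME_DSM_ATTR_DEALLOCATE come from
 * spdk/nvme_spec.h; the wrapper function itself is made up for the example.
 */
#if 0
static int
example_deallocate(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
                   uint64_t lba, uint32_t lba_count,
                   spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
    struct spdk_nvme_dsm_range range;

    memset(&range, 0, sizeof(range));
    range.starting_lba = lba;
    range.length = lba_count;

    return spdk_nvme_ns_cmd_dataset_management(ns, qpair,
            SPDK_NVME_DSM_ATTR_DEALLOCATE,
            &range, 1, cb_fn, cb_arg);
}
#endif
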
int
spdk_nvme_ns_cmd_flush(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
                       spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
    struct nvme_request *req;
    struct spdk_nvme_cmd *cmd;

    req = nvme_allocate_request_null(qpair, cb_fn, cb_arg);
    if (req == NULL) {
        return -ENOMEM;
    }

    cmd = &req->cmd;
    cmd->opc = SPDK_NVME_OPC_FLUSH;
    cmd->nsid = ns->id;

    return nvme_qpair_submit_request(qpair, req);
}

int
spdk_nvme_ns_cmd_reservation_register(struct spdk_nvme_ns *ns,
                                      struct spdk_nvme_qpair *qpair,
                                      struct spdk_nvme_reservation_register_data *payload,
                                      bool ignore_key,
                                      enum spdk_nvme_reservation_register_action action,
                                      enum spdk_nvme_reservation_register_cptpl cptpl,
                                      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
    struct nvme_request *req;
    struct spdk_nvme_cmd *cmd;

    req = nvme_allocate_request_user_copy(qpair,
                                          payload, sizeof(struct spdk_nvme_reservation_register_data),
                                          cb_fn, cb_arg, true);
    if (req == NULL) {
        return -ENOMEM;
    }

    cmd = &req->cmd;
    cmd->opc = SPDK_NVME_OPC_RESERVATION_REGISTER;
    cmd->nsid = ns->id;

    /* Bits 0-2 */
    cmd->cdw10 = action;
    /* Bit 3 */
    cmd->cdw10 |= ignore_key ? 1 << 3 : 0;
    /* Bits 30-31 */
    cmd->cdw10 |= (uint32_t)cptpl << 30;

    return nvme_qpair_submit_request(qpair, req);
}

int
spdk_nvme_ns_cmd_reservation_release(struct spdk_nvme_ns *ns,
                                     struct spdk_nvme_qpair *qpair,
                                     struct spdk_nvme_reservation_key_data *payload,
                                     bool ignore_key,
                                     enum spdk_nvme_reservation_release_action action,
                                     enum spdk_nvme_reservation_type type,
                                     spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
    struct nvme_request *req;
    struct spdk_nvme_cmd *cmd;

    req = nvme_allocate_request_user_copy(qpair,
                                          payload, sizeof(struct spdk_nvme_reservation_key_data), cb_fn,
                                          cb_arg, true);
    if (req == NULL) {
        return -ENOMEM;
    }

    cmd = &req->cmd;
    cmd->opc = SPDK_NVME_OPC_RESERVATION_RELEASE;
    cmd->nsid = ns->id;

    /* Bits 0-2 */
    cmd->cdw10 = action;
    /* Bit 3 */
    cmd->cdw10 |= ignore_key ? 1 << 3 : 0;
    /* Bits 8-15 */
    cmd->cdw10 |= (uint32_t)type << 8;

    return nvme_qpair_submit_request(qpair, req);
}

int
spdk_nvme_ns_cmd_reservation_acquire(struct spdk_nvme_ns *ns,
                                     struct spdk_nvme_qpair *qpair,
                                     struct spdk_nvme_reservation_acquire_data *payload,
                                     bool ignore_key,
                                     enum spdk_nvme_reservation_acquire_action action,
                                     enum spdk_nvme_reservation_type type,
                                     spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
    struct nvme_request *req;
    struct spdk_nvme_cmd *cmd;

    req = nvme_allocate_request_user_copy(qpair,
                                          payload, sizeof(struct spdk_nvme_reservation_acquire_data),
                                          cb_fn, cb_arg, true);
    if (req == NULL) {
        return -ENOMEM;
    }

    cmd = &req->cmd;
    cmd->opc = SPDK_NVME_OPC_RESERVATION_ACQUIRE;
    cmd->nsid = ns->id;

    /* Bits 0-2 */
    cmd->cdw10 = action;
    /* Bit 3 */
    cmd->cdw10 |= ignore_key ? 1 << 3 : 0;
    /* Bits 8-15 */
    cmd->cdw10 |= (uint32_t)type << 8;

    return nvme_qpair_submit_request(qpair, req);
}

int
spdk_nvme_ns_cmd_reservation_report(struct spdk_nvme_ns *ns,
                                    struct spdk_nvme_qpair *qpair,
                                    void *payload, uint32_t len,
                                    spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
    uint32_t num_dwords;
    struct nvme_request *req;
    struct spdk_nvme_cmd *cmd;

    if (len % 4) {
        return -EINVAL;
    }
    num_dwords = len / 4;

    req = nvme_allocate_request_user_copy(qpair, payload, len, cb_fn, cb_arg, false);
    if (req == NULL) {
        return -ENOMEM;
    }

    cmd = &req->cmd;
    cmd->opc = SPDK_NVME_OPC_RESERVATION_REPORT;
    cmd->nsid = ns->id;

    cmd->cdw10 = num_dwords;

    return nvme_qpair_submit_request(qpair, req);
}