/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "nvmf_internal.h"

#include "spdk/bdev.h"
#include "spdk/endian.h"
#include "spdk/thread.h"
#include "spdk/likely.h"
#include "spdk/nvme.h"
#include "spdk/nvmf_spec.h"
#include "spdk/trace.h"
#include "spdk/scsi_spec.h"
#include "spdk/string.h"
#include "spdk/util.h"

#include "spdk_internal/log.h"

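/*
 * This file maps NVMe-oF I/O commands (read, write, write zeroes, flush,
 * dataset management, and NVMe passthru) onto SPDK bdev operations. Each
 * handler returns SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE when the completion
 * status has already been filled in, or ..._ASYNCHRONOUS when the bdev layer
 * will complete the request later via a callback.
 */
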
bool
spdk_nvmf_subsystem_bdev_io_type_supported(struct spdk_nvmf_subsystem *subsystem,
		enum spdk_bdev_io_type io_type)
{
	struct spdk_nvmf_ns *ns;

	for (ns = spdk_nvmf_subsystem_get_first_ns(subsystem); ns != NULL;
	     ns = spdk_nvmf_subsystem_get_next_ns(subsystem, ns)) {
		if (ns->bdev == NULL) {
			continue;
		}

		if (!spdk_bdev_io_type_supported(ns->bdev, io_type)) {
			SPDK_DEBUGLOG(SPDK_LOG_NVMF,
				      "Subsystem %s namespace %u (%s) does not support io_type %d\n",
				      spdk_nvmf_subsystem_get_nqn(subsystem),
				      ns->opts.nsid, spdk_bdev_get_name(ns->bdev), (int)io_type);
			return false;
		}
	}

	SPDK_DEBUGLOG(SPDK_LOG_NVMF, "All devices in Subsystem %s support io_type %d\n",
		      spdk_nvmf_subsystem_get_nqn(subsystem), (int)io_type);
	return true;
}

bool
spdk_nvmf_ctrlr_dsm_supported(struct spdk_nvmf_ctrlr *ctrlr)
{
	return spdk_nvmf_subsystem_bdev_io_type_supported(ctrlr->subsys, SPDK_BDEV_IO_TYPE_UNMAP);
}

bool
spdk_nvmf_ctrlr_write_zeroes_supported(struct spdk_nvmf_ctrlr *ctrlr)
{
	return spdk_nvmf_subsystem_bdev_io_type_supported(ctrlr->subsys, SPDK_BDEV_IO_TYPE_WRITE_ZEROES);
}

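/*
 * Usage sketch (assumed caller, not part of this file): the Identify
 * Controller path can gate optional ONCS bits on these helpers, e.g.:
 *
 *	cdata->oncs.dsm = spdk_nvmf_ctrlr_dsm_supported(ctrlr);
 *	cdata->oncs.write_zeroes = spdk_nvmf_ctrlr_write_zeroes_supported(ctrlr);
 *
 * where cdata is assumed to be the struct spdk_nvme_ctrlr_data being built.
 */
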
static void
nvmf_bdev_ctrlr_complete_cmd(struct spdk_bdev_io *bdev_io, bool success,
			     void *cb_arg)
{
	struct spdk_nvmf_request	*req = cb_arg;
	struct spdk_nvme_cpl		*response = &req->rsp->nvme_cpl;
	int				sc, sct;

	spdk_bdev_io_get_nvme_status(bdev_io, &sct, &sc);
	response->status.sc = sc;
	response->status.sct = sct;

	spdk_nvmf_request_complete(req);
	spdk_bdev_free_io(bdev_io);
}

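/*
 * Note: spdk_bdev_io_get_nvme_status() translates the bdev I/O result into
 * an NVMe status code type (sct) and status code (sc), so the `success`
 * flag is not consulted here; the translated status is copied verbatim into
 * the completion queue entry before the request is completed.
 */
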
void
spdk_nvmf_bdev_ctrlr_identify_ns(struct spdk_nvmf_ns *ns, struct spdk_nvme_ns_data *nsdata)
{
	struct spdk_bdev *bdev = ns->bdev;
	uint64_t num_blocks;

	num_blocks = spdk_bdev_get_num_blocks(bdev);

	nsdata->nsze = num_blocks;
	nsdata->ncap = num_blocks;
	nsdata->nuse = num_blocks;

	nsdata->flbas.format = 0;
	nsdata->lbaf[0].ms = spdk_bdev_get_md_size(bdev);
	nsdata->lbaf[0].lbads = spdk_u32log2(spdk_bdev_get_block_size(bdev));
	if (nsdata->lbaf[0].ms != 0) {
		nsdata->flbas.extended = 1;
		nsdata->mc.extended = 1;
		nsdata->mc.pointer = 0;
		nsdata->dps.md_start = spdk_bdev_is_dif_head_of_md(bdev);

		switch (spdk_bdev_get_dif_type(bdev)) {
		case SPDK_DIF_TYPE1:
			nsdata->dpc.pit1 = 1;
			nsdata->dps.pit = SPDK_NVME_FMT_NVM_PROTECTION_TYPE1;
			break;
		case SPDK_DIF_TYPE2:
			nsdata->dpc.pit2 = 1;
			nsdata->dps.pit = SPDK_NVME_FMT_NVM_PROTECTION_TYPE2;
			break;
		case SPDK_DIF_TYPE3:
			nsdata->dpc.pit3 = 1;
			nsdata->dps.pit = SPDK_NVME_FMT_NVM_PROTECTION_TYPE3;
			break;
		default:
			SPDK_DEBUGLOG(SPDK_LOG_NVMF, "Protection Disabled\n");
			nsdata->dps.pit = SPDK_NVME_FMT_NVM_PROTECTION_DISABLE;
			break;
		}
	}

	nsdata->noiob = spdk_bdev_get_optimal_io_boundary(bdev);
	nsdata->nmic.can_share = 1;
	nsdata->nsrescap.rescap.persist = 0; /* TODO: not supported for now */
	nsdata->nsrescap.rescap.write_exclusive = 1;
	nsdata->nsrescap.rescap.exclusive_access = 1;
	nsdata->nsrescap.rescap.write_exclusive_reg_only = 1;
	nsdata->nsrescap.rescap.exclusive_access_reg_only = 1;
	nsdata->nsrescap.rescap.write_exclusive_all_reg = 1;
	nsdata->nsrescap.rescap.exclusive_access_all_reg = 1;
	nsdata->nsrescap.rescap.ignore_existing_key = 1;

	SPDK_STATIC_ASSERT(sizeof(nsdata->nguid) == sizeof(ns->opts.nguid), "size mismatch");
	memcpy(nsdata->nguid, ns->opts.nguid, sizeof(nsdata->nguid));

	SPDK_STATIC_ASSERT(sizeof(nsdata->eui64) == sizeof(ns->opts.eui64), "size mismatch");
	memcpy(&nsdata->eui64, ns->opts.eui64, sizeof(nsdata->eui64));
}

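/*
 * Worked example (hypothetical bdev): with a 512-byte block size, 8 GiB of
 * capacity, and no metadata, the code above reports
 * nsze = ncap = nuse = 16777216 blocks and lbaf[0].lbads = 9 (2^9 = 512);
 * lbaf[0].ms stays 0, so the extended-LBA and protection-information fields
 * remain zeroed.
 */
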
static void
nvmf_bdev_ctrlr_get_rw_params(const struct spdk_nvme_cmd *cmd, uint64_t *start_lba,
			      uint64_t *num_blocks)
{
	/* SLBA: CDW10 and CDW11 */
	*start_lba = from_le64(&cmd->cdw10);

	/* NLB: CDW12 bits 15:00, 0's based */
	*num_blocks = (from_le32(&cmd->cdw12) & 0xFFFFu) + 1;
}

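/*
 * Example: a read with cdw10 = 0x1000, cdw11 = 0, and cdw12 = 0x0007 yields
 * start_lba = 0x1000 and num_blocks = 8, since NLB is a 0's-based count.
 */
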
static bool
nvmf_bdev_ctrlr_lba_in_range(uint64_t bdev_num_blocks, uint64_t io_start_lba,
			     uint64_t io_num_blocks)
{
	if (io_start_lba + io_num_blocks > bdev_num_blocks ||
	    io_start_lba + io_num_blocks < io_start_lba) {
		return false;
	}

	return true;
}

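/*
 * The second comparison guards against unsigned wraparound: if
 * io_start_lba + io_num_blocks overflows 64 bits, the sum compares smaller
 * than io_start_lba and the range is rejected even though the first check
 * alone would pass.
 */
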
static void
spdk_nvmf_ctrlr_process_io_cmd_resubmit(void *arg)
{
	struct spdk_nvmf_request *req = arg;

	spdk_nvmf_ctrlr_process_io_cmd(req);
}

static void
nvmf_bdev_ctrl_queue_io(struct spdk_nvmf_request *req, struct spdk_bdev *bdev,
			struct spdk_io_channel *ch, spdk_bdev_io_wait_cb cb_fn, void *cb_arg)
{
	int rc;

	req->bdev_io_wait.bdev = bdev;
	req->bdev_io_wait.cb_fn = cb_fn;
	req->bdev_io_wait.cb_arg = cb_arg;

	rc = spdk_bdev_queue_io_wait(bdev, ch, &req->bdev_io_wait);
	if (rc != 0) {
		assert(false);
	}
}

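/*
 * When a bdev submission fails with -ENOMEM, the I/O handlers below use this
 * helper to park the request on the bdev's io_wait queue; cb_fn re-executes
 * the command once a bdev_io becomes available again.
 */
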
int
spdk_nvmf_bdev_ctrlr_read_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
			      struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
{
	uint64_t bdev_num_blocks = spdk_bdev_get_num_blocks(bdev);
	uint32_t block_size = spdk_bdev_get_block_size(bdev);
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
	uint64_t start_lba;
	uint64_t num_blocks;
	int rc;

	nvmf_bdev_ctrlr_get_rw_params(cmd, &start_lba, &num_blocks);

	if (spdk_unlikely(!nvmf_bdev_ctrlr_lba_in_range(bdev_num_blocks, start_lba, num_blocks))) {
		SPDK_ERRLOG("end of media\n");
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_LBA_OUT_OF_RANGE;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	if (spdk_unlikely(num_blocks * block_size > req->length)) {
		SPDK_ERRLOG("Read NLB %" PRIu64 " * block size %" PRIu32 " > SGL length %" PRIu32 "\n",
			    num_blocks, block_size, req->length);
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	rc = spdk_bdev_readv_blocks(desc, ch, req->iov, req->iovcnt, start_lba, num_blocks,
				    nvmf_bdev_ctrlr_complete_cmd, req);
	if (spdk_unlikely(rc)) {
		if (rc == -ENOMEM) {
			nvmf_bdev_ctrl_queue_io(req, bdev, ch, spdk_nvmf_ctrlr_process_io_cmd_resubmit, req);
			return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
		}
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
}

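/*
 * Example: on a bdev with 4096-byte blocks, a read with NLB = 7 (eight
 * blocks) requires the transport to have staged at least 32768 bytes of
 * buffer in req->length; otherwise the command is failed with
 * SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID before it ever reaches the bdev.
 */
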
int
spdk_nvmf_bdev_ctrlr_write_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
			       struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
{
	uint64_t bdev_num_blocks = spdk_bdev_get_num_blocks(bdev);
	uint32_t block_size = spdk_bdev_get_block_size(bdev);
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
	uint64_t start_lba;
	uint64_t num_blocks;
	int rc;

	nvmf_bdev_ctrlr_get_rw_params(cmd, &start_lba, &num_blocks);

	if (spdk_unlikely(!nvmf_bdev_ctrlr_lba_in_range(bdev_num_blocks, start_lba, num_blocks))) {
		SPDK_ERRLOG("end of media\n");
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_LBA_OUT_OF_RANGE;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	if (spdk_unlikely(num_blocks * block_size > req->length)) {
		SPDK_ERRLOG("Write NLB %" PRIu64 " * block size %" PRIu32 " > SGL length %" PRIu32 "\n",
			    num_blocks, block_size, req->length);
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	rc = spdk_bdev_writev_blocks(desc, ch, req->iov, req->iovcnt, start_lba, num_blocks,
				     nvmf_bdev_ctrlr_complete_cmd, req);
	if (spdk_unlikely(rc)) {
		if (rc == -ENOMEM) {
			nvmf_bdev_ctrl_queue_io(req, bdev, ch, spdk_nvmf_ctrlr_process_io_cmd_resubmit, req);
			return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
		}
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
}

int
spdk_nvmf_bdev_ctrlr_write_zeroes_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
				      struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
{
	uint64_t bdev_num_blocks = spdk_bdev_get_num_blocks(bdev);
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
	uint64_t start_lba;
	uint64_t num_blocks;
	int rc;

	nvmf_bdev_ctrlr_get_rw_params(cmd, &start_lba, &num_blocks);

	if (spdk_unlikely(!nvmf_bdev_ctrlr_lba_in_range(bdev_num_blocks, start_lba, num_blocks))) {
		SPDK_ERRLOG("end of media\n");
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_LBA_OUT_OF_RANGE;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	rc = spdk_bdev_write_zeroes_blocks(desc, ch, start_lba, num_blocks,
					   nvmf_bdev_ctrlr_complete_cmd, req);
	if (spdk_unlikely(rc)) {
		if (rc == -ENOMEM) {
			nvmf_bdev_ctrl_queue_io(req, bdev, ch, spdk_nvmf_ctrlr_process_io_cmd_resubmit, req);
			return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
		}
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
}

int
spdk_nvmf_bdev_ctrlr_flush_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
			       struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
{
	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
	int rc;

	/* For an NVMe-oF controller, SPDK always sets the volatile write
	 * cache bit to 1, so return success for block devices that cannot
	 * support the FLUSH command.
	 */
	if (!spdk_bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_FLUSH)) {
		response->status.sct = SPDK_NVME_SCT_GENERIC;
		response->status.sc = SPDK_NVME_SC_SUCCESS;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	rc = spdk_bdev_flush_blocks(desc, ch, 0, spdk_bdev_get_num_blocks(bdev),
				    nvmf_bdev_ctrlr_complete_cmd, req);
	if (spdk_unlikely(rc)) {
		if (rc == -ENOMEM) {
			nvmf_bdev_ctrl_queue_io(req, bdev, ch, spdk_nvmf_ctrlr_process_io_cmd_resubmit, req);
			return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
		}
		response->status.sct = SPDK_NVME_SCT_GENERIC;
		response->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
}

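/*
 * Because the controller always advertises a volatile write cache, hosts
 * may legitimately issue Flush; completing it as a successful no-op on
 * bdevs without FLUSH support is harmless, since such devices have no
 * cache to flush.
 */
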
struct nvmf_bdev_ctrlr_unmap {
	struct spdk_nvmf_request	*req;
	uint32_t			count;
	struct spdk_bdev_desc		*desc;
	struct spdk_bdev		*bdev;
	struct spdk_io_channel		*ch;
	uint32_t			range_index;
};

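/*
 * `count` tracks how many unmap operations are outstanding at the bdev
 * layer for one DSM command; the command completes only when it returns to
 * zero. `range_index` records how far into the host's range list submission
 * has progressed, so a resubmit after -ENOMEM resumes where it left off.
 */
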
static void
nvmf_bdev_ctrlr_unmap_cpl(struct spdk_bdev_io *bdev_io, bool success,
			  void *cb_arg)
{
	struct nvmf_bdev_ctrlr_unmap	*unmap_ctx = cb_arg;
	struct spdk_nvmf_request	*req = unmap_ctx->req;
	struct spdk_nvme_cpl		*response = &req->rsp->nvme_cpl;
	int				sc, sct;

	unmap_ctx->count--;

	if (response->status.sct == SPDK_NVME_SCT_GENERIC &&
	    response->status.sc == SPDK_NVME_SC_SUCCESS) {
		spdk_bdev_io_get_nvme_status(bdev_io, &sct, &sc);
		response->status.sc = sc;
		response->status.sct = sct;
	}

	if (unmap_ctx->count == 0) {
		spdk_nvmf_request_complete(req);
		free(unmap_ctx);
	}
	spdk_bdev_free_io(bdev_io);
}

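/*
 * Note the first-error-wins policy above: once an earlier range has
 * recorded a non-success status, later completions leave it untouched, so
 * the host sees the earliest failure even though the remaining unmaps still
 * run to completion.
 */
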
static int
nvmf_bdev_ctrlr_unmap(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
		      struct spdk_io_channel *ch, struct spdk_nvmf_request *req,
		      struct nvmf_bdev_ctrlr_unmap *unmap_ctx);

static void
nvmf_bdev_ctrlr_unmap_resubmit(void *arg)
{
	struct nvmf_bdev_ctrlr_unmap *unmap_ctx = arg;
	struct spdk_nvmf_request *req = unmap_ctx->req;
	struct spdk_bdev_desc *desc = unmap_ctx->desc;
	struct spdk_bdev *bdev = unmap_ctx->bdev;
	struct spdk_io_channel *ch = unmap_ctx->ch;

	nvmf_bdev_ctrlr_unmap(bdev, desc, ch, req, unmap_ctx);
}

static int
nvmf_bdev_ctrlr_unmap(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
		      struct spdk_io_channel *ch, struct spdk_nvmf_request *req,
		      struct nvmf_bdev_ctrlr_unmap *unmap_ctx)
{
	uint16_t nr, i;
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
	struct spdk_nvme_dsm_range *dsm_range;
	uint64_t lba;
	uint32_t lba_count;
	int rc;

	nr = ((cmd->cdw10 & 0x000000ff) + 1);
	if (nr * sizeof(struct spdk_nvme_dsm_range) > req->length) {
		SPDK_ERRLOG("Dataset Management number of ranges > SGL length\n");
		response->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	if (unmap_ctx == NULL) {
		unmap_ctx = calloc(1, sizeof(*unmap_ctx));
		if (!unmap_ctx) {
			response->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
			return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
		}

		unmap_ctx->req = req;
		unmap_ctx->desc = desc;
		unmap_ctx->ch = ch;
		unmap_ctx->bdev = bdev;

		response->status.sct = SPDK_NVME_SCT_GENERIC;
		response->status.sc = SPDK_NVME_SC_SUCCESS;
	} else {
		unmap_ctx->count--;	/* dequeued */
	}

	dsm_range = (struct spdk_nvme_dsm_range *)req->data;
	for (i = unmap_ctx->range_index; i < nr; i++) {
		lba = dsm_range[i].starting_lba;
		lba_count = dsm_range[i].length;

		unmap_ctx->count++;

		rc = spdk_bdev_unmap_blocks(desc, ch, lba, lba_count,
					    nvmf_bdev_ctrlr_unmap_cpl, unmap_ctx);
		if (rc) {
			if (rc == -ENOMEM) {
				nvmf_bdev_ctrl_queue_io(req, bdev, ch, nvmf_bdev_ctrlr_unmap_resubmit, unmap_ctx);
				/* Unmap was not yet submitted to bdev */
				/* unmap_ctx->count will be decremented when the request is dequeued */
				return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
			}

			response->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
			unmap_ctx->count--;
			/* We can't return here - we may have to wait for any other
			 * unmaps already sent to complete */
			break;
		}

		unmap_ctx->range_index++;
	}

	if (unmap_ctx->count == 0) {
		free(unmap_ctx);
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
}

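/*
 * Example (hypothetical payload): a DSM deallocate with NR = 0 in CDW10
 * describes one range; if req->data holds a single struct
 * spdk_nvme_dsm_range with starting_lba = 0 and length = 2048, the loop
 * above issues one spdk_bdev_unmap_blocks() call covering LBAs 0-2047.
 */
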
int
spdk_nvmf_bdev_ctrlr_dsm_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
			     struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
{
	uint32_t attribute;
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;

	attribute = cmd->cdw11 & 0x00000007;
	if (attribute & SPDK_NVME_DSM_ATTR_DEALLOCATE) {
		return nvmf_bdev_ctrlr_unmap(bdev, desc, ch, req, NULL);
	}

	response->status.sct = SPDK_NVME_SCT_GENERIC;
	response->status.sc = SPDK_NVME_SC_SUCCESS;
	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}

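/*
 * Only the AD (deallocate) attribute requires action here; DSM commands
 * that carry only hint attributes (e.g. integral read/write) are completed
 * successfully as no-ops.
 */
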
int
spdk_nvmf_bdev_ctrlr_nvme_passthru_io(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
				      struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
{
	int rc;

	rc = spdk_bdev_nvme_io_passthru(desc, ch, &req->cmd->nvme_cmd, req->data, req->length,
					nvmf_bdev_ctrlr_complete_cmd, req);
	if (spdk_unlikely(rc)) {
		if (rc == -ENOMEM) {
			nvmf_bdev_ctrl_queue_io(req, bdev, ch, spdk_nvmf_ctrlr_process_io_cmd_resubmit, req);
			return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
		}

		req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
		req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_INVALID_OPCODE;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
}