/* ceph/src/spdk/lib/nvmf/ctrlr_bdev.c (imported with the Ceph 15.2.0 Octopus sources) */
/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "nvmf_internal.h"

#include "spdk/bdev.h"
#include "spdk/endian.h"
#include "spdk/thread.h"
#include "spdk/likely.h"
#include "spdk/nvme.h"
#include "spdk/nvmf_spec.h"
#include "spdk/trace.h"
#include "spdk/scsi_spec.h"
#include "spdk/string.h"
#include "spdk/util.h"

#include "spdk_internal/log.h"

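/* Return true only if every namespace in the subsystem is backed by a bdev that
 * supports the given bdev I/O type. Namespaces without a bdev are skipped.
 */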
static bool
spdk_nvmf_subsystem_bdev_io_type_supported(struct spdk_nvmf_subsystem *subsystem,
		enum spdk_bdev_io_type io_type)
{
	struct spdk_nvmf_ns *ns;

	for (ns = spdk_nvmf_subsystem_get_first_ns(subsystem); ns != NULL;
	     ns = spdk_nvmf_subsystem_get_next_ns(subsystem, ns)) {
		if (ns->bdev == NULL) {
			continue;
		}

		if (!spdk_bdev_io_type_supported(ns->bdev, io_type)) {
			SPDK_DEBUGLOG(SPDK_LOG_NVMF,
				      "Subsystem %s namespace %u (%s) does not support io_type %d\n",
				      spdk_nvmf_subsystem_get_nqn(subsystem),
				      ns->opts.nsid, spdk_bdev_get_name(ns->bdev), (int)io_type);
			return false;
		}
	}

	SPDK_DEBUGLOG(SPDK_LOG_NVMF, "All devices in Subsystem %s support io_type %d\n",
		      spdk_nvmf_subsystem_get_nqn(subsystem), (int)io_type);
	return true;
}

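/* The two helpers below report whether the controller can advertise Dataset
 * Management (implemented via bdev UNMAP) and Write Zeroes, based on what the
 * bdevs backing all namespaces in the subsystem support.
 */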
bool
spdk_nvmf_ctrlr_dsm_supported(struct spdk_nvmf_ctrlr *ctrlr)
{
	return spdk_nvmf_subsystem_bdev_io_type_supported(ctrlr->subsys, SPDK_BDEV_IO_TYPE_UNMAP);
}

bool
spdk_nvmf_ctrlr_write_zeroes_supported(struct spdk_nvmf_ctrlr *ctrlr)
{
	return spdk_nvmf_subsystem_bdev_io_type_supported(ctrlr->subsys, SPDK_BDEV_IO_TYPE_WRITE_ZEROES);
}

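/* Generic bdev I/O completion callback: translate the bdev I/O status into an
 * NVMe status code and status code type, complete the NVMe-oF request, and
 * release the bdev I/O.
 */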
static void
nvmf_bdev_ctrlr_complete_cmd(struct spdk_bdev_io *bdev_io, bool success,
			     void *cb_arg)
{
	struct spdk_nvmf_request *req = cb_arg;
	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
	int sc, sct;

	spdk_bdev_io_get_nvme_status(bdev_io, &sct, &sc);
	response->status.sc = sc;
	response->status.sct = sct;

	spdk_nvmf_request_complete(req);
	spdk_bdev_free_io(bdev_io);
}

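/* Fill out an Identify Namespace data structure from the backing bdev:
 * size/capacity/utilization, a single LBA format derived from the bdev block
 * and metadata sizes, end-to-end data protection settings when metadata is
 * present, optimal I/O boundary, namespace sharing capability, reservation
 * capabilities, and the NGUID/EUI64 identifiers from the namespace options.
 */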
void
spdk_nvmf_bdev_ctrlr_identify_ns(struct spdk_nvmf_ns *ns, struct spdk_nvme_ns_data *nsdata)
{
	struct spdk_bdev *bdev = ns->bdev;
	uint64_t num_blocks;

	num_blocks = spdk_bdev_get_num_blocks(bdev);

	nsdata->nsze = num_blocks;
	nsdata->ncap = num_blocks;
	nsdata->nuse = num_blocks;
	nsdata->nlbaf = 0;
	nsdata->flbas.format = 0;
	nsdata->lbaf[0].ms = spdk_bdev_get_md_size(bdev);
	nsdata->lbaf[0].lbads = spdk_u32log2(spdk_bdev_get_block_size(bdev));
	if (nsdata->lbaf[0].ms != 0) {
		nsdata->flbas.extended = 1;
		nsdata->mc.extended = 1;
		nsdata->mc.pointer = 0;
		nsdata->dps.md_start = spdk_bdev_is_dif_head_of_md(bdev);

		switch (spdk_bdev_get_dif_type(bdev)) {
		case SPDK_DIF_TYPE1:
			nsdata->dpc.pit1 = 1;
			nsdata->dps.pit = SPDK_NVME_FMT_NVM_PROTECTION_TYPE1;
			break;
		case SPDK_DIF_TYPE2:
			nsdata->dpc.pit2 = 1;
			nsdata->dps.pit = SPDK_NVME_FMT_NVM_PROTECTION_TYPE2;
			break;
		case SPDK_DIF_TYPE3:
			nsdata->dpc.pit3 = 1;
			nsdata->dps.pit = SPDK_NVME_FMT_NVM_PROTECTION_TYPE3;
			break;
		default:
			SPDK_DEBUGLOG(SPDK_LOG_NVMF, "Protection Disabled\n");
			nsdata->dps.pit = SPDK_NVME_FMT_NVM_PROTECTION_DISABLE;
			break;
		}
	}
	nsdata->noiob = spdk_bdev_get_optimal_io_boundary(bdev);
	nsdata->nmic.can_share = 1;
	nsdata->nsrescap.rescap.persist = 0; /* TODO: not supported yet */
	nsdata->nsrescap.rescap.write_exclusive = 1;
	nsdata->nsrescap.rescap.exclusive_access = 1;
	nsdata->nsrescap.rescap.write_exclusive_reg_only = 1;
	nsdata->nsrescap.rescap.exclusive_access_reg_only = 1;
	nsdata->nsrescap.rescap.write_exclusive_all_reg = 1;
	nsdata->nsrescap.rescap.exclusive_access_all_reg = 1;
	nsdata->nsrescap.rescap.ignore_existing_key = 1;

	SPDK_STATIC_ASSERT(sizeof(nsdata->nguid) == sizeof(ns->opts.nguid), "size mismatch");
	memcpy(nsdata->nguid, ns->opts.nguid, sizeof(nsdata->nguid));

	SPDK_STATIC_ASSERT(sizeof(nsdata->eui64) == sizeof(ns->opts.eui64), "size mismatch");
	memcpy(&nsdata->eui64, ns->opts.eui64, sizeof(nsdata->eui64));
}

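/* Decode the starting LBA (CDW10/CDW11) and the number of logical blocks
 * (CDW12 bits 15:00, 0's based) from an NVMe read/write-style command.
 */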
static void
nvmf_bdev_ctrlr_get_rw_params(const struct spdk_nvme_cmd *cmd, uint64_t *start_lba,
			      uint64_t *num_blocks)
{
	/* SLBA: CDW10 and CDW11 */
	*start_lba = from_le64(&cmd->cdw10);

	/* NLB: CDW12 bits 15:00, 0's based */
	*num_blocks = (from_le32(&cmd->cdw12) & 0xFFFFu) + 1;
}

static bool
nvmf_bdev_ctrlr_lba_in_range(uint64_t bdev_num_blocks, uint64_t io_start_lba,
			     uint64_t io_num_blocks)
{
	if (io_start_lba + io_num_blocks > bdev_num_blocks ||
	    io_start_lba + io_num_blocks < io_start_lba) {
		return false;
	}

	return true;
}

static void
spdk_nvmf_ctrlr_process_io_cmd_resubmit(void *arg)
{
	struct spdk_nvmf_request *req = arg;

	spdk_nvmf_ctrlr_process_io_cmd(req);
}

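/* Register a bdev io_wait entry so the request is retried once the bdev has
 * resources again; used when a bdev submission returns -ENOMEM. The supplied
 * callback resubmits the original operation.
 */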
static void
nvmf_bdev_ctrl_queue_io(struct spdk_nvmf_request *req, struct spdk_bdev *bdev,
			struct spdk_io_channel *ch, spdk_bdev_io_wait_cb cb_fn, void *cb_arg)
{
	int rc;

	req->bdev_io_wait.bdev = bdev;
	req->bdev_io_wait.cb_fn = cb_fn;
	req->bdev_io_wait.cb_arg = cb_arg;

	rc = spdk_bdev_queue_io_wait(bdev, ch, &req->bdev_io_wait);
	if (rc != 0) {
		assert(false);
	}
}

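/* Handle an NVMe Read command: validate the LBA range against the bdev size and
 * the transfer length against the SGL length, then submit a vectored bdev read.
 * On -ENOMEM the request is queued for retry; other errors complete the request
 * with an internal device error.
 */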
int
spdk_nvmf_bdev_ctrlr_read_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
			      struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
{
	uint64_t bdev_num_blocks = spdk_bdev_get_num_blocks(bdev);
	uint32_t block_size = spdk_bdev_get_block_size(bdev);
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
	uint64_t start_lba;
	uint64_t num_blocks;
	int rc;

	nvmf_bdev_ctrlr_get_rw_params(cmd, &start_lba, &num_blocks);

	if (spdk_unlikely(!nvmf_bdev_ctrlr_lba_in_range(bdev_num_blocks, start_lba, num_blocks))) {
		SPDK_ERRLOG("end of media\n");
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_LBA_OUT_OF_RANGE;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	if (spdk_unlikely(num_blocks * block_size > req->length)) {
		SPDK_ERRLOG("Read NLB %" PRIu64 " * block size %" PRIu32 " > SGL length %" PRIu32 "\n",
			    num_blocks, block_size, req->length);
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	rc = spdk_bdev_readv_blocks(desc, ch, req->iov, req->iovcnt, start_lba, num_blocks,
				    nvmf_bdev_ctrlr_complete_cmd, req);
	if (spdk_unlikely(rc)) {
		if (rc == -ENOMEM) {
			nvmf_bdev_ctrl_queue_io(req, bdev, ch, spdk_nvmf_ctrlr_process_io_cmd_resubmit, req);
			return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
		}
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
}

int
spdk_nvmf_bdev_ctrlr_write_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
			       struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
{
	uint64_t bdev_num_blocks = spdk_bdev_get_num_blocks(bdev);
	uint32_t block_size = spdk_bdev_get_block_size(bdev);
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
	uint64_t start_lba;
	uint64_t num_blocks;
	int rc;

	nvmf_bdev_ctrlr_get_rw_params(cmd, &start_lba, &num_blocks);

	if (spdk_unlikely(!nvmf_bdev_ctrlr_lba_in_range(bdev_num_blocks, start_lba, num_blocks))) {
		SPDK_ERRLOG("end of media\n");
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_LBA_OUT_OF_RANGE;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	if (spdk_unlikely(num_blocks * block_size > req->length)) {
		SPDK_ERRLOG("Write NLB %" PRIu64 " * block size %" PRIu32 " > SGL length %" PRIu32 "\n",
			    num_blocks, block_size, req->length);
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	rc = spdk_bdev_writev_blocks(desc, ch, req->iov, req->iovcnt, start_lba, num_blocks,
				     nvmf_bdev_ctrlr_complete_cmd, req);
	if (spdk_unlikely(rc)) {
		if (rc == -ENOMEM) {
			nvmf_bdev_ctrl_queue_io(req, bdev, ch, spdk_nvmf_ctrlr_process_io_cmd_resubmit, req);
			return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
		}
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
}

int
spdk_nvmf_bdev_ctrlr_write_zeroes_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
				      struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
{
	uint64_t bdev_num_blocks = spdk_bdev_get_num_blocks(bdev);
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
	uint64_t start_lba;
	uint64_t num_blocks;
	int rc;

	nvmf_bdev_ctrlr_get_rw_params(cmd, &start_lba, &num_blocks);

	if (spdk_unlikely(!nvmf_bdev_ctrlr_lba_in_range(bdev_num_blocks, start_lba, num_blocks))) {
		SPDK_ERRLOG("end of media\n");
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_LBA_OUT_OF_RANGE;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	rc = spdk_bdev_write_zeroes_blocks(desc, ch, start_lba, num_blocks,
					   nvmf_bdev_ctrlr_complete_cmd, req);
	if (spdk_unlikely(rc)) {
		if (rc == -ENOMEM) {
			nvmf_bdev_ctrl_queue_io(req, bdev, ch, spdk_nvmf_ctrlr_process_io_cmd_resubmit, req);
			return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
		}
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
}

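/* Handle an NVMe Flush command by flushing the entire bdev. Devices that do not
 * support FLUSH complete successfully (see the comment below about the volatile
 * write cache).
 */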
int
spdk_nvmf_bdev_ctrlr_flush_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
			       struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
{
	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
	int rc;

	/* For the NVMe-oF controller, SPDK always sets the volatile write
	 * cache bit to 1, so return success for block devices that do not
	 * support the FLUSH command.
	 */
	if (!spdk_bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_FLUSH)) {
		response->status.sct = SPDK_NVME_SCT_GENERIC;
		response->status.sc = SPDK_NVME_SC_SUCCESS;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	rc = spdk_bdev_flush_blocks(desc, ch, 0, spdk_bdev_get_num_blocks(bdev),
				    nvmf_bdev_ctrlr_complete_cmd, req);
	if (spdk_unlikely(rc)) {
		if (rc == -ENOMEM) {
			nvmf_bdev_ctrl_queue_io(req, bdev, ch, spdk_nvmf_ctrlr_process_io_cmd_resubmit, req);
			return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
		}
		response->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}
	return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
}

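/* Per-request context used to track the outstanding unmap I/Os generated by a
 * single Dataset Management (deallocate) command.
 */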
struct nvmf_bdev_ctrlr_unmap {
	struct spdk_nvmf_request *req;
	uint32_t count;
	struct spdk_bdev_desc *desc;
	struct spdk_bdev *bdev;
	struct spdk_io_channel *ch;
	uint32_t range_index;
};

static void
nvmf_bdev_ctrlr_unmap_cpl(struct spdk_bdev_io *bdev_io, bool success,
			  void *cb_arg)
{
	struct nvmf_bdev_ctrlr_unmap *unmap_ctx = cb_arg;
	struct spdk_nvmf_request *req = unmap_ctx->req;
	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
	int sc, sct;

	unmap_ctx->count--;

	if (response->status.sct == SPDK_NVME_SCT_GENERIC &&
	    response->status.sc == SPDK_NVME_SC_SUCCESS) {
		spdk_bdev_io_get_nvme_status(bdev_io, &sct, &sc);
		response->status.sc = sc;
		response->status.sct = sct;
	}

	if (unmap_ctx->count == 0) {
		spdk_nvmf_request_complete(req);
		free(unmap_ctx);
	}
	spdk_bdev_free_io(bdev_io);
}

static int
nvmf_bdev_ctrlr_unmap(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
		      struct spdk_io_channel *ch, struct spdk_nvmf_request *req,
		      struct nvmf_bdev_ctrlr_unmap *unmap_ctx);

static void
nvmf_bdev_ctrlr_unmap_resubmit(void *arg)
{
	struct nvmf_bdev_ctrlr_unmap *unmap_ctx = arg;
	struct spdk_nvmf_request *req = unmap_ctx->req;
	struct spdk_bdev_desc *desc = unmap_ctx->desc;
	struct spdk_bdev *bdev = unmap_ctx->bdev;
	struct spdk_io_channel *ch = unmap_ctx->ch;

	nvmf_bdev_ctrlr_unmap(bdev, desc, ch, req, unmap_ctx);
}

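/* Submit one bdev unmap per Dataset Management range. The shared unmap_ctx
 * counts outstanding unmaps; the request completes when the count reaches zero.
 * If a submission hits -ENOMEM, the request is queued and later resumed from
 * nvmf_bdev_ctrlr_unmap_resubmit at the saved range_index.
 */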
static int
nvmf_bdev_ctrlr_unmap(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
		      struct spdk_io_channel *ch, struct spdk_nvmf_request *req,
		      struct nvmf_bdev_ctrlr_unmap *unmap_ctx)
{
	uint16_t nr, i;
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
	struct spdk_nvme_dsm_range *dsm_range;
	uint64_t lba;
	uint32_t lba_count;
	int rc;

	nr = ((cmd->cdw10 & 0x000000ff) + 1);
	if (nr * sizeof(struct spdk_nvme_dsm_range) > req->length) {
		SPDK_ERRLOG("Dataset Management number of ranges > SGL length\n");
		response->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	if (unmap_ctx == NULL) {
		unmap_ctx = calloc(1, sizeof(*unmap_ctx));
		if (!unmap_ctx) {
			response->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
			return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
		}

		unmap_ctx->req = req;
		unmap_ctx->desc = desc;
		unmap_ctx->ch = ch;
		unmap_ctx->bdev = bdev;

		response->status.sct = SPDK_NVME_SCT_GENERIC;
		response->status.sc = SPDK_NVME_SC_SUCCESS;
	} else {
		unmap_ctx->count--;	/* dequeued */
	}

	dsm_range = (struct spdk_nvme_dsm_range *)req->data;
	for (i = unmap_ctx->range_index; i < nr; i++) {
		lba = dsm_range[i].starting_lba;
		lba_count = dsm_range[i].length;

		unmap_ctx->count++;

		rc = spdk_bdev_unmap_blocks(desc, ch, lba, lba_count,
					    nvmf_bdev_ctrlr_unmap_cpl, unmap_ctx);
		if (rc) {
			if (rc == -ENOMEM) {
				nvmf_bdev_ctrl_queue_io(req, bdev, ch, nvmf_bdev_ctrlr_unmap_resubmit, unmap_ctx);
				/* Unmap was not yet submitted to bdev */
				/* unmap_ctx->count will be decremented when the request is dequeued */
				return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
			}
			response->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
			unmap_ctx->count--;
			/* We can't return here - we may have to wait for any other
			 * unmaps already sent to complete */
			break;
		}
		unmap_ctx->range_index++;
	}

	if (unmap_ctx->count == 0) {
		free(unmap_ctx);
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
}

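/* Handle a Dataset Management command. Only the Deallocate attribute is acted
 * on (as an unmap); other attributes complete successfully without doing
 * anything.
 */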
int
spdk_nvmf_bdev_ctrlr_dsm_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
			     struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
{
	uint32_t attribute;
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;

	attribute = cmd->cdw11 & 0x00000007;
	if (attribute & SPDK_NVME_DSM_ATTR_DEALLOCATE) {
		return nvmf_bdev_ctrlr_unmap(bdev, desc, ch, req, NULL);
	}

	response->status.sct = SPDK_NVME_SCT_GENERIC;
	response->status.sc = SPDK_NVME_SC_SUCCESS;
	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}

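/* Pass an arbitrary NVMe I/O command through to the bdev. -ENOMEM queues the
 * request for retry; any other submission error is reported as an invalid
 * opcode.
 */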
int
spdk_nvmf_bdev_ctrlr_nvme_passthru_io(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
				      struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
{
	int rc;

	rc = spdk_bdev_nvme_io_passthru(desc, ch, &req->cmd->nvme_cmd, req->data, req->length,
					nvmf_bdev_ctrlr_complete_cmd, req);
	if (spdk_unlikely(rc)) {
		if (rc == -ENOMEM) {
			nvmf_bdev_ctrl_queue_io(req, bdev, ch, spdk_nvmf_ctrlr_process_io_cmd_resubmit, req);
			return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
		}
		req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
		req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_INVALID_OPCODE;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
}