/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation. All rights reserved.
 *   Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "nvmf_internal.h"

#include "spdk/bdev.h"
#include "spdk/endian.h"
#include "spdk/thread.h"
#include "spdk/likely.h"
#include "spdk/nvme.h"
#include "spdk/nvmf_cmd.h"
#include "spdk/nvmf_spec.h"
#include "spdk/trace.h"
#include "spdk/scsi_spec.h"
#include "spdk/string.h"
#include "spdk/util.h"

#include "spdk_internal/log.h"

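/*
 * Return true only if every namespace bdev in the subsystem supports the
 * given bdev I/O type; namespaces with no bdev attached are skipped.
 */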
static bool
nvmf_subsystem_bdev_io_type_supported(struct spdk_nvmf_subsystem *subsystem,
				      enum spdk_bdev_io_type io_type)
{
	struct spdk_nvmf_ns *ns;

	for (ns = spdk_nvmf_subsystem_get_first_ns(subsystem); ns != NULL;
	     ns = spdk_nvmf_subsystem_get_next_ns(subsystem, ns)) {
		if (ns->bdev == NULL) {
			continue;
		}

		if (!spdk_bdev_io_type_supported(ns->bdev, io_type)) {
			SPDK_DEBUGLOG(SPDK_LOG_NVMF,
				      "Subsystem %s namespace %u (%s) does not support io_type %d\n",
				      spdk_nvmf_subsystem_get_nqn(subsystem),
				      ns->opts.nsid, spdk_bdev_get_name(ns->bdev), (int)io_type);
			return false;
		}
	}

	SPDK_DEBUGLOG(SPDK_LOG_NVMF, "All devices in Subsystem %s support io_type %d\n",
		      spdk_nvmf_subsystem_get_nqn(subsystem), (int)io_type);
	return true;
}

bool
nvmf_ctrlr_dsm_supported(struct spdk_nvmf_ctrlr *ctrlr)
{
	return nvmf_subsystem_bdev_io_type_supported(ctrlr->subsys, SPDK_BDEV_IO_TYPE_UNMAP);
}

bool
nvmf_ctrlr_write_zeroes_supported(struct spdk_nvmf_ctrlr *ctrlr)
{
	return nvmf_subsystem_bdev_io_type_supported(ctrlr->subsys, SPDK_BDEV_IO_TYPE_WRITE_ZEROES);
}

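/*
 * Generic bdev I/O completion callback: translate the bdev status into the
 * NVMe completion entry. For the second command of a fused pair, the first
 * command's response is filled in and completed here as well.
 */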
static void
nvmf_bdev_ctrlr_complete_cmd(struct spdk_bdev_io *bdev_io, bool success,
			     void *cb_arg)
{
	struct spdk_nvmf_request *req = cb_arg;
	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
	int first_sc = 0, first_sct = 0, second_sc = 0, second_sct = 0;
	uint32_t cdw0 = 0;
	struct spdk_nvmf_request *first_req = req->first_fused_req;

	if (spdk_unlikely(first_req != NULL)) {
		/* fused commands - get status for both operations */
		struct spdk_nvme_cpl *fused_response = &first_req->rsp->nvme_cpl;

		spdk_bdev_io_get_nvme_fused_status(bdev_io, &cdw0, &second_sct, &second_sc,
						   &first_sct, &first_sc);
		fused_response->cdw0 = cdw0;
		fused_response->status.sc = second_sc;
		fused_response->status.sct = second_sct;

		/* The first command of the fused pair is completed here;
		 * the second is completed below with the rest of the path. */
		spdk_nvmf_request_complete(first_req);
		req->first_fused_req = NULL;
	} else {
		spdk_bdev_io_get_nvme_status(bdev_io, &cdw0, &first_sct, &first_sc);
	}

	response->cdw0 = cdw0;
	response->status.sc = first_sc;
	response->status.sct = first_sct;

	spdk_nvmf_request_complete(req);
	spdk_bdev_free_io(bdev_io);
}

static void
nvmf_bdev_ctrlr_complete_admin_cmd(struct spdk_bdev_io *bdev_io, bool success,
				   void *cb_arg)
{
	struct spdk_nvmf_request *req = cb_arg;

	if (req->cmd_cb_fn) {
		req->cmd_cb_fn(req);
	}

	nvmf_bdev_ctrlr_complete_cmd(bdev_io, success, req);
}

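/*
 * Populate the Identify Namespace data from the backing bdev: capacity
 * fields, LBA format, metadata/protection-information settings, reservation
 * capabilities, and the NGUID/EUI64 identifiers from the namespace options.
 */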
void
nvmf_bdev_ctrlr_identify_ns(struct spdk_nvmf_ns *ns, struct spdk_nvme_ns_data *nsdata,
			    bool dif_insert_or_strip)
{
	struct spdk_bdev *bdev = ns->bdev;
	uint64_t num_blocks;

	num_blocks = spdk_bdev_get_num_blocks(bdev);

	nsdata->nsze = num_blocks;
	nsdata->ncap = num_blocks;
	nsdata->nuse = num_blocks;
	nsdata->nlbaf = 0;
	nsdata->flbas.format = 0;
	nsdata->nacwu = spdk_bdev_get_acwu(bdev);
	if (!dif_insert_or_strip) {
		nsdata->lbaf[0].ms = spdk_bdev_get_md_size(bdev);
		nsdata->lbaf[0].lbads = spdk_u32log2(spdk_bdev_get_block_size(bdev));
		if (nsdata->lbaf[0].ms != 0) {
			nsdata->flbas.extended = 1;
			nsdata->mc.extended = 1;
			nsdata->mc.pointer = 0;
			nsdata->dps.md_start = spdk_bdev_is_dif_head_of_md(bdev);

			switch (spdk_bdev_get_dif_type(bdev)) {
			case SPDK_DIF_TYPE1:
				nsdata->dpc.pit1 = 1;
				nsdata->dps.pit = SPDK_NVME_FMT_NVM_PROTECTION_TYPE1;
				break;
			case SPDK_DIF_TYPE2:
				nsdata->dpc.pit2 = 1;
				nsdata->dps.pit = SPDK_NVME_FMT_NVM_PROTECTION_TYPE2;
				break;
			case SPDK_DIF_TYPE3:
				nsdata->dpc.pit3 = 1;
				nsdata->dps.pit = SPDK_NVME_FMT_NVM_PROTECTION_TYPE3;
				break;
			default:
				SPDK_DEBUGLOG(SPDK_LOG_NVMF, "Protection Disabled\n");
				nsdata->dps.pit = SPDK_NVME_FMT_NVM_PROTECTION_DISABLE;
				break;
			}
		}
	} else {
		nsdata->lbaf[0].ms = 0;
		nsdata->lbaf[0].lbads = spdk_u32log2(spdk_bdev_get_data_block_size(bdev));
	}
	nsdata->noiob = spdk_bdev_get_optimal_io_boundary(bdev);
	nsdata->nmic.can_share = 1;
	if (ns->ptpl_file != NULL) {
		nsdata->nsrescap.rescap.persist = 1;
	}
	nsdata->nsrescap.rescap.write_exclusive = 1;
	nsdata->nsrescap.rescap.exclusive_access = 1;
	nsdata->nsrescap.rescap.write_exclusive_reg_only = 1;
	nsdata->nsrescap.rescap.exclusive_access_reg_only = 1;
	nsdata->nsrescap.rescap.write_exclusive_all_reg = 1;
	nsdata->nsrescap.rescap.exclusive_access_all_reg = 1;
	nsdata->nsrescap.rescap.ignore_existing_key = 1;

	SPDK_STATIC_ASSERT(sizeof(nsdata->nguid) == sizeof(ns->opts.nguid), "size mismatch");
	memcpy(nsdata->nguid, ns->opts.nguid, sizeof(nsdata->nguid));

	SPDK_STATIC_ASSERT(sizeof(nsdata->eui64) == sizeof(ns->opts.eui64), "size mismatch");
	memcpy(&nsdata->eui64, ns->opts.eui64, sizeof(nsdata->eui64));
}

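/* Extract the LBA range of a read/write-style command from CDW10-12. */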
static void
nvmf_bdev_ctrlr_get_rw_params(const struct spdk_nvme_cmd *cmd, uint64_t *start_lba,
			      uint64_t *num_blocks)
{
	/* SLBA: CDW10 and CDW11 */
	*start_lba = from_le64(&cmd->cdw10);

	/* NLB: CDW12 bits 15:00, 0's based */
	*num_blocks = (from_le32(&cmd->cdw12) & 0xFFFFu) + 1;
}

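/*
 * Check that [io_start_lba, io_start_lba + io_num_blocks) fits within the
 * bdev; the second comparison catches unsigned overflow of the end LBA.
 */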
static bool
nvmf_bdev_ctrlr_lba_in_range(uint64_t bdev_num_blocks, uint64_t io_start_lba,
			     uint64_t io_num_blocks)
{
	if (io_start_lba + io_num_blocks > bdev_num_blocks ||
	    io_start_lba + io_num_blocks < io_start_lba) {
		return false;
	}

	return true;
}

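/* Resubmit callbacks: re-run command processing for a request that was
 * queued while waiting for bdev resources. */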
static void
nvmf_ctrlr_process_io_cmd_resubmit(void *arg)
{
	struct spdk_nvmf_request *req = arg;

	nvmf_ctrlr_process_io_cmd(req);
}

static void
nvmf_ctrlr_process_admin_cmd_resubmit(void *arg)
{
	struct spdk_nvmf_request *req = arg;

	nvmf_ctrlr_process_admin_cmd(req);
}

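/*
 * Park a request on the bdev's io_wait queue so it is resubmitted when the
 * bdev has resources again; used after a submission returns -ENOMEM.
 */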
static void
nvmf_bdev_ctrl_queue_io(struct spdk_nvmf_request *req, struct spdk_bdev *bdev,
			struct spdk_io_channel *ch, spdk_bdev_io_wait_cb cb_fn, void *cb_arg)
{
	int rc;

	req->bdev_io_wait.bdev = bdev;
	req->bdev_io_wait.cb_fn = cb_fn;
	req->bdev_io_wait.cb_arg = cb_arg;

	rc = spdk_bdev_queue_io_wait(bdev, ch, &req->bdev_io_wait);
	if (rc != 0) {
		assert(false);
	}
	req->qpair->group->stat.pending_bdev_io++;
}

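/* NVMe READ: validate the LBA range and buffer length, then issue a
 * vectored read to the bdev. */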
int
nvmf_bdev_ctrlr_read_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
			 struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
{
	uint64_t bdev_num_blocks = spdk_bdev_get_num_blocks(bdev);
	uint32_t block_size = spdk_bdev_get_block_size(bdev);
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
	uint64_t start_lba;
	uint64_t num_blocks;
	int rc;

	nvmf_bdev_ctrlr_get_rw_params(cmd, &start_lba, &num_blocks);

	if (spdk_unlikely(!nvmf_bdev_ctrlr_lba_in_range(bdev_num_blocks, start_lba, num_blocks))) {
		SPDK_ERRLOG("end of media\n");
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_LBA_OUT_OF_RANGE;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	if (spdk_unlikely(num_blocks * block_size > req->length)) {
		SPDK_ERRLOG("Read NLB %" PRIu64 " * block size %" PRIu32 " > SGL length %" PRIu32 "\n",
			    num_blocks, block_size, req->length);
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	rc = spdk_bdev_readv_blocks(desc, ch, req->iov, req->iovcnt, start_lba, num_blocks,
				    nvmf_bdev_ctrlr_complete_cmd, req);
	if (spdk_unlikely(rc)) {
		if (rc == -ENOMEM) {
			nvmf_bdev_ctrl_queue_io(req, bdev, ch, nvmf_ctrlr_process_io_cmd_resubmit, req);
			return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
		}
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
}

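/* NVMe WRITE: same validation as the read path, then a vectored write to
 * the bdev. */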
int
nvmf_bdev_ctrlr_write_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
			  struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
{
	uint64_t bdev_num_blocks = spdk_bdev_get_num_blocks(bdev);
	uint32_t block_size = spdk_bdev_get_block_size(bdev);
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
	uint64_t start_lba;
	uint64_t num_blocks;
	int rc;

	nvmf_bdev_ctrlr_get_rw_params(cmd, &start_lba, &num_blocks);

	if (spdk_unlikely(!nvmf_bdev_ctrlr_lba_in_range(bdev_num_blocks, start_lba, num_blocks))) {
		SPDK_ERRLOG("end of media\n");
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_LBA_OUT_OF_RANGE;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	if (spdk_unlikely(num_blocks * block_size > req->length)) {
		SPDK_ERRLOG("Write NLB %" PRIu64 " * block size %" PRIu32 " > SGL length %" PRIu32 "\n",
			    num_blocks, block_size, req->length);
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	rc = spdk_bdev_writev_blocks(desc, ch, req->iov, req->iovcnt, start_lba, num_blocks,
				     nvmf_bdev_ctrlr_complete_cmd, req);
	if (spdk_unlikely(rc)) {
		if (rc == -ENOMEM) {
			nvmf_bdev_ctrl_queue_io(req, bdev, ch, nvmf_ctrlr_process_io_cmd_resubmit, req);
			return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
		}
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
}

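/* NVMe COMPARE: validate the range, then compare the supplied buffer
 * against the on-media data. */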
int
nvmf_bdev_ctrlr_compare_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
			    struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
{
	uint64_t bdev_num_blocks = spdk_bdev_get_num_blocks(bdev);
	uint32_t block_size = spdk_bdev_get_block_size(bdev);
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
	uint64_t start_lba;
	uint64_t num_blocks;
	int rc;

	nvmf_bdev_ctrlr_get_rw_params(cmd, &start_lba, &num_blocks);

	if (spdk_unlikely(!nvmf_bdev_ctrlr_lba_in_range(bdev_num_blocks, start_lba, num_blocks))) {
		SPDK_ERRLOG("end of media\n");
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_LBA_OUT_OF_RANGE;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	if (spdk_unlikely(num_blocks * block_size > req->length)) {
		SPDK_ERRLOG("Compare NLB %" PRIu64 " * block size %" PRIu32 " > SGL length %" PRIu32 "\n",
			    num_blocks, block_size, req->length);
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	rc = spdk_bdev_comparev_blocks(desc, ch, req->iov, req->iovcnt, start_lba, num_blocks,
				       nvmf_bdev_ctrlr_complete_cmd, req);
	if (spdk_unlikely(rc)) {
		if (rc == -ENOMEM) {
			nvmf_bdev_ctrl_queue_io(req, bdev, ch, nvmf_ctrlr_process_io_cmd_resubmit, req);
			return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
		}
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
}

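/*
 * Fused COMPARE and WRITE: both halves must describe the same LBA range;
 * the pair is submitted to the bdev as a single compare-and-write operation.
 */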
int
nvmf_bdev_ctrlr_compare_and_write_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
				      struct spdk_io_channel *ch, struct spdk_nvmf_request *cmp_req,
				      struct spdk_nvmf_request *write_req)
{
	uint64_t bdev_num_blocks = spdk_bdev_get_num_blocks(bdev);
	uint32_t block_size = spdk_bdev_get_block_size(bdev);
	struct spdk_nvme_cmd *cmp_cmd = &cmp_req->cmd->nvme_cmd;
	struct spdk_nvme_cmd *write_cmd = &write_req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *rsp = &write_req->rsp->nvme_cpl;
	uint64_t write_start_lba, cmp_start_lba;
	uint64_t write_num_blocks, cmp_num_blocks;
	int rc;

	nvmf_bdev_ctrlr_get_rw_params(cmp_cmd, &cmp_start_lba, &cmp_num_blocks);
	nvmf_bdev_ctrlr_get_rw_params(write_cmd, &write_start_lba, &write_num_blocks);

	if (spdk_unlikely(write_start_lba != cmp_start_lba || write_num_blocks != cmp_num_blocks)) {
		SPDK_ERRLOG("Fused command start lba / num blocks mismatch\n");
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	if (spdk_unlikely(!nvmf_bdev_ctrlr_lba_in_range(bdev_num_blocks, write_start_lba,
			  write_num_blocks))) {
		SPDK_ERRLOG("end of media\n");
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_LBA_OUT_OF_RANGE;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	if (spdk_unlikely(write_num_blocks * block_size > write_req->length)) {
		SPDK_ERRLOG("Write NLB %" PRIu64 " * block size %" PRIu32 " > SGL length %" PRIu32 "\n",
			    write_num_blocks, block_size, write_req->length);
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	rc = spdk_bdev_comparev_and_writev_blocks(desc, ch, cmp_req->iov, cmp_req->iovcnt,
			write_req->iov, write_req->iovcnt, write_start_lba, write_num_blocks,
			nvmf_bdev_ctrlr_complete_cmd, write_req);
	if (spdk_unlikely(rc)) {
		if (rc == -ENOMEM) {
			nvmf_bdev_ctrl_queue_io(cmp_req, bdev, ch, nvmf_ctrlr_process_io_cmd_resubmit, cmp_req);
			nvmf_bdev_ctrl_queue_io(write_req, bdev, ch, nvmf_ctrlr_process_io_cmd_resubmit, write_req);
			return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
		}
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
}

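/* NVMe WRITE ZEROES: no data buffer to validate, only the LBA range. */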
int
nvmf_bdev_ctrlr_write_zeroes_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
				 struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
{
	uint64_t bdev_num_blocks = spdk_bdev_get_num_blocks(bdev);
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
	uint64_t start_lba;
	uint64_t num_blocks;
	int rc;

	nvmf_bdev_ctrlr_get_rw_params(cmd, &start_lba, &num_blocks);

	if (spdk_unlikely(!nvmf_bdev_ctrlr_lba_in_range(bdev_num_blocks, start_lba, num_blocks))) {
		SPDK_ERRLOG("end of media\n");
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_LBA_OUT_OF_RANGE;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	rc = spdk_bdev_write_zeroes_blocks(desc, ch, start_lba, num_blocks,
					   nvmf_bdev_ctrlr_complete_cmd, req);
	if (spdk_unlikely(rc)) {
		if (rc == -ENOMEM) {
			nvmf_bdev_ctrl_queue_io(req, bdev, ch, nvmf_ctrlr_process_io_cmd_resubmit, req);
			return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
		}
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
}

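/* NVMe FLUSH: flush the whole bdev, or succeed immediately if the bdev has
 * no flush support. */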
int
nvmf_bdev_ctrlr_flush_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
			  struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
{
	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
	int rc;

	/* An NVMe-oF controller always reports the volatile write cache
	 * as enabled, so return success for block devices that do not
	 * support the FLUSH command.
	 */
	if (!spdk_bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_FLUSH)) {
		response->status.sct = SPDK_NVME_SCT_GENERIC;
		response->status.sc = SPDK_NVME_SC_SUCCESS;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	rc = spdk_bdev_flush_blocks(desc, ch, 0, spdk_bdev_get_num_blocks(bdev),
				    nvmf_bdev_ctrlr_complete_cmd, req);
	if (spdk_unlikely(rc)) {
		if (rc == -ENOMEM) {
			nvmf_bdev_ctrl_queue_io(req, bdev, ch, nvmf_ctrlr_process_io_cmd_resubmit, req);
			return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
		}
		response->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}
	return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
}

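/* Tracks a multi-range Dataset Management deallocate while its unmaps are
 * in flight. */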
struct nvmf_bdev_ctrlr_unmap {
	struct spdk_nvmf_request *req;
	uint32_t count;
	struct spdk_bdev_desc *desc;
	struct spdk_bdev *bdev;
	struct spdk_io_channel *ch;
	uint32_t range_index;
};

static void
nvmf_bdev_ctrlr_unmap_cpl(struct spdk_bdev_io *bdev_io, bool success,
			  void *cb_arg)
{
	struct nvmf_bdev_ctrlr_unmap *unmap_ctx = cb_arg;
	struct spdk_nvmf_request *req = unmap_ctx->req;
	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
	int sc, sct;
	uint32_t cdw0;

	unmap_ctx->count--;

	if (response->status.sct == SPDK_NVME_SCT_GENERIC &&
	    response->status.sc == SPDK_NVME_SC_SUCCESS) {
		spdk_bdev_io_get_nvme_status(bdev_io, &cdw0, &sct, &sc);
		response->cdw0 = cdw0;
		response->status.sc = sc;
		response->status.sct = sct;
	}

	if (unmap_ctx->count == 0) {
		spdk_nvmf_request_complete(req);
		free(unmap_ctx);
	}
	spdk_bdev_free_io(bdev_io);
}

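/* Forward declaration: the resubmit callback below re-enters
 * nvmf_bdev_ctrlr_unmap with the context saved when it hit -ENOMEM. */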
static int
nvmf_bdev_ctrlr_unmap(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
		      struct spdk_io_channel *ch, struct spdk_nvmf_request *req,
		      struct nvmf_bdev_ctrlr_unmap *unmap_ctx);

static void
nvmf_bdev_ctrlr_unmap_resubmit(void *arg)
{
	struct nvmf_bdev_ctrlr_unmap *unmap_ctx = arg;
	struct spdk_nvmf_request *req = unmap_ctx->req;
	struct spdk_bdev_desc *desc = unmap_ctx->desc;
	struct spdk_bdev *bdev = unmap_ctx->bdev;
	struct spdk_io_channel *ch = unmap_ctx->ch;

	nvmf_bdev_ctrlr_unmap(bdev, desc, ch, req, unmap_ctx);
}

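/*
 * Issue one bdev unmap per DSM range, counting them in unmap_ctx; the
 * request completes once the count reaches zero. On -ENOMEM the command is
 * queued and later resumed from range_index with the same context.
 */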
static int
nvmf_bdev_ctrlr_unmap(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
		      struct spdk_io_channel *ch, struct spdk_nvmf_request *req,
		      struct nvmf_bdev_ctrlr_unmap *unmap_ctx)
{
	uint16_t nr, i;
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
	struct spdk_nvme_dsm_range *dsm_range;
	uint64_t lba;
	uint32_t lba_count;
	int rc;

	nr = cmd->cdw10_bits.dsm.nr + 1;
	if (nr * sizeof(struct spdk_nvme_dsm_range) > req->length) {
		SPDK_ERRLOG("Dataset Management number of ranges > SGL length\n");
		response->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	if (unmap_ctx == NULL) {
		unmap_ctx = calloc(1, sizeof(*unmap_ctx));
		if (!unmap_ctx) {
			response->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
			return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
		}

		unmap_ctx->req = req;
		unmap_ctx->desc = desc;
		unmap_ctx->ch = ch;
		unmap_ctx->bdev = bdev;

		response->status.sct = SPDK_NVME_SCT_GENERIC;
		response->status.sc = SPDK_NVME_SC_SUCCESS;
	} else {
		unmap_ctx->count--;	/* dequeued */
	}

	dsm_range = (struct spdk_nvme_dsm_range *)req->data;
	for (i = unmap_ctx->range_index; i < nr; i++) {
		lba = dsm_range[i].starting_lba;
		lba_count = dsm_range[i].length;

		unmap_ctx->count++;

		rc = spdk_bdev_unmap_blocks(desc, ch, lba, lba_count,
					    nvmf_bdev_ctrlr_unmap_cpl, unmap_ctx);
		if (rc) {
			if (rc == -ENOMEM) {
				nvmf_bdev_ctrl_queue_io(req, bdev, ch, nvmf_bdev_ctrlr_unmap_resubmit, unmap_ctx);
				/* Unmap was not yet submitted to the bdev;
				 * unmap_ctx->count will be decremented when the request is dequeued. */
				return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
			}
			response->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
			unmap_ctx->count--;
			/* We can't return here - we may have to wait for any other
			 * unmaps already sent to complete */
			break;
		}
		unmap_ctx->range_index++;
	}

	if (unmap_ctx->count == 0) {
		free(unmap_ctx);
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
}

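/* Dataset Management: only the Attribute - Deallocate (AD) bit triggers
 * unmaps; otherwise the command completes immediately with success. */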
int
nvmf_bdev_ctrlr_dsm_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
			struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
{
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;

	if (cmd->cdw11_bits.dsm.ad) {
		return nvmf_bdev_ctrlr_unmap(bdev, desc, ch, req, NULL);
	}

	response->status.sct = SPDK_NVME_SCT_GENERIC;
	response->status.sc = SPDK_NVME_SC_SUCCESS;
	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}

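/* Forward an arbitrary NVMe I/O command to the bdev's NVMe passthru
 * interface. */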
int
nvmf_bdev_ctrlr_nvme_passthru_io(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
				 struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
{
	int rc;

	rc = spdk_bdev_nvme_io_passthru(desc, ch, &req->cmd->nvme_cmd, req->data, req->length,
					nvmf_bdev_ctrlr_complete_cmd, req);
	if (spdk_unlikely(rc)) {
		if (rc == -ENOMEM) {
			nvmf_bdev_ctrl_queue_io(req, bdev, ch, nvmf_ctrlr_process_io_cmd_resubmit, req);
			return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
		}
		req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
		req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_INVALID_OPCODE;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
}

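/*
 * Forward an NVMe admin command to the bdev; cb_fn, when set, runs from the
 * completion callback before the request itself is completed.
 */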
int
spdk_nvmf_bdev_ctrlr_nvme_passthru_admin(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
		struct spdk_io_channel *ch, struct spdk_nvmf_request *req,
		spdk_nvmf_nvme_passthru_cmd_cb cb_fn)
{
	int rc;

	req->cmd_cb_fn = cb_fn;

	rc = spdk_bdev_nvme_admin_passthru(desc, ch, &req->cmd->nvme_cmd, req->data, req->length,
					   nvmf_bdev_ctrlr_complete_admin_cmd, req);
	if (spdk_unlikely(rc)) {
		if (rc == -ENOMEM) {
			nvmf_bdev_ctrl_queue_io(req, bdev, ch, nvmf_ctrlr_process_admin_cmd_resubmit, req);
			return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
		}
		req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
		req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
}

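/* Abort completion: clearing cdw0 bit 0 reports that the command was
 * aborted. */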
static void
nvmf_bdev_ctrlr_complete_abort_cmd(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	struct spdk_nvmf_request *req = cb_arg;

	if (success) {
		req->rsp->nvme_cpl.cdw0 &= ~1U;
	}

	spdk_nvmf_request_complete(req);
	spdk_bdev_free_io(bdev_io);
}

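/*
 * Ask the bdev layer to abort req_to_abort. cdw0 bit 0 is asserted on entry
 * (abort not performed) and cleared in the completion callback on success.
 */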
int
spdk_nvmf_bdev_ctrlr_abort_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
			       struct spdk_io_channel *ch, struct spdk_nvmf_request *req,
			       struct spdk_nvmf_request *req_to_abort)
{
	int rc;

	assert((req->rsp->nvme_cpl.cdw0 & 1U) != 0);

	rc = spdk_bdev_abort(desc, ch, req_to_abort, nvmf_bdev_ctrlr_complete_abort_cmd, req);
	if (spdk_likely(rc == 0)) {
		return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
	} else if (rc == -ENOMEM) {
		nvmf_bdev_ctrl_queue_io(req, bdev, ch, nvmf_ctrlr_process_admin_cmd_resubmit, req);
		return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
	} else {
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}
}

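/*
 * Build a DIF context for a command against a DIF-enabled bdev. Returns
 * false if the bdev has no metadata or the context cannot be initialized.
 */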
bool
nvmf_bdev_ctrlr_get_dif_ctx(struct spdk_bdev *bdev, struct spdk_nvme_cmd *cmd,
			    struct spdk_dif_ctx *dif_ctx)
{
	uint32_t init_ref_tag, dif_check_flags = 0;
	int rc;

	if (spdk_bdev_get_md_size(bdev) == 0) {
		return false;
	}

	/* Initial Reference Tag is the lower 32 bits of the start LBA. */
	init_ref_tag = (uint32_t)from_le64(&cmd->cdw10);

	if (spdk_bdev_is_dif_check_enabled(bdev, SPDK_DIF_CHECK_TYPE_REFTAG)) {
		dif_check_flags |= SPDK_DIF_FLAGS_REFTAG_CHECK;
	}

	if (spdk_bdev_is_dif_check_enabled(bdev, SPDK_DIF_CHECK_TYPE_GUARD)) {
		dif_check_flags |= SPDK_DIF_FLAGS_GUARD_CHECK;
	}

	rc = spdk_dif_ctx_init(dif_ctx,
			       spdk_bdev_get_block_size(bdev),
			       spdk_bdev_get_md_size(bdev),
			       spdk_bdev_is_md_interleaved(bdev),
			       spdk_bdev_is_dif_head_of_md(bdev),
			       spdk_bdev_get_dif_type(bdev),
			       dif_check_flags,
			       init_ref_tag, 0, 0, 0, 0);

	return (rc == 0) ? true : false;
}