/*-
 * BSD LICENSE
 *
 * Copyright (c) Intel Corporation.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "nvme_internal.h"

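/*
 * Submit a raw I/O command that carries no data payload; the request is
 * built with an empty payload so no PRP/SGL fields are filled in. Only
 * supported on the PCIe transport.
 */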
int
spdk_nvme_ctrlr_io_cmd_raw_no_payload_build(struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_qpair *qpair,
		struct spdk_nvme_cmd *cmd,
		spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct nvme_payload payload;

	if (ctrlr->trid.trtype != SPDK_NVME_TRANSPORT_PCIE) {
		return -EINVAL;
	}

	memset(&payload, 0, sizeof(payload));
	req = nvme_allocate_request(qpair, &payload, 0, cb_fn, cb_arg);

	if (req == NULL) {
		return -ENOMEM;
	}

	memcpy(&req->cmd, cmd, sizeof(req->cmd));

	return nvme_qpair_submit_request(qpair, req);
}

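/*
 * Submit a raw I/O command with a contiguous data buffer. The
 * caller-built spdk_nvme_cmd is copied into the request unchanged.
 */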
int
spdk_nvme_ctrlr_cmd_io_raw(struct spdk_nvme_ctrlr *ctrlr,
			   struct spdk_nvme_qpair *qpair,
			   struct spdk_nvme_cmd *cmd,
			   void *buf, uint32_t len,
			   spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;

	req = nvme_allocate_request_contig(qpair, buf, len, cb_fn, cb_arg);

	if (req == NULL) {
		return -ENOMEM;
	}

	memcpy(&req->cmd, cmd, sizeof(req->cmd));

	return nvme_qpair_submit_request(qpair, req);
}

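/*
 * Same as spdk_nvme_ctrlr_cmd_io_raw(), but additionally takes a
 * metadata buffer for namespaces formatted with metadata.
 */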
int
spdk_nvme_ctrlr_cmd_io_raw_with_md(struct spdk_nvme_ctrlr *ctrlr,
				   struct spdk_nvme_qpair *qpair,
				   struct spdk_nvme_cmd *cmd,
				   void *buf, uint32_t len, void *md_buf,
				   spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct nvme_payload payload;

	payload = NVME_PAYLOAD_CONTIG(buf, md_buf);

	req = nvme_allocate_request(qpair, &payload, len, cb_fn, cb_arg);
	if (req == NULL) {
		return -ENOMEM;
	}

	memcpy(&req->cmd, cmd, sizeof(req->cmd));

	return nvme_qpair_submit_request(qpair, req);
}

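/*
 * Submit a caller-built command on the admin queue. The controller lock
 * serializes admin submissions across threads.
 *
 * A minimal usage sketch (the vendor-specific opcode 0xC0 and the names
 * buf, len, my_cb, my_ctx are made-up examples, not part of SPDK):
 *
 *	struct spdk_nvme_cmd cmd = {};
 *	cmd.opc = 0xC0;
 *	spdk_nvme_ctrlr_cmd_admin_raw(ctrlr, &cmd, buf, len, my_cb, my_ctx);
 */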
int
spdk_nvme_ctrlr_cmd_admin_raw(struct spdk_nvme_ctrlr *ctrlr,
			      struct spdk_nvme_cmd *cmd,
			      void *buf, uint32_t len,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	int rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_contig(ctrlr->adminq, buf, len, cb_fn, cb_arg);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	memcpy(&req->cmd, cmd, sizeof(req->cmd));

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);

	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	return rc;
}

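/*
 * Issue an Identify command. CDW10 packs the CNS value in bits 7:0 and
 * the controller ID (CNTID) in bits 31:16, per the NVMe specification.
 */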
int
nvme_ctrlr_cmd_identify(struct spdk_nvme_ctrlr *ctrlr, uint8_t cns, uint16_t cntid, uint32_t nsid,
			void *payload, size_t payload_size,
			spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;

	req = nvme_allocate_request_user_copy(ctrlr->adminq,
					      payload, payload_size,
					      cb_fn, cb_arg, false);
	if (req == NULL) {
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_IDENTIFY;
	cmd->cdw10 = cns | ((uint32_t)cntid << 16);
	cmd->nsid = nsid;

	return nvme_ctrlr_submit_admin_request(ctrlr, req);
}

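/*
 * Namespace Attachment with the controller-attach select value; the
 * payload lists the controllers to attach the namespace to.
 */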
int
nvme_ctrlr_cmd_attach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
			 struct spdk_nvme_ctrlr_list *payload, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;
	int rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_user_copy(ctrlr->adminq,
					      payload, sizeof(struct spdk_nvme_ctrlr_list),
					      cb_fn, cb_arg, true);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_NS_ATTACHMENT;
	cmd->nsid = nsid;
	cmd->cdw10 = SPDK_NVME_NS_CTRLR_ATTACH;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);

	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	return rc;
}

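/* As above, but with the controller-detach select value. */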
int
nvme_ctrlr_cmd_detach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
			 struct spdk_nvme_ctrlr_list *payload, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;
	int rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_user_copy(ctrlr->adminq,
					      payload, sizeof(struct spdk_nvme_ctrlr_list),
					      cb_fn, cb_arg, true);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_NS_ATTACHMENT;
	cmd->nsid = nsid;
	cmd->cdw10 = SPDK_NVME_NS_CTRLR_DETACH;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);

	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	return rc;
}

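/*
 * Create a namespace described by *payload (Namespace Management, create
 * select value). No NSID is set here: on success the controller assigns
 * one and returns it in Dword 0 of the completion.
 */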
int
nvme_ctrlr_cmd_create_ns(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_ns_data *payload,
			 spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;
	int rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_user_copy(ctrlr->adminq,
					      payload, sizeof(struct spdk_nvme_ns_data),
					      cb_fn, cb_arg, true);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_NS_MANAGEMENT;
	cmd->cdw10 = SPDK_NVME_NS_MANAGEMENT_CREATE;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);

	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	return rc;
}

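/*
 * Delete the given namespace (Namespace Management, delete select
 * value). No data payload is transferred.
 */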
int
nvme_ctrlr_cmd_delete_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, spdk_nvme_cmd_cb cb_fn,
			 void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;
	int rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_null(ctrlr->adminq, cb_fn, cb_arg);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_NS_MANAGEMENT;
	cmd->cdw10 = SPDK_NVME_NS_MANAGEMENT_DELETE;
	cmd->nsid = nsid;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);

	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	return rc;
}

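/*
 * Doorbell Buffer Config, intended for emulated controllers: prp1 is the
 * address of the shadow doorbell buffer and prp2 the address of the
 * EventIdx buffer, both memory-page aligned.
 */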
int
nvme_ctrlr_cmd_doorbell_buffer_config(struct spdk_nvme_ctrlr *ctrlr, uint64_t prp1, uint64_t prp2,
				      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;
	int rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_null(ctrlr->adminq, cb_fn, cb_arg);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_DOORBELL_BUFFER_CONFIG;
	cmd->dptr.prp.prp1 = prp1;
	cmd->dptr.prp.prp2 = prp2;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);

	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	return rc;
}

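/*
 * Format NVM. The first dword of the spdk_nvme_format structure (LBA
 * format, metadata settings, protection information, secure erase
 * settings) maps directly onto CDW10.
 */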
int
nvme_ctrlr_cmd_format(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, struct spdk_nvme_format *format,
		      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;
	int rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_null(ctrlr->adminq, cb_fn, cb_arg);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_FORMAT_NVM;
	cmd->nsid = nsid;
	memcpy(&cmd->cdw10, format, sizeof(uint32_t));

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

	return rc;
}

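/*
 * Set Features: CDW10 carries the feature identifier; cdw11/cdw12 and
 * the optional payload are feature-specific.
 */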
int
spdk_nvme_ctrlr_cmd_set_feature(struct spdk_nvme_ctrlr *ctrlr, uint8_t feature,
				uint32_t cdw11, uint32_t cdw12, void *payload, uint32_t payload_size,
				spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;
	int rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_user_copy(ctrlr->adminq, payload, payload_size, cb_fn, cb_arg,
					      true);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd->cdw10 = feature;
	cmd->cdw11 = cdw11;
	cmd->cdw12 = cdw12;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

	return rc;
}

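/*
 * Get Features: CDW10 carries the feature identifier; cdw11 and the
 * optional payload are feature-specific. The feature value is returned
 * in Dword 0 of the completion.
 */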
int
spdk_nvme_ctrlr_cmd_get_feature(struct spdk_nvme_ctrlr *ctrlr, uint8_t feature,
				uint32_t cdw11, void *payload, uint32_t payload_size,
				spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;
	int rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_user_copy(ctrlr->adminq, payload, payload_size, cb_fn, cb_arg,
					      false);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_GET_FEATURES;
	cmd->cdw10 = feature;
	cmd->cdw11 = cdw11;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

	return rc;
}

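/* Namespace-specific variant of spdk_nvme_ctrlr_cmd_get_feature(). */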
int
spdk_nvme_ctrlr_cmd_get_feature_ns(struct spdk_nvme_ctrlr *ctrlr, uint8_t feature,
				   uint32_t cdw11, void *payload,
				   uint32_t payload_size, spdk_nvme_cmd_cb cb_fn,
				   void *cb_arg, uint32_t ns_id)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;
	int rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_user_copy(ctrlr->adminq, payload, payload_size, cb_fn, cb_arg,
					      false);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_GET_FEATURES;
	cmd->cdw10 = feature;
	cmd->cdw11 = cdw11;
	cmd->nsid = ns_id;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

	return rc;
}

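/* Namespace-specific variant of spdk_nvme_ctrlr_cmd_set_feature(). */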
int
spdk_nvme_ctrlr_cmd_set_feature_ns(struct spdk_nvme_ctrlr *ctrlr, uint8_t feature,
				   uint32_t cdw11, uint32_t cdw12, void *payload,
				   uint32_t payload_size, spdk_nvme_cmd_cb cb_fn,
				   void *cb_arg, uint32_t ns_id)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;
	int rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_user_copy(ctrlr->adminq, payload, payload_size, cb_fn, cb_arg,
					      true);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd->cdw10 = feature;
	cmd->cdw11 = cdw11;
	cmd->cdw12 = cdw12;
	cmd->nsid = ns_id;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

	return rc;
}

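/*
 * Set the Number of Queues feature. Both queue counts are 0's-based in
 * CDW11 (NSQR in bits 15:0, NCQR in bits 31:16), hence the
 * num_queues - 1 encoding.
 */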
int
nvme_ctrlr_cmd_set_num_queues(struct spdk_nvme_ctrlr *ctrlr,
			      uint32_t num_queues, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	uint32_t cdw11;

	cdw11 = ((num_queues - 1) << 16) | (num_queues - 1);
	return spdk_nvme_ctrlr_cmd_set_feature(ctrlr, SPDK_NVME_FEAT_NUMBER_OF_QUEUES, cdw11, 0,
					       NULL, 0, cb_fn, cb_arg);
}

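/* Query the Number of Queues feature; the counts are returned in Dword 0
 * of the completion. */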
int
nvme_ctrlr_cmd_get_num_queues(struct spdk_nvme_ctrlr *ctrlr,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return spdk_nvme_ctrlr_cmd_get_feature(ctrlr, SPDK_NVME_FEAT_NUMBER_OF_QUEUES, 0, NULL, 0,
					       cb_fn, cb_arg);
}

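/*
 * Set the Asynchronous Event Configuration feature; the union's raw
 * 32-bit value maps directly onto CDW11.
 */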
int
nvme_ctrlr_cmd_set_async_event_config(struct spdk_nvme_ctrlr *ctrlr,
				      union spdk_nvme_feat_async_event_configuration config, spdk_nvme_cmd_cb cb_fn,
				      void *cb_arg)
{
	uint32_t cdw11;

	cdw11 = config.raw;
	return spdk_nvme_ctrlr_cmd_set_feature(ctrlr, SPDK_NVME_FEAT_ASYNC_EVENT_CONFIGURATION, cdw11, 0,
					       NULL, 0,
					       cb_fn, cb_arg);
}

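/*
 * Set the Host Identifier feature. CDW11 bit 0 (EXHID) selects the
 * 128-bit extended host identifier; only 8- and 16-byte IDs are valid.
 */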
int
nvme_ctrlr_cmd_set_host_id(struct spdk_nvme_ctrlr *ctrlr, void *host_id, uint32_t host_id_size,
			   spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	uint32_t cdw11;

	if (host_id_size == 16) {
		/* 128-bit extended host identifier */
		cdw11 = 1;
	} else if (host_id_size == 8) {
		/* 64-bit host identifier */
		cdw11 = 0;
	} else {
		SPDK_ERRLOG("Invalid host ID size %u\n", host_id_size);
		return -EINVAL;
	}

	return spdk_nvme_ctrlr_cmd_set_feature(ctrlr, SPDK_NVME_FEAT_HOST_IDENTIFIER, cdw11, 0,
					       host_id, host_id_size, cb_fn, cb_arg);
}

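/*
 * Get Log Page. The transfer length is a 0's-based dword count split
 * across CDW10 (NUMDL) and CDW11 (NUMDU). The byte offset, which must be
 * dword-aligned, is split across CDW12/CDW13 (LPOL/LPOU) and requires
 * controller support for extended log page data (LPA.EDLP).
 */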
int
spdk_nvme_ctrlr_cmd_get_log_page(struct spdk_nvme_ctrlr *ctrlr, uint8_t log_page,
				 uint32_t nsid, void *payload, uint32_t payload_size,
				 uint64_t offset, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;
	uint32_t numd, numdl, numdu;
	uint32_t lpol, lpou;
	int rc;

	if (payload_size == 0) {
		return -EINVAL;
	}

	if (offset & 3) {
		return -EINVAL;
	}

	numd = payload_size / sizeof(uint32_t) - 1u;
	numdl = numd & 0xFFFFu;
	numdu = (numd >> 16) & 0xFFFFu;

	lpol = (uint32_t)offset;
	lpou = (uint32_t)(offset >> 32);

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);

	if (offset && !ctrlr->cdata.lpa.edlp) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -EINVAL;
	}

	req = nvme_allocate_request_user_copy(ctrlr->adminq,
					      payload, payload_size, cb_fn, cb_arg, false);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_GET_LOG_PAGE;
	cmd->nsid = nsid;
	cmd->cdw10 = numdl << 16;
	cmd->cdw10 |= log_page;
	cmd->cdw11 = numdu;
	cmd->cdw12 = lpol;
	cmd->cdw13 = lpou;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

	return rc;
}

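/*
 * Completion handler for Abort commands. Aborts beyond the controller's
 * Abort Command Limit are queued; each time one completes, try to submit
 * the next queued abort before invoking the user's callback.
 */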
static void
spdk_nvme_ctrlr_cmd_abort_cpl(void *ctx, const struct spdk_nvme_cpl *cpl)
{
	struct nvme_request *req, *next, *tmp;
	struct spdk_nvme_ctrlr *ctrlr;
	int rc;

	req = ctx;
	ctrlr = (struct spdk_nvme_ctrlr *)req->user_buffer;

	ctrlr->outstanding_aborts--;
	STAILQ_FOREACH_SAFE(next, &ctrlr->queued_aborts, stailq, tmp) {
		STAILQ_REMOVE_HEAD(&ctrlr->queued_aborts, stailq);
		ctrlr->outstanding_aborts++;
		rc = nvme_ctrlr_submit_admin_request(ctrlr, next);
		if (rc < 0) {
			SPDK_ERRLOG("Failed to submit queued abort.\n");
			memset(&next->cpl, 0, sizeof(next->cpl));
			next->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
			next->cpl.status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
			next->cpl.status.dnr = 1;
			/* Complete the queued abort with the synthetic error
			 * status built in next->cpl above. */
			nvme_complete_request(next->cb_fn, next->cb_arg, next->qpair, next, &next->cpl);
			nvme_free_request(next);
		} else {
			/* If the first abort succeeds, stop iterating. */
			break;
		}
	}

	req->user_cb_fn(req->user_cb_arg, cpl);
}

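/*
 * Abort the command identified by (sqid, cid). CDW10 packs the command
 * ID in bits 31:16 and the submission queue ID in bits 15:0. If the
 * number of outstanding aborts has reached the controller's Abort
 * Command Limit (ACL), the request is queued and submitted later from
 * the completion handler above.
 */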
int
spdk_nvme_ctrlr_cmd_abort(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair,
			  uint16_t cid, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	int rc;
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;
	uint16_t sqid;

	if (qpair) {
		sqid = qpair->id;
	} else {
		sqid = ctrlr->adminq->id; /* 0 */
	}

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_null(ctrlr->adminq, spdk_nvme_ctrlr_cmd_abort_cpl, NULL);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}
	req->cb_arg = req;
	req->user_cb_fn = cb_fn;
	req->user_cb_arg = cb_arg;
	req->user_buffer = ctrlr; /* This is a hack to get to the ctrlr in the
				   * completion handler. */

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_ABORT;
	/* Cast before shifting so a CID with bit 15 set is not shifted into
	 * the sign bit of a promoted int. */
	cmd->cdw10 = ((uint32_t)cid << 16) | sqid;

	if (ctrlr->outstanding_aborts >= ctrlr->cdata.acl) {
		STAILQ_INSERT_TAIL(&ctrlr->queued_aborts, req, stailq);
		rc = 0;
	} else {
		ctrlr->outstanding_aborts++;
		rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	}

	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	return rc;
}

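/*
 * Firmware Commit: the first dword of the spdk_nvme_fw_commit structure
 * (firmware slot and commit action) maps directly onto CDW10.
 */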
int
nvme_ctrlr_cmd_fw_commit(struct spdk_nvme_ctrlr *ctrlr,
			 const struct spdk_nvme_fw_commit *fw_commit,
			 spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;
	int rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_null(ctrlr->adminq, cb_fn, cb_arg);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_FIRMWARE_COMMIT;
	memcpy(&cmd->cdw10, fw_commit, sizeof(uint32_t));

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

	return rc;
}

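/*
 * Firmware Image Download. CDW10 holds the 0's-based dword count of this
 * chunk and CDW11 the dword offset into the image, so both size and
 * offset must be multiples of four bytes.
 */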
int
nvme_ctrlr_cmd_fw_image_download(struct spdk_nvme_ctrlr *ctrlr,
				 uint32_t size, uint32_t offset, void *payload,
				 spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;
	int rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_user_copy(ctrlr->adminq, payload, size, cb_fn, cb_arg, true);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_FIRMWARE_IMAGE_DOWNLOAD;
	cmd->cdw10 = (size >> 2) - 1;
	cmd->cdw11 = offset >> 2;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

	return rc;
}

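/*
 * Security Receive: CDW10 packs the security protocol (SECP, bits
 * 31:24), the protocol-specific field (SPSP, bits 23:8) and the NVMe
 * security specific field (NSSF, bits 7:0); CDW11 is the allocation
 * length.
 */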
int
nvme_ctrlr_cmd_security_receive(struct spdk_nvme_ctrlr *ctrlr, uint8_t secp,
				uint16_t spsp, uint8_t nssf, void *payload,
				uint32_t payload_size, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;
	int rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_user_copy(ctrlr->adminq, payload, payload_size,
					      cb_fn, cb_arg, false);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_SECURITY_RECEIVE;
	cmd->cdw10 = ((uint32_t)secp << 24) | ((uint32_t)spsp << 8) | ((uint32_t)nssf);
	cmd->cdw11 = payload_size;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

	return rc;
}

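/*
 * Security Send: same CDW10 packing as Security Receive; CDW11 is the
 * transfer length of the payload sent to the controller.
 */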
int
nvme_ctrlr_cmd_security_send(struct spdk_nvme_ctrlr *ctrlr, uint8_t secp,
			     uint16_t spsp, uint8_t nssf, void *payload,
			     uint32_t payload_size, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;
	int rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_user_copy(ctrlr->adminq, payload, payload_size,
					      cb_fn, cb_arg, true);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_SECURITY_SEND;
	cmd->cdw10 = ((uint32_t)secp << 24) | ((uint32_t)spsp << 8) | ((uint32_t)nssf);
	cmd->cdw11 = payload_size;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

	return rc;
}

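/*
 * Sanitize: the first dword of the spdk_nvme_sanitize structure
 * (sanitize action and related flags) maps onto CDW10; cdw11 supplies
 * the overwrite pattern when the overwrite action is selected.
 */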
int
nvme_ctrlr_cmd_sanitize(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
			struct spdk_nvme_sanitize *sanitize, uint32_t cdw11,
			spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;
	int rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_null(ctrlr->adminq, cb_fn, cb_arg);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_SANITIZE;
	cmd->nsid = nsid;
	cmd->cdw11 = cdw11;
	memcpy(&cmd->cdw10, sanitize, sizeof(cmd->cdw10));

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

	return rc;
}