]>
Commit | Line | Data |
---|---|---|
7c673cae FG |
1 | /*- |
2 | * BSD LICENSE | |
3 | * | |
4 | * Copyright (c) Intel Corporation. | |
5 | * All rights reserved. | |
6 | * | |
7 | * Redistribution and use in source and binary forms, with or without | |
8 | * modification, are permitted provided that the following conditions | |
9 | * are met: | |
10 | * | |
11 | * * Redistributions of source code must retain the above copyright | |
12 | * notice, this list of conditions and the following disclaimer. | |
13 | * * Redistributions in binary form must reproduce the above copyright | |
14 | * notice, this list of conditions and the following disclaimer in | |
15 | * the documentation and/or other materials provided with the | |
16 | * distribution. | |
17 | * * Neither the name of Intel Corporation nor the names of its | |
18 | * contributors may be used to endorse or promote products derived | |
19 | * from this software without specific prior written permission. | |
20 | * | |
21 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | |
22 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | |
23 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | |
24 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | |
25 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | |
26 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | |
27 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | |
28 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | |
29 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | |
30 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | |
31 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |
32 | */ | |
33 | ||
11fdf7f2 | 34 | #include "spdk/stdinc.h" |
7c673cae | 35 | |
11fdf7f2 TL |
36 | #include "spdk/bdev.h" |
37 | #include "spdk/bit_array.h" | |
7c673cae | 38 | #include "spdk/conf.h" |
11fdf7f2 | 39 | #include "spdk/thread.h" |
7c673cae FG |
40 | #include "spdk/nvmf.h" |
41 | #include "spdk/trace.h" | |
11fdf7f2 TL |
42 | #include "spdk/endian.h" |
43 | #include "spdk/string.h" | |
7c673cae FG |
44 | |
45 | #include "spdk_internal/log.h" | |
46 | ||
11fdf7f2 | 47 | #include "nvmf_internal.h" |
7c673cae FG |
48 | #include "transport.h" |
49 | ||
11fdf7f2 | 50 | SPDK_LOG_REGISTER_COMPONENT("nvmf", SPDK_LOG_NVMF) |
7c673cae | 51 | |
11fdf7f2 TL |
52 | #define SPDK_NVMF_DEFAULT_MAX_QUEUE_DEPTH 128 |
53 | #define SPDK_NVMF_DEFAULT_MAX_QPAIRS_PER_CTRLR 64 | |
54 | #define SPDK_NVMF_DEFAULT_IN_CAPSULE_DATA_SIZE 4096 | |
55 | #define SPDK_NVMF_DEFAULT_MAX_IO_SIZE 131072 | |
56 | #define SPDK_NVMF_DEFAULT_MAX_SUBSYSTEMS 1024 | |
57 | #define SPDK_NVMF_DEFAULT_IO_UNIT_SIZE 131072 | |
7c673cae | 58 | |
11fdf7f2 TL |
59 | typedef void (*nvmf_qpair_disconnect_cpl)(void *ctx, int status); |
60 | static void spdk_nvmf_tgt_destroy_poll_group(void *io_device, void *ctx_buf); | |
7c673cae | 61 | |
11fdf7f2 TL |
62 | /* supplied to a single call to nvmf_qpair_disconnect */ |
63 | struct nvmf_qpair_disconnect_ctx { | |
64 | struct spdk_nvmf_qpair *qpair; | |
65 | struct spdk_nvmf_ctrlr *ctrlr; | |
66 | nvmf_qpair_disconnect_cb cb_fn; | |
67 | struct spdk_thread *thread; | |
68 | void *ctx; | |
69 | uint16_t qid; | |
70 | }; | |
71 | ||
72 | /* | |
73 | * There are several times when we need to iterate through the list of all qpairs and selectively delete them. | |
74 | * In order to do this sequentially without overlap, we must provide a context to recover the next qpair from | |
75 | * to enable calling nvmf_qpair_disconnect on the next desired qpair. | |
76 | */ | |
77 | struct nvmf_qpair_disconnect_many_ctx { | |
78 | struct spdk_nvmf_subsystem *subsystem; | |
79 | struct spdk_nvmf_poll_group *group; | |
80 | spdk_nvmf_poll_group_mod_done cpl_fn; | |
81 | void *cpl_ctx; | |
82 | }; | |
83 | ||
84 | static void | |
85 | spdk_nvmf_qpair_set_state(struct spdk_nvmf_qpair *qpair, | |
86 | enum spdk_nvmf_qpair_state state) | |
87 | { | |
88 | assert(qpair != NULL); | |
89 | assert(qpair->group->thread == spdk_get_thread()); | |
90 | ||
91 | qpair->state = state; | |
92 | } | |
93 | ||
94 | void | |
95 | spdk_nvmf_tgt_opts_init(struct spdk_nvmf_tgt_opts *opts) | |
96 | { | |
97 | opts->max_queue_depth = SPDK_NVMF_DEFAULT_MAX_QUEUE_DEPTH; | |
98 | opts->max_qpairs_per_ctrlr = SPDK_NVMF_DEFAULT_MAX_QPAIRS_PER_CTRLR; | |
99 | opts->in_capsule_data_size = SPDK_NVMF_DEFAULT_IN_CAPSULE_DATA_SIZE; | |
100 | opts->max_io_size = SPDK_NVMF_DEFAULT_MAX_IO_SIZE; | |
101 | opts->max_subsystems = SPDK_NVMF_DEFAULT_MAX_SUBSYSTEMS; | |
102 | opts->io_unit_size = SPDK_NVMF_DEFAULT_IO_UNIT_SIZE; | |
103 | } | |
104 | ||
105 | static int | |
106 | spdk_nvmf_poll_group_poll(void *ctx) | |
7c673cae | 107 | { |
11fdf7f2 | 108 | struct spdk_nvmf_poll_group *group = ctx; |
7c673cae | 109 | int rc; |
11fdf7f2 TL |
110 | int count = 0; |
111 | struct spdk_nvmf_transport_poll_group *tgroup; | |
7c673cae | 112 | |
11fdf7f2 TL |
113 | TAILQ_FOREACH(tgroup, &group->tgroups, link) { |
114 | rc = spdk_nvmf_transport_poll_group_poll(tgroup); | |
115 | if (rc < 0) { | |
116 | return -1; | |
117 | } | |
118 | count += rc; | |
119 | } | |
120 | ||
121 | return count; | |
122 | } | |
123 | ||
124 | static int | |
125 | spdk_nvmf_tgt_create_poll_group(void *io_device, void *ctx_buf) | |
126 | { | |
127 | struct spdk_nvmf_tgt *tgt = io_device; | |
128 | struct spdk_nvmf_poll_group *group = ctx_buf; | |
129 | struct spdk_nvmf_transport *transport; | |
130 | uint32_t sid; | |
131 | ||
132 | TAILQ_INIT(&group->tgroups); | |
133 | TAILQ_INIT(&group->qpairs); | |
134 | ||
135 | TAILQ_FOREACH(transport, &tgt->transports, link) { | |
136 | spdk_nvmf_poll_group_add_transport(group, transport); | |
137 | } | |
138 | ||
139 | group->num_sgroups = tgt->opts.max_subsystems; | |
140 | group->sgroups = calloc(tgt->opts.max_subsystems, sizeof(struct spdk_nvmf_subsystem_poll_group)); | |
141 | if (!group->sgroups) { | |
7c673cae FG |
142 | return -1; |
143 | } | |
144 | ||
11fdf7f2 TL |
145 | for (sid = 0; sid < tgt->opts.max_subsystems; sid++) { |
146 | struct spdk_nvmf_subsystem *subsystem; | |
147 | ||
148 | subsystem = tgt->subsystems[sid]; | |
149 | if (!subsystem) { | |
150 | continue; | |
151 | } | |
152 | ||
153 | if (spdk_nvmf_poll_group_add_subsystem(group, subsystem, NULL, NULL) != 0) { | |
154 | spdk_nvmf_tgt_destroy_poll_group(io_device, ctx_buf); | |
155 | return -1; | |
156 | } | |
157 | } | |
158 | ||
159 | group->poller = spdk_poller_register(spdk_nvmf_poll_group_poll, group, 0); | |
160 | group->thread = spdk_get_thread(); | |
161 | ||
7c673cae FG |
162 | return 0; |
163 | } | |
164 | ||
11fdf7f2 TL |
165 | static void |
166 | spdk_nvmf_tgt_destroy_poll_group(void *io_device, void *ctx_buf) | |
7c673cae | 167 | { |
11fdf7f2 TL |
168 | struct spdk_nvmf_poll_group *group = ctx_buf; |
169 | struct spdk_nvmf_transport_poll_group *tgroup, *tmp; | |
170 | struct spdk_nvmf_subsystem_poll_group *sgroup; | |
171 | uint32_t sid, nsid; | |
7c673cae | 172 | |
11fdf7f2 TL |
173 | TAILQ_FOREACH_SAFE(tgroup, &group->tgroups, link, tmp) { |
174 | TAILQ_REMOVE(&group->tgroups, tgroup, link); | |
175 | spdk_nvmf_transport_poll_group_destroy(tgroup); | |
176 | } | |
177 | ||
178 | for (sid = 0; sid < group->num_sgroups; sid++) { | |
179 | sgroup = &group->sgroups[sid]; | |
180 | ||
181 | for (nsid = 0; nsid < sgroup->num_channels; nsid++) { | |
182 | if (sgroup->channels[nsid]) { | |
183 | spdk_put_io_channel(sgroup->channels[nsid]); | |
184 | sgroup->channels[nsid] = NULL; | |
185 | } | |
186 | } | |
7c673cae | 187 | |
11fdf7f2 | 188 | free(sgroup->channels); |
7c673cae FG |
189 | } |
190 | ||
11fdf7f2 TL |
191 | free(group->sgroups); |
192 | } | |
7c673cae | 193 | |
11fdf7f2 TL |
194 | static void |
195 | _nvmf_tgt_disconnect_next_qpair(void *ctx) | |
196 | { | |
197 | struct spdk_nvmf_qpair *qpair; | |
198 | struct nvmf_qpair_disconnect_many_ctx *qpair_ctx = ctx; | |
199 | struct spdk_nvmf_poll_group *group = qpair_ctx->group; | |
200 | struct spdk_io_channel *ch; | |
201 | int rc = 0; | |
202 | ||
203 | qpair = TAILQ_FIRST(&group->qpairs); | |
204 | ||
205 | if (qpair) { | |
206 | rc = spdk_nvmf_qpair_disconnect(qpair, _nvmf_tgt_disconnect_next_qpair, ctx); | |
207 | } | |
208 | ||
209 | if (!qpair || rc != 0) { | |
210 | /* When the refcount from the channels reaches 0, spdk_nvmf_tgt_destroy_poll_group will be called. */ | |
211 | ch = spdk_io_channel_from_ctx(group); | |
212 | spdk_put_io_channel(ch); | |
213 | free(qpair_ctx); | |
214 | } | |
7c673cae FG |
215 | } |
216 | ||
11fdf7f2 TL |
217 | static void |
218 | spdk_nvmf_tgt_destroy_poll_group_qpairs(struct spdk_nvmf_poll_group *group) | |
7c673cae | 219 | { |
11fdf7f2 | 220 | struct nvmf_qpair_disconnect_many_ctx *ctx; |
7c673cae | 221 | |
11fdf7f2 TL |
222 | ctx = calloc(1, sizeof(struct nvmf_qpair_disconnect_many_ctx)); |
223 | ||
224 | if (!ctx) { | |
225 | SPDK_ERRLOG("Failed to allocate memory for destroy poll group ctx\n"); | |
226 | return; | |
7c673cae FG |
227 | } |
228 | ||
11fdf7f2 TL |
229 | spdk_poller_unregister(&group->poller); |
230 | ||
231 | ctx->group = group; | |
232 | _nvmf_tgt_disconnect_next_qpair(ctx); | |
233 | } | |
234 | ||
235 | struct spdk_nvmf_tgt * | |
236 | spdk_nvmf_tgt_create(struct spdk_nvmf_tgt_opts *opts) | |
237 | { | |
238 | struct spdk_nvmf_tgt *tgt; | |
239 | ||
240 | tgt = calloc(1, sizeof(*tgt)); | |
241 | if (!tgt) { | |
7c673cae FG |
242 | return NULL; |
243 | } | |
244 | ||
11fdf7f2 TL |
245 | if (!opts) { |
246 | spdk_nvmf_tgt_opts_init(&tgt->opts); | |
247 | } else { | |
248 | tgt->opts = *opts; | |
249 | } | |
250 | ||
251 | tgt->discovery_genctr = 0; | |
252 | tgt->discovery_log_page = NULL; | |
253 | tgt->discovery_log_page_size = 0; | |
254 | TAILQ_INIT(&tgt->transports); | |
255 | ||
256 | tgt->subsystems = calloc(tgt->opts.max_subsystems, sizeof(struct spdk_nvmf_subsystem *)); | |
257 | if (!tgt->subsystems) { | |
258 | free(tgt); | |
7c673cae FG |
259 | return NULL; |
260 | } | |
261 | ||
11fdf7f2 TL |
262 | spdk_io_device_register(tgt, |
263 | spdk_nvmf_tgt_create_poll_group, | |
264 | spdk_nvmf_tgt_destroy_poll_group, | |
265 | sizeof(struct spdk_nvmf_poll_group), | |
266 | "nvmf_tgt"); | |
267 | ||
268 | return tgt; | |
269 | } | |
270 | ||
271 | static void | |
272 | spdk_nvmf_tgt_destroy_cb(void *io_device) | |
273 | { | |
274 | struct spdk_nvmf_tgt *tgt = io_device; | |
275 | struct spdk_nvmf_transport *transport, *transport_tmp; | |
276 | spdk_nvmf_tgt_destroy_done_fn *destroy_cb_fn; | |
277 | void *destroy_cb_arg; | |
278 | uint32_t i; | |
279 | ||
280 | if (tgt->discovery_log_page) { | |
281 | free(tgt->discovery_log_page); | |
282 | } | |
283 | ||
284 | if (tgt->subsystems) { | |
285 | for (i = 0; i < tgt->opts.max_subsystems; i++) { | |
286 | if (tgt->subsystems[i]) { | |
287 | spdk_nvmf_subsystem_destroy(tgt->subsystems[i]); | |
288 | } | |
289 | } | |
290 | free(tgt->subsystems); | |
291 | } | |
292 | ||
293 | TAILQ_FOREACH_SAFE(transport, &tgt->transports, link, transport_tmp) { | |
294 | TAILQ_REMOVE(&tgt->transports, transport, link); | |
295 | spdk_nvmf_transport_destroy(transport); | |
296 | } | |
297 | ||
298 | destroy_cb_fn = tgt->destroy_cb_fn; | |
299 | destroy_cb_arg = tgt->destroy_cb_arg; | |
300 | ||
301 | free(tgt); | |
302 | ||
303 | if (destroy_cb_fn) { | |
304 | destroy_cb_fn(destroy_cb_arg, 0); | |
305 | } | |
306 | } | |
307 | ||
308 | void | |
309 | spdk_nvmf_tgt_destroy(struct spdk_nvmf_tgt *tgt, | |
310 | spdk_nvmf_tgt_destroy_done_fn cb_fn, | |
311 | void *cb_arg) | |
312 | { | |
313 | tgt->destroy_cb_fn = cb_fn; | |
314 | tgt->destroy_cb_arg = cb_arg; | |
315 | ||
316 | spdk_io_device_unregister(tgt, spdk_nvmf_tgt_destroy_cb); | |
317 | } | |
318 | ||
319 | static void | |
320 | spdk_nvmf_write_subsystem_config_json(struct spdk_json_write_ctx *w, | |
321 | struct spdk_nvmf_subsystem *subsystem) | |
322 | { | |
323 | struct spdk_nvmf_host *host; | |
324 | struct spdk_nvmf_listener *listener; | |
325 | const struct spdk_nvme_transport_id *trid; | |
326 | struct spdk_nvmf_ns *ns; | |
327 | struct spdk_nvmf_ns_opts ns_opts; | |
328 | uint32_t max_namespaces; | |
329 | char uuid_str[SPDK_UUID_STRING_LEN]; | |
330 | const char *trtype; | |
331 | const char *adrfam; | |
332 | ||
333 | if (spdk_nvmf_subsystem_get_type(subsystem) != SPDK_NVMF_SUBTYPE_NVME) { | |
334 | return; | |
335 | } | |
336 | ||
337 | /* { */ | |
338 | spdk_json_write_object_begin(w); | |
339 | spdk_json_write_named_string(w, "method", "nvmf_subsystem_create"); | |
340 | ||
341 | /* "params" : { */ | |
342 | spdk_json_write_named_object_begin(w, "params"); | |
343 | spdk_json_write_named_string(w, "nqn", spdk_nvmf_subsystem_get_nqn(subsystem)); | |
344 | spdk_json_write_named_bool(w, "allow_any_host", spdk_nvmf_subsystem_get_allow_any_host(subsystem)); | |
345 | spdk_json_write_named_string(w, "serial_number", spdk_nvmf_subsystem_get_sn(subsystem)); | |
346 | ||
347 | max_namespaces = spdk_nvmf_subsystem_get_max_namespaces(subsystem); | |
348 | if (max_namespaces != 0) { | |
349 | spdk_json_write_named_uint32(w, "max_namespaces", max_namespaces); | |
350 | } | |
351 | ||
352 | /* } "params" */ | |
353 | spdk_json_write_object_end(w); | |
354 | ||
355 | /* } */ | |
356 | spdk_json_write_object_end(w); | |
357 | ||
358 | for (listener = spdk_nvmf_subsystem_get_first_listener(subsystem); listener != NULL; | |
359 | listener = spdk_nvmf_subsystem_get_next_listener(subsystem, listener)) { | |
360 | trid = spdk_nvmf_listener_get_trid(listener); | |
361 | ||
362 | trtype = spdk_nvme_transport_id_trtype_str(trid->trtype); | |
363 | adrfam = spdk_nvme_transport_id_adrfam_str(trid->adrfam); | |
364 | ||
365 | spdk_json_write_object_begin(w); | |
366 | spdk_json_write_named_string(w, "method", "nvmf_subsystem_add_listener"); | |
367 | ||
368 | /* "params" : { */ | |
369 | spdk_json_write_named_object_begin(w, "params"); | |
370 | ||
371 | spdk_json_write_named_string(w, "nqn", spdk_nvmf_subsystem_get_nqn(subsystem)); | |
372 | ||
373 | /* "listen_address" : { */ | |
374 | spdk_json_write_named_object_begin(w, "listen_address"); | |
375 | ||
376 | spdk_json_write_named_string(w, "trtype", trtype); | |
377 | if (adrfam) { | |
378 | spdk_json_write_named_string(w, "adrfam", adrfam); | |
379 | } | |
380 | ||
381 | spdk_json_write_named_string(w, "traddr", trid->traddr); | |
382 | spdk_json_write_named_string(w, "trsvcid", trid->trsvcid); | |
383 | /* } "listen_address" */ | |
384 | spdk_json_write_object_end(w); | |
385 | ||
386 | /* } "params" */ | |
387 | spdk_json_write_object_end(w); | |
388 | ||
389 | /* } */ | |
390 | spdk_json_write_object_end(w); | |
391 | } | |
392 | ||
393 | for (host = spdk_nvmf_subsystem_get_first_host(subsystem); host != NULL; | |
394 | host = spdk_nvmf_subsystem_get_next_host(subsystem, host)) { | |
395 | ||
396 | spdk_json_write_object_begin(w); | |
397 | spdk_json_write_named_string(w, "method", "nvmf_subsystem_add_host"); | |
398 | ||
399 | /* "params" : { */ | |
400 | spdk_json_write_named_object_begin(w, "params"); | |
401 | ||
402 | spdk_json_write_named_string(w, "nqn", spdk_nvmf_subsystem_get_nqn(subsystem)); | |
403 | spdk_json_write_named_string(w, "host", spdk_nvmf_host_get_nqn(host)); | |
404 | ||
405 | /* } "params" */ | |
406 | spdk_json_write_object_end(w); | |
407 | ||
408 | /* } */ | |
409 | spdk_json_write_object_end(w); | |
410 | } | |
411 | ||
412 | for (ns = spdk_nvmf_subsystem_get_first_ns(subsystem); ns != NULL; | |
413 | ns = spdk_nvmf_subsystem_get_next_ns(subsystem, ns)) { | |
414 | spdk_nvmf_ns_get_opts(ns, &ns_opts, sizeof(ns_opts)); | |
415 | ||
416 | spdk_json_write_object_begin(w); | |
417 | spdk_json_write_named_string(w, "method", "nvmf_subsystem_add_ns"); | |
418 | ||
419 | /* "params" : { */ | |
420 | spdk_json_write_named_object_begin(w, "params"); | |
421 | ||
422 | spdk_json_write_named_string(w, "nqn", spdk_nvmf_subsystem_get_nqn(subsystem)); | |
423 | ||
424 | /* "namespace" : { */ | |
425 | spdk_json_write_named_object_begin(w, "namespace"); | |
426 | ||
427 | spdk_json_write_named_uint32(w, "nsid", spdk_nvmf_ns_get_id(ns)); | |
428 | spdk_json_write_named_string(w, "bdev_name", spdk_bdev_get_name(spdk_nvmf_ns_get_bdev(ns))); | |
429 | ||
430 | if (!spdk_mem_all_zero(ns_opts.nguid, sizeof(ns_opts.nguid))) { | |
431 | SPDK_STATIC_ASSERT(sizeof(ns_opts.nguid) == sizeof(uint64_t) * 2, "size mismatch"); | |
432 | spdk_json_write_named_string_fmt(w, "nguid", "%016"PRIX64"%016"PRIX64, from_be64(&ns_opts.nguid[0]), | |
433 | from_be64(&ns_opts.nguid[8])); | |
434 | } | |
435 | ||
436 | if (!spdk_mem_all_zero(ns_opts.eui64, sizeof(ns_opts.eui64))) { | |
437 | SPDK_STATIC_ASSERT(sizeof(ns_opts.eui64) == sizeof(uint64_t), "size mismatch"); | |
438 | spdk_json_write_named_string_fmt(w, "eui64", "%016"PRIX64, from_be64(&ns_opts.eui64)); | |
439 | } | |
440 | ||
441 | if (!spdk_mem_all_zero(&ns_opts.uuid, sizeof(ns_opts.uuid))) { | |
442 | spdk_uuid_fmt_lower(uuid_str, sizeof(uuid_str), &ns_opts.uuid); | |
443 | spdk_json_write_named_string(w, "uuid", uuid_str); | |
444 | } | |
445 | ||
446 | /* "namespace" */ | |
447 | spdk_json_write_object_end(w); | |
448 | ||
449 | /* } "params" */ | |
450 | spdk_json_write_object_end(w); | |
451 | ||
452 | /* } */ | |
453 | spdk_json_write_object_end(w); | |
454 | } | |
455 | } | |
456 | ||
457 | void | |
458 | spdk_nvmf_tgt_write_config_json(struct spdk_json_write_ctx *w, struct spdk_nvmf_tgt *tgt) | |
459 | { | |
460 | struct spdk_nvmf_subsystem *subsystem; | |
461 | struct spdk_nvmf_transport *transport; | |
462 | ||
463 | spdk_json_write_object_begin(w); | |
464 | spdk_json_write_named_string(w, "method", "set_nvmf_target_options"); | |
465 | ||
466 | spdk_json_write_named_object_begin(w, "params"); | |
467 | spdk_json_write_named_uint32(w, "max_queue_depth", tgt->opts.max_queue_depth); | |
468 | spdk_json_write_named_uint32(w, "max_qpairs_per_ctrlr", tgt->opts.max_qpairs_per_ctrlr); | |
469 | spdk_json_write_named_uint32(w, "in_capsule_data_size", tgt->opts.in_capsule_data_size); | |
470 | spdk_json_write_named_uint32(w, "max_io_size", tgt->opts.max_io_size); | |
471 | spdk_json_write_named_uint32(w, "max_subsystems", tgt->opts.max_subsystems); | |
472 | spdk_json_write_named_uint32(w, "io_unit_size", tgt->opts.io_unit_size); | |
473 | spdk_json_write_object_end(w); | |
474 | ||
475 | spdk_json_write_object_end(w); | |
476 | ||
477 | /* write transports */ | |
478 | TAILQ_FOREACH(transport, &tgt->transports, link) { | |
479 | spdk_json_write_object_begin(w); | |
480 | spdk_json_write_named_string(w, "method", "nvmf_create_transport"); | |
481 | ||
482 | spdk_json_write_named_object_begin(w, "params"); | |
483 | spdk_json_write_named_string(w, "trtype", spdk_nvme_transport_id_trtype_str(transport->ops->type)); | |
484 | spdk_json_write_named_uint32(w, "max_queue_depth", transport->opts.max_queue_depth); | |
485 | spdk_json_write_named_uint32(w, "max_qpairs_per_ctrlr", transport->opts.max_qpairs_per_ctrlr); | |
486 | spdk_json_write_named_uint32(w, "in_capsule_data_size", transport->opts.in_capsule_data_size); | |
487 | spdk_json_write_named_uint32(w, "max_io_size", transport->opts.max_io_size); | |
488 | spdk_json_write_named_uint32(w, "io_unit_size", transport->opts.io_unit_size); | |
489 | spdk_json_write_named_uint32(w, "max_aq_depth", transport->opts.max_aq_depth); | |
490 | spdk_json_write_object_end(w); | |
491 | ||
492 | spdk_json_write_object_end(w); | |
493 | } | |
494 | ||
495 | subsystem = spdk_nvmf_subsystem_get_first(tgt); | |
496 | while (subsystem) { | |
497 | spdk_nvmf_write_subsystem_config_json(w, subsystem); | |
498 | subsystem = spdk_nvmf_subsystem_get_next(subsystem); | |
499 | } | |
500 | } | |
501 | ||
502 | void | |
503 | spdk_nvmf_tgt_listen(struct spdk_nvmf_tgt *tgt, | |
504 | struct spdk_nvme_transport_id *trid, | |
505 | spdk_nvmf_tgt_listen_done_fn cb_fn, | |
506 | void *cb_arg) | |
507 | { | |
508 | struct spdk_nvmf_transport *transport; | |
509 | int rc; | |
510 | bool propagate = false; | |
511 | ||
512 | transport = spdk_nvmf_tgt_get_transport(tgt, trid->trtype); | |
513 | if (!transport) { | |
514 | struct spdk_nvmf_transport_opts opts; | |
515 | ||
516 | opts.max_queue_depth = tgt->opts.max_queue_depth; | |
517 | opts.max_qpairs_per_ctrlr = tgt->opts.max_qpairs_per_ctrlr; | |
518 | opts.in_capsule_data_size = tgt->opts.in_capsule_data_size; | |
519 | opts.max_io_size = tgt->opts.max_io_size; | |
520 | opts.io_unit_size = tgt->opts.io_unit_size; | |
521 | /* use max_queue depth since tgt. opts. doesn't have max_aq_depth */ | |
522 | opts.max_aq_depth = tgt->opts.max_queue_depth; | |
523 | ||
524 | transport = spdk_nvmf_transport_create(trid->trtype, &opts); | |
525 | if (!transport) { | |
526 | SPDK_ERRLOG("Transport initialization failed\n"); | |
527 | cb_fn(cb_arg, -EINVAL); | |
528 | return; | |
529 | } | |
530 | ||
531 | propagate = true; | |
532 | } | |
533 | ||
534 | rc = spdk_nvmf_transport_listen(transport, trid); | |
535 | if (rc < 0) { | |
536 | SPDK_ERRLOG("Unable to listen on address '%s'\n", trid->traddr); | |
537 | cb_fn(cb_arg, rc); | |
538 | return; | |
539 | } | |
540 | ||
541 | tgt->discovery_genctr++; | |
542 | ||
543 | if (propagate) { | |
544 | spdk_nvmf_tgt_add_transport(tgt, transport, cb_fn, cb_arg); | |
545 | } else { | |
546 | cb_fn(cb_arg, 0); | |
547 | } | |
548 | } | |
549 | ||
550 | struct spdk_nvmf_tgt_add_transport_ctx { | |
551 | struct spdk_nvmf_tgt *tgt; | |
552 | struct spdk_nvmf_transport *transport; | |
553 | spdk_nvmf_tgt_add_transport_done_fn cb_fn; | |
554 | void *cb_arg; | |
555 | }; | |
556 | ||
557 | static void | |
558 | _spdk_nvmf_tgt_add_transport_done(struct spdk_io_channel_iter *i, int status) | |
559 | { | |
560 | struct spdk_nvmf_tgt_add_transport_ctx *ctx = spdk_io_channel_iter_get_ctx(i); | |
561 | ||
562 | ctx->cb_fn(ctx->cb_arg, status); | |
563 | ||
564 | free(ctx); | |
565 | } | |
566 | ||
567 | static void | |
568 | _spdk_nvmf_tgt_add_transport(struct spdk_io_channel_iter *i) | |
569 | { | |
570 | struct spdk_nvmf_tgt_add_transport_ctx *ctx = spdk_io_channel_iter_get_ctx(i); | |
571 | struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(i); | |
572 | struct spdk_nvmf_poll_group *group = spdk_io_channel_get_ctx(ch); | |
573 | int rc; | |
574 | ||
575 | rc = spdk_nvmf_poll_group_add_transport(group, ctx->transport); | |
576 | spdk_for_each_channel_continue(i, rc); | |
577 | } | |
578 | ||
579 | void spdk_nvmf_tgt_add_transport(struct spdk_nvmf_tgt *tgt, | |
580 | struct spdk_nvmf_transport *transport, | |
581 | spdk_nvmf_tgt_add_transport_done_fn cb_fn, | |
582 | void *cb_arg) | |
583 | { | |
584 | struct spdk_nvmf_tgt_add_transport_ctx *ctx; | |
585 | ||
586 | if (spdk_nvmf_tgt_get_transport(tgt, transport->ops->type)) { | |
587 | cb_fn(cb_arg, -EEXIST); | |
588 | return; /* transport already created */ | |
589 | } | |
590 | ||
591 | transport->tgt = tgt; | |
592 | TAILQ_INSERT_TAIL(&tgt->transports, transport, link); | |
593 | ||
594 | ctx = calloc(1, sizeof(*ctx)); | |
595 | if (!ctx) { | |
596 | cb_fn(cb_arg, -ENOMEM); | |
597 | return; | |
598 | } | |
599 | ||
600 | ctx->tgt = tgt; | |
601 | ctx->transport = transport; | |
602 | ctx->cb_fn = cb_fn; | |
603 | ctx->cb_arg = cb_arg; | |
604 | ||
605 | spdk_for_each_channel(tgt, | |
606 | _spdk_nvmf_tgt_add_transport, | |
607 | ctx, | |
608 | _spdk_nvmf_tgt_add_transport_done); | |
609 | } | |
610 | ||
611 | struct spdk_nvmf_subsystem * | |
612 | spdk_nvmf_tgt_find_subsystem(struct spdk_nvmf_tgt *tgt, const char *subnqn) | |
613 | { | |
614 | struct spdk_nvmf_subsystem *subsystem; | |
615 | uint32_t sid; | |
616 | ||
617 | if (!subnqn) { | |
7c673cae FG |
618 | return NULL; |
619 | } | |
620 | ||
11fdf7f2 TL |
621 | for (sid = 0; sid < tgt->opts.max_subsystems; sid++) { |
622 | subsystem = tgt->subsystems[sid]; | |
623 | if (subsystem == NULL) { | |
624 | continue; | |
625 | } | |
626 | ||
627 | if (strcmp(subnqn, subsystem->subnqn) == 0) { | |
628 | return subsystem; | |
629 | } | |
630 | } | |
631 | ||
632 | return NULL; | |
633 | } | |
634 | ||
635 | struct spdk_nvmf_transport * | |
636 | spdk_nvmf_tgt_get_transport(struct spdk_nvmf_tgt *tgt, enum spdk_nvme_transport_type type) | |
637 | { | |
638 | struct spdk_nvmf_transport *transport; | |
639 | ||
640 | TAILQ_FOREACH(transport, &tgt->transports, link) { | |
641 | if (transport->ops->type == type) { | |
642 | return transport; | |
643 | } | |
644 | } | |
645 | ||
646 | return NULL; | |
647 | } | |
648 | ||
649 | void | |
650 | spdk_nvmf_tgt_accept(struct spdk_nvmf_tgt *tgt, new_qpair_fn cb_fn) | |
651 | { | |
652 | struct spdk_nvmf_transport *transport, *tmp; | |
653 | ||
654 | TAILQ_FOREACH_SAFE(transport, &tgt->transports, link, tmp) { | |
655 | spdk_nvmf_transport_accept(transport, cb_fn); | |
656 | } | |
657 | } | |
658 | ||
659 | struct spdk_nvmf_poll_group * | |
660 | spdk_nvmf_poll_group_create(struct spdk_nvmf_tgt *tgt) | |
661 | { | |
662 | struct spdk_io_channel *ch; | |
663 | ||
664 | ch = spdk_get_io_channel(tgt); | |
665 | if (!ch) { | |
666 | SPDK_ERRLOG("Unable to get I/O channel for target\n"); | |
7c673cae FG |
667 | return NULL; |
668 | } | |
669 | ||
11fdf7f2 TL |
670 | return spdk_io_channel_get_ctx(ch); |
671 | } | |
672 | ||
/* Destroy a poll group. Disconnecting the last qpair releases the
 * group's I/O channel, which ultimately triggers
 * spdk_nvmf_tgt_destroy_poll_group(). */
void
spdk_nvmf_poll_group_destroy(struct spdk_nvmf_poll_group *group)
{
	spdk_nvmf_tgt_destroy_poll_group_qpairs(group);
}
679 | ||
680 | int | |
681 | spdk_nvmf_poll_group_add(struct spdk_nvmf_poll_group *group, | |
682 | struct spdk_nvmf_qpair *qpair) | |
683 | { | |
684 | int rc = -1; | |
685 | struct spdk_nvmf_transport_poll_group *tgroup; | |
686 | ||
687 | TAILQ_INIT(&qpair->outstanding); | |
688 | qpair->group = group; | |
689 | spdk_nvmf_qpair_set_state(qpair, SPDK_NVMF_QPAIR_ACTIVATING); | |
690 | ||
691 | TAILQ_INSERT_TAIL(&group->qpairs, qpair, link); | |
692 | ||
693 | TAILQ_FOREACH(tgroup, &group->tgroups, link) { | |
694 | if (tgroup->transport == qpair->transport) { | |
695 | rc = spdk_nvmf_transport_poll_group_add(tgroup, qpair); | |
696 | break; | |
697 | } | |
698 | } | |
699 | ||
700 | if (rc == 0) { | |
701 | spdk_nvmf_qpair_set_state(qpair, SPDK_NVMF_QPAIR_ACTIVE); | |
702 | } else { | |
703 | spdk_nvmf_qpair_set_state(qpair, SPDK_NVMF_QPAIR_INACTIVE); | |
704 | } | |
705 | ||
706 | return rc; | |
707 | } | |
708 | ||
/* Thread-hop trampoline: destroys the controller on the thread this
 * message was sent to. */
static void
_nvmf_ctrlr_destruct(void *ctx)
{
	spdk_nvmf_ctrlr_destruct(ctx);
}
716 | ||
717 | static void | |
718 | _spdk_nvmf_ctrlr_free_from_qpair(void *ctx) | |
719 | { | |
720 | struct nvmf_qpair_disconnect_ctx *qpair_ctx = ctx; | |
721 | struct spdk_nvmf_ctrlr *ctrlr = qpair_ctx->ctrlr; | |
722 | uint32_t count; | |
723 | ||
724 | spdk_bit_array_clear(ctrlr->qpair_mask, qpair_ctx->qid); | |
725 | count = spdk_bit_array_count_set(ctrlr->qpair_mask); | |
726 | if (count == 0) { | |
727 | spdk_bit_array_free(&ctrlr->qpair_mask); | |
728 | ||
729 | spdk_thread_send_msg(ctrlr->subsys->thread, _nvmf_ctrlr_destruct, ctrlr); | |
730 | } | |
731 | ||
732 | if (qpair_ctx->cb_fn) { | |
733 | spdk_thread_send_msg(qpair_ctx->thread, qpair_ctx->cb_fn, qpair_ctx->ctx); | |
734 | } | |
735 | free(qpair_ctx); | |
736 | } | |
737 | ||
738 | static void | |
739 | _spdk_nvmf_qpair_destroy(void *ctx, int status) | |
740 | { | |
741 | struct nvmf_qpair_disconnect_ctx *qpair_ctx = ctx; | |
742 | struct spdk_nvmf_qpair *qpair = qpair_ctx->qpair; | |
743 | struct spdk_nvmf_ctrlr *ctrlr = qpair->ctrlr; | |
744 | ||
745 | assert(qpair->state == SPDK_NVMF_QPAIR_DEACTIVATING); | |
746 | spdk_nvmf_qpair_set_state(qpair, SPDK_NVMF_QPAIR_INACTIVE); | |
747 | qpair_ctx->qid = qpair->qid; | |
748 | ||
749 | TAILQ_REMOVE(&qpair->group->qpairs, qpair, link); | |
750 | qpair->group = NULL; | |
751 | ||
752 | spdk_nvmf_transport_qpair_fini(qpair); | |
753 | ||
754 | if (!ctrlr || !ctrlr->thread) { | |
755 | if (qpair_ctx->cb_fn) { | |
756 | spdk_thread_send_msg(qpair_ctx->thread, qpair_ctx->cb_fn, qpair_ctx->ctx); | |
757 | } | |
758 | free(qpair_ctx); | |
759 | return; | |
760 | } | |
761 | ||
762 | qpair_ctx->ctrlr = ctrlr; | |
763 | spdk_thread_send_msg(ctrlr->thread, _spdk_nvmf_ctrlr_free_from_qpair, qpair_ctx); | |
764 | ||
765 | } | |
766 | ||
767 | int | |
768 | spdk_nvmf_qpair_disconnect(struct spdk_nvmf_qpair *qpair, nvmf_qpair_disconnect_cb cb_fn, void *ctx) | |
769 | { | |
770 | struct nvmf_qpair_disconnect_ctx *qpair_ctx; | |
771 | ||
772 | /* If we get a qpair in the uninitialized state, we can just destroy it immediately */ | |
773 | if (qpair->state == SPDK_NVMF_QPAIR_UNINITIALIZED) { | |
774 | spdk_nvmf_transport_qpair_fini(qpair); | |
775 | if (cb_fn) { | |
776 | cb_fn(ctx); | |
777 | } | |
778 | return 0; | |
779 | } | |
780 | ||
781 | /* The queue pair must be disconnected from the thread that owns it */ | |
782 | assert(qpair->group->thread == spdk_get_thread()); | |
783 | ||
784 | if (qpair->state == SPDK_NVMF_QPAIR_DEACTIVATING || | |
785 | qpair->state == SPDK_NVMF_QPAIR_INACTIVE) { | |
786 | /* This can occur if the connection is killed by the target, | |
787 | * which results in a notification that the connection | |
788 | * died. Send a message to defer the processing of this | |
789 | * callback. This allows the stack to unwind in the case | |
790 | * where a bunch of connections are disconnected in | |
791 | * a loop. */ | |
792 | if (cb_fn) { | |
793 | spdk_thread_send_msg(qpair->group->thread, cb_fn, ctx); | |
794 | } | |
795 | return 0; | |
796 | } | |
797 | ||
798 | assert(qpair->state == SPDK_NVMF_QPAIR_ACTIVE); | |
799 | spdk_nvmf_qpair_set_state(qpair, SPDK_NVMF_QPAIR_DEACTIVATING); | |
800 | ||
801 | qpair_ctx = calloc(1, sizeof(struct nvmf_qpair_disconnect_ctx)); | |
802 | if (!qpair_ctx) { | |
803 | SPDK_ERRLOG("Unable to allocate context for nvmf_qpair_disconnect\n"); | |
804 | return -ENOMEM; | |
805 | } | |
806 | ||
807 | qpair_ctx->qpair = qpair; | |
808 | qpair_ctx->cb_fn = cb_fn; | |
809 | qpair_ctx->thread = qpair->group->thread; | |
810 | qpair_ctx->ctx = ctx; | |
811 | ||
812 | /* Check for outstanding I/O */ | |
813 | if (!TAILQ_EMPTY(&qpair->outstanding)) { | |
814 | qpair->state_cb = _spdk_nvmf_qpair_destroy; | |
815 | qpair->state_cb_arg = qpair_ctx; | |
816 | spdk_nvmf_qpair_free_aer(qpair); | |
817 | return 0; | |
818 | } | |
819 | ||
820 | _spdk_nvmf_qpair_destroy(qpair_ctx, 0); | |
821 | ||
822 | return 0; | |
823 | } | |
824 | ||
/* Retrieve the peer's transport ID for this qpair (transport-specific). */
int
spdk_nvmf_qpair_get_peer_trid(struct spdk_nvmf_qpair *qpair,
			      struct spdk_nvme_transport_id *trid)
{
	return spdk_nvmf_transport_qpair_get_peer_trid(qpair, trid);
}
831 | ||
/* Retrieve the local transport ID for this qpair (transport-specific). */
int
spdk_nvmf_qpair_get_local_trid(struct spdk_nvmf_qpair *qpair,
			       struct spdk_nvme_transport_id *trid)
{
	return spdk_nvmf_transport_qpair_get_local_trid(qpair, trid);
}
838 | ||
/* Retrieve the listener transport ID this qpair connected to. */
int
spdk_nvmf_qpair_get_listen_trid(struct spdk_nvmf_qpair *qpair,
				struct spdk_nvme_transport_id *trid)
{
	return spdk_nvmf_transport_qpair_get_listen_trid(qpair, trid);
}
845 | ||
846 | int | |
847 | spdk_nvmf_poll_group_add_transport(struct spdk_nvmf_poll_group *group, | |
848 | struct spdk_nvmf_transport *transport) | |
849 | { | |
850 | struct spdk_nvmf_transport_poll_group *tgroup; | |
851 | ||
852 | TAILQ_FOREACH(tgroup, &group->tgroups, link) { | |
853 | if (tgroup->transport == transport) { | |
854 | /* Transport already in the poll group */ | |
855 | return 0; | |
856 | } | |
857 | } | |
858 | ||
859 | tgroup = spdk_nvmf_transport_poll_group_create(transport); | |
860 | if (!tgroup) { | |
861 | SPDK_ERRLOG("Unable to create poll group for transport\n"); | |
862 | return -1; | |
863 | } | |
864 | ||
865 | TAILQ_INSERT_TAIL(&group->tgroups, tgroup, link); | |
866 | ||
867 | return 0; | |
868 | } | |
869 | ||
870 | static int | |
871 | poll_group_update_subsystem(struct spdk_nvmf_poll_group *group, | |
872 | struct spdk_nvmf_subsystem *subsystem) | |
873 | { | |
874 | struct spdk_nvmf_subsystem_poll_group *sgroup; | |
875 | uint32_t new_num_channels, old_num_channels; | |
876 | uint32_t i; | |
877 | struct spdk_nvmf_ns *ns; | |
878 | ||
879 | /* Make sure our poll group has memory for this subsystem allocated */ | |
880 | if (subsystem->id >= group->num_sgroups) { | |
881 | return -ENOMEM; | |
882 | } | |
883 | ||
884 | sgroup = &group->sgroups[subsystem->id]; | |
885 | ||
886 | /* Make sure the array of channels is the correct size */ | |
887 | new_num_channels = subsystem->max_nsid; | |
888 | old_num_channels = sgroup->num_channels; | |
889 | ||
890 | if (old_num_channels == 0) { | |
891 | if (new_num_channels > 0) { | |
892 | /* First allocation */ | |
893 | sgroup->channels = calloc(new_num_channels, sizeof(sgroup->channels[0])); | |
894 | if (!sgroup->channels) { | |
895 | return -ENOMEM; | |
896 | } | |
897 | } | |
898 | } else if (new_num_channels > old_num_channels) { | |
899 | void *buf; | |
900 | ||
901 | /* Make the array larger */ | |
902 | buf = realloc(sgroup->channels, new_num_channels * sizeof(sgroup->channels[0])); | |
903 | if (!buf) { | |
904 | return -ENOMEM; | |
905 | } | |
906 | ||
907 | sgroup->channels = buf; | |
908 | ||
909 | /* Null out the new channels slots */ | |
910 | for (i = old_num_channels; i < new_num_channels; i++) { | |
911 | sgroup->channels[i] = NULL; | |
912 | } | |
913 | } else if (new_num_channels < old_num_channels) { | |
914 | void *buf; | |
915 | ||
916 | /* Free the extra I/O channels */ | |
917 | for (i = new_num_channels; i < old_num_channels; i++) { | |
918 | if (sgroup->channels[i]) { | |
919 | spdk_put_io_channel(sgroup->channels[i]); | |
920 | sgroup->channels[i] = NULL; | |
921 | } | |
922 | } | |
923 | ||
924 | /* Make the array smaller */ | |
925 | if (new_num_channels > 0) { | |
926 | buf = realloc(sgroup->channels, new_num_channels * sizeof(sgroup->channels[0])); | |
927 | if (!buf) { | |
928 | return -ENOMEM; | |
929 | } | |
930 | sgroup->channels = buf; | |
931 | } else { | |
932 | free(sgroup->channels); | |
933 | sgroup->channels = NULL; | |
934 | } | |
935 | } | |
936 | ||
937 | sgroup->num_channels = new_num_channels; | |
938 | ||
939 | /* Detect bdevs that were added or removed */ | |
940 | for (i = 0; i < sgroup->num_channels; i++) { | |
941 | ns = subsystem->ns[i]; | |
942 | if (ns == NULL && sgroup->channels[i] == NULL) { | |
943 | /* Both NULL. Leave empty */ | |
944 | } else if (ns == NULL && sgroup->channels[i] != NULL) { | |
945 | /* There was a channel here, but the namespace is gone. */ | |
946 | spdk_put_io_channel(sgroup->channels[i]); | |
947 | sgroup->channels[i] = NULL; | |
948 | } else if (ns != NULL && sgroup->channels[i] == NULL) { | |
949 | /* A namespace appeared but there is no channel yet */ | |
950 | sgroup->channels[i] = spdk_bdev_get_io_channel(ns->desc); | |
951 | if (sgroup->channels[i] == NULL) { | |
952 | SPDK_ERRLOG("Could not allocate I/O channel.\n"); | |
953 | return -ENOMEM; | |
954 | } | |
955 | } else { | |
956 | /* A namespace was present before and didn't change. */ | |
957 | } | |
958 | } | |
959 | ||
960 | return 0; | |
961 | } | |
962 | ||
/*
 * Public wrapper around poll_group_update_subsystem(): re-synchronize this
 * poll group's channel state with the subsystem's current namespaces.
 */
int
spdk_nvmf_poll_group_update_subsystem(struct spdk_nvmf_poll_group *group,
				      struct spdk_nvmf_subsystem *subsystem)
{
	int rc;

	rc = poll_group_update_subsystem(group, subsystem);
	return rc;
}
969 | ||
970 | int | |
971 | spdk_nvmf_poll_group_add_subsystem(struct spdk_nvmf_poll_group *group, | |
972 | struct spdk_nvmf_subsystem *subsystem, | |
973 | spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg) | |
974 | { | |
975 | int rc = 0; | |
976 | struct spdk_nvmf_subsystem_poll_group *sgroup = &group->sgroups[subsystem->id]; | |
977 | ||
978 | TAILQ_INIT(&sgroup->queued); | |
979 | ||
980 | rc = poll_group_update_subsystem(group, subsystem); | |
981 | if (rc) { | |
982 | spdk_nvmf_poll_group_remove_subsystem(group, subsystem, NULL, NULL); | |
983 | goto fini; | |
984 | } | |
985 | ||
986 | sgroup->state = SPDK_NVMF_SUBSYSTEM_ACTIVE; | |
987 | fini: | |
988 | if (cb_fn) { | |
989 | cb_fn(cb_arg, rc); | |
990 | } | |
991 | ||
992 | return rc; | |
993 | } | |
994 | ||
995 | static void | |
996 | _nvmf_poll_group_remove_subsystem_cb(void *ctx, int status) | |
997 | { | |
998 | struct nvmf_qpair_disconnect_many_ctx *qpair_ctx = ctx; | |
999 | struct spdk_nvmf_subsystem *subsystem; | |
1000 | struct spdk_nvmf_poll_group *group; | |
1001 | struct spdk_nvmf_subsystem_poll_group *sgroup; | |
1002 | spdk_nvmf_poll_group_mod_done cpl_fn = NULL; | |
1003 | void *cpl_ctx = NULL; | |
1004 | uint32_t nsid; | |
1005 | ||
1006 | group = qpair_ctx->group; | |
1007 | subsystem = qpair_ctx->subsystem; | |
1008 | cpl_fn = qpair_ctx->cpl_fn; | |
1009 | cpl_ctx = qpair_ctx->cpl_ctx; | |
1010 | sgroup = &group->sgroups[subsystem->id]; | |
1011 | ||
1012 | if (status) { | |
1013 | goto fini; | |
1014 | } | |
1015 | ||
1016 | for (nsid = 0; nsid < sgroup->num_channels; nsid++) { | |
1017 | if (sgroup->channels[nsid]) { | |
1018 | spdk_put_io_channel(sgroup->channels[nsid]); | |
1019 | sgroup->channels[nsid] = NULL; | |
1020 | } | |
1021 | } | |
1022 | ||
1023 | sgroup->num_channels = 0; | |
1024 | free(sgroup->channels); | |
1025 | sgroup->channels = NULL; | |
1026 | fini: | |
1027 | free(qpair_ctx); | |
1028 | if (cpl_fn) { | |
1029 | cpl_fn(cpl_ctx, status); | |
1030 | } | |
1031 | } | |
1032 | ||
1033 | static void | |
1034 | _nvmf_subsystem_disconnect_next_qpair(void *ctx) | |
1035 | { | |
1036 | struct spdk_nvmf_qpair *qpair; | |
1037 | struct nvmf_qpair_disconnect_many_ctx *qpair_ctx = ctx; | |
1038 | struct spdk_nvmf_subsystem *subsystem; | |
1039 | struct spdk_nvmf_poll_group *group; | |
1040 | int rc = 0; | |
1041 | ||
1042 | group = qpair_ctx->group; | |
1043 | subsystem = qpair_ctx->subsystem; | |
1044 | ||
1045 | TAILQ_FOREACH(qpair, &group->qpairs, link) { | |
1046 | if (qpair->ctrlr->subsys == subsystem) { | |
1047 | break; | |
1048 | } | |
1049 | } | |
1050 | ||
1051 | if (qpair) { | |
1052 | rc = spdk_nvmf_qpair_disconnect(qpair, _nvmf_subsystem_disconnect_next_qpair, qpair_ctx); | |
1053 | } | |
1054 | ||
1055 | if (!qpair || rc != 0) { | |
1056 | _nvmf_poll_group_remove_subsystem_cb(ctx, rc); | |
1057 | } | |
1058 | return; | |
7c673cae FG |
1059 | } |
1060 | ||
1061 | void | |
11fdf7f2 TL |
1062 | spdk_nvmf_poll_group_remove_subsystem(struct spdk_nvmf_poll_group *group, |
1063 | struct spdk_nvmf_subsystem *subsystem, | |
1064 | spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg) | |
7c673cae | 1065 | { |
11fdf7f2 TL |
1066 | struct spdk_nvmf_qpair *qpair; |
1067 | struct spdk_nvmf_subsystem_poll_group *sgroup; | |
1068 | struct nvmf_qpair_disconnect_many_ctx *ctx; | |
1069 | int rc = 0; | |
7c673cae | 1070 | |
11fdf7f2 | 1071 | ctx = calloc(1, sizeof(struct nvmf_qpair_disconnect_many_ctx)); |
7c673cae | 1072 | |
11fdf7f2 TL |
1073 | if (!ctx) { |
1074 | SPDK_ERRLOG("Unable to allocate memory for context to remove poll subsystem\n"); | |
1075 | goto fini; | |
1076 | } | |
1077 | ||
1078 | ctx->group = group; | |
1079 | ctx->subsystem = subsystem; | |
1080 | ctx->cpl_fn = cb_fn; | |
1081 | ctx->cpl_ctx = cb_arg; | |
1082 | ||
1083 | sgroup = &group->sgroups[subsystem->id]; | |
1084 | sgroup->state = SPDK_NVMF_SUBSYSTEM_INACTIVE; | |
1085 | ||
1086 | TAILQ_FOREACH(qpair, &group->qpairs, link) { | |
1087 | if (qpair->ctrlr->subsys == subsystem) { | |
1088 | break; | |
1089 | } | |
1090 | } | |
1091 | ||
1092 | if (qpair) { | |
1093 | rc = spdk_nvmf_qpair_disconnect(qpair, _nvmf_subsystem_disconnect_next_qpair, ctx); | |
1094 | } else { | |
1095 | /* call the callback immediately. It will handle any channel iteration */ | |
1096 | _nvmf_poll_group_remove_subsystem_cb(ctx, 0); | |
1097 | } | |
1098 | ||
1099 | if (rc != 0) { | |
1100 | free(ctx); | |
1101 | goto fini; | |
1102 | } | |
1103 | ||
1104 | return; | |
1105 | fini: | |
1106 | if (cb_fn) { | |
1107 | cb_fn(cb_arg, rc); | |
1108 | } | |
7c673cae FG |
1109 | } |
1110 | ||
1111 | void | |
11fdf7f2 TL |
1112 | spdk_nvmf_poll_group_pause_subsystem(struct spdk_nvmf_poll_group *group, |
1113 | struct spdk_nvmf_subsystem *subsystem, | |
1114 | spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg) | |
1115 | { | |
1116 | struct spdk_nvmf_subsystem_poll_group *sgroup; | |
1117 | int rc = 0; | |
1118 | ||
1119 | if (subsystem->id >= group->num_sgroups) { | |
1120 | rc = -1; | |
1121 | goto fini; | |
1122 | } | |
1123 | ||
1124 | sgroup = &group->sgroups[subsystem->id]; | |
1125 | if (sgroup == NULL) { | |
1126 | rc = -1; | |
1127 | goto fini; | |
1128 | } | |
1129 | ||
1130 | assert(sgroup->state == SPDK_NVMF_SUBSYSTEM_ACTIVE); | |
1131 | /* TODO: This currently does not quiesce I/O */ | |
1132 | sgroup->state = SPDK_NVMF_SUBSYSTEM_PAUSED; | |
1133 | fini: | |
1134 | if (cb_fn) { | |
1135 | cb_fn(cb_arg, rc); | |
1136 | } | |
1137 | } | |
1138 | ||
1139 | void | |
1140 | spdk_nvmf_poll_group_resume_subsystem(struct spdk_nvmf_poll_group *group, | |
1141 | struct spdk_nvmf_subsystem *subsystem, | |
1142 | spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg) | |
1143 | { | |
1144 | struct spdk_nvmf_request *req, *tmp; | |
1145 | struct spdk_nvmf_subsystem_poll_group *sgroup; | |
1146 | int rc = 0; | |
1147 | ||
1148 | if (subsystem->id >= group->num_sgroups) { | |
1149 | rc = -1; | |
1150 | goto fini; | |
1151 | } | |
1152 | ||
1153 | sgroup = &group->sgroups[subsystem->id]; | |
1154 | ||
1155 | assert(sgroup->state == SPDK_NVMF_SUBSYSTEM_PAUSED); | |
1156 | ||
1157 | rc = poll_group_update_subsystem(group, subsystem); | |
1158 | if (rc) { | |
1159 | goto fini; | |
1160 | } | |
1161 | ||
1162 | sgroup->state = SPDK_NVMF_SUBSYSTEM_ACTIVE; | |
1163 | ||
1164 | /* Release all queued requests */ | |
1165 | TAILQ_FOREACH_SAFE(req, &sgroup->queued, link, tmp) { | |
1166 | TAILQ_REMOVE(&sgroup->queued, req, link); | |
1167 | spdk_nvmf_request_exec(req); | |
1168 | } | |
1169 | fini: | |
1170 | if (cb_fn) { | |
1171 | cb_fn(cb_arg, rc); | |
1172 | } | |
7c673cae | 1173 | } |