4 * Copyright (c) Intel Corporation.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 #include "spdk_cunit.h"
36 #include "nvme/nvme_poll_group.c"
37 #include "common/lib/test_env.c"
39 struct spdk_nvme_transport
{
41 TAILQ_ENTRY(spdk_nvme_transport
) link
;
44 struct spdk_nvme_transport t1
= {
48 struct spdk_nvme_transport t2
= {
52 struct spdk_nvme_transport t3
= {
56 struct spdk_nvme_transport t4
= {
60 int64_t g_process_completions_return_value
= 0;
61 int g_destroy_return_value
= 0;
63 TAILQ_HEAD(nvme_transport_list
, spdk_nvme_transport
) g_spdk_nvme_transports
=
64 TAILQ_HEAD_INITIALIZER(g_spdk_nvme_transports
);
/* No-op disconnected-qpair callback passed to process_completions() calls. */
static void
unit_test_disconnected_qpair_cb(struct spdk_nvme_qpair *qpair, void *poll_group_ctx)
{
}
72 const struct spdk_nvme_transport
*
73 nvme_get_first_transport(void)
75 return TAILQ_FIRST(&g_spdk_nvme_transports
);
78 const struct spdk_nvme_transport
*
79 nvme_get_next_transport(const struct spdk_nvme_transport
*transport
)
81 return TAILQ_NEXT(transport
, link
);
85 nvme_transport_poll_group_disconnect_qpair(struct spdk_nvme_qpair
*qpair
)
87 struct spdk_nvme_transport_poll_group
*tgroup
;
88 struct spdk_nvme_qpair
*iter_qp
, *tmp_iter_qp
;
90 tgroup
= qpair
->poll_group
;
92 STAILQ_FOREACH_SAFE(iter_qp
, &tgroup
->connected_qpairs
, poll_group_stailq
, tmp_iter_qp
) {
93 if (qpair
== iter_qp
) {
94 STAILQ_REMOVE(&tgroup
->connected_qpairs
, qpair
, spdk_nvme_qpair
, poll_group_stailq
);
95 STAILQ_INSERT_TAIL(&tgroup
->disconnected_qpairs
, qpair
, poll_group_stailq
);
100 STAILQ_FOREACH(iter_qp
, &tgroup
->disconnected_qpairs
, poll_group_stailq
) {
101 if (qpair
== iter_qp
) {
110 nvme_transport_poll_group_connect_qpair(struct spdk_nvme_qpair
*qpair
)
112 struct spdk_nvme_transport_poll_group
*tgroup
;
113 struct spdk_nvme_qpair
*iter_qp
, *tmp_iter_qp
;
115 tgroup
= qpair
->poll_group
;
117 STAILQ_FOREACH_SAFE(iter_qp
, &tgroup
->disconnected_qpairs
, poll_group_stailq
, tmp_iter_qp
) {
118 if (qpair
== iter_qp
) {
119 STAILQ_REMOVE(&tgroup
->disconnected_qpairs
, qpair
, spdk_nvme_qpair
, poll_group_stailq
);
120 STAILQ_INSERT_TAIL(&tgroup
->connected_qpairs
, qpair
, poll_group_stailq
);
125 STAILQ_FOREACH(iter_qp
, &tgroup
->connected_qpairs
, poll_group_stailq
) {
126 if (qpair
== iter_qp
) {
134 struct spdk_nvme_transport_poll_group
*
135 nvme_transport_poll_group_create(const struct spdk_nvme_transport
*transport
)
137 struct spdk_nvme_transport_poll_group
*group
= NULL
;
139 /* TODO: separate this transport function table from the transport specific one. */
140 group
= calloc(1, sizeof(*group
));
142 group
->transport
= transport
;
143 STAILQ_INIT(&group
->connected_qpairs
);
144 STAILQ_INIT(&group
->disconnected_qpairs
);
151 nvme_transport_poll_group_destroy(struct spdk_nvme_transport_poll_group
*tgroup
)
153 return g_destroy_return_value
;
157 nvme_transport_poll_group_add(struct spdk_nvme_transport_poll_group
*tgroup
,
158 struct spdk_nvme_qpair
*qpair
)
160 STAILQ_INSERT_TAIL(&tgroup
->connected_qpairs
, qpair
, poll_group_stailq
);
161 qpair
->poll_group
= tgroup
;
167 nvme_transport_poll_group_remove(struct spdk_nvme_transport_poll_group
*tgroup
,
168 struct spdk_nvme_qpair
*qpair
)
170 struct spdk_nvme_qpair
*iter_qp
, *tmp_iter_qp
;
172 STAILQ_FOREACH_SAFE(iter_qp
, &tgroup
->connected_qpairs
, poll_group_stailq
, tmp_iter_qp
) {
173 if (qpair
== iter_qp
) {
174 STAILQ_REMOVE(&tgroup
->connected_qpairs
, qpair
, spdk_nvme_qpair
, poll_group_stailq
);
179 STAILQ_FOREACH_SAFE(iter_qp
, &tgroup
->disconnected_qpairs
, poll_group_stailq
, tmp_iter_qp
) {
180 if (qpair
== iter_qp
) {
181 STAILQ_REMOVE(&tgroup
->disconnected_qpairs
, qpair
, spdk_nvme_qpair
, poll_group_stailq
);
190 nvme_transport_poll_group_process_completions(struct spdk_nvme_transport_poll_group
*group
,
191 uint32_t completions_per_qpair
, spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb
)
193 return g_process_completions_return_value
;
197 test_spdk_nvme_poll_group_create(void)
199 struct spdk_nvme_poll_group
*group
;
201 /* basic case - create a poll group with no internal transport poll groups. */
202 group
= spdk_nvme_poll_group_create(NULL
);
204 SPDK_CU_ASSERT_FATAL(group
!= NULL
);
205 CU_ASSERT(STAILQ_EMPTY(&group
->tgroups
));
206 SPDK_CU_ASSERT_FATAL(spdk_nvme_poll_group_destroy(group
) == 0);
208 TAILQ_INSERT_TAIL(&g_spdk_nvme_transports
, &t1
, link
);
209 TAILQ_INSERT_TAIL(&g_spdk_nvme_transports
, &t2
, link
);
210 TAILQ_INSERT_TAIL(&g_spdk_nvme_transports
, &t3
, link
);
212 /* advanced case - create a poll group with three internal poll groups. */
213 group
= spdk_nvme_poll_group_create(NULL
);
214 CU_ASSERT(STAILQ_EMPTY(&group
->tgroups
));
215 SPDK_CU_ASSERT_FATAL(spdk_nvme_poll_group_destroy(group
) == 0);
217 /* Failing case - failed to allocate a poll group. */
218 MOCK_SET(calloc
, NULL
);
219 group
= spdk_nvme_poll_group_create(NULL
);
220 CU_ASSERT(group
== NULL
);
223 TAILQ_REMOVE(&g_spdk_nvme_transports
, &t1
, link
);
224 TAILQ_REMOVE(&g_spdk_nvme_transports
, &t2
, link
);
225 TAILQ_REMOVE(&g_spdk_nvme_transports
, &t3
, link
);
229 test_spdk_nvme_poll_group_add_remove(void)
231 struct spdk_nvme_poll_group
*group
;
232 struct spdk_nvme_transport_poll_group
*tgroup
= NULL
, *tmp_tgroup
, *tgroup_1
= NULL
,
235 struct spdk_nvme_qpair
*qpair
;
236 struct spdk_nvme_qpair qpair1_1
= {0};
237 struct spdk_nvme_qpair qpair1_2
= {0};
238 struct spdk_nvme_qpair qpair2_1
= {0};
239 struct spdk_nvme_qpair qpair2_2
= {0};
240 struct spdk_nvme_qpair qpair4_1
= {0};
241 struct spdk_nvme_qpair qpair4_2
= {0};
244 TAILQ_INSERT_TAIL(&g_spdk_nvme_transports
, &t1
, link
);
245 TAILQ_INSERT_TAIL(&g_spdk_nvme_transports
, &t2
, link
);
246 TAILQ_INSERT_TAIL(&g_spdk_nvme_transports
, &t3
, link
);
248 group
= spdk_nvme_poll_group_create(NULL
);
249 SPDK_CU_ASSERT_FATAL(group
!= NULL
);
250 CU_ASSERT(STAILQ_EMPTY(&group
->tgroups
));
252 /* Add qpairs to a single transport. */
253 qpair1_1
.transport
= &t1
;
254 qpair1_1
.state
= NVME_QPAIR_DISCONNECTED
;
255 qpair1_2
.transport
= &t1
;
256 qpair1_2
.state
= NVME_QPAIR_ENABLED
;
257 CU_ASSERT(spdk_nvme_poll_group_add(group
, &qpair1_1
) == 0);
258 CU_ASSERT(spdk_nvme_poll_group_add(group
, &qpair1_2
) == -EINVAL
);
259 STAILQ_FOREACH(tmp_tgroup
, &group
->tgroups
, link
) {
260 if (tmp_tgroup
->transport
== &t1
) {
263 CU_ASSERT(STAILQ_EMPTY(&tmp_tgroup
->connected_qpairs
));
268 SPDK_CU_ASSERT_FATAL(tgroup
!= NULL
);
269 qpair
= STAILQ_FIRST(&tgroup
->connected_qpairs
);
270 SPDK_CU_ASSERT_FATAL(qpair
== &qpair1_1
);
271 qpair
= STAILQ_NEXT(qpair
, poll_group_stailq
);
272 CU_ASSERT(qpair
== NULL
);
274 /* Add qpairs to a second transport. */
275 qpair2_1
.transport
= &t2
;
276 qpair2_2
.transport
= &t2
;
277 CU_ASSERT(spdk_nvme_poll_group_add(group
, &qpair2_1
) == 0);
278 CU_ASSERT(spdk_nvme_poll_group_add(group
, &qpair2_2
) == 0);
279 qpair4_1
.transport
= &t4
;
280 qpair4_2
.transport
= &t4
;
281 /* Add qpairs for a transport that doesn't exist. */
282 CU_ASSERT(spdk_nvme_poll_group_add(group
, &qpair4_1
) == -ENODEV
);
283 CU_ASSERT(spdk_nvme_poll_group_add(group
, &qpair4_2
) == -ENODEV
);
285 STAILQ_FOREACH(tmp_tgroup
, &group
->tgroups
, link
) {
286 if (tmp_tgroup
->transport
== &t1
) {
287 tgroup_1
= tmp_tgroup
;
288 } else if (tmp_tgroup
->transport
== &t2
) {
289 tgroup_2
= tmp_tgroup
;
291 CU_ASSERT(STAILQ_EMPTY(&tmp_tgroup
->connected_qpairs
));
296 SPDK_CU_ASSERT_FATAL(tgroup_1
!= NULL
);
297 qpair
= STAILQ_FIRST(&tgroup_1
->connected_qpairs
);
298 SPDK_CU_ASSERT_FATAL(qpair
== &qpair1_1
);
299 qpair
= STAILQ_NEXT(qpair
, poll_group_stailq
);
300 CU_ASSERT(qpair
== NULL
);
301 SPDK_CU_ASSERT_FATAL(tgroup_2
!= NULL
);
302 qpair
= STAILQ_FIRST(&tgroup_2
->connected_qpairs
);
303 SPDK_CU_ASSERT_FATAL(qpair
== &qpair2_1
);
304 qpair
= STAILQ_NEXT(qpair
, poll_group_stailq
);
305 SPDK_CU_ASSERT_FATAL(qpair
== &qpair2_2
);
306 qpair
= STAILQ_NEXT(qpair
, poll_group_stailq
);
307 CU_ASSERT(qpair
== NULL
);
309 /* Try removing a qpair that belongs to a transport not in our poll group. */
310 CU_ASSERT(spdk_nvme_poll_group_remove(group
, &qpair4_1
) == -ENODEV
);
312 TAILQ_INSERT_TAIL(&g_spdk_nvme_transports
, &t4
, link
);
313 CU_ASSERT(spdk_nvme_poll_group_add(group
, &qpair4_1
) == 0);
314 CU_ASSERT(spdk_nvme_poll_group_add(group
, &qpair4_2
) == 0);
315 STAILQ_FOREACH(tmp_tgroup
, &group
->tgroups
, link
) {
316 if (tmp_tgroup
->transport
== &t1
) {
317 tgroup_1
= tmp_tgroup
;
318 } else if (tmp_tgroup
->transport
== &t2
) {
319 tgroup_2
= tmp_tgroup
;
320 } else if (tmp_tgroup
->transport
== &t4
) {
321 tgroup_4
= tmp_tgroup
;
323 CU_ASSERT(STAILQ_EMPTY(&tmp_tgroup
->connected_qpairs
));
326 SPDK_CU_ASSERT_FATAL(tgroup_1
!= NULL
);
327 qpair
= STAILQ_FIRST(&tgroup_1
->connected_qpairs
);
328 SPDK_CU_ASSERT_FATAL(qpair
== &qpair1_1
);
329 qpair
= STAILQ_NEXT(qpair
, poll_group_stailq
);
330 CU_ASSERT(qpair
== NULL
);
331 SPDK_CU_ASSERT_FATAL(tgroup_2
!= NULL
);
332 qpair
= STAILQ_FIRST(&tgroup_2
->connected_qpairs
);
333 SPDK_CU_ASSERT_FATAL(qpair
== &qpair2_1
);
334 qpair
= STAILQ_NEXT(qpair
, poll_group_stailq
);
335 SPDK_CU_ASSERT_FATAL(qpair
== &qpair2_2
);
336 qpair
= STAILQ_NEXT(qpair
, poll_group_stailq
);
337 CU_ASSERT(qpair
== NULL
);
338 SPDK_CU_ASSERT_FATAL(tgroup_4
!= NULL
);
339 qpair
= STAILQ_FIRST(&tgroup_4
->connected_qpairs
);
340 SPDK_CU_ASSERT_FATAL(qpair
== &qpair4_1
);
341 qpair
= STAILQ_NEXT(qpair
, poll_group_stailq
);
342 SPDK_CU_ASSERT_FATAL(qpair
== &qpair4_2
);
343 qpair
= STAILQ_NEXT(qpair
, poll_group_stailq
);
344 CU_ASSERT(qpair
== NULL
);
346 /* remove all qpairs */
347 CU_ASSERT(spdk_nvme_poll_group_remove(group
, &qpair1_1
) == 0);
348 CU_ASSERT(spdk_nvme_poll_group_remove(group
, &qpair2_1
) == 0);
349 CU_ASSERT(spdk_nvme_poll_group_remove(group
, &qpair2_2
) == 0);
350 CU_ASSERT(spdk_nvme_poll_group_remove(group
, &qpair4_1
) == 0);
351 CU_ASSERT(spdk_nvme_poll_group_remove(group
, &qpair4_2
) == 0);
352 /* Confirm the fourth transport group was created. */
354 STAILQ_FOREACH_SAFE(tgroup
, &group
->tgroups
, link
, tmp_tgroup
) {
355 CU_ASSERT(STAILQ_EMPTY(&tgroup
->connected_qpairs
));
356 STAILQ_REMOVE(&group
->tgroups
, tgroup
, spdk_nvme_transport_poll_group
, link
);
361 SPDK_CU_ASSERT_FATAL(spdk_nvme_poll_group_destroy(group
) == 0);
363 TAILQ_REMOVE(&g_spdk_nvme_transports
, &t1
, link
);
364 TAILQ_REMOVE(&g_spdk_nvme_transports
, &t2
, link
);
365 TAILQ_REMOVE(&g_spdk_nvme_transports
, &t3
, link
);
366 TAILQ_REMOVE(&g_spdk_nvme_transports
, &t4
, link
);
370 test_spdk_nvme_poll_group_process_completions(void)
372 struct spdk_nvme_poll_group
*group
;
373 struct spdk_nvme_transport_poll_group
*tgroup
, *tmp_tgroup
;
374 struct spdk_nvme_qpair qpair1_1
= {0};
376 group
= spdk_nvme_poll_group_create(NULL
);
377 SPDK_CU_ASSERT_FATAL(group
!= NULL
);
379 /* If we don't have any transport poll groups, we shouldn't get any completions. */
380 g_process_completions_return_value
= 32;
381 CU_ASSERT(spdk_nvme_poll_group_process_completions(group
, 128,
382 unit_test_disconnected_qpair_cb
) == 0);
383 SPDK_CU_ASSERT_FATAL(spdk_nvme_poll_group_destroy(group
) == 0);
385 TAILQ_INSERT_TAIL(&g_spdk_nvme_transports
, &t1
, link
);
386 TAILQ_INSERT_TAIL(&g_spdk_nvme_transports
, &t2
, link
);
387 TAILQ_INSERT_TAIL(&g_spdk_nvme_transports
, &t3
, link
);
389 /* try it with three transport poll groups. */
390 group
= spdk_nvme_poll_group_create(NULL
);
391 SPDK_CU_ASSERT_FATAL(group
!= NULL
);
392 qpair1_1
.state
= NVME_QPAIR_DISCONNECTED
;
393 qpair1_1
.transport
= &t1
;
394 CU_ASSERT(spdk_nvme_poll_group_add(group
, &qpair1_1
) == 0);
395 qpair1_1
.state
= NVME_QPAIR_ENABLED
;
396 CU_ASSERT(nvme_poll_group_connect_qpair(&qpair1_1
) == 0);
397 CU_ASSERT(spdk_nvme_poll_group_process_completions(group
, 128,
398 unit_test_disconnected_qpair_cb
) == 32);
399 CU_ASSERT(spdk_nvme_poll_group_remove(group
, &qpair1_1
) == 0);
400 STAILQ_FOREACH_SAFE(tgroup
, &group
->tgroups
, link
, tmp_tgroup
) {
401 CU_ASSERT(STAILQ_EMPTY(&tgroup
->connected_qpairs
));
402 STAILQ_REMOVE(&group
->tgroups
, tgroup
, spdk_nvme_transport_poll_group
, link
);
405 SPDK_CU_ASSERT_FATAL(spdk_nvme_poll_group_destroy(group
) == 0);
407 TAILQ_REMOVE(&g_spdk_nvme_transports
, &t1
, link
);
408 TAILQ_REMOVE(&g_spdk_nvme_transports
, &t2
, link
);
409 TAILQ_REMOVE(&g_spdk_nvme_transports
, &t3
, link
);
413 test_spdk_nvme_poll_group_destroy(void)
415 struct spdk_nvme_poll_group
*group
;
416 struct spdk_nvme_transport_poll_group
*tgroup
, *tgroup_1
, *tgroup_2
;
417 struct spdk_nvme_qpair qpair1_1
= {0};
420 /* Simple destruction of empty poll group. */
421 group
= spdk_nvme_poll_group_create(NULL
);
422 SPDK_CU_ASSERT_FATAL(group
!= NULL
);
423 SPDK_CU_ASSERT_FATAL(spdk_nvme_poll_group_destroy(group
) == 0);
425 TAILQ_INSERT_TAIL(&g_spdk_nvme_transports
, &t1
, link
);
426 TAILQ_INSERT_TAIL(&g_spdk_nvme_transports
, &t2
, link
);
427 TAILQ_INSERT_TAIL(&g_spdk_nvme_transports
, &t3
, link
);
428 group
= spdk_nvme_poll_group_create(NULL
);
429 SPDK_CU_ASSERT_FATAL(group
!= NULL
);
431 qpair1_1
.transport
= &t1
;
432 CU_ASSERT(spdk_nvme_poll_group_add(group
, &qpair1_1
) == 0);
434 /* Don't remove busy poll groups. */
435 g_destroy_return_value
= -EBUSY
;
436 SPDK_CU_ASSERT_FATAL(spdk_nvme_poll_group_destroy(group
) == -EBUSY
);
437 STAILQ_FOREACH(tgroup
, &group
->tgroups
, link
) {
440 CU_ASSERT(num_tgroups
== 1);
442 /* destroy poll group with internal poll groups. */
443 g_destroy_return_value
= 0;
444 tgroup_1
= STAILQ_FIRST(&group
->tgroups
);
445 tgroup_2
= STAILQ_NEXT(tgroup_1
, link
);
446 CU_ASSERT(tgroup_2
== NULL
)
447 SPDK_CU_ASSERT_FATAL(spdk_nvme_poll_group_destroy(group
) == 0);
452 main(int argc
, char **argv
)
454 CU_pSuite suite
= NULL
;
455 unsigned int num_failures
;
457 if (CU_initialize_registry() != CUE_SUCCESS
) {
458 return CU_get_error();
461 suite
= CU_add_suite("nvme_ns_cmd", NULL
, NULL
);
463 CU_cleanup_registry();
464 return CU_get_error();
468 CU_add_test(suite
, "nvme_poll_group_create_test", test_spdk_nvme_poll_group_create
) == NULL
||
469 CU_add_test(suite
, "nvme_poll_group_add_remove_test",
470 test_spdk_nvme_poll_group_add_remove
) == NULL
||
471 CU_add_test(suite
, "nvme_poll_group_process_completions",
472 test_spdk_nvme_poll_group_process_completions
) == NULL
||
473 CU_add_test(suite
, "nvme_poll_group_destroy_test", test_spdk_nvme_poll_group_destroy
) == NULL
475 CU_cleanup_registry();
476 return CU_get_error();
479 CU_basic_set_mode(CU_BRM_VERBOSE
);
480 CU_basic_run_tests();
481 num_failures
= CU_get_number_of_failures();
482 CU_cleanup_registry();