// SPDX-License-Identifier: GPL-2.0
/*
 * KUnit tests
 *
 * Copyright (C) 2020, Intel Corporation
 * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
 */

#include <kunit/test.h>
#include <linux/idr.h>

#include "tb.h"
#include "tunnel.h"

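/*
 * IDA helpers tied to the lifetime of the test: the IDA is set up when
 * the KUnit resource is allocated and destroyed automatically when the
 * test finishes, so individual tests do not need to clean up.
 */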
static int __ida_init(struct kunit_resource *res, void *context)
{
	struct ida *ida = context;

	ida_init(ida);
	res->data = ida;
	return 0;
}

static void __ida_destroy(struct kunit_resource *res)
{
	struct ida *ida = res->data;

	ida_destroy(ida);
}

static void kunit_ida_init(struct kunit *test, struct ida *ida)
{
	kunit_alloc_resource(test, __ida_init, __ida_destroy, GFP_KERNEL, ida);
}

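/*
 * Builds a bare mock switch with the given route, upstream port and
 * port count. All allocations are test-managed (kunit_kzalloc), so
 * they are released when the test exits.
 */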
static struct tb_switch *alloc_switch(struct kunit *test, u64 route,
				      u8 upstream_port, u8 max_port_number)
{
	struct tb_switch *sw;
	size_t size;
	int i;

	sw = kunit_kzalloc(test, sizeof(*sw), GFP_KERNEL);
	if (!sw)
		return NULL;

	sw->config.upstream_port_number = upstream_port;
	sw->config.depth = tb_route_length(route);
	sw->config.route_hi = upper_32_bits(route);
	sw->config.route_lo = lower_32_bits(route);
	sw->config.enabled = 0;
	sw->config.max_port_number = max_port_number;

	size = (sw->config.max_port_number + 1) * sizeof(*sw->ports);
	sw->ports = kunit_kzalloc(test, size, GFP_KERNEL);
	if (!sw->ports)
		return NULL;

	for (i = 0; i <= sw->config.max_port_number; i++) {
		sw->ports[i].sw = sw;
		sw->ports[i].port = i;
		sw->ports[i].config.port_number = i;
		if (i) {
			kunit_ida_init(test, &sw->ports[i].in_hopids);
			kunit_ida_init(test, &sw->ports[i].out_hopids);
		}
	}

	return sw;
}

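/*
 * Builds a mock host router (route 0) with ports 0-13: lane adapters,
 * two DP IN adapters, the NHI, and PCIe/USB3 downstream adapters.
 * 0x8086 is the Intel vendor ID.
 */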
static struct tb_switch *alloc_host(struct kunit *test)
{
	struct tb_switch *sw;

	sw = alloc_switch(test, 0, 7, 13);
	if (!sw)
		return NULL;

	sw->config.vendor_id = 0x8086;
	sw->config.device_id = 0x9a1b;

	sw->ports[0].config.type = TB_TYPE_PORT;
	sw->ports[0].config.max_in_hop_id = 7;
	sw->ports[0].config.max_out_hop_id = 7;

	sw->ports[1].config.type = TB_TYPE_PORT;
	sw->ports[1].config.max_in_hop_id = 19;
	sw->ports[1].config.max_out_hop_id = 19;
	sw->ports[1].dual_link_port = &sw->ports[2];

	sw->ports[2].config.type = TB_TYPE_PORT;
	sw->ports[2].config.max_in_hop_id = 19;
	sw->ports[2].config.max_out_hop_id = 19;
	sw->ports[2].dual_link_port = &sw->ports[1];
	sw->ports[2].link_nr = 1;

	sw->ports[3].config.type = TB_TYPE_PORT;
	sw->ports[3].config.max_in_hop_id = 19;
	sw->ports[3].config.max_out_hop_id = 19;
	sw->ports[3].dual_link_port = &sw->ports[4];

	sw->ports[4].config.type = TB_TYPE_PORT;
	sw->ports[4].config.max_in_hop_id = 19;
	sw->ports[4].config.max_out_hop_id = 19;
	sw->ports[4].dual_link_port = &sw->ports[3];
	sw->ports[4].link_nr = 1;

	sw->ports[5].config.type = TB_TYPE_DP_HDMI_IN;
	sw->ports[5].config.max_in_hop_id = 9;
	sw->ports[5].config.max_out_hop_id = 9;
	sw->ports[5].cap_adap = -1;

	sw->ports[6].config.type = TB_TYPE_DP_HDMI_IN;
	sw->ports[6].config.max_in_hop_id = 9;
	sw->ports[6].config.max_out_hop_id = 9;
	sw->ports[6].cap_adap = -1;

	sw->ports[7].config.type = TB_TYPE_NHI;
	sw->ports[7].config.max_in_hop_id = 11;
	sw->ports[7].config.max_out_hop_id = 11;
	sw->ports[7].config.nfc_credits = 0x41800000;

	sw->ports[8].config.type = TB_TYPE_PCIE_DOWN;
	sw->ports[8].config.max_in_hop_id = 8;
	sw->ports[8].config.max_out_hop_id = 8;

	sw->ports[9].config.type = TB_TYPE_PCIE_DOWN;
	sw->ports[9].config.max_in_hop_id = 8;
	sw->ports[9].config.max_out_hop_id = 8;

	sw->ports[10].disabled = true;
	sw->ports[11].disabled = true;

	sw->ports[12].config.type = TB_TYPE_USB3_DOWN;
	sw->ports[12].config.max_in_hop_id = 8;
	sw->ports[12].config.max_out_hop_id = 8;

	sw->ports[13].config.type = TB_TYPE_USB3_DOWN;
	sw->ports[13].config.max_in_hop_id = 8;
	sw->ports[13].config.max_out_hop_id = 8;

	return sw;
}

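/*
 * Builds a mock device router with ports 0-19 and, unless @parent is
 * NULL, links it to the parent at @route. Both lanes are wired up and
 * marked bonded when @bonded is true.
 */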
static struct tb_switch *alloc_dev_default(struct kunit *test,
					   struct tb_switch *parent,
					   u64 route, bool bonded)
{
	struct tb_port *port, *upstream_port;
	struct tb_switch *sw;

	sw = alloc_switch(test, route, 1, 19);
	if (!sw)
		return NULL;

	sw->config.vendor_id = 0x8086;
	sw->config.device_id = 0x15ef;

	sw->ports[0].config.type = TB_TYPE_PORT;
	sw->ports[0].config.max_in_hop_id = 8;
	sw->ports[0].config.max_out_hop_id = 8;

	sw->ports[1].config.type = TB_TYPE_PORT;
	sw->ports[1].config.max_in_hop_id = 19;
	sw->ports[1].config.max_out_hop_id = 19;
	sw->ports[1].dual_link_port = &sw->ports[2];

	sw->ports[2].config.type = TB_TYPE_PORT;
	sw->ports[2].config.max_in_hop_id = 19;
	sw->ports[2].config.max_out_hop_id = 19;
	sw->ports[2].dual_link_port = &sw->ports[1];
	sw->ports[2].link_nr = 1;

	sw->ports[3].config.type = TB_TYPE_PORT;
	sw->ports[3].config.max_in_hop_id = 19;
	sw->ports[3].config.max_out_hop_id = 19;
	sw->ports[3].dual_link_port = &sw->ports[4];

	sw->ports[4].config.type = TB_TYPE_PORT;
	sw->ports[4].config.max_in_hop_id = 19;
	sw->ports[4].config.max_out_hop_id = 19;
	sw->ports[4].dual_link_port = &sw->ports[3];
	sw->ports[4].link_nr = 1;

	sw->ports[5].config.type = TB_TYPE_PORT;
	sw->ports[5].config.max_in_hop_id = 19;
	sw->ports[5].config.max_out_hop_id = 19;
	sw->ports[5].dual_link_port = &sw->ports[6];

	sw->ports[6].config.type = TB_TYPE_PORT;
	sw->ports[6].config.max_in_hop_id = 19;
	sw->ports[6].config.max_out_hop_id = 19;
	sw->ports[6].dual_link_port = &sw->ports[5];
	sw->ports[6].link_nr = 1;

	sw->ports[7].config.type = TB_TYPE_PORT;
	sw->ports[7].config.max_in_hop_id = 19;
	sw->ports[7].config.max_out_hop_id = 19;
	sw->ports[7].dual_link_port = &sw->ports[8];

	sw->ports[8].config.type = TB_TYPE_PORT;
	sw->ports[8].config.max_in_hop_id = 19;
	sw->ports[8].config.max_out_hop_id = 19;
	sw->ports[8].dual_link_port = &sw->ports[7];
	sw->ports[8].link_nr = 1;

	sw->ports[9].config.type = TB_TYPE_PCIE_UP;
	sw->ports[9].config.max_in_hop_id = 8;
	sw->ports[9].config.max_out_hop_id = 8;

	sw->ports[10].config.type = TB_TYPE_PCIE_DOWN;
	sw->ports[10].config.max_in_hop_id = 8;
	sw->ports[10].config.max_out_hop_id = 8;

	sw->ports[11].config.type = TB_TYPE_PCIE_DOWN;
	sw->ports[11].config.max_in_hop_id = 8;
	sw->ports[11].config.max_out_hop_id = 8;

	sw->ports[12].config.type = TB_TYPE_PCIE_DOWN;
	sw->ports[12].config.max_in_hop_id = 8;
	sw->ports[12].config.max_out_hop_id = 8;

	sw->ports[13].config.type = TB_TYPE_DP_HDMI_OUT;
	sw->ports[13].config.max_in_hop_id = 9;
	sw->ports[13].config.max_out_hop_id = 9;
	sw->ports[13].cap_adap = -1;

	sw->ports[14].config.type = TB_TYPE_DP_HDMI_OUT;
	sw->ports[14].config.max_in_hop_id = 9;
	sw->ports[14].config.max_out_hop_id = 9;
	sw->ports[14].cap_adap = -1;

	sw->ports[15].disabled = true;

	sw->ports[16].config.type = TB_TYPE_USB3_UP;
	sw->ports[16].config.max_in_hop_id = 8;
	sw->ports[16].config.max_out_hop_id = 8;

	sw->ports[17].config.type = TB_TYPE_USB3_DOWN;
	sw->ports[17].config.max_in_hop_id = 8;
	sw->ports[17].config.max_out_hop_id = 8;

	sw->ports[18].config.type = TB_TYPE_USB3_DOWN;
	sw->ports[18].config.max_in_hop_id = 8;
	sw->ports[18].config.max_out_hop_id = 8;

	sw->ports[19].config.type = TB_TYPE_USB3_DOWN;
	sw->ports[19].config.max_in_hop_id = 8;
	sw->ports[19].config.max_out_hop_id = 8;

	if (!parent)
		return sw;

	/* Link them */
	upstream_port = tb_upstream_port(sw);
	port = tb_port_at(route, parent);
	port->remote = upstream_port;
	upstream_port->remote = port;
	if (port->dual_link_port && upstream_port->dual_link_port) {
		port->dual_link_port->remote = upstream_port->dual_link_port;
		upstream_port->dual_link_port->remote = port->dual_link_port;
	}

	if (bonded) {
		/* Bonding is used */
		port->bonded = true;
		port->dual_link_port->bonded = true;
		upstream_port->bonded = true;
		upstream_port->dual_link_port->bonded = true;
	}

	return sw;
}

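/*
 * Like alloc_dev_default() but ports 13 and 14 are DP IN adapters
 * instead of DP OUT, so the device can act as a DP source.
 */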
static struct tb_switch *alloc_dev_with_dpin(struct kunit *test,
					     struct tb_switch *parent,
					     u64 route, bool bonded)
{
	struct tb_switch *sw;

	sw = alloc_dev_default(test, parent, route, bonded);
	if (!sw)
		return NULL;

	sw->ports[13].config.type = TB_TYPE_DP_HDMI_IN;
	sw->ports[13].config.max_in_hop_id = 9;
	sw->ports[13].config.max_out_hop_id = 9;

	sw->ports[14].config.type = TB_TYPE_DP_HDMI_IN;
	sw->ports[14].config.max_in_hop_id = 9;
	sw->ports[14].config.max_out_hop_id = 9;

	return sw;
}

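/*
 * Path walking tests below exercise tb_next_port_on_path() and
 * tb_for_each_port_on_path() over increasingly complex topologies.
 */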
static void tb_test_path_basic(struct kunit *test)
{
	struct tb_port *src_port, *dst_port, *p;
	struct tb_switch *host;

	host = alloc_host(test);

	src_port = &host->ports[5];
	dst_port = src_port;

	p = tb_next_port_on_path(src_port, dst_port, NULL);
	KUNIT_EXPECT_PTR_EQ(test, p, dst_port);

	p = tb_next_port_on_path(src_port, dst_port, p);
	KUNIT_EXPECT_TRUE(test, !p);
}

static void tb_test_path_not_connected_walk(struct kunit *test)
{
	struct tb_port *src_port, *dst_port, *p;
	struct tb_switch *host, *dev;

	host = alloc_host(test);
	/* No connection between host and dev */
	dev = alloc_dev_default(test, NULL, 3, true);

	src_port = &host->ports[12];
	dst_port = &dev->ports[16];

	p = tb_next_port_on_path(src_port, dst_port, NULL);
	KUNIT_EXPECT_PTR_EQ(test, p, src_port);

	p = tb_next_port_on_path(src_port, dst_port, p);
	KUNIT_EXPECT_PTR_EQ(test, p, &host->ports[3]);

	p = tb_next_port_on_path(src_port, dst_port, p);
	KUNIT_EXPECT_TRUE(test, !p);

	/* Other direction */

	p = tb_next_port_on_path(dst_port, src_port, NULL);
	KUNIT_EXPECT_PTR_EQ(test, p, dst_port);

	p = tb_next_port_on_path(dst_port, src_port, p);
	KUNIT_EXPECT_PTR_EQ(test, p, &dev->ports[1]);

	p = tb_next_port_on_path(dst_port, src_port, p);
	KUNIT_EXPECT_TRUE(test, !p);
}

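/* Expected route, port and adapter type for each step of a path walk */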
struct port_expectation {
	u64 route;
	u8 port;
	enum tb_port_type type;
};

static void tb_test_path_single_hop_walk(struct kunit *test)
{
	/*
	 * Walks from Host PCIe downstream port to Device #1 PCIe
	 * upstream port.
	 *
	 *  [Host]
	 *   1 |
	 *   1 |
	 * [Device]
	 */
	static const struct port_expectation test_data[] = {
		{ .route = 0x0, .port = 8, .type = TB_TYPE_PCIE_DOWN },
		{ .route = 0x0, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x1, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x1, .port = 9, .type = TB_TYPE_PCIE_UP },
	};
	struct tb_port *src_port, *dst_port, *p;
	struct tb_switch *host, *dev;
	int i;

	host = alloc_host(test);
	dev = alloc_dev_default(test, host, 1, true);

	src_port = &host->ports[8];
	dst_port = &dev->ports[9];

	/* Walk both directions */

	i = 0;
	tb_for_each_port_on_path(src_port, dst_port, p) {
		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
				test_data[i].type);
		i++;
	}

	KUNIT_EXPECT_EQ(test, i, (int)ARRAY_SIZE(test_data));

	i = ARRAY_SIZE(test_data) - 1;
	tb_for_each_port_on_path(dst_port, src_port, p) {
		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
				test_data[i].type);
		i--;
	}

	KUNIT_EXPECT_EQ(test, i, -1);
}

static void tb_test_path_daisy_chain_walk(struct kunit *test)
{
	/*
	 * Walks from Host DP IN to Device #2 DP OUT.
	 *
	 *  [Host]
	 *   1 |
	 *   1 |
	 * [Device #1]
	 *   3 /
	 *  1 /
	 * [Device #2]
	 */
	static const struct port_expectation test_data[] = {
		{ .route = 0x0, .port = 5, .type = TB_TYPE_DP_HDMI_IN },
		{ .route = 0x0, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x1, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x1, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x301, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x301, .port = 13, .type = TB_TYPE_DP_HDMI_OUT },
	};
	struct tb_port *src_port, *dst_port, *p;
	struct tb_switch *host, *dev1, *dev2;
	int i;

	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x1, true);
	dev2 = alloc_dev_default(test, dev1, 0x301, true);

	src_port = &host->ports[5];
	dst_port = &dev2->ports[13];

	/* Walk both directions */

	i = 0;
	tb_for_each_port_on_path(src_port, dst_port, p) {
		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
				test_data[i].type);
		i++;
	}

	KUNIT_EXPECT_EQ(test, i, (int)ARRAY_SIZE(test_data));

	i = ARRAY_SIZE(test_data) - 1;
	tb_for_each_port_on_path(dst_port, src_port, p) {
		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
				test_data[i].type);
		i--;
	}

	KUNIT_EXPECT_EQ(test, i, -1);
}

static void tb_test_path_simple_tree_walk(struct kunit *test)
{
	/*
	 * Walks from Host DP IN to Device #3 DP OUT.
	 *
	 *          [Host]
	 *           1 |
	 *           1 |
	 *        [Device #1]
	 *       3 /   | 5  \ 7
	 *      1 /    |     \ 1
	 * [Device #2] |  [Device #4]
	 *             | 1
	 *        [Device #3]
	 */
	static const struct port_expectation test_data[] = {
		{ .route = 0x0, .port = 5, .type = TB_TYPE_DP_HDMI_IN },
		{ .route = 0x0, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x1, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x1, .port = 5, .type = TB_TYPE_PORT },
		{ .route = 0x501, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x501, .port = 13, .type = TB_TYPE_DP_HDMI_OUT },
	};
	struct tb_port *src_port, *dst_port, *p;
	struct tb_switch *host, *dev1, *dev3;
	int i;

	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x1, true);
	alloc_dev_default(test, dev1, 0x301, true);
	dev3 = alloc_dev_default(test, dev1, 0x501, true);
	alloc_dev_default(test, dev1, 0x701, true);

	src_port = &host->ports[5];
	dst_port = &dev3->ports[13];

	/* Walk both directions */

	i = 0;
	tb_for_each_port_on_path(src_port, dst_port, p) {
		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
				test_data[i].type);
		i++;
	}

	KUNIT_EXPECT_EQ(test, i, (int)ARRAY_SIZE(test_data));

	i = ARRAY_SIZE(test_data) - 1;
	tb_for_each_port_on_path(dst_port, src_port, p) {
		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
				test_data[i].type);
		i--;
	}

	KUNIT_EXPECT_EQ(test, i, -1);
}

static void tb_test_path_complex_tree_walk(struct kunit *test)
{
	/*
	 * Walks from Device #3 DP IN to Device #9 DP OUT.
	 *
	 *            [Host]
	 *             1 |
	 *             1 |
	 *          [Device #1]
	 *         3 /   | 5  \ 7
	 *        1 /    |     \ 1
	 * [Device #2]   |    [Device #5]
	 *    5 |        | 1       \ 7
	 *    1 |   [Device #4]     \ 1
	 * [Device #3]          [Device #6]
	 *                         3 /
	 *                        1 /
	 *                    [Device #7]
	 *                     3 /  | 5
	 *                    1 /   |
	 *              [Device #8] | 1
	 *                     [Device #9]
	 */
	static const struct port_expectation test_data[] = {
		{ .route = 0x50301, .port = 13, .type = TB_TYPE_DP_HDMI_IN },
		{ .route = 0x50301, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x301, .port = 5, .type = TB_TYPE_PORT },
		{ .route = 0x301, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x1, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x1, .port = 7, .type = TB_TYPE_PORT },
		{ .route = 0x701, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x701, .port = 7, .type = TB_TYPE_PORT },
		{ .route = 0x70701, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x70701, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x3070701, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x3070701, .port = 5, .type = TB_TYPE_PORT },
		{ .route = 0x503070701, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x503070701, .port = 14, .type = TB_TYPE_DP_HDMI_OUT },
	};
	struct tb_switch *host, *dev1, *dev2, *dev3, *dev5, *dev6, *dev7, *dev9;
	struct tb_port *src_port, *dst_port, *p;
	int i;

	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x1, true);
	dev2 = alloc_dev_default(test, dev1, 0x301, true);
	dev3 = alloc_dev_with_dpin(test, dev2, 0x50301, true);
	alloc_dev_default(test, dev1, 0x501, true);
	dev5 = alloc_dev_default(test, dev1, 0x701, true);
	dev6 = alloc_dev_default(test, dev5, 0x70701, true);
	dev7 = alloc_dev_default(test, dev6, 0x3070701, true);
	alloc_dev_default(test, dev7, 0x303070701, true);
	dev9 = alloc_dev_default(test, dev7, 0x503070701, true);

	src_port = &dev3->ports[13];
	dst_port = &dev9->ports[14];

	/* Walk both directions */

	i = 0;
	tb_for_each_port_on_path(src_port, dst_port, p) {
		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
				test_data[i].type);
		i++;
	}

	KUNIT_EXPECT_EQ(test, i, (int)ARRAY_SIZE(test_data));

	i = ARRAY_SIZE(test_data) - 1;
	tb_for_each_port_on_path(dst_port, src_port, p) {
		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
				test_data[i].type);
		i--;
	}

	KUNIT_EXPECT_EQ(test, i, -1);
}

static void tb_test_path_max_length_walk(struct kunit *test)
{
	struct tb_switch *host, *dev1, *dev2, *dev3, *dev4, *dev5, *dev6;
	struct tb_switch *dev7, *dev8, *dev9, *dev10, *dev11, *dev12;
	struct tb_port *src_port, *dst_port, *p;
	int i;

	/*
	 * Walks from Device #6 DP IN to Device #12 DP OUT.
	 *
	 *          [Host]
	 *         1 /  \ 3
	 *        1 /    \ 1
	 * [Device #1]  [Device #7]
	 *     3 |         | 3
	 *     1 |         | 1
	 * [Device #2]  [Device #8]
	 *     3 |         | 3
	 *     1 |         | 1
	 * [Device #3]  [Device #9]
	 *     3 |         | 3
	 *     1 |         | 1
	 * [Device #4]  [Device #10]
	 *     3 |         | 3
	 *     1 |         | 1
	 * [Device #5]  [Device #11]
	 *     3 |         | 3
	 *     1 |         | 1
	 * [Device #6]  [Device #12]
	 */
	static const struct port_expectation test_data[] = {
		{ .route = 0x30303030301, .port = 13, .type = TB_TYPE_DP_HDMI_IN },
		{ .route = 0x30303030301, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x303030301, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x303030301, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x3030301, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x3030301, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x30301, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x30301, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x301, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x301, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x1, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x1, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x0, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x0, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x3, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x3, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x303, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x303, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x30303, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x30303, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x3030303, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x3030303, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x303030303, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x303030303, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x30303030303, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x30303030303, .port = 13, .type = TB_TYPE_DP_HDMI_OUT },
	};

	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x1, true);
	dev2 = alloc_dev_default(test, dev1, 0x301, true);
	dev3 = alloc_dev_default(test, dev2, 0x30301, true);
	dev4 = alloc_dev_default(test, dev3, 0x3030301, true);
	dev5 = alloc_dev_default(test, dev4, 0x303030301, true);
	dev6 = alloc_dev_with_dpin(test, dev5, 0x30303030301, true);
	dev7 = alloc_dev_default(test, host, 0x3, true);
	dev8 = alloc_dev_default(test, dev7, 0x303, true);
	dev9 = alloc_dev_default(test, dev8, 0x30303, true);
	dev10 = alloc_dev_default(test, dev9, 0x3030303, true);
	dev11 = alloc_dev_default(test, dev10, 0x303030303, true);
	dev12 = alloc_dev_default(test, dev11, 0x30303030303, true);

	src_port = &dev6->ports[13];
	dst_port = &dev12->ports[13];

	/* Walk both directions */

	i = 0;
	tb_for_each_port_on_path(src_port, dst_port, p) {
		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
				test_data[i].type);
		i++;
	}

	KUNIT_EXPECT_EQ(test, i, (int)ARRAY_SIZE(test_data));

	i = ARRAY_SIZE(test_data) - 1;
	tb_for_each_port_on_path(dst_port, src_port, p) {
		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
				test_data[i].type);
		i--;
	}

	KUNIT_EXPECT_EQ(test, i, -1);
}

static void tb_test_path_not_connected(struct kunit *test)
{
	struct tb_switch *host, *dev1, *dev2;
	struct tb_port *down, *up;
	struct tb_path *path;

	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x3, false);
	/* Not connected to anything */
	dev2 = alloc_dev_default(test, NULL, 0x303, false);

	down = &dev1->ports[10];
	up = &dev2->ports[9];

	path = tb_path_alloc(NULL, down, 8, up, 8, 0, "PCIe Down");
	KUNIT_ASSERT_TRUE(test, path == NULL);
	path = tb_path_alloc(NULL, down, 8, up, 8, 1, "PCIe Down");
	KUNIT_ASSERT_TRUE(test, path == NULL);
}

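/* Expected in/out adapter of each hop of an allocated tb_path */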
struct hop_expectation {
	u64 route;
	u8 in_port;
	enum tb_port_type in_type;
	u8 out_port;
	enum tb_port_type out_type;
};

static void tb_test_path_not_bonded_lane0(struct kunit *test)
{
	/*
	 * PCIe path from host to device using lane 0.
	 *
	 *  [Host]
	 *  3 |: 4
	 *  1 |: 2
	 * [Device]
	 */
	static const struct hop_expectation test_data[] = {
		{
			.route = 0x0,
			.in_port = 9,
			.in_type = TB_TYPE_PCIE_DOWN,
			.out_port = 3,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x3,
			.in_port = 1,
			.in_type = TB_TYPE_PORT,
			.out_port = 9,
			.out_type = TB_TYPE_PCIE_UP,
		},
	};
	struct tb_switch *host, *dev;
	struct tb_port *down, *up;
	struct tb_path *path;
	int i;

	host = alloc_host(test);
	dev = alloc_dev_default(test, host, 0x3, false);

	down = &host->ports[9];
	up = &dev->ports[9];

	path = tb_path_alloc(NULL, down, 8, up, 8, 0, "PCIe Down");
	KUNIT_ASSERT_TRUE(test, path != NULL);
	KUNIT_ASSERT_EQ(test, path->path_length, (int)ARRAY_SIZE(test_data));
	for (i = 0; i < ARRAY_SIZE(test_data); i++) {
		const struct tb_port *in_port, *out_port;

		in_port = path->hops[i].in_port;
		out_port = path->hops[i].out_port;

		KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
				test_data[i].in_type);
		KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
				test_data[i].out_type);
	}
	tb_path_free(path);
}

static void tb_test_path_not_bonded_lane1(struct kunit *test)
{
	/*
	 * DP Video path from host to device using lane 1. Paths like
	 * these are only used with Thunderbolt 1 devices where lane
	 * bonding is not possible. USB4 specifically does not allow
	 * paths like this (you either use lane 0 where lane 1 is
	 * disabled or both lanes are bonded).
	 *
	 *  [Host]
	 *  1 :| 2
	 *  1 :| 2
	 * [Device]
	 */
	static const struct hop_expectation test_data[] = {
		{
			.route = 0x0,
			.in_port = 5,
			.in_type = TB_TYPE_DP_HDMI_IN,
			.out_port = 2,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x1,
			.in_port = 2,
			.in_type = TB_TYPE_PORT,
			.out_port = 13,
			.out_type = TB_TYPE_DP_HDMI_OUT,
		},
	};
	struct tb_switch *host, *dev;
	struct tb_port *in, *out;
	struct tb_path *path;
	int i;

	host = alloc_host(test);
	dev = alloc_dev_default(test, host, 0x1, false);

	in = &host->ports[5];
	out = &dev->ports[13];

	path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video");
	KUNIT_ASSERT_TRUE(test, path != NULL);
	KUNIT_ASSERT_EQ(test, path->path_length, (int)ARRAY_SIZE(test_data));
	for (i = 0; i < ARRAY_SIZE(test_data); i++) {
		const struct tb_port *in_port, *out_port;

		in_port = path->hops[i].in_port;
		out_port = path->hops[i].out_port;

		KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
				test_data[i].in_type);
		KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
				test_data[i].out_type);
	}
	tb_path_free(path);
}

static void tb_test_path_not_bonded_lane1_chain(struct kunit *test)
{
	/*
	 * DP Video path from host to device 3 using lane 1.
	 *
	 *   [Host]
	 *   1 :| 2
	 *   1 :| 2
	 *  [Device #1]
	 *   7 :| 8
	 *   1 :| 2
	 *  [Device #2]
	 *   5 :| 6
	 *   1 :| 2
	 *  [Device #3]
	 */
	static const struct hop_expectation test_data[] = {
		{
			.route = 0x0,
			.in_port = 5,
			.in_type = TB_TYPE_DP_HDMI_IN,
			.out_port = 2,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x1,
			.in_port = 2,
			.in_type = TB_TYPE_PORT,
			.out_port = 8,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x701,
			.in_port = 2,
			.in_type = TB_TYPE_PORT,
			.out_port = 6,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x50701,
			.in_port = 2,
			.in_type = TB_TYPE_PORT,
			.out_port = 13,
			.out_type = TB_TYPE_DP_HDMI_OUT,
		},
	};
	struct tb_switch *host, *dev1, *dev2, *dev3;
	struct tb_port *in, *out;
	struct tb_path *path;
	int i;

	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x1, false);
	dev2 = alloc_dev_default(test, dev1, 0x701, false);
	dev3 = alloc_dev_default(test, dev2, 0x50701, false);

	in = &host->ports[5];
	out = &dev3->ports[13];

	path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video");
	KUNIT_ASSERT_TRUE(test, path != NULL);
	KUNIT_ASSERT_EQ(test, path->path_length, (int)ARRAY_SIZE(test_data));
	for (i = 0; i < ARRAY_SIZE(test_data); i++) {
		const struct tb_port *in_port, *out_port;

		in_port = path->hops[i].in_port;
		out_port = path->hops[i].out_port;

		KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
				test_data[i].in_type);
		KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
				test_data[i].out_type);
	}
	tb_path_free(path);
}

static void tb_test_path_not_bonded_lane1_chain_reverse(struct kunit *test)
{
	/*
	 * DP Video path from device 3 to host using lane 1.
	 *
	 *   [Host]
	 *   1 :| 2
	 *   1 :| 2
	 *  [Device #1]
	 *   7 :| 8
	 *   1 :| 2
	 *  [Device #2]
	 *   5 :| 6
	 *   1 :| 2
	 *  [Device #3]
	 */
	static const struct hop_expectation test_data[] = {
		{
			.route = 0x50701,
			.in_port = 13,
			.in_type = TB_TYPE_DP_HDMI_IN,
			.out_port = 2,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x701,
			.in_port = 6,
			.in_type = TB_TYPE_PORT,
			.out_port = 2,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x1,
			.in_port = 8,
			.in_type = TB_TYPE_PORT,
			.out_port = 2,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x0,
			.in_port = 2,
			.in_type = TB_TYPE_PORT,
			.out_port = 5,
			.out_type = TB_TYPE_DP_HDMI_IN,
		},
	};
	struct tb_switch *host, *dev1, *dev2, *dev3;
	struct tb_port *in, *out;
	struct tb_path *path;
	int i;

	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x1, false);
	dev2 = alloc_dev_default(test, dev1, 0x701, false);
	dev3 = alloc_dev_with_dpin(test, dev2, 0x50701, false);

	in = &dev3->ports[13];
	out = &host->ports[5];

	path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video");
	KUNIT_ASSERT_TRUE(test, path != NULL);
	KUNIT_ASSERT_EQ(test, path->path_length, (int)ARRAY_SIZE(test_data));
	for (i = 0; i < ARRAY_SIZE(test_data); i++) {
		const struct tb_port *in_port, *out_port;

		in_port = path->hops[i].in_port;
		out_port = path->hops[i].out_port;

		KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
				test_data[i].in_type);
		KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
				test_data[i].out_type);
	}
	tb_path_free(path);
}

static void tb_test_path_mixed_chain(struct kunit *test)
{
	/*
	 * DP Video path from host to device 4 where first and last link
	 * is bonded.
	 *
	 *   [Host]
	 *    1 |
	 *    1 |
	 *  [Device #1]
	 *   7 :| 8
	 *   1 :| 2
	 *  [Device #2]
	 *   5 :| 6
	 *   1 :| 2
	 *  [Device #3]
	 *    3 |
	 *    1 |
	 *  [Device #4]
	 */
	static const struct hop_expectation test_data[] = {
		{
			.route = 0x0,
			.in_port = 5,
			.in_type = TB_TYPE_DP_HDMI_IN,
			.out_port = 1,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x1,
			.in_port = 1,
			.in_type = TB_TYPE_PORT,
			.out_port = 8,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x701,
			.in_port = 2,
			.in_type = TB_TYPE_PORT,
			.out_port = 6,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x50701,
			.in_port = 2,
			.in_type = TB_TYPE_PORT,
			.out_port = 3,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x3050701,
			.in_port = 1,
			.in_type = TB_TYPE_PORT,
			.out_port = 13,
			.out_type = TB_TYPE_DP_HDMI_OUT,
		},
	};
	struct tb_switch *host, *dev1, *dev2, *dev3, *dev4;
	struct tb_port *in, *out;
	struct tb_path *path;
	int i;

	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x1, true);
	dev2 = alloc_dev_default(test, dev1, 0x701, false);
	dev3 = alloc_dev_default(test, dev2, 0x50701, false);
	dev4 = alloc_dev_default(test, dev3, 0x3050701, true);

	in = &host->ports[5];
	out = &dev4->ports[13];

	path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video");
	KUNIT_ASSERT_TRUE(test, path != NULL);
	KUNIT_ASSERT_EQ(test, path->path_length, (int)ARRAY_SIZE(test_data));
	for (i = 0; i < ARRAY_SIZE(test_data); i++) {
		const struct tb_port *in_port, *out_port;

		in_port = path->hops[i].in_port;
		out_port = path->hops[i].out_port;

		KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
				test_data[i].in_type);
		KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
				test_data[i].out_type);
	}
	tb_path_free(path);
}

static void tb_test_path_mixed_chain_reverse(struct kunit *test)
{
	/*
	 * DP Video path from device 4 to host where first and last link
	 * is bonded.
	 *
	 *   [Host]
	 *    1 |
	 *    1 |
	 *  [Device #1]
	 *   7 :| 8
	 *   1 :| 2
	 *  [Device #2]
	 *   5 :| 6
	 *   1 :| 2
	 *  [Device #3]
	 *    3 |
	 *    1 |
	 *  [Device #4]
	 */
	static const struct hop_expectation test_data[] = {
		{
			.route = 0x3050701,
			.in_port = 13,
			.in_type = TB_TYPE_DP_HDMI_OUT,
			.out_port = 1,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x50701,
			.in_port = 3,
			.in_type = TB_TYPE_PORT,
			.out_port = 2,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x701,
			.in_port = 6,
			.in_type = TB_TYPE_PORT,
			.out_port = 2,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x1,
			.in_port = 8,
			.in_type = TB_TYPE_PORT,
			.out_port = 1,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x0,
			.in_port = 1,
			.in_type = TB_TYPE_PORT,
			.out_port = 5,
			.out_type = TB_TYPE_DP_HDMI_IN,
		},
	};
	struct tb_switch *host, *dev1, *dev2, *dev3, *dev4;
	struct tb_port *in, *out;
	struct tb_path *path;
	int i;

	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x1, true);
	dev2 = alloc_dev_default(test, dev1, 0x701, false);
	dev3 = alloc_dev_default(test, dev2, 0x50701, false);
	dev4 = alloc_dev_default(test, dev3, 0x3050701, true);

	in = &dev4->ports[13];
	out = &host->ports[5];

	path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video");
	KUNIT_ASSERT_TRUE(test, path != NULL);
	KUNIT_ASSERT_EQ(test, path->path_length, (int)ARRAY_SIZE(test_data));
	for (i = 0; i < ARRAY_SIZE(test_data); i++) {
		const struct tb_port *in_port, *out_port;

		in_port = path->hops[i].in_port;
		out_port = path->hops[i].out_port;

		KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
				test_data[i].in_type);
		KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
				test_data[i].out_type);
	}
	tb_path_free(path);
}

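/*
 * Tunnel tests: verify that the tb_tunnel_alloc_*() helpers build the
 * expected set of paths between the given adapters.
 */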
static void tb_test_tunnel_pcie(struct kunit *test)
{
	struct tb_switch *host, *dev1, *dev2;
	struct tb_tunnel *tunnel1, *tunnel2;
	struct tb_port *down, *up;

	/*
	 * Create PCIe tunnel between host and two devices.
	 *
	 *  [Host]
	 *   1 |
	 *   1 |
	 * [Device #1]
	 *   5 |
	 *   1 |
	 * [Device #2]
	 */
	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x1, true);
	dev2 = alloc_dev_default(test, dev1, 0x501, true);

	down = &host->ports[8];
	up = &dev1->ports[9];
	tunnel1 = tb_tunnel_alloc_pci(NULL, up, down);
	KUNIT_ASSERT_TRUE(test, tunnel1 != NULL);
	KUNIT_EXPECT_EQ(test, tunnel1->type, (enum tb_tunnel_type)TB_TUNNEL_PCI);
	KUNIT_EXPECT_PTR_EQ(test, tunnel1->src_port, down);
	KUNIT_EXPECT_PTR_EQ(test, tunnel1->dst_port, up);
	KUNIT_ASSERT_EQ(test, tunnel1->npaths, (size_t)2);
	KUNIT_ASSERT_EQ(test, tunnel1->paths[0]->path_length, 2);
	KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[0]->hops[0].in_port, down);
	KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[0]->hops[1].out_port, up);
	KUNIT_ASSERT_EQ(test, tunnel1->paths[1]->path_length, 2);
	KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[1]->hops[0].in_port, up);
	KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[1]->hops[1].out_port, down);

	down = &dev1->ports[10];
	up = &dev2->ports[9];
	tunnel2 = tb_tunnel_alloc_pci(NULL, up, down);
	KUNIT_ASSERT_TRUE(test, tunnel2 != NULL);
	KUNIT_EXPECT_EQ(test, tunnel2->type, (enum tb_tunnel_type)TB_TUNNEL_PCI);
	KUNIT_EXPECT_PTR_EQ(test, tunnel2->src_port, down);
	KUNIT_EXPECT_PTR_EQ(test, tunnel2->dst_port, up);
	KUNIT_ASSERT_EQ(test, tunnel2->npaths, (size_t)2);
	KUNIT_ASSERT_EQ(test, tunnel2->paths[0]->path_length, 2);
	KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[0]->hops[0].in_port, down);
	KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[0]->hops[1].out_port, up);
	KUNIT_ASSERT_EQ(test, tunnel2->paths[1]->path_length, 2);
	KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[1]->hops[0].in_port, up);
	KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[1]->hops[1].out_port, down);

	tb_tunnel_free(tunnel2);
	tb_tunnel_free(tunnel1);
}

static void tb_test_tunnel_dp(struct kunit *test)
{
	struct tb_switch *host, *dev;
	struct tb_port *in, *out;
	struct tb_tunnel *tunnel;

	/*
	 * Create DP tunnel between Host and Device
	 *
	 *  [Host]
	 *   3 |
	 *   1 |
	 * [Device]
	 */
	host = alloc_host(test);
	dev = alloc_dev_default(test, host, 0x3, true);

	in = &host->ports[5];
	out = &dev->ports[13];

	tunnel = tb_tunnel_alloc_dp(NULL, in, out, 0, 0);
	KUNIT_ASSERT_TRUE(test, tunnel != NULL);
	KUNIT_EXPECT_EQ(test, tunnel->type, (enum tb_tunnel_type)TB_TUNNEL_DP);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, out);
	KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)3);
	KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 2);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, in);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[1].out_port, out);
	KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 2);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, in);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[1].out_port, out);
	KUNIT_ASSERT_EQ(test, tunnel->paths[2]->path_length, 2);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[0].in_port, out);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[1].out_port, in);
	tb_tunnel_free(tunnel);
}

static void tb_test_tunnel_dp_chain(struct kunit *test)
{
	struct tb_switch *host, *dev1, *dev4;
	struct tb_port *in, *out;
	struct tb_tunnel *tunnel;

	/*
	 * Create DP tunnel from Host DP IN to Device #4 DP OUT.
	 *
	 *          [Host]
	 *           1 |
	 *           1 |
	 *        [Device #1]
	 *       3 /   | 5  \ 7
	 *      1 /    |     \ 1
	 * [Device #2] |  [Device #4]
	 *             | 1
	 *        [Device #3]
	 */
	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x1, true);
	alloc_dev_default(test, dev1, 0x301, true);
	alloc_dev_default(test, dev1, 0x501, true);
	dev4 = alloc_dev_default(test, dev1, 0x701, true);

	in = &host->ports[5];
	out = &dev4->ports[14];

	tunnel = tb_tunnel_alloc_dp(NULL, in, out, 0, 0);
	KUNIT_ASSERT_TRUE(test, tunnel != NULL);
	KUNIT_EXPECT_EQ(test, tunnel->type, (enum tb_tunnel_type)TB_TUNNEL_DP);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, out);
	KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)3);
	KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 3);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, in);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[2].out_port, out);
	KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 3);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, in);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[2].out_port, out);
	KUNIT_ASSERT_EQ(test, tunnel->paths[2]->path_length, 3);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[0].in_port, out);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[2].out_port, in);
	tb_tunnel_free(tunnel);
}

static void tb_test_tunnel_dp_tree(struct kunit *test)
{
	struct tb_switch *host, *dev1, *dev2, *dev3, *dev5;
	struct tb_port *in, *out;
	struct tb_tunnel *tunnel;

	/*
	 * Create DP tunnel from Device #2 DP IN to Device #5 DP OUT.
	 *
	 *          [Host]
	 *           3 |
	 *           1 |
	 *        [Device #1]
	 *       3 /   | 5  \ 7
	 *      1 /    |     \ 1
	 * [Device #2] |  [Device #4]
	 *             | 1
	 *        [Device #3]
	 *             | 5
	 *             | 1
	 *        [Device #5]
	 */
	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x3, true);
	dev2 = alloc_dev_with_dpin(test, dev1, 0x303, true);
	dev3 = alloc_dev_default(test, dev1, 0x503, true);
	alloc_dev_default(test, dev1, 0x703, true);
	dev5 = alloc_dev_default(test, dev3, 0x50503, true);

	in = &dev2->ports[13];
	out = &dev5->ports[13];

	tunnel = tb_tunnel_alloc_dp(NULL, in, out, 0, 0);
	KUNIT_ASSERT_TRUE(test, tunnel != NULL);
	KUNIT_EXPECT_EQ(test, tunnel->type, (enum tb_tunnel_type)TB_TUNNEL_DP);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, out);
	KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)3);
	KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 4);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, in);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[3].out_port, out);
	KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 4);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, in);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[3].out_port, out);
	KUNIT_ASSERT_EQ(test, tunnel->paths[2]->path_length, 4);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[0].in_port, out);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[3].out_port, in);
	tb_tunnel_free(tunnel);
}

static void tb_test_tunnel_dp_max_length(struct kunit *test)
{
	struct tb_switch *host, *dev1, *dev2, *dev3, *dev4, *dev5, *dev6;
	struct tb_switch *dev7, *dev8, *dev9, *dev10, *dev11, *dev12;
	struct tb_port *in, *out;
	struct tb_tunnel *tunnel;

	/*
	 * Creates DP tunnel from Device #6 to Device #12.
	 *
	 *          [Host]
	 *         1 /  \ 3
	 *        1 /    \ 1
	 * [Device #1]  [Device #7]
	 *     3 |         | 3
	 *     1 |         | 1
	 * [Device #2]  [Device #8]
	 *     3 |         | 3
	 *     1 |         | 1
	 * [Device #3]  [Device #9]
	 *     3 |         | 3
	 *     1 |         | 1
	 * [Device #4]  [Device #10]
	 *     3 |         | 3
	 *     1 |         | 1
	 * [Device #5]  [Device #11]
	 *     3 |         | 3
	 *     1 |         | 1
	 * [Device #6]  [Device #12]
	 */
	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x1, true);
	dev2 = alloc_dev_default(test, dev1, 0x301, true);
	dev3 = alloc_dev_default(test, dev2, 0x30301, true);
	dev4 = alloc_dev_default(test, dev3, 0x3030301, true);
	dev5 = alloc_dev_default(test, dev4, 0x303030301, true);
	dev6 = alloc_dev_with_dpin(test, dev5, 0x30303030301, true);
	dev7 = alloc_dev_default(test, host, 0x3, true);
	dev8 = alloc_dev_default(test, dev7, 0x303, true);
	dev9 = alloc_dev_default(test, dev8, 0x30303, true);
	dev10 = alloc_dev_default(test, dev9, 0x3030303, true);
	dev11 = alloc_dev_default(test, dev10, 0x303030303, true);
	dev12 = alloc_dev_default(test, dev11, 0x30303030303, true);

	in = &dev6->ports[13];
	out = &dev12->ports[13];

	tunnel = tb_tunnel_alloc_dp(NULL, in, out, 0, 0);
	KUNIT_ASSERT_TRUE(test, tunnel != NULL);
	KUNIT_EXPECT_EQ(test, tunnel->type, (enum tb_tunnel_type)TB_TUNNEL_DP);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, out);
	KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)3);
	KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 13);
	/* First hop */
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, in);
	/* Middle */
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[6].in_port,
			    &host->ports[1]);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[6].out_port,
			    &host->ports[3]);
	/* Last */
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[12].out_port, out);
	KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 13);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, in);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[6].in_port,
			    &host->ports[1]);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[6].out_port,
			    &host->ports[3]);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[12].out_port, out);
	KUNIT_ASSERT_EQ(test, tunnel->paths[2]->path_length, 13);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[0].in_port, out);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[6].in_port,
			    &host->ports[3]);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[6].out_port,
			    &host->ports[1]);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[12].out_port, in);
	tb_tunnel_free(tunnel);
}

static void tb_test_tunnel_usb3(struct kunit *test)
{
	struct tb_switch *host, *dev1, *dev2;
	struct tb_tunnel *tunnel1, *tunnel2;
	struct tb_port *down, *up;

	/*
	 * Create USB3 tunnel between host and two devices.
	 *
	 *  [Host]
	 *   1 |
	 *   1 |
	 * [Device #1]
	 *          \ 7
	 *           \ 1
	 *       [Device #2]
	 */
	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x1, true);
	dev2 = alloc_dev_default(test, dev1, 0x701, true);

	down = &host->ports[12];
	up = &dev1->ports[16];
	tunnel1 = tb_tunnel_alloc_usb3(NULL, up, down, 0, 0);
	KUNIT_ASSERT_TRUE(test, tunnel1 != NULL);
	KUNIT_EXPECT_EQ(test, tunnel1->type, (enum tb_tunnel_type)TB_TUNNEL_USB3);
	KUNIT_EXPECT_PTR_EQ(test, tunnel1->src_port, down);
	KUNIT_EXPECT_PTR_EQ(test, tunnel1->dst_port, up);
	KUNIT_ASSERT_EQ(test, tunnel1->npaths, (size_t)2);
	KUNIT_ASSERT_EQ(test, tunnel1->paths[0]->path_length, 2);
	KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[0]->hops[0].in_port, down);
	KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[0]->hops[1].out_port, up);
	KUNIT_ASSERT_EQ(test, tunnel1->paths[1]->path_length, 2);
	KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[1]->hops[0].in_port, up);
	KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[1]->hops[1].out_port, down);

	down = &dev1->ports[17];
	up = &dev2->ports[16];
	tunnel2 = tb_tunnel_alloc_usb3(NULL, up, down, 0, 0);
	KUNIT_ASSERT_TRUE(test, tunnel2 != NULL);
	KUNIT_EXPECT_EQ(test, tunnel2->type, (enum tb_tunnel_type)TB_TUNNEL_USB3);
	KUNIT_EXPECT_PTR_EQ(test, tunnel2->src_port, down);
	KUNIT_EXPECT_PTR_EQ(test, tunnel2->dst_port, up);
	KUNIT_ASSERT_EQ(test, tunnel2->npaths, (size_t)2);
	KUNIT_ASSERT_EQ(test, tunnel2->paths[0]->path_length, 2);
	KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[0]->hops[0].in_port, down);
	KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[0]->hops[1].out_port, up);
	KUNIT_ASSERT_EQ(test, tunnel2->paths[1]->path_length, 2);
	KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[1]->hops[0].in_port, up);
	KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[1]->hops[1].out_port, down);

	tb_tunnel_free(tunnel2);
	tb_tunnel_free(tunnel1);
}

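/* Checks which ports tb_tunnel_port_on_path() considers part of the tunnel */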
static void tb_test_tunnel_port_on_path(struct kunit *test)
{
	struct tb_switch *host, *dev1, *dev2, *dev3, *dev4, *dev5;
	struct tb_port *in, *out, *port;
	struct tb_tunnel *dp_tunnel;

	/*
	 *          [Host]
	 *           3 |
	 *           1 |
	 *        [Device #1]
	 *       3 /   | 5  \ 7
	 *      1 /    |     \ 1
	 * [Device #2] |  [Device #4]
	 *             | 1
	 *        [Device #3]
	 *             | 5
	 *             | 1
	 *        [Device #5]
	 */
	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x3, true);
	dev2 = alloc_dev_with_dpin(test, dev1, 0x303, true);
	dev3 = alloc_dev_default(test, dev1, 0x503, true);
	dev4 = alloc_dev_default(test, dev1, 0x703, true);
	dev5 = alloc_dev_default(test, dev3, 0x50503, true);

	in = &dev2->ports[13];
	out = &dev5->ports[13];

	dp_tunnel = tb_tunnel_alloc_dp(NULL, in, out, 0, 0);
	KUNIT_ASSERT_TRUE(test, dp_tunnel != NULL);

	KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, in));
	KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, out));

	port = &host->ports[8];
	KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port));

	port = &host->ports[3];
	KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port));

	port = &dev1->ports[1];
	KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port));

	port = &dev1->ports[3];
	KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, port));

	port = &dev1->ports[5];
	KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, port));

	port = &dev1->ports[7];
	KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port));

	port = &dev3->ports[1];
	KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, port));

	port = &dev5->ports[1];
	KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, port));

	port = &dev4->ports[1];
	KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port));

	tb_tunnel_free(dp_tunnel);
}

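/*
 * DMA tunnel tests: tunnels between the NHI and a lane adapter that
 * cross the domain border to another host.
 */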
static void tb_test_tunnel_dma(struct kunit *test)
{
	struct tb_port *nhi, *port;
	struct tb_tunnel *tunnel;
	struct tb_switch *host;

	/*
	 * Create DMA tunnel from NHI to port 1 and back.
	 *
	 * [Host 1]
	 *    1 ^ In HopID 1 -> Out HopID 8
	 *      |
	 *      v In HopID 8 -> Out HopID 1
	 * ............ Domain border
	 *      |
	 * [Host 2]
	 */
	host = alloc_host(test);
	nhi = &host->ports[7];
	port = &host->ports[1];

	tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 8, 1, 8, 1);
	KUNIT_ASSERT_TRUE(test, tunnel != NULL);
	KUNIT_EXPECT_EQ(test, tunnel->type, (enum tb_tunnel_type)TB_TUNNEL_DMA);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, nhi);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, port);
	KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2);
	/* RX path */
	KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 1);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, port);
	KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].in_hop_index, 8);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].out_port, nhi);
	KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].next_hop_index, 1);
	/* TX path */
	KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 1);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, nhi);
	KUNIT_EXPECT_EQ(test, tunnel->paths[1]->hops[0].in_hop_index, 1);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].out_port, port);
	KUNIT_EXPECT_EQ(test, tunnel->paths[1]->hops[0].next_hop_index, 8);

	tb_tunnel_free(tunnel);
}

static void tb_test_tunnel_dma_rx(struct kunit *test)
{
	struct tb_port *nhi, *port;
	struct tb_tunnel *tunnel;
	struct tb_switch *host;

	/*
	 * Create DMA RX tunnel from port 1 to NHI.
	 *
	 * [Host 1]
	 *    1 ^
	 *      |
	 *      | In HopID 15 -> Out HopID 2
	 * ............ Domain border
	 *      |
	 * [Host 2]
	 */
	host = alloc_host(test);
	nhi = &host->ports[7];
	port = &host->ports[1];

	tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, -1, -1, 15, 2);
	KUNIT_ASSERT_TRUE(test, tunnel != NULL);
	KUNIT_EXPECT_EQ(test, tunnel->type, (enum tb_tunnel_type)TB_TUNNEL_DMA);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, nhi);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, port);
	KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)1);
	/* RX path */
	KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 1);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, port);
	KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].in_hop_index, 15);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].out_port, nhi);
	KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].next_hop_index, 2);

	tb_tunnel_free(tunnel);
}

static void tb_test_tunnel_dma_tx(struct kunit *test)
{
	struct tb_port *nhi, *port;
	struct tb_tunnel *tunnel;
	struct tb_switch *host;

	/*
	 * Create DMA TX tunnel from NHI to port 1.
	 *
	 * [Host 1]
	 *    1 | In HopID 2 -> Out HopID 15
	 *      |
	 *      v
	 * ............ Domain border
	 *      |
	 * [Host 2]
	 */
	host = alloc_host(test);
	nhi = &host->ports[7];
	port = &host->ports[1];

	tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 15, 2, -1, -1);
	KUNIT_ASSERT_TRUE(test, tunnel != NULL);
	KUNIT_EXPECT_EQ(test, tunnel->type, (enum tb_tunnel_type)TB_TUNNEL_DMA);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, nhi);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, port);
	KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)1);
	/* TX path */
	KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 1);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, nhi);
	KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].in_hop_index, 2);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].out_port, port);
	KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].next_hop_index, 15);

	tb_tunnel_free(tunnel);
}

1715static void tb_test_tunnel_dma_chain(struct kunit *test)
1716{
1717 struct tb_switch *host, *dev1, *dev2;
1718 struct tb_port *nhi, *port;
1719 struct tb_tunnel *tunnel;
1720
1721 /*
1722 * Create DMA tunnel from NHI to Device #2 port 3 and back.
1723 *
1724 * [Host 1]
1725 * 1 ^ In HopID 1 -> Out HopID x
1726 * |
1727 * 1 | In HopID x -> Out HopID 1
1728 * [Device #1]
1729 * 7 \
1730 * 1 \
1731 * [Device #2]
1732 * 3 | In HopID x -> Out HopID 8
1733 * |
1734 * v In HopID 8 -> Out HopID x
1735 * ............ Domain border
1736 * |
1737 * [Host 2]
1738 */
1739 host = alloc_host(test);
1740 dev1 = alloc_dev_default(test, host, 0x1, true);
1741 dev2 = alloc_dev_default(test, dev1, 0x701, true);
1742
1743 nhi = &host->ports[7];
1744 port = &dev2->ports[3];
1745 tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 8, 1, 8, 1);
1746 KUNIT_ASSERT_TRUE(test, tunnel != NULL);
1747 KUNIT_EXPECT_EQ(test, tunnel->type, (enum tb_tunnel_type)TB_TUNNEL_DMA);
1748 KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, nhi);
1749 KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, port);
1750 KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2);
1751 /* RX path */
1752 KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 3);
1753 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, port);
1754 KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].in_hop_index, 8);
1755 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].out_port,
1756 &dev2->ports[1]);
1757 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[1].in_port,
1758 &dev1->ports[7]);
1759 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[1].out_port,
1760 &dev1->ports[1]);
1761 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[2].in_port,
1762 &host->ports[1]);
1763 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[2].out_port, nhi);
1764 KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[2].next_hop_index, 1);
1765 /* TX path */
1766 KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 3);
1767 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, nhi);
1768 KUNIT_EXPECT_EQ(test, tunnel->paths[1]->hops[0].in_hop_index, 1);
1769 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[1].in_port,
1770 &dev1->ports[1]);
1771 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[1].out_port,
1772 &dev1->ports[7]);
1773 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[2].in_port,
1774 &dev2->ports[1]);
1775 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[2].out_port, port);
1776 KUNIT_EXPECT_EQ(test, tunnel->paths[1]->hops[2].next_hop_index, 8);
1777
1778 tb_tunnel_free(tunnel);
1779}
1780
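/*
 * The checks below exercise tb_tunnel_match_dma(). Judging by the
 * expectations, -1 acts as a wildcard for the corresponding path/ring
 * argument: a tunnel matches as long as every non-wildcard value
 * agrees with what the tunnel was allocated with, and a direction the
 * tunnel does not have (for example no RX path) cannot be matched
 * with concrete values.
 */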
static void tb_test_tunnel_dma_match(struct kunit *test)
{
	struct tb_port *nhi, *port;
	struct tb_tunnel *tunnel;
	struct tb_switch *host;

	host = alloc_host(test);
	nhi = &host->ports[7];
	port = &host->ports[1];

	tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 15, 1, 15, 1);
	KUNIT_ASSERT_TRUE(test, tunnel != NULL);

	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, 15, 1, 15, 1));
	KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 8, 1, 15, 1));
	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, 1));
	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, 15, 1, -1, -1));
	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, 15, -1, -1, -1));
	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, 1, -1, -1));
	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, -1));
	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, -1, 1));
	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, -1, -1));
	KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 8, -1, 8, -1));

	tb_tunnel_free(tunnel);

	tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 15, 1, -1, -1);
	KUNIT_ASSERT_TRUE(test, tunnel != NULL);
	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, 15, 1, -1, -1));
	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, 15, -1, -1, -1));
	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, 1, -1, -1));
	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, -1, -1));
	KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 15, 1, 15, 1));
	KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, 1));
	KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 15, 11, -1, -1));

	tb_tunnel_free(tunnel);

	tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, -1, -1, 15, 11);
	KUNIT_ASSERT_TRUE(test, tunnel != NULL);
	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, 11));
	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, -1));
	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, -1, 11));
	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, -1, -1));
	KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, 1));
	KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, -1, -1, 10, 11));
	KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 15, 11, -1, -1));

	tb_tunnel_free(tunnel);
}

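/*
 * Test data for the XDomain property tests below: a hand-built
 * property block. Judging by the annotations, each directory entry is
 * a two-dword key followed by a dword encoding the entry type ('v'
 * immediate value, 't' text, 'D' directory) and its length, followed
 * by either an immediate value or an offset to the leaf data.
 */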
15a4c7e8
MW
static const u32 root_directory[] = {
	0x55584401,	/* "UXD" v1 */
	0x00000018,	/* Root directory length */
	0x76656e64,	/* "vend" */
	0x6f726964,	/* "orid" */
	0x76000001,	/* "v" R 1 */
	0x00000a27,	/* Immediate value, ! Vendor ID */
	0x76656e64,	/* "vend" */
	0x6f726964,	/* "orid" */
	0x74000003,	/* "t" R 3 */
	0x0000001a,	/* Text leaf offset, (“Apple Inc.”) */
	0x64657669,	/* "devi" */
	0x63656964,	/* "ceid" */
	0x76000001,	/* "v" R 1 */
	0x0000000a,	/* Immediate value, ! Device ID */
	0x64657669,	/* "devi" */
	0x63656964,	/* "ceid" */
	0x74000003,	/* "t" R 3 */
	0x0000001d,	/* Text leaf offset, (“Macintosh”) */
	0x64657669,	/* "devi" */
	0x63657276,	/* "cerv" */
	0x76000001,	/* "v" R 1 */
	0x80000100,	/* Immediate value, Device Revision */
	0x6e657477,	/* "netw" */
	0x6f726b00,	/* "ork" */
	0x44000014,	/* "D" R 20 */
	0x00000021,	/* Directory data offset, (Network Directory) */
	0x4170706c,	/* "Appl" */
	0x6520496e,	/* "e In" */
	0x632e0000,	/* "c." ! */
	0x4d616369,	/* "Maci" */
	0x6e746f73,	/* "ntos" */
	0x68000000,	/* "h" */
	0x00000000,	/* padding */
	0xca8961c6,	/* Directory UUID, Network Directory */
	0x9541ce1c,	/* Directory UUID, Network Directory */
	0x5949b8bd,	/* Directory UUID, Network Directory */
	0x4f5a5f2e,	/* Directory UUID, Network Directory */
	0x70727463,	/* "prtc" */
	0x69640000,	/* "id" */
	0x76000001,	/* "v" R 1 */
	0x00000001,	/* Immediate value, Network Protocol ID */
	0x70727463,	/* "prtc" */
	0x76657273,	/* "vers" */
	0x76000001,	/* "v" R 1 */
	0x00000001,	/* Immediate value, Network Protocol Version */
	0x70727463,	/* "prtc" */
	0x72657673,	/* "revs" */
	0x76000001,	/* "v" R 1 */
	0x00000001,	/* Immediate value, Network Protocol Revision */
	0x70727463,	/* "prtc" */
	0x73746e73,	/* "stns" */
	0x76000001,	/* "v" R 1 */
	0x00000000,	/* Immediate value, Network Protocol Settings */
};

static const uuid_t network_dir_uuid =
	UUID_INIT(0xc66189ca, 0x1cce, 0x4195,
		  0xbd, 0xb8, 0x49, 0x59, 0x2e, 0x5f, 0x5a, 0x4f);
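
/*
 * The UUID above is the same value as the four "Directory UUID" dwords
 * in root_directory (0xca8961c6 0x9541ce1c 0x5949b8bd 0x4f5a5f2e):
 * reading those little-endian dwords byte by byte yields
 * c66189ca-1cce-4195-bdb8-49592e5f5a4f.
 */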

static void tb_test_property_parse(struct kunit *test)
{
	struct tb_property_dir *dir, *network_dir;
	struct tb_property *p;

	dir = tb_property_parse_dir(root_directory, ARRAY_SIZE(root_directory));
	KUNIT_ASSERT_TRUE(test, dir != NULL);

	p = tb_property_find(dir, "foo", TB_PROPERTY_TYPE_TEXT);
	KUNIT_ASSERT_TRUE(test, !p);

	p = tb_property_find(dir, "vendorid", TB_PROPERTY_TYPE_TEXT);
	KUNIT_ASSERT_TRUE(test, p != NULL);
	KUNIT_EXPECT_STREQ(test, p->value.text, "Apple Inc.");

	p = tb_property_find(dir, "vendorid", TB_PROPERTY_TYPE_VALUE);
	KUNIT_ASSERT_TRUE(test, p != NULL);
	KUNIT_EXPECT_EQ(test, p->value.immediate, (u32)0xa27);

	p = tb_property_find(dir, "deviceid", TB_PROPERTY_TYPE_TEXT);
	KUNIT_ASSERT_TRUE(test, p != NULL);
	KUNIT_EXPECT_STREQ(test, p->value.text, "Macintosh");

	p = tb_property_find(dir, "deviceid", TB_PROPERTY_TYPE_VALUE);
	KUNIT_ASSERT_TRUE(test, p != NULL);
	KUNIT_EXPECT_EQ(test, p->value.immediate, (u32)0xa);

	p = tb_property_find(dir, "missing", TB_PROPERTY_TYPE_DIRECTORY);
	KUNIT_ASSERT_TRUE(test, !p);

	p = tb_property_find(dir, "network", TB_PROPERTY_TYPE_DIRECTORY);
	KUNIT_ASSERT_TRUE(test, p != NULL);

	network_dir = p->value.dir;
	KUNIT_EXPECT_TRUE(test, uuid_equal(network_dir->uuid, &network_dir_uuid));

	p = tb_property_find(network_dir, "prtcid", TB_PROPERTY_TYPE_VALUE);
	KUNIT_ASSERT_TRUE(test, p != NULL);
	KUNIT_EXPECT_EQ(test, p->value.immediate, (u32)0x1);

	p = tb_property_find(network_dir, "prtcvers", TB_PROPERTY_TYPE_VALUE);
	KUNIT_ASSERT_TRUE(test, p != NULL);
	KUNIT_EXPECT_EQ(test, p->value.immediate, (u32)0x1);

	p = tb_property_find(network_dir, "prtcrevs", TB_PROPERTY_TYPE_VALUE);
	KUNIT_ASSERT_TRUE(test, p != NULL);
	KUNIT_EXPECT_EQ(test, p->value.immediate, (u32)0x1);

	p = tb_property_find(network_dir, "prtcstns", TB_PROPERTY_TYPE_VALUE);
	KUNIT_ASSERT_TRUE(test, p != NULL);
	KUNIT_EXPECT_EQ(test, p->value.immediate, (u32)0x0);

	p = tb_property_find(network_dir, "deviceid", TB_PROPERTY_TYPE_VALUE);
	KUNIT_EXPECT_TRUE(test, !p);
	p = tb_property_find(network_dir, "deviceid", TB_PROPERTY_TYPE_TEXT);
	KUNIT_EXPECT_TRUE(test, !p);

	tb_property_free_dir(dir);
}

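/*
 * As the test below shows, tb_property_format_dir() follows the common
 * two-pass pattern: called with a NULL block it returns the number of
 * dwords needed, and called again with a buffer of that size it writes
 * the block and returns 0. The test checks both passes and that the
 * result is bit-identical to the original data.
 */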
static void tb_test_property_format(struct kunit *test)
{
	struct tb_property_dir *dir;
	ssize_t block_len;
	u32 *block;
	int ret, i;

	dir = tb_property_parse_dir(root_directory, ARRAY_SIZE(root_directory));
	KUNIT_ASSERT_TRUE(test, dir != NULL);

	ret = tb_property_format_dir(dir, NULL, 0);
	KUNIT_ASSERT_EQ(test, ret, (int)ARRAY_SIZE(root_directory));

	block_len = ret;

	block = kunit_kzalloc(test, block_len * sizeof(u32), GFP_KERNEL);
	KUNIT_ASSERT_TRUE(test, block != NULL);

	ret = tb_property_format_dir(dir, block, block_len);
	KUNIT_EXPECT_EQ(test, ret, 0);

	for (i = 0; i < ARRAY_SIZE(root_directory); i++)
		KUNIT_EXPECT_EQ(test, root_directory[i], block[i]);

	tb_property_free_dir(dir);
}

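/*
 * Helper that asserts two property directories are deep-equal: same
 * UUID (or both none), same number of entries, and pairwise-matching
 * entries compared by key, type and length, recursing into
 * sub-directories. Note that it assumes both directories list their
 * entries in the same order, which holds here because one is parsed
 * and the other copied from the same data.
 */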
static void compare_dirs(struct kunit *test, struct tb_property_dir *d1,
			 struct tb_property_dir *d2)
{
	struct tb_property *p1, *p2, *tmp;
	int n1, n2, i;

	if (d1->uuid) {
		KUNIT_ASSERT_TRUE(test, d2->uuid != NULL);
		KUNIT_ASSERT_TRUE(test, uuid_equal(d1->uuid, d2->uuid));
	} else {
		KUNIT_ASSERT_TRUE(test, d2->uuid == NULL);
	}

	n1 = 0;
	tb_property_for_each(d1, tmp)
		n1++;
	KUNIT_ASSERT_NE(test, n1, 0);

	n2 = 0;
	tb_property_for_each(d2, tmp)
		n2++;
	KUNIT_ASSERT_NE(test, n2, 0);

	KUNIT_ASSERT_EQ(test, n1, n2);

	p1 = NULL;
	p2 = NULL;
	for (i = 0; i < n1; i++) {
		p1 = tb_property_get_next(d1, p1);
		KUNIT_ASSERT_TRUE(test, p1 != NULL);
		p2 = tb_property_get_next(d2, p2);
		KUNIT_ASSERT_TRUE(test, p2 != NULL);

		KUNIT_ASSERT_STREQ(test, &p1->key[0], &p2->key[0]);
		KUNIT_ASSERT_EQ(test, p1->type, p2->type);
		KUNIT_ASSERT_EQ(test, p1->length, p2->length);

		switch (p1->type) {
		case TB_PROPERTY_TYPE_DIRECTORY:
			KUNIT_ASSERT_TRUE(test, p1->value.dir != NULL);
			KUNIT_ASSERT_TRUE(test, p2->value.dir != NULL);
			compare_dirs(test, p1->value.dir, p2->value.dir);
			break;

		case TB_PROPERTY_TYPE_DATA:
			KUNIT_ASSERT_TRUE(test, p1->value.data != NULL);
			KUNIT_ASSERT_TRUE(test, p2->value.data != NULL);
			KUNIT_ASSERT_TRUE(test,
				!memcmp(p1->value.data, p2->value.data,
					p1->length * 4));
			break;

		case TB_PROPERTY_TYPE_TEXT:
			KUNIT_ASSERT_TRUE(test, p1->value.text != NULL);
			KUNIT_ASSERT_TRUE(test, p2->value.text != NULL);
			KUNIT_ASSERT_STREQ(test, p1->value.text, p2->value.text);
			break;

		case TB_PROPERTY_TYPE_VALUE:
			KUNIT_ASSERT_EQ(test, p1->value.immediate,
					p2->value.immediate);
			break;
		default:
			KUNIT_FAIL(test, "unexpected property type");
			break;
		}
	}
}

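/*
 * The copy test below validates tb_property_copy_dir() two ways:
 * structurally via compare_dirs() and then by formatting the copy back
 * into a binary block and comparing it dword by dword against the
 * original root_directory.
 */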
static void tb_test_property_copy(struct kunit *test)
{
	struct tb_property_dir *src, *dst;
	u32 *block;
	int ret, i;

	src = tb_property_parse_dir(root_directory, ARRAY_SIZE(root_directory));
	KUNIT_ASSERT_TRUE(test, src != NULL);

	dst = tb_property_copy_dir(src);
	KUNIT_ASSERT_TRUE(test, dst != NULL);

	/* Compare the structures */
	compare_dirs(test, src, dst);

	/* Compare the resulting property block */
	ret = tb_property_format_dir(dst, NULL, 0);
	KUNIT_ASSERT_EQ(test, ret, (int)ARRAY_SIZE(root_directory));

	block = kunit_kzalloc(test, sizeof(root_directory), GFP_KERNEL);
	KUNIT_ASSERT_TRUE(test, block != NULL);

	ret = tb_property_format_dir(dst, block, ARRAY_SIZE(root_directory));
	KUNIT_EXPECT_TRUE(test, !ret);

	for (i = 0; i < ARRAY_SIZE(root_directory); i++)
		KUNIT_EXPECT_EQ(test, root_directory[i], block[i]);

	tb_property_free_dir(dst);
	tb_property_free_dir(src);
}

static struct kunit_case tb_test_cases[] = {
	KUNIT_CASE(tb_test_path_basic),
	KUNIT_CASE(tb_test_path_not_connected_walk),
	KUNIT_CASE(tb_test_path_single_hop_walk),
	KUNIT_CASE(tb_test_path_daisy_chain_walk),
	KUNIT_CASE(tb_test_path_simple_tree_walk),
	KUNIT_CASE(tb_test_path_complex_tree_walk),
	KUNIT_CASE(tb_test_path_max_length_walk),
	KUNIT_CASE(tb_test_path_not_connected),
	KUNIT_CASE(tb_test_path_not_bonded_lane0),
	KUNIT_CASE(tb_test_path_not_bonded_lane1),
	KUNIT_CASE(tb_test_path_not_bonded_lane1_chain),
	KUNIT_CASE(tb_test_path_not_bonded_lane1_chain_reverse),
	KUNIT_CASE(tb_test_path_mixed_chain),
	KUNIT_CASE(tb_test_path_mixed_chain_reverse),
	KUNIT_CASE(tb_test_tunnel_pcie),
	KUNIT_CASE(tb_test_tunnel_dp),
	KUNIT_CASE(tb_test_tunnel_dp_chain),
	KUNIT_CASE(tb_test_tunnel_dp_tree),
	KUNIT_CASE(tb_test_tunnel_dp_max_length),
	KUNIT_CASE(tb_test_tunnel_port_on_path),
	KUNIT_CASE(tb_test_tunnel_usb3),
	KUNIT_CASE(tb_test_tunnel_dma),
	KUNIT_CASE(tb_test_tunnel_dma_rx),
	KUNIT_CASE(tb_test_tunnel_dma_tx),
	KUNIT_CASE(tb_test_tunnel_dma_chain),
	KUNIT_CASE(tb_test_tunnel_dma_match),
	KUNIT_CASE(tb_test_property_parse),
	KUNIT_CASE(tb_test_property_format),
	KUNIT_CASE(tb_test_property_copy),
	{ }
};

static struct kunit_suite tb_test_suite = {
	.name = "thunderbolt",
	.test_cases = tb_test_cases,
};

static struct kunit_suite *tb_test_suites[] = { &tb_test_suite, NULL };

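/*
 * These suites are registered from the driver's own init path rather
 * than through the usual kunit_test_suite() macro, presumably so the
 * tests can be built into the thunderbolt module itself; hence the
 * direct calls to __kunit_test_suites_init()/__kunit_test_suites_exit()
 * below.
 */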
int tb_test_init(void)
{
	return __kunit_test_suites_init(tb_test_suites);
}

void tb_test_exit(void)
{
	return __kunit_test_suites_exit(tb_test_suites);
}