// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - bus logic (NHI independent)
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2019, Intel Corporation
 */

#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/pm_runtime.h>

#include "tb.h"
#include "tb_regs.h"
#include "tunnel.h"

/**
 * struct tb_cm - Simple Thunderbolt connection manager
 * @tunnel_list: List of active tunnels
 * @dp_resources: List of available DP resources for DP tunneling
 * @hotplug_active: tb_handle_hotplug will stop progressing plug
 *		    events and exit if this is not set (it needs to
 *		    acquire the lock one more time). Used to drain wq
 *		    after cfg has been paused.
 * @remove_work: Work used to remove any unplugged routers after
 *		 runtime resume
 */
struct tb_cm {
	struct list_head tunnel_list;
	struct list_head dp_resources;
	bool hotplug_active;
	struct delayed_work remove_work;
};

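/*
 * tb_domain_alloc() (see tb_probe() below) places the connection manager
 * private data in the same allocation, immediately after struct tb, and
 * tb_priv() returns a pointer to that area. Walking back by
 * sizeof(struct tb) therefore recovers the owning domain.
 */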
static inline struct tb *tcm_to_tb(struct tb_cm *tcm)
{
	return ((void *)tcm - sizeof(struct tb));
}

struct tb_hotplug_event {
	struct work_struct work;
	struct tb *tb;
	u64 route;
	u8 port;
	bool unplug;
};

static void tb_handle_hotplug(struct work_struct *work);

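/*
 * Queue a plug/unplug event for asynchronous handling by
 * tb_handle_hotplug() on the domain workqueue. If the event structure
 * cannot be allocated the event is silently dropped.
 */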
static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug)
{
	struct tb_hotplug_event *ev;

	ev = kmalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev)
		return;

	ev->tb = tb;
	ev->route = route;
	ev->port = port;
	ev->unplug = unplug;
	INIT_WORK(&ev->work, tb_handle_hotplug);
	queue_work(tb->wq, &ev->work);
}

/* enumeration & hot plug handling */

static void tb_add_dp_resources(struct tb_switch *sw)
{
	struct tb_cm *tcm = tb_priv(sw->tb);
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_is_dpin(port))
			continue;

		if (!tb_switch_query_dp_resource(sw, port))
			continue;

		list_add_tail(&port->list, &tcm->dp_resources);
		tb_port_dbg(port, "DP IN resource available\n");
	}
}

static void tb_remove_dp_resources(struct tb_switch *sw)
{
	struct tb_cm *tcm = tb_priv(sw->tb);
	struct tb_port *port, *tmp;

	/* Clear children resources first */
	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port))
			tb_remove_dp_resources(port->remote->sw);
	}

	list_for_each_entry_safe(port, tmp, &tcm->dp_resources, list) {
		if (port->sw == sw) {
			tb_port_dbg(port, "DP %s resource unavailable\n",
				    tb_port_is_dpin(port) ? "IN" : "OUT");
			list_del_init(&port->list);
		}
	}
}

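/*
 * Walk the whole topology and pick up tunnels that the boot firmware has
 * already set up. Routers along a discovered PCIe tunnel are marked as
 * boot-authorized, and for a discovered DP tunnel runtime PM references
 * are taken on both ends so the domain stays powered while the stream
 * is up.
 */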
static void tb_discover_tunnels(struct tb_switch *sw)
{
	struct tb *tb = sw->tb;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		struct tb_tunnel *tunnel = NULL;

		switch (port->config.type) {
		case TB_TYPE_DP_HDMI_IN:
			tunnel = tb_tunnel_discover_dp(tb, port);
			break;

		case TB_TYPE_PCIE_DOWN:
			tunnel = tb_tunnel_discover_pci(tb, port);
			break;

		case TB_TYPE_USB3_DOWN:
			tunnel = tb_tunnel_discover_usb3(tb, port);
			break;

		default:
			break;
		}

		if (!tunnel)
			continue;

		if (tb_tunnel_is_pci(tunnel)) {
			struct tb_switch *parent = tunnel->dst_port->sw;

			while (parent != tunnel->src_port->sw) {
				parent->boot = true;
				parent = tb_switch_parent(parent);
			}
		} else if (tb_tunnel_is_dp(tunnel)) {
			/* Keep the domain from powering down */
			pm_runtime_get_sync(&tunnel->src_port->sw->dev);
			pm_runtime_get_sync(&tunnel->dst_port->sw->dev);
		}

		list_add_tail(&tunnel->list, &tcm->tunnel_list);
	}

	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port))
			tb_discover_tunnels(port->remote->sw);
	}
}

static int tb_port_configure_xdomain(struct tb_port *port)
{
	/*
	 * XDomain paths currently only support a single lane, so we
	 * must disable the other lane according to the USB4 spec.
	 */
	tb_port_disable(port->dual_link_port);

	if (tb_switch_is_usb4(port->sw))
		return usb4_port_configure_xdomain(port);
	return tb_lc_configure_xdomain(port);
}

static void tb_port_unconfigure_xdomain(struct tb_port *port)
{
	if (tb_switch_is_usb4(port->sw))
		usb4_port_unconfigure_xdomain(port);
	else
		tb_lc_unconfigure_xdomain(port);

	tb_port_enable(port->dual_link_port);
}

static void tb_scan_xdomain(struct tb_port *port)
{
	struct tb_switch *sw = port->sw;
	struct tb *tb = sw->tb;
	struct tb_xdomain *xd;
	u64 route;

	if (!tb_is_xdomain_enabled())
		return;

	route = tb_downstream_route(port);
	xd = tb_xdomain_find_by_route(tb, route);
	if (xd) {
		tb_xdomain_put(xd);
		return;
	}

	xd = tb_xdomain_alloc(tb, &sw->dev, route, tb->root_switch->uuid,
			      NULL);
	if (xd) {
		tb_port_at(route, sw)->xdomain = xd;
		tb_port_configure_xdomain(port);
		tb_xdomain_add(xd);
	}
}

static int tb_enable_tmu(struct tb_switch *sw)
{
	int ret;

	/* If it is already enabled in correct mode, don't touch it */
	if (tb_switch_tmu_is_enabled(sw))
		return 0;

	ret = tb_switch_tmu_disable(sw);
	if (ret)
		return ret;

	ret = tb_switch_tmu_post_time(sw);
	if (ret)
		return ret;

	return tb_switch_tmu_enable(sw);
}

/**
 * tb_find_unused_port() - return the first inactive port on @sw
 * @sw: Switch to find the port on
 * @type: Port type to look for
 */
static struct tb_port *tb_find_unused_port(struct tb_switch *sw,
					   enum tb_port_type type)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (tb_is_upstream_port(port))
			continue;
		if (port->config.type != type)
			continue;
		if (!port->cap_adap)
			continue;
		if (tb_port_is_enabled(port))
			continue;
		return port;
	}
	return NULL;
}

static struct tb_port *tb_find_usb3_down(struct tb_switch *sw,
					 const struct tb_port *port)
{
	struct tb_port *down;

	down = usb4_switch_map_usb3_down(sw, port);
	if (down && !tb_usb3_port_is_enabled(down))
		return down;
	return NULL;
}

static struct tb_tunnel *tb_find_tunnel(struct tb *tb, enum tb_tunnel_type type,
					struct tb_port *src_port,
					struct tb_port *dst_port)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;

	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
		if (tunnel->type == type &&
		    ((src_port && src_port == tunnel->src_port) ||
		     (dst_port && dst_port == tunnel->dst_port))) {
			return tunnel;
		}
	}

	return NULL;
}

static struct tb_tunnel *tb_find_first_usb3_tunnel(struct tb *tb,
						   struct tb_port *src_port,
						   struct tb_port *dst_port)
{
	struct tb_port *port, *usb3_down;
	struct tb_switch *sw;

	/* Pick the router that is deepest in the topology */
	if (dst_port->sw->config.depth > src_port->sw->config.depth)
		sw = dst_port->sw;
	else
		sw = src_port->sw;

	/* Can't be the host router */
	if (sw == tb->root_switch)
		return NULL;

	/* Find the downstream USB4 port that leads to this router */
	port = tb_port_at(tb_route(sw), tb->root_switch);
	/* Find the corresponding host router USB3 downstream port */
	usb3_down = usb4_switch_map_usb3_down(tb->root_switch, port);
	if (!usb3_down)
		return NULL;

	return tb_find_tunnel(tb, TB_TUNNEL_USB3, usb3_down, NULL);
}

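/*
 * Minimum bandwidth left over all links between src_port and dst_port,
 * after subtracting what the DP tunnels crossing each link and the
 * first hop USB3 tunnel already consume. As a worked example (numbers
 * are illustrative only): a bonded 20 Gb/s link gives 20 * 2 * 1000 =
 * 40000 Mb/s, the 10% guard band leaves 36000 Mb/s, and an existing DP
 * stream consuming 17280 Mb/s downstream would leave 18720 Mb/s
 * available in that direction.
 */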
static int tb_available_bandwidth(struct tb *tb, struct tb_port *src_port,
				  struct tb_port *dst_port, int *available_up,
				  int *available_down)
{
	int usb3_consumed_up, usb3_consumed_down, ret;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;
	struct tb_port *port;

	tb_port_dbg(dst_port, "calculating available bandwidth\n");

	tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
	if (tunnel) {
		ret = tb_tunnel_consumed_bandwidth(tunnel, &usb3_consumed_up,
						   &usb3_consumed_down);
		if (ret)
			return ret;
	} else {
		usb3_consumed_up = 0;
		usb3_consumed_down = 0;
	}

	*available_up = *available_down = 40000;

	/* Find the minimum available bandwidth over all links */
	tb_for_each_port_on_path(src_port, dst_port, port) {
		int link_speed, link_width, up_bw, down_bw;

		if (!tb_port_is_null(port))
			continue;

		if (tb_is_upstream_port(port)) {
			link_speed = port->sw->link_speed;
		} else {
			link_speed = tb_port_get_link_speed(port);
			if (link_speed < 0)
				return link_speed;
		}

		link_width = port->bonded ? 2 : 1;

		up_bw = link_speed * link_width * 1000; /* Mb/s */
		/* Leave 10% guard band */
		up_bw -= up_bw / 10;
		down_bw = up_bw;

		tb_port_dbg(port, "link total bandwidth %d Mb/s\n", up_bw);

		/*
		 * Find all DP tunnels that cross the port and reduce
		 * their consumed bandwidth from the available.
		 */
		list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
			int dp_consumed_up, dp_consumed_down;

			if (!tb_tunnel_is_dp(tunnel))
				continue;

			if (!tb_tunnel_port_on_path(tunnel, port))
				continue;

			ret = tb_tunnel_consumed_bandwidth(tunnel,
							   &dp_consumed_up,
							   &dp_consumed_down);
			if (ret)
				return ret;

			up_bw -= dp_consumed_up;
			down_bw -= dp_consumed_down;
		}

		/*
		 * If USB3 is tunneled from the host router down to the
		 * branch leading to port we need to take USB3 consumed
		 * bandwidth into account regardless of whether it
		 * actually crosses the port.
		 */
		up_bw -= usb3_consumed_up;
		down_bw -= usb3_consumed_down;

		if (up_bw < *available_up)
			*available_up = up_bw;
		if (down_bw < *available_down)
			*available_down = down_bw;
	}

	if (*available_up < 0)
		*available_up = 0;
	if (*available_down < 0)
		*available_down = 0;

	return 0;
}

static int tb_release_unused_usb3_bandwidth(struct tb *tb,
					    struct tb_port *src_port,
					    struct tb_port *dst_port)
{
	struct tb_tunnel *tunnel;

	tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
	return tunnel ? tb_tunnel_release_unused_bandwidth(tunnel) : 0;
}

static void tb_reclaim_usb3_bandwidth(struct tb *tb, struct tb_port *src_port,
				      struct tb_port *dst_port)
{
	int ret, available_up, available_down;
	struct tb_tunnel *tunnel;

	tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
	if (!tunnel)
		return;

	tb_dbg(tb, "reclaiming unused bandwidth for USB3\n");

	/*
	 * Calculate available bandwidth for the first hop USB3 tunnel.
	 * That determines the whole USB3 bandwidth for this branch.
	 */
	ret = tb_available_bandwidth(tb, tunnel->src_port, tunnel->dst_port,
				     &available_up, &available_down);
	if (ret) {
		tb_warn(tb, "failed to calculate available bandwidth\n");
		return;
	}

	tb_dbg(tb, "available bandwidth for USB3 %d/%d Mb/s\n",
	       available_up, available_down);

	tb_tunnel_reclaim_available_bandwidth(tunnel, &available_up,
					      &available_down);
}

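/*
 * Set up a USB3 tunnel between @sw and its parent. Tunnels are created
 * one hop at a time as the chain is scanned, so the parent's upstream
 * USB3 adapter must already be enabled before a new hop is added below
 * it.
 */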
static int tb_tunnel_usb3(struct tb *tb, struct tb_switch *sw)
{
	struct tb_switch *parent = tb_switch_parent(sw);
	int ret, available_up, available_down;
	struct tb_port *up, *down, *port;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;

	if (!tb_acpi_may_tunnel_usb3()) {
		tb_dbg(tb, "USB3 tunneling disabled, not creating tunnel\n");
		return 0;
	}

	up = tb_switch_find_port(sw, TB_TYPE_USB3_UP);
	if (!up)
		return 0;

	if (!sw->link_usb4)
		return 0;

	/*
	 * Look up available down port. Since we are chaining it should
	 * be found right above this switch.
	 */
	port = tb_port_at(tb_route(sw), parent);
	down = tb_find_usb3_down(parent, port);
	if (!down)
		return 0;

	if (tb_route(parent)) {
		struct tb_port *parent_up;
		/*
		 * Check first that the parent switch has its upstream USB3
		 * port enabled. Otherwise the chain is not complete and
		 * there is no point setting up a new tunnel.
		 */
		parent_up = tb_switch_find_port(parent, TB_TYPE_USB3_UP);
		if (!parent_up || !tb_port_is_enabled(parent_up))
			return 0;

		/* Make all unused bandwidth available for the new tunnel */
		ret = tb_release_unused_usb3_bandwidth(tb, down, up);
		if (ret)
			return ret;
	}

	ret = tb_available_bandwidth(tb, down, up, &available_up,
				     &available_down);
	if (ret)
		goto err_reclaim;

	tb_port_dbg(up, "available bandwidth for new USB3 tunnel %d/%d Mb/s\n",
		    available_up, available_down);

	tunnel = tb_tunnel_alloc_usb3(tb, up, down, available_up,
				      available_down);
	if (!tunnel) {
		ret = -ENOMEM;
		goto err_reclaim;
	}

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(up,
			     "USB3 tunnel activation failed, aborting\n");
		ret = -EIO;
		goto err_free;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	if (tb_route(parent))
		tb_reclaim_usb3_bandwidth(tb, down, up);

	return 0;

err_free:
	tb_tunnel_free(tunnel);
err_reclaim:
	if (tb_route(parent))
		tb_reclaim_usb3_bandwidth(tb, down, up);

	return ret;
}

static int tb_create_usb3_tunnels(struct tb_switch *sw)
{
	struct tb_port *port;
	int ret;

	if (!tb_acpi_may_tunnel_usb3())
		return 0;

	if (tb_route(sw)) {
		ret = tb_tunnel_usb3(sw->tb, sw);
		if (ret)
			return ret;
	}

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_has_remote(port))
			continue;
		ret = tb_create_usb3_tunnels(port->remote->sw);
		if (ret)
			return ret;
	}

	return 0;
}

static void tb_scan_port(struct tb_port *port);

/*
 * tb_scan_switch() - scan for and initialize downstream switches
 */
static void tb_scan_switch(struct tb_switch *sw)
{
	struct tb_port *port;

	pm_runtime_get_sync(&sw->dev);

	tb_switch_for_each_port(sw, port)
		tb_scan_port(port);

	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);
}

/*
 * tb_scan_port() - check for and initialize switches below port
 */
static void tb_scan_port(struct tb_port *port)
{
	struct tb_cm *tcm = tb_priv(port->sw->tb);
	struct tb_port *upstream_port;
	struct tb_switch *sw;

	if (tb_is_upstream_port(port))
		return;

	if (tb_port_is_dpout(port) && tb_dp_port_hpd_is_active(port) == 1 &&
	    !tb_dp_port_is_enabled(port)) {
		tb_port_dbg(port, "DP adapter HPD set, queuing hotplug\n");
		tb_queue_hotplug(port->sw->tb, tb_route(port->sw), port->port,
				 false);
		return;
	}

	if (port->config.type != TB_TYPE_PORT)
		return;
	if (port->dual_link_port && port->link_nr)
		return; /*
			 * Downstream switch is reachable through two ports.
			 * Only scan on the primary port (link_nr == 0).
			 */
	if (tb_wait_for_port(port, false) <= 0)
		return;
	if (port->remote) {
		tb_port_dbg(port, "port already has a remote\n");
		return;
	}

	tb_retimer_scan(port);

	sw = tb_switch_alloc(port->sw->tb, &port->sw->dev,
			     tb_downstream_route(port));
	if (IS_ERR(sw)) {
		/*
		 * If there is an error accessing the connected switch
		 * it may be connected to another domain. Also we allow
		 * the other domain to be connected to a max depth switch.
		 */
		if (PTR_ERR(sw) == -EIO || PTR_ERR(sw) == -EADDRNOTAVAIL)
			tb_scan_xdomain(port);
		return;
	}

	if (tb_switch_configure(sw)) {
		tb_switch_put(sw);
		return;
	}

	/*
	 * If there was previously another domain connected remove it
	 * first.
	 */
	if (port->xdomain) {
		tb_xdomain_remove(port->xdomain);
		tb_port_unconfigure_xdomain(port);
		port->xdomain = NULL;
	}

	/*
	 * Do not send uevents until we have discovered all existing
	 * tunnels and know which switches were authorized already by
	 * the boot firmware.
	 */
	if (!tcm->hotplug_active)
		dev_set_uevent_suppress(&sw->dev, true);

	/*
	 * At the moment only Thunderbolt 2 and beyond (devices with LC)
	 * can support runtime PM.
	 */
	sw->rpm = sw->generation > 1;

	if (tb_switch_add(sw)) {
		tb_switch_put(sw);
		return;
	}

	/* Link the switches using both links if available */
	upstream_port = tb_upstream_port(sw);
	port->remote = upstream_port;
	upstream_port->remote = port;
	if (port->dual_link_port && upstream_port->dual_link_port) {
		port->dual_link_port->remote = upstream_port->dual_link_port;
		upstream_port->dual_link_port->remote = port->dual_link_port;
	}

	/* Enable lane bonding if supported */
	tb_switch_lane_bonding_enable(sw);
	/* Set the link configured */
	tb_switch_configure_link(sw);

	if (tb_enable_tmu(sw))
		tb_sw_warn(sw, "failed to enable TMU\n");

	/* Scan upstream retimers */
	tb_retimer_scan(upstream_port);

	/*
	 * Create USB 3.x tunnels only when the switch is plugged to the
	 * domain. This is because we scan the domain also during discovery
	 * and want to discover existing USB 3.x tunnels before we create
	 * any new ones.
	 */
	if (tcm->hotplug_active && tb_tunnel_usb3(sw->tb, sw))
		tb_sw_warn(sw, "USB3 tunnel creation failed\n");

	tb_add_dp_resources(sw);
	tb_scan_switch(sw);
}

static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel)
{
	struct tb_port *src_port, *dst_port;
	struct tb *tb;

	if (!tunnel)
		return;

	tb_tunnel_deactivate(tunnel);
	list_del(&tunnel->list);

	tb = tunnel->tb;
	src_port = tunnel->src_port;
	dst_port = tunnel->dst_port;

	switch (tunnel->type) {
	case TB_TUNNEL_DP:
		/*
		 * In case of DP tunnel make sure the DP IN resource is
		 * deallocated properly.
		 */
		tb_switch_dealloc_dp_resource(src_port->sw, src_port);
		/* Now we can allow the domain to runtime suspend again */
		pm_runtime_mark_last_busy(&dst_port->sw->dev);
		pm_runtime_put_autosuspend(&dst_port->sw->dev);
		pm_runtime_mark_last_busy(&src_port->sw->dev);
		pm_runtime_put_autosuspend(&src_port->sw->dev);
		fallthrough;

	case TB_TUNNEL_USB3:
		tb_reclaim_usb3_bandwidth(tb, src_port, dst_port);
		break;

	default:
		/*
		 * PCIe and DMA tunnels do not consume guaranteed
		 * bandwidth.
		 */
		break;
	}

	tb_tunnel_free(tunnel);
}

/*
 * tb_free_invalid_tunnels() - destroy tunnels of devices that have gone away
 */
static void tb_free_invalid_tunnels(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;
	struct tb_tunnel *n;

	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
		if (tb_tunnel_is_invalid(tunnel))
			tb_deactivate_and_free_tunnel(tunnel);
	}
}

/*
 * tb_free_unplugged_children() - traverse hierarchy and free unplugged switches
 */
static void tb_free_unplugged_children(struct tb_switch *sw)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_has_remote(port))
			continue;

		if (port->remote->sw->is_unplugged) {
			tb_retimer_remove_all(port);
			tb_remove_dp_resources(port->remote->sw);
			tb_switch_unconfigure_link(port->remote->sw);
			tb_switch_lane_bonding_disable(port->remote->sw);
			tb_switch_remove(port->remote->sw);
			port->remote = NULL;
			if (port->dual_link_port)
				port->dual_link_port->remote = NULL;
		} else {
			tb_free_unplugged_children(port->remote->sw);
		}
	}
}

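/*
 * For pre-USB4 host routers the PCIe downstream adapter is picked from
 * the fixed per-controller table below: for example on Falcon Ridge,
 * physical port 0 maps to adapter 6 and physical port 1 to adapter 8.
 * USB4 routers provide the mapping through usb4_switch_map_pcie_down()
 * instead.
 */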
static struct tb_port *tb_find_pcie_down(struct tb_switch *sw,
					 const struct tb_port *port)
{
	struct tb_port *down = NULL;

	/*
	 * To keep plugging devices consistently in the same PCIe
	 * hierarchy, do mapping here for switch downstream PCIe ports.
	 */
	if (tb_switch_is_usb4(sw)) {
		down = usb4_switch_map_pcie_down(sw, port);
	} else if (!tb_route(sw)) {
		int phy_port = tb_phy_port_from_link(port->port);
		int index;

		/*
		 * Hard-coded Thunderbolt port to PCIe down port mapping
		 * per controller.
		 */
		if (tb_switch_is_cactus_ridge(sw) ||
		    tb_switch_is_alpine_ridge(sw))
			index = !phy_port ? 6 : 7;
		else if (tb_switch_is_falcon_ridge(sw))
			index = !phy_port ? 6 : 8;
		else if (tb_switch_is_titan_ridge(sw))
			index = !phy_port ? 8 : 9;
		else
			goto out;

		/* Validate the hard-coding */
		if (WARN_ON(index > sw->config.max_port_number))
			goto out;

		down = &sw->ports[index];
	}

	if (down) {
		if (WARN_ON(!tb_port_is_pcie_down(down)))
			goto out;
		if (tb_pci_port_is_enabled(down))
			goto out;

		return down;
	}

out:
	return tb_find_unused_port(sw, TB_TYPE_PCIE_DOWN);
}

static struct tb_port *tb_find_dp_out(struct tb *tb, struct tb_port *in)
{
	struct tb_port *host_port, *port;
	struct tb_cm *tcm = tb_priv(tb);

	host_port = tb_route(in->sw) ?
		tb_port_at(tb_route(in->sw), tb->root_switch) : NULL;

	list_for_each_entry(port, &tcm->dp_resources, list) {
		if (!tb_port_is_dpout(port))
			continue;

		if (tb_port_is_enabled(port)) {
			tb_port_dbg(port, "in use\n");
			continue;
		}

		tb_port_dbg(port, "DP OUT available\n");

		/*
		 * Keep the DP tunnel under the topology starting from
		 * the same host router downstream port.
		 */
		if (host_port && tb_route(port->sw)) {
			struct tb_port *p;

			p = tb_port_at(tb_route(port->sw), tb->root_switch);
			if (p != host_port)
				continue;
		}

		return port;
	}

	return NULL;
}

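/*
 * Pick the first free DP IN / DP OUT adapter pair from the resource
 * list, runtime resume both ends, carve bandwidth out of the USB3
 * tunnel on the same branch, and activate the DP tunnel. On any
 * failure the steps are unwound in reverse order.
 */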
static void tb_tunnel_dp(struct tb *tb)
{
	int available_up, available_down, ret;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *port, *in, *out;
	struct tb_tunnel *tunnel;

	if (!tb_acpi_may_tunnel_dp()) {
		tb_dbg(tb, "DP tunneling disabled, not creating tunnel\n");
		return;
	}

	/*
	 * Find pair of inactive DP IN and DP OUT adapters and then
	 * establish a DP tunnel between them.
	 */
	tb_dbg(tb, "looking for DP IN <-> DP OUT pairs:\n");

	in = NULL;
	out = NULL;
	list_for_each_entry(port, &tcm->dp_resources, list) {
		if (!tb_port_is_dpin(port))
			continue;

		if (tb_port_is_enabled(port)) {
			tb_port_dbg(port, "in use\n");
			continue;
		}

		tb_port_dbg(port, "DP IN available\n");

		out = tb_find_dp_out(tb, port);
		if (out) {
			in = port;
			break;
		}
	}

	if (!in) {
		tb_dbg(tb, "no suitable DP IN adapter available, not tunneling\n");
		return;
	}
	if (!out) {
		tb_dbg(tb, "no suitable DP OUT adapter available, not tunneling\n");
		return;
	}

	/*
	 * DP stream needs the domain to be active so runtime resume
	 * both ends of the tunnel.
	 *
	 * This should bring the routers in the middle active as well
	 * and keeps the domain from runtime suspending while the DP
	 * tunnel is active.
	 */
	pm_runtime_get_sync(&in->sw->dev);
	pm_runtime_get_sync(&out->sw->dev);

	if (tb_switch_alloc_dp_resource(in->sw, in)) {
		tb_port_dbg(in, "no resource available for DP IN, not tunneling\n");
		goto err_rpm_put;
	}

	/* Make all unused USB3 bandwidth available for the new DP tunnel */
	ret = tb_release_unused_usb3_bandwidth(tb, in, out);
	if (ret) {
		tb_warn(tb, "failed to release unused bandwidth\n");
		goto err_dealloc_dp;
	}

	ret = tb_available_bandwidth(tb, in, out, &available_up,
				     &available_down);
	if (ret)
		goto err_reclaim;

	tb_dbg(tb, "available bandwidth for new DP tunnel %u/%u Mb/s\n",
	       available_up, available_down);

	tunnel = tb_tunnel_alloc_dp(tb, in, out, available_up, available_down);
	if (!tunnel) {
		tb_port_dbg(out, "could not allocate DP tunnel\n");
		goto err_reclaim;
	}

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(out, "DP tunnel activation failed, aborting\n");
		goto err_free;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	tb_reclaim_usb3_bandwidth(tb, in, out);
	return;

err_free:
	tb_tunnel_free(tunnel);
err_reclaim:
	tb_reclaim_usb3_bandwidth(tb, in, out);
err_dealloc_dp:
	tb_switch_dealloc_dp_resource(in->sw, in);
err_rpm_put:
	pm_runtime_mark_last_busy(&out->sw->dev);
	pm_runtime_put_autosuspend(&out->sw->dev);
	pm_runtime_mark_last_busy(&in->sw->dev);
	pm_runtime_put_autosuspend(&in->sw->dev);
}

static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port)
{
	struct tb_port *in, *out;
	struct tb_tunnel *tunnel;

	if (tb_port_is_dpin(port)) {
		tb_port_dbg(port, "DP IN resource unavailable\n");
		in = port;
		out = NULL;
	} else {
		tb_port_dbg(port, "DP OUT resource unavailable\n");
		in = NULL;
		out = port;
	}

	tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, out);
	tb_deactivate_and_free_tunnel(tunnel);
	list_del_init(&port->list);

	/*
	 * See if there is another DP OUT port that can be used to
	 * create another tunnel.
	 */
	tb_tunnel_dp(tb);
}

static void tb_dp_resource_available(struct tb *tb, struct tb_port *port)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *p;

	if (tb_port_is_enabled(port))
		return;

	list_for_each_entry(p, &tcm->dp_resources, list) {
		if (p == port)
			return;
	}

	tb_port_dbg(port, "DP %s resource available\n",
		    tb_port_is_dpin(port) ? "IN" : "OUT");
	list_add_tail(&port->list, &tcm->dp_resources);

	/* Look for suitable DP IN <-> DP OUT pairs now */
	tb_tunnel_dp(tb);
}

static void tb_disconnect_and_release_dp(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel, *n;

	/*
	 * Tear down all DP tunnels and release their resources. They
	 * will be re-established after resume based on plug events.
	 */
	list_for_each_entry_safe_reverse(tunnel, n, &tcm->tunnel_list, list) {
		if (tb_tunnel_is_dp(tunnel))
			tb_deactivate_and_free_tunnel(tunnel);
	}

	while (!list_empty(&tcm->dp_resources)) {
		struct tb_port *port;

		port = list_first_entry(&tcm->dp_resources,
					struct tb_port, list);
		list_del_init(&port->list);
	}
}

static int tb_disconnect_pci(struct tb *tb, struct tb_switch *sw)
{
	struct tb_tunnel *tunnel;
	struct tb_port *up;

	up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
	if (WARN_ON(!up))
		return -ENODEV;

	tunnel = tb_find_tunnel(tb, TB_TUNNEL_PCI, NULL, up);
	if (WARN_ON(!tunnel))
		return -ENODEV;

	tb_tunnel_deactivate(tunnel);
	list_del(&tunnel->list);
	tb_tunnel_free(tunnel);
	return 0;
}

static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
{
	struct tb_port *up, *down, *port;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_switch *parent_sw;
	struct tb_tunnel *tunnel;

	up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
	if (!up)
		return 0;

	/*
	 * Look up available down port. Since we are chaining it should
	 * be found right above this switch.
	 */
	parent_sw = tb_to_switch(sw->dev.parent);
	port = tb_port_at(tb_route(sw), parent_sw);
	down = tb_find_pcie_down(parent_sw, port);
	if (!down)
		return 0;

	tunnel = tb_tunnel_alloc_pci(tb, up, down);
	if (!tunnel)
		return -ENOMEM;

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(up,
			     "PCIe tunnel activation failed, aborting\n");
		tb_tunnel_free(tunnel);
		return -EIO;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	return 0;
}

static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *nhi_port, *dst_port;
	struct tb_tunnel *tunnel;
	struct tb_switch *sw;

	sw = tb_to_switch(xd->dev.parent);
	dst_port = tb_port_at(xd->route, sw);
	nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);

	mutex_lock(&tb->lock);
	tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, xd->transmit_ring,
				     xd->transmit_path, xd->receive_ring,
				     xd->receive_path);
	if (!tunnel) {
		mutex_unlock(&tb->lock);
		return -ENOMEM;
	}

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(nhi_port,
			     "DMA tunnel activation failed, aborting\n");
		tb_tunnel_free(tunnel);
		mutex_unlock(&tb->lock);
		return -EIO;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	mutex_unlock(&tb->lock);
	return 0;
}

static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
	struct tb_port *dst_port;
	struct tb_tunnel *tunnel;
	struct tb_switch *sw;

	sw = tb_to_switch(xd->dev.parent);
	dst_port = tb_port_at(xd->route, sw);

	/*
	 * It is possible that the tunnel was already torn down (in
	 * case of cable disconnect) so it is fine if we cannot find it
	 * here anymore.
	 */
	tunnel = tb_find_tunnel(tb, TB_TUNNEL_DMA, NULL, dst_port);
	tb_deactivate_and_free_tunnel(tunnel);
}

static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
	if (!xd->is_unplugged) {
		mutex_lock(&tb->lock);
		__tb_disconnect_xdomain_paths(tb, xd);
		mutex_unlock(&tb->lock);
	}
	return 0;
}

/* hotplug handling */

/*
 * tb_handle_hotplug() - handle hotplug event
 *
 * Executes on tb->wq.
 */
static void tb_handle_hotplug(struct work_struct *work)
{
	struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
	struct tb *tb = ev->tb;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_switch *sw;
	struct tb_port *port;

	/* Bring the domain back from sleep if it was suspended */
	pm_runtime_get_sync(&tb->dev);

	mutex_lock(&tb->lock);
	if (!tcm->hotplug_active)
		goto out; /* during init, suspend or shutdown */

	sw = tb_switch_find_by_route(tb, ev->route);
	if (!sw) {
		tb_warn(tb,
			"hotplug event from non-existent switch %llx:%x (unplug: %d)\n",
			ev->route, ev->port, ev->unplug);
		goto out;
	}
	if (ev->port > sw->config.max_port_number) {
		tb_warn(tb,
			"hotplug event from non-existent port %llx:%x (unplug: %d)\n",
			ev->route, ev->port, ev->unplug);
		goto put_sw;
	}
	port = &sw->ports[ev->port];
	if (tb_is_upstream_port(port)) {
		tb_dbg(tb, "hotplug event for upstream port %llx:%x (unplug: %d)\n",
		       ev->route, ev->port, ev->unplug);
		goto put_sw;
	}

	pm_runtime_get_sync(&sw->dev);

	if (ev->unplug) {
		tb_retimer_remove_all(port);

		if (tb_port_has_remote(port)) {
			tb_port_dbg(port, "switch unplugged\n");
			tb_sw_set_unplugged(port->remote->sw);
			tb_free_invalid_tunnels(tb);
			tb_remove_dp_resources(port->remote->sw);
			tb_switch_tmu_disable(port->remote->sw);
			tb_switch_unconfigure_link(port->remote->sw);
			tb_switch_lane_bonding_disable(port->remote->sw);
			tb_switch_remove(port->remote->sw);
			port->remote = NULL;
			if (port->dual_link_port)
				port->dual_link_port->remote = NULL;
			/* Maybe we can create another DP tunnel */
			tb_tunnel_dp(tb);
		} else if (port->xdomain) {
			struct tb_xdomain *xd = tb_xdomain_get(port->xdomain);

			tb_port_dbg(port, "xdomain unplugged\n");
			/*
			 * Service drivers are unbound during
			 * tb_xdomain_remove() so setting XDomain as
			 * unplugged here prevents deadlock if they call
			 * tb_xdomain_disable_paths(). We will tear down
			 * the path below.
			 */
			xd->is_unplugged = true;
			tb_xdomain_remove(xd);
			port->xdomain = NULL;
			__tb_disconnect_xdomain_paths(tb, xd);
			tb_xdomain_put(xd);
			tb_port_unconfigure_xdomain(port);
		} else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
			tb_dp_resource_unavailable(tb, port);
		} else {
			tb_port_dbg(port,
				    "got unplug event for disconnected port, ignoring\n");
		}
	} else if (port->remote) {
		tb_port_dbg(port, "got plug event for connected port, ignoring\n");
	} else {
		if (tb_port_is_null(port)) {
			tb_port_dbg(port, "hotplug: scanning\n");
			tb_scan_port(port);
			if (!port->remote)
				tb_port_dbg(port, "hotplug: no switch found\n");
		} else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
			tb_dp_resource_available(tb, port);
		}
	}

	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);

put_sw:
	tb_switch_put(sw);
out:
	mutex_unlock(&tb->lock);

	pm_runtime_mark_last_busy(&tb->dev);
	pm_runtime_put_autosuspend(&tb->dev);

	kfree(ev);
}

/*
 * tb_handle_event() - callback function for the control channel
 *
 * Delegates to tb_handle_hotplug.
 */
static void tb_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
			    const void *buf, size_t size)
{
	const struct cfg_event_pkg *pkg = buf;
	u64 route;

	if (type != TB_CFG_PKG_EVENT) {
		tb_warn(tb, "unexpected event %#x, ignoring\n", type);
		return;
	}

	route = tb_cfg_get_route(&pkg->header);

	if (tb_cfg_ack_plug(tb->ctl, route, pkg->port, pkg->unplug)) {
		tb_warn(tb, "could not ack plug event on %llx:%x\n", route,
			pkg->port);
	}

	tb_queue_hotplug(tb, route, pkg->port, pkg->unplug);
}

static void tb_stop(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;
	struct tb_tunnel *n;

	cancel_delayed_work(&tcm->remove_work);
	/* tunnels are only present after everything has been initialized */
	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
		/*
		 * DMA tunnels require the driver to be functional so we
		 * tear them down. Other protocol tunnels can be left
		 * intact.
		 */
		if (tb_tunnel_is_dma(tunnel))
			tb_tunnel_deactivate(tunnel);
		tb_tunnel_free(tunnel);
	}
	tb_switch_remove(tb->root_switch);
	tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
}

static int tb_scan_finalize_switch(struct device *dev, void *data)
{
	if (tb_is_switch(dev)) {
		struct tb_switch *sw = tb_to_switch(dev);

		/*
		 * If we found that the switch was already setup by the
		 * boot firmware, mark it as authorized now before we
		 * send uevent to userspace.
		 */
		if (sw->boot)
			sw->authorized = 1;

		dev_set_uevent_suppress(dev, false);
		kobject_uevent(&dev->kobj, KOBJ_ADD);
		device_for_each_child(dev, NULL, tb_scan_finalize_switch);
	}

	return 0;
}

static int tb_start(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	int ret;

	tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
	if (IS_ERR(tb->root_switch))
		return PTR_ERR(tb->root_switch);

	/*
	 * ICM firmware upgrade needs running ICM firmware, which is not
	 * available in this native (software connection manager) mode,
	 * so disable firmware upgrade of the root switch.
	 */
	tb->root_switch->no_nvm_upgrade = true;
	/* All USB4 routers support runtime PM */
	tb->root_switch->rpm = tb_switch_is_usb4(tb->root_switch);

	ret = tb_switch_configure(tb->root_switch);
	if (ret) {
		tb_switch_put(tb->root_switch);
		return ret;
	}

	/* Announce the switch to the world */
	ret = tb_switch_add(tb->root_switch);
	if (ret) {
		tb_switch_put(tb->root_switch);
		return ret;
	}

	/* Enable TMU if it is off */
	tb_switch_tmu_enable(tb->root_switch);
	/* Full scan to discover devices added before the driver was loaded. */
	tb_scan_switch(tb->root_switch);
	/* Find out tunnels created by the boot firmware */
	tb_discover_tunnels(tb->root_switch);
	/*
	 * If the boot firmware did not create USB 3.x tunnels create them
	 * now for the whole topology.
	 */
	tb_create_usb3_tunnels(tb->root_switch);
	/* Add DP IN resources for the root switch */
	tb_add_dp_resources(tb->root_switch);
	/* Make the discovered switches available to the userspace */
	device_for_each_child(&tb->root_switch->dev, NULL,
			      tb_scan_finalize_switch);

	/* Allow tb_handle_hotplug to progress events */
	tcm->hotplug_active = true;
	return 0;
}

static int tb_suspend_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);

	tb_dbg(tb, "suspending...\n");
	tb_disconnect_and_release_dp(tb);
	tb_switch_suspend(tb->root_switch, false);
	tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
	tb_dbg(tb, "suspend finished\n");

	return 0;
}

static void tb_restore_children(struct tb_switch *sw)
{
	struct tb_port *port;

	/* No need to restore if the router is already unplugged */
	if (sw->is_unplugged)
		return;

	if (tb_enable_tmu(sw))
		tb_sw_warn(sw, "failed to restore TMU configuration\n");

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_has_remote(port) && !port->xdomain)
			continue;

		if (port->remote) {
			tb_switch_lane_bonding_enable(port->remote->sw);
			tb_switch_configure_link(port->remote->sw);

			tb_restore_children(port->remote->sw);
		} else if (port->xdomain) {
			tb_port_configure_xdomain(port);
		}
	}
}

static int tb_resume_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel, *n;

	tb_dbg(tb, "resuming...\n");

	/* remove any pci devices the firmware might have setup */
	tb_switch_reset(tb->root_switch);

	tb_switch_resume(tb->root_switch);
	tb_free_invalid_tunnels(tb);
	tb_free_unplugged_children(tb->root_switch);
	tb_restore_children(tb->root_switch);
	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
		tb_tunnel_restart(tunnel);
	if (!list_empty(&tcm->tunnel_list)) {
		/*
		 * the pcie links need some time to get going.
		 * 100ms works for me...
		 */
		tb_dbg(tb, "tunnels restarted, sleeping for 100ms\n");
		msleep(100);
	}
	/* Allow tb_handle_hotplug to progress events */
	tcm->hotplug_active = true;
	tb_dbg(tb, "resume finished\n");

	return 0;
}

static int tb_free_unplugged_xdomains(struct tb_switch *sw)
{
	struct tb_port *port;
	int ret = 0;

	tb_switch_for_each_port(sw, port) {
		if (tb_is_upstream_port(port))
			continue;
		if (port->xdomain && port->xdomain->is_unplugged) {
			tb_retimer_remove_all(port);
			tb_xdomain_remove(port->xdomain);
			tb_port_unconfigure_xdomain(port);
			port->xdomain = NULL;
			ret++;
		} else if (port->remote) {
			ret += tb_free_unplugged_xdomains(port->remote->sw);
		}
	}

	return ret;
}

static int tb_freeze_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);

	tcm->hotplug_active = false;
	return 0;
}

static int tb_thaw_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);

	tcm->hotplug_active = true;
	return 0;
}

static void tb_complete(struct tb *tb)
{
	/*
	 * Release any unplugged XDomains and, if another domain has
	 * been swapped in place of an unplugged XDomain, run another
	 * rescan.
	 */
	mutex_lock(&tb->lock);
	if (tb_free_unplugged_xdomains(tb->root_switch))
		tb_scan_switch(tb->root_switch);
	mutex_unlock(&tb->lock);
}

static int tb_runtime_suspend(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);

	mutex_lock(&tb->lock);
	tb_switch_suspend(tb->root_switch, true);
	tcm->hotplug_active = false;
	mutex_unlock(&tb->lock);

	return 0;
}

static void tb_remove_work(struct work_struct *work)
{
	struct tb_cm *tcm = container_of(work, struct tb_cm, remove_work.work);
	struct tb *tb = tcm_to_tb(tcm);

	mutex_lock(&tb->lock);
	if (tb->root_switch) {
		tb_free_unplugged_children(tb->root_switch);
		tb_free_unplugged_xdomains(tb->root_switch);
	}
	mutex_unlock(&tb->lock);
}

static int tb_runtime_resume(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel, *n;

	mutex_lock(&tb->lock);
	tb_switch_resume(tb->root_switch);
	tb_free_invalid_tunnels(tb);
	tb_restore_children(tb->root_switch);
	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
		tb_tunnel_restart(tunnel);
	tcm->hotplug_active = true;
	mutex_unlock(&tb->lock);

	/*
	 * Schedule cleanup of any unplugged devices. Run this in a
	 * separate thread to avoid possible deadlock if the device
	 * removal runtime resumes the unplugged device.
	 */
	queue_delayed_work(tb->wq, &tcm->remove_work, msecs_to_jiffies(50));
	return 0;
}

static const struct tb_cm_ops tb_cm_ops = {
	.start = tb_start,
	.stop = tb_stop,
	.suspend_noirq = tb_suspend_noirq,
	.resume_noirq = tb_resume_noirq,
	.freeze_noirq = tb_freeze_noirq,
	.thaw_noirq = tb_thaw_noirq,
	.complete = tb_complete,
	.runtime_suspend = tb_runtime_suspend,
	.runtime_resume = tb_runtime_resume,
	.handle_event = tb_handle_event,
	.disapprove_switch = tb_disconnect_pci,
	.approve_switch = tb_tunnel_pci,
	.approve_xdomain_paths = tb_approve_xdomain_paths,
	.disconnect_xdomain_paths = tb_disconnect_xdomain_paths,
};

struct tb *tb_probe(struct tb_nhi *nhi)
{
	struct tb_cm *tcm;
	struct tb *tb;

	tb = tb_domain_alloc(nhi, sizeof(*tcm));
	if (!tb)
		return NULL;

	if (tb_acpi_may_tunnel_pcie())
		tb->security_level = TB_SECURITY_USER;
	else
		tb->security_level = TB_SECURITY_NOPCIE;

	tb->cm_ops = &tb_cm_ops;

	tcm = tb_priv(tb);
	INIT_LIST_HEAD(&tcm->tunnel_list);
	INIT_LIST_HEAD(&tcm->dp_resources);
	INIT_DELAYED_WORK(&tcm->remove_work, tb_remove_work);

	tb_dbg(tb, "using software connection manager\n");

	return tb;
}