// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - bus logic (NHI independent)
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2019, Intel Corporation
 */

#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/pm_runtime.h>

#include "tb.h"
#include "tb_regs.h"
#include "tunnel.h"

#define TB_TIMEOUT	100 /* ms */

/**
 * struct tb_cm - Simple Thunderbolt connection manager
 * @tunnel_list: List of active tunnels
 * @dp_resources: List of available DP resources for DP tunneling
 * @hotplug_active: tb_handle_hotplug will stop progressing plug
 *		    events and exit if this is not set (it needs to
 *		    acquire the lock one more time). Used to drain wq
 *		    after cfg has been paused.
 * @remove_work: Work used to remove any unplugged routers after
 *		 runtime resume
 */
struct tb_cm {
	struct list_head tunnel_list;
	struct list_head dp_resources;
	bool hotplug_active;
	struct delayed_work remove_work;
};

static inline struct tb *tcm_to_tb(struct tb_cm *tcm)
{
	return ((void *)tcm - sizeof(struct tb));
}

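/*
 * A note on the pointer arithmetic above: tb_domain_alloc() places the
 * connection manager private data directly after struct tb, so
 * tb_priv(tb) is effectively (void *)tb + sizeof(*tb) and tcm_to_tb()
 * simply walks back by sizeof(struct tb). A minimal sketch of the
 * assumed round trip:
 *
 *	struct tb *tb = tb_domain_alloc(nhi, TB_TIMEOUT, sizeof(struct tb_cm));
 *	struct tb_cm *tcm = tb_priv(tb);
 *	WARN_ON(tcm_to_tb(tcm) != tb);
 */
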
struct tb_hotplug_event {
	struct work_struct work;
	struct tb *tb;
	u64 route;
	u8 port;
	bool unplug;
};

static void tb_handle_hotplug(struct work_struct *work);

static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug)
{
	struct tb_hotplug_event *ev;

	ev = kmalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev)
		return;

	ev->tb = tb;
	ev->route = route;
	ev->port = port;
	ev->unplug = unplug;
	INIT_WORK(&ev->work, tb_handle_hotplug);
	queue_work(tb->wq, &ev->work);
}

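/*
 * Ownership note: the event allocated here is handed off to the work
 * queue and freed at the end of tb_handle_hotplug() once the work item
 * has run; the caller never releases it.
 */
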
/* enumeration & hot plug handling */

static void tb_add_dp_resources(struct tb_switch *sw)
{
	struct tb_cm *tcm = tb_priv(sw->tb);
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_is_dpin(port))
			continue;

		if (!tb_switch_query_dp_resource(sw, port))
			continue;

		list_add_tail(&port->list, &tcm->dp_resources);
		tb_port_dbg(port, "DP IN resource available\n");
	}
}

static void tb_remove_dp_resources(struct tb_switch *sw)
{
	struct tb_cm *tcm = tb_priv(sw->tb);
	struct tb_port *port, *tmp;

	/* Clear children resources first */
	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port))
			tb_remove_dp_resources(port->remote->sw);
	}

	list_for_each_entry_safe(port, tmp, &tcm->dp_resources, list) {
		if (port->sw == sw) {
			tb_port_dbg(port, "DP %s resource unavailable\n",
				    tb_port_is_dpin(port) ? "IN" : "OUT");
			list_del_init(&port->list);
		}
	}
}

static void tb_discover_tunnels(struct tb_switch *sw)
{
	struct tb *tb = sw->tb;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		struct tb_tunnel *tunnel = NULL;

		switch (port->config.type) {
		case TB_TYPE_DP_HDMI_IN:
			tunnel = tb_tunnel_discover_dp(tb, port);
			break;

		case TB_TYPE_PCIE_DOWN:
			tunnel = tb_tunnel_discover_pci(tb, port);
			break;

		case TB_TYPE_USB3_DOWN:
			tunnel = tb_tunnel_discover_usb3(tb, port);
			break;

		default:
			break;
		}

		if (!tunnel)
			continue;

		if (tb_tunnel_is_pci(tunnel)) {
			struct tb_switch *parent = tunnel->dst_port->sw;

			while (parent != tunnel->src_port->sw) {
				parent->boot = true;
				parent = tb_switch_parent(parent);
			}
		} else if (tb_tunnel_is_dp(tunnel)) {
			/* Keep the domain from powering down */
			pm_runtime_get_sync(&tunnel->src_port->sw->dev);
			pm_runtime_get_sync(&tunnel->dst_port->sw->dev);
		}

		list_add_tail(&tunnel->list, &tcm->tunnel_list);
	}

	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port))
			tb_discover_tunnels(port->remote->sw);
	}
}

static int tb_port_configure_xdomain(struct tb_port *port)
{
	/*
	 * XDomain paths currently only support single lane so we must
	 * disable the other lane according to the USB4 spec.
	 */
	tb_port_disable(port->dual_link_port);

	if (tb_switch_is_usb4(port->sw))
		return usb4_port_configure_xdomain(port);
	return tb_lc_configure_xdomain(port);
}

static void tb_port_unconfigure_xdomain(struct tb_port *port)
{
	if (tb_switch_is_usb4(port->sw))
		usb4_port_unconfigure_xdomain(port);
	else
		tb_lc_unconfigure_xdomain(port);

	tb_port_enable(port->dual_link_port);
}

static void tb_scan_xdomain(struct tb_port *port)
{
	struct tb_switch *sw = port->sw;
	struct tb *tb = sw->tb;
	struct tb_xdomain *xd;
	u64 route;

	if (!tb_is_xdomain_enabled())
		return;

	route = tb_downstream_route(port);
	xd = tb_xdomain_find_by_route(tb, route);
	if (xd) {
		tb_xdomain_put(xd);
		return;
	}

	xd = tb_xdomain_alloc(tb, &sw->dev, route, tb->root_switch->uuid,
			      NULL);
	if (xd) {
		tb_port_at(route, sw)->xdomain = xd;
		tb_port_configure_xdomain(port);
		tb_xdomain_add(xd);
	}
}

static int tb_enable_tmu(struct tb_switch *sw)
{
	int ret;

	/* If it is already enabled in correct mode, don't touch it */
	if (tb_switch_tmu_is_enabled(sw))
		return 0;

	ret = tb_switch_tmu_disable(sw);
	if (ret)
		return ret;

	ret = tb_switch_tmu_post_time(sw);
	if (ret)
		return ret;

	return tb_switch_tmu_enable(sw);
}

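/*
 * The ordering above appears deliberate: the TMU is disabled first,
 * the local time is then posted to the router and only after that is
 * the TMU enabled again, presumably so the router's time base is
 * consistent with the rest of the domain before time sync restarts.
 */
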
/**
 * tb_find_unused_port() - return the first inactive port on @sw
 * @sw: Switch to find the port on
 * @type: Port type to look for
 */
static struct tb_port *tb_find_unused_port(struct tb_switch *sw,
					   enum tb_port_type type)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (tb_is_upstream_port(port))
			continue;
		if (port->config.type != type)
			continue;
		if (!port->cap_adap)
			continue;
		if (tb_port_is_enabled(port))
			continue;
		return port;
	}
	return NULL;
}

static struct tb_port *tb_find_usb3_down(struct tb_switch *sw,
					 const struct tb_port *port)
{
	struct tb_port *down;

	down = usb4_switch_map_usb3_down(sw, port);
	if (down && !tb_usb3_port_is_enabled(down))
		return down;
	return NULL;
}

static struct tb_tunnel *tb_find_tunnel(struct tb *tb, enum tb_tunnel_type type,
					struct tb_port *src_port,
					struct tb_port *dst_port)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;

	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
		if (tunnel->type == type &&
		    ((src_port && src_port == tunnel->src_port) ||
		     (dst_port && dst_port == tunnel->dst_port))) {
			return tunnel;
		}
	}

	return NULL;
}

static struct tb_tunnel *tb_find_first_usb3_tunnel(struct tb *tb,
						   struct tb_port *src_port,
						   struct tb_port *dst_port)
{
	struct tb_port *port, *usb3_down;
	struct tb_switch *sw;

	/* Pick the router that is deepest in the topology */
	if (dst_port->sw->config.depth > src_port->sw->config.depth)
		sw = dst_port->sw;
	else
		sw = src_port->sw;

	/* Can't be the host router */
	if (sw == tb->root_switch)
		return NULL;

	/* Find the downstream USB4 port that leads to this router */
	port = tb_port_at(tb_route(sw), tb->root_switch);
	/* Find the corresponding host router USB3 downstream port */
	usb3_down = usb4_switch_map_usb3_down(tb->root_switch, port);
	if (!usb3_down)
		return NULL;

	return tb_find_tunnel(tb, TB_TUNNEL_USB3, usb3_down, NULL);
}

static int tb_available_bandwidth(struct tb *tb, struct tb_port *src_port,
				  struct tb_port *dst_port, int *available_up,
				  int *available_down)
{
	int usb3_consumed_up, usb3_consumed_down, ret;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;
	struct tb_port *port;

	tb_port_dbg(dst_port, "calculating available bandwidth\n");

	tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
	if (tunnel) {
		ret = tb_tunnel_consumed_bandwidth(tunnel, &usb3_consumed_up,
						   &usb3_consumed_down);
		if (ret)
			return ret;
	} else {
		usb3_consumed_up = 0;
		usb3_consumed_down = 0;
	}

	*available_up = *available_down = 40000;

	/* Find the minimum available bandwidth over all links */
	tb_for_each_port_on_path(src_port, dst_port, port) {
		int link_speed, link_width, up_bw, down_bw;

		if (!tb_port_is_null(port))
			continue;

		if (tb_is_upstream_port(port)) {
			link_speed = port->sw->link_speed;
		} else {
			link_speed = tb_port_get_link_speed(port);
			if (link_speed < 0)
				return link_speed;
		}

		link_width = port->bonded ? 2 : 1;

		up_bw = link_speed * link_width * 1000; /* Mb/s */
		/* Leave 10% guard band */
		up_bw -= up_bw / 10;
		down_bw = up_bw;

		tb_port_dbg(port, "link total bandwidth %d Mb/s\n", up_bw);

		/*
		 * Find all DP tunnels that cross the port and reduce
		 * their consumed bandwidth from the available.
		 */
		list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
			int dp_consumed_up, dp_consumed_down;

			if (!tb_tunnel_is_dp(tunnel))
				continue;

			if (!tb_tunnel_port_on_path(tunnel, port))
				continue;

			ret = tb_tunnel_consumed_bandwidth(tunnel,
							   &dp_consumed_up,
							   &dp_consumed_down);
			if (ret)
				return ret;

			up_bw -= dp_consumed_up;
			down_bw -= dp_consumed_down;
		}

		/*
		 * If USB3 is tunneled from the host router down to the
		 * branch leading to port we need to take USB3 consumed
		 * bandwidth into account regardless of whether it actually
		 * crosses the port.
		 */
		up_bw -= usb3_consumed_up;
		down_bw -= usb3_consumed_down;

		if (up_bw < *available_up)
			*available_up = up_bw;
		if (down_bw < *available_down)
			*available_down = down_bw;
	}

	if (*available_up < 0)
		*available_up = 0;
	if (*available_down < 0)
		*available_down = 0;

	return 0;
}

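/*
 * Worked example of the arithmetic above, assuming a bonded (2-lane)
 * 20 Gb/s link: 20 * 2 * 1000 = 40000 Mb/s per direction, and the 10%
 * guard band leaves 36000 Mb/s. The bandwidth consumed by DP tunnels
 * crossing the port and by USB3 tunneled down this branch is then
 * subtracted, and the minimum over all links on the path is what is
 * reported back in @available_up/@available_down.
 */
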
static int tb_release_unused_usb3_bandwidth(struct tb *tb,
					    struct tb_port *src_port,
					    struct tb_port *dst_port)
{
	struct tb_tunnel *tunnel;

	tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
	return tunnel ? tb_tunnel_release_unused_bandwidth(tunnel) : 0;
}

static void tb_reclaim_usb3_bandwidth(struct tb *tb, struct tb_port *src_port,
				      struct tb_port *dst_port)
{
	int ret, available_up, available_down;
	struct tb_tunnel *tunnel;

	tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
	if (!tunnel)
		return;

	tb_dbg(tb, "reclaiming unused bandwidth for USB3\n");

	/*
	 * Calculate available bandwidth for the first hop USB3 tunnel.
	 * That determines the whole USB3 bandwidth for this branch.
	 */
	ret = tb_available_bandwidth(tb, tunnel->src_port, tunnel->dst_port,
				     &available_up, &available_down);
	if (ret) {
		tb_warn(tb, "failed to calculate available bandwidth\n");
		return;
	}

	tb_dbg(tb, "available bandwidth for USB3 %d/%d Mb/s\n",
	       available_up, available_down);

	tb_tunnel_reclaim_available_bandwidth(tunnel, &available_up,
					      &available_down);
}

static int tb_tunnel_usb3(struct tb *tb, struct tb_switch *sw)
{
	struct tb_switch *parent = tb_switch_parent(sw);
	int ret, available_up, available_down;
	struct tb_port *up, *down, *port;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;

	if (!tb_acpi_may_tunnel_usb3()) {
		tb_dbg(tb, "USB3 tunneling disabled, not creating tunnel\n");
		return 0;
	}

	up = tb_switch_find_port(sw, TB_TYPE_USB3_UP);
	if (!up)
		return 0;

	if (!sw->link_usb4)
		return 0;

	/*
	 * Look up available down port. Since we are chaining it should
	 * be found right above this switch.
	 */
	port = tb_port_at(tb_route(sw), parent);
	down = tb_find_usb3_down(parent, port);
	if (!down)
		return 0;

	if (tb_route(parent)) {
		struct tb_port *parent_up;
		/*
		 * Check first that the parent switch has its upstream USB3
		 * port enabled. Otherwise the chain is not complete and
		 * there is no point setting up a new tunnel.
		 */
		parent_up = tb_switch_find_port(parent, TB_TYPE_USB3_UP);
		if (!parent_up || !tb_port_is_enabled(parent_up))
			return 0;

		/* Make all unused bandwidth available for the new tunnel */
		ret = tb_release_unused_usb3_bandwidth(tb, down, up);
		if (ret)
			return ret;
	}

	ret = tb_available_bandwidth(tb, down, up, &available_up,
				     &available_down);
	if (ret)
		goto err_reclaim;

	tb_port_dbg(up, "available bandwidth for new USB3 tunnel %d/%d Mb/s\n",
		    available_up, available_down);

	tunnel = tb_tunnel_alloc_usb3(tb, up, down, available_up,
				      available_down);
	if (!tunnel) {
		ret = -ENOMEM;
		goto err_reclaim;
	}

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(up,
			     "USB3 tunnel activation failed, aborting\n");
		ret = -EIO;
		goto err_free;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	if (tb_route(parent))
		tb_reclaim_usb3_bandwidth(tb, down, up);

	return 0;

err_free:
	tb_tunnel_free(tunnel);
err_reclaim:
	if (tb_route(parent))
		tb_reclaim_usb3_bandwidth(tb, down, up);

	return ret;
}

static int tb_create_usb3_tunnels(struct tb_switch *sw)
{
	struct tb_port *port;
	int ret;

	if (!tb_acpi_may_tunnel_usb3())
		return 0;

	if (tb_route(sw)) {
		ret = tb_tunnel_usb3(sw->tb, sw);
		if (ret)
			return ret;
	}

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_has_remote(port))
			continue;
		ret = tb_create_usb3_tunnels(port->remote->sw);
		if (ret)
			return ret;
	}

	return 0;
}

static void tb_scan_port(struct tb_port *port);

/**
 * tb_scan_switch() - scan for and initialize downstream switches
 */
static void tb_scan_switch(struct tb_switch *sw)
{
	struct tb_port *port;

	pm_runtime_get_sync(&sw->dev);

	tb_switch_for_each_port(sw, port)
		tb_scan_port(port);

	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);
}

/**
 * tb_scan_port() - check for and initialize switches below port
 */
static void tb_scan_port(struct tb_port *port)
{
	struct tb_cm *tcm = tb_priv(port->sw->tb);
	struct tb_port *upstream_port;
	struct tb_switch *sw;

	if (tb_is_upstream_port(port))
		return;

	if (tb_port_is_dpout(port) && tb_dp_port_hpd_is_active(port) == 1 &&
	    !tb_dp_port_is_enabled(port)) {
		tb_port_dbg(port, "DP adapter HPD set, queuing hotplug\n");
		tb_queue_hotplug(port->sw->tb, tb_route(port->sw), port->port,
				 false);
		return;
	}

	if (port->config.type != TB_TYPE_PORT)
		return;
	if (port->dual_link_port && port->link_nr)
		return; /*
			 * Downstream switch is reachable through two ports.
			 * Only scan on the primary port (link_nr == 0).
			 */
	if (tb_wait_for_port(port, false) <= 0)
		return;
	if (port->remote) {
		tb_port_dbg(port, "port already has a remote\n");
		return;
	}

	tb_retimer_scan(port);

	sw = tb_switch_alloc(port->sw->tb, &port->sw->dev,
			     tb_downstream_route(port));
	if (IS_ERR(sw)) {
		/*
		 * If there is an error accessing the connected switch
		 * it may be connected to another domain. Also we allow
		 * the other domain to be connected to a max depth switch.
		 */
		if (PTR_ERR(sw) == -EIO || PTR_ERR(sw) == -EADDRNOTAVAIL)
			tb_scan_xdomain(port);
		return;
	}

	if (tb_switch_configure(sw)) {
		tb_switch_put(sw);
		return;
	}

	/*
	 * If there was previously another domain connected remove it
	 * first.
	 */
	if (port->xdomain) {
		tb_xdomain_remove(port->xdomain);
		tb_port_unconfigure_xdomain(port);
		port->xdomain = NULL;
	}

	/*
	 * Do not send uevents until we have discovered all existing
	 * tunnels and know which switches were authorized already by
	 * the boot firmware.
	 */
	if (!tcm->hotplug_active)
		dev_set_uevent_suppress(&sw->dev, true);

	/*
	 * At the moment we can only support runtime PM for Thunderbolt 2
	 * and beyond (devices with LC).
	 */
	sw->rpm = sw->generation > 1;

	if (tb_switch_add(sw)) {
		tb_switch_put(sw);
		return;
	}

	/* Link the switches using both links if available */
	upstream_port = tb_upstream_port(sw);
	port->remote = upstream_port;
	upstream_port->remote = port;
	if (port->dual_link_port && upstream_port->dual_link_port) {
		port->dual_link_port->remote = upstream_port->dual_link_port;
		upstream_port->dual_link_port->remote = port->dual_link_port;
	}

	/* Enable lane bonding if supported */
	tb_switch_lane_bonding_enable(sw);
	/* Set the link configured */
	tb_switch_configure_link(sw);

	if (tb_enable_tmu(sw))
		tb_sw_warn(sw, "failed to enable TMU\n");

	/* Scan upstream retimers */
	tb_retimer_scan(upstream_port);

	/*
	 * Create USB 3.x tunnels only when the switch is plugged to the
	 * domain. This is because we scan the domain also during discovery
	 * and want to discover existing USB 3.x tunnels before we create
	 * new ones.
	 */
	if (tcm->hotplug_active && tb_tunnel_usb3(sw->tb, sw))
		tb_sw_warn(sw, "USB3 tunnel creation failed\n");

	tb_add_dp_resources(sw);
	tb_scan_switch(sw);
}

static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel)
{
	struct tb_port *src_port, *dst_port;
	struct tb *tb;

	if (!tunnel)
		return;

	tb_tunnel_deactivate(tunnel);
	list_del(&tunnel->list);

	tb = tunnel->tb;
	src_port = tunnel->src_port;
	dst_port = tunnel->dst_port;

	switch (tunnel->type) {
	case TB_TUNNEL_DP:
		/*
		 * In case of DP tunnel make sure the DP IN resource is
		 * deallocated properly.
		 */
		tb_switch_dealloc_dp_resource(src_port->sw, src_port);
		/* Now we can allow the domain to runtime suspend again */
		pm_runtime_mark_last_busy(&dst_port->sw->dev);
		pm_runtime_put_autosuspend(&dst_port->sw->dev);
		pm_runtime_mark_last_busy(&src_port->sw->dev);
		pm_runtime_put_autosuspend(&src_port->sw->dev);
		fallthrough;

	case TB_TUNNEL_USB3:
		tb_reclaim_usb3_bandwidth(tb, src_port, dst_port);
		break;

	default:
		/*
		 * PCIe and DMA tunnels do not consume guaranteed
		 * bandwidth.
		 */
		break;
	}

	tb_tunnel_free(tunnel);
}

/**
 * tb_free_invalid_tunnels() - destroy tunnels of devices that have gone away
 */
static void tb_free_invalid_tunnels(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;
	struct tb_tunnel *n;

	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
		if (tb_tunnel_is_invalid(tunnel))
			tb_deactivate_and_free_tunnel(tunnel);
	}
}

/**
 * tb_free_unplugged_children() - traverse hierarchy and free unplugged switches
 */
static void tb_free_unplugged_children(struct tb_switch *sw)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_has_remote(port))
			continue;

		if (port->remote->sw->is_unplugged) {
			tb_retimer_remove_all(port);
			tb_remove_dp_resources(port->remote->sw);
			tb_switch_unconfigure_link(port->remote->sw);
			tb_switch_lane_bonding_disable(port->remote->sw);
			tb_switch_remove(port->remote->sw);
			port->remote = NULL;
			if (port->dual_link_port)
				port->dual_link_port->remote = NULL;
		} else {
			tb_free_unplugged_children(port->remote->sw);
		}
	}
}

static struct tb_port *tb_find_pcie_down(struct tb_switch *sw,
					 const struct tb_port *port)
{
	struct tb_port *down = NULL;

	/*
	 * To keep plugging devices consistently in the same PCIe
	 * hierarchy, do mapping here for switch downstream PCIe ports.
	 */
	if (tb_switch_is_usb4(sw)) {
		down = usb4_switch_map_pcie_down(sw, port);
	} else if (!tb_route(sw)) {
		int phy_port = tb_phy_port_from_link(port->port);
		int index;

		/*
		 * Hard-coded Thunderbolt port to PCIe down port mapping
		 * per controller.
		 */
		if (tb_switch_is_cactus_ridge(sw) ||
		    tb_switch_is_alpine_ridge(sw))
			index = !phy_port ? 6 : 7;
		else if (tb_switch_is_falcon_ridge(sw))
			index = !phy_port ? 6 : 8;
		else if (tb_switch_is_titan_ridge(sw))
			index = !phy_port ? 8 : 9;
		else
			goto out;

		/* Validate the hard-coding */
		if (WARN_ON(index > sw->config.max_port_number))
			goto out;

		down = &sw->ports[index];
	}

	if (down) {
		if (WARN_ON(!tb_port_is_pcie_down(down)))
			goto out;
		if (tb_pci_port_is_enabled(down))
			goto out;

		return down;
	}

out:
	return tb_find_unused_port(sw, TB_TYPE_PCIE_DOWN);
}

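/*
 * Example of the hard-coded mapping above: on a Falcon Ridge host
 * router, physical link 0 maps to PCIe down port 6 and physical link 1
 * to port 8, so a device replugged into the same physical connector
 * always lands under the same PCIe downstream port.
 */
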
static struct tb_port *tb_find_dp_out(struct tb *tb, struct tb_port *in)
{
	struct tb_port *host_port, *port;
	struct tb_cm *tcm = tb_priv(tb);

	host_port = tb_route(in->sw) ?
		tb_port_at(tb_route(in->sw), tb->root_switch) : NULL;

	list_for_each_entry(port, &tcm->dp_resources, list) {
		if (!tb_port_is_dpout(port))
			continue;

		if (tb_port_is_enabled(port)) {
			tb_port_dbg(port, "in use\n");
			continue;
		}

		tb_port_dbg(port, "DP OUT available\n");

		/*
		 * Keep the DP tunnel under the topology starting from
		 * the same host router downstream port.
		 */
		if (host_port && tb_route(port->sw)) {
			struct tb_port *p;

			p = tb_port_at(tb_route(port->sw), tb->root_switch);
			if (p != host_port)
				continue;
		}

		return port;
	}

	return NULL;
}

static void tb_tunnel_dp(struct tb *tb)
{
	int available_up, available_down, ret;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *port, *in, *out;
	struct tb_tunnel *tunnel;

	if (!tb_acpi_may_tunnel_dp()) {
		tb_dbg(tb, "DP tunneling disabled, not creating tunnel\n");
		return;
	}

	/*
	 * Find pair of inactive DP IN and DP OUT adapters and then
	 * establish a DP tunnel between them.
	 */
	tb_dbg(tb, "looking for DP IN <-> DP OUT pairs:\n");

	in = NULL;
	out = NULL;
	list_for_each_entry(port, &tcm->dp_resources, list) {
		if (!tb_port_is_dpin(port))
			continue;

		if (tb_port_is_enabled(port)) {
			tb_port_dbg(port, "in use\n");
			continue;
		}

		tb_port_dbg(port, "DP IN available\n");

		out = tb_find_dp_out(tb, port);
		if (out) {
			in = port;
			break;
		}
	}

	if (!in) {
		tb_dbg(tb, "no suitable DP IN adapter available, not tunneling\n");
		return;
	}
	if (!out) {
		tb_dbg(tb, "no suitable DP OUT adapter available, not tunneling\n");
		return;
	}

	/*
	 * DP stream needs the domain to be active so runtime resume
	 * both ends of the tunnel.
	 *
	 * This should bring the routers in the middle active as well
	 * and keeps the domain from runtime suspending while the DP
	 * tunnel is active.
	 */
	pm_runtime_get_sync(&in->sw->dev);
	pm_runtime_get_sync(&out->sw->dev);

	if (tb_switch_alloc_dp_resource(in->sw, in)) {
		tb_port_dbg(in, "no resource available for DP IN, not tunneling\n");
		goto err_rpm_put;
	}

	/* Make all unused USB3 bandwidth available for the new DP tunnel */
	ret = tb_release_unused_usb3_bandwidth(tb, in, out);
	if (ret) {
		tb_warn(tb, "failed to release unused bandwidth\n");
		goto err_dealloc_dp;
	}

	ret = tb_available_bandwidth(tb, in, out, &available_up,
				     &available_down);
	if (ret)
		goto err_reclaim;

	tb_dbg(tb, "available bandwidth for new DP tunnel %u/%u Mb/s\n",
	       available_up, available_down);

	tunnel = tb_tunnel_alloc_dp(tb, in, out, available_up, available_down);
	if (!tunnel) {
		tb_port_dbg(out, "could not allocate DP tunnel\n");
		goto err_reclaim;
	}

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(out, "DP tunnel activation failed, aborting\n");
		goto err_free;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	tb_reclaim_usb3_bandwidth(tb, in, out);
	return;

err_free:
	tb_tunnel_free(tunnel);
err_reclaim:
	tb_reclaim_usb3_bandwidth(tb, in, out);
err_dealloc_dp:
	tb_switch_dealloc_dp_resource(in->sw, in);
err_rpm_put:
	pm_runtime_mark_last_busy(&out->sw->dev);
	pm_runtime_put_autosuspend(&out->sw->dev);
	pm_runtime_mark_last_busy(&in->sw->dev);
	pm_runtime_put_autosuspend(&in->sw->dev);
}

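/*
 * Note that the error unwind above mirrors the setup steps in reverse:
 * free the tunnel, reclaim the USB3 bandwidth that was released for it,
 * deallocate the DP IN resource and finally drop the runtime PM
 * references taken on both ends of the tunnel.
 */
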
static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port)
{
	struct tb_port *in, *out;
	struct tb_tunnel *tunnel;

	if (tb_port_is_dpin(port)) {
		tb_port_dbg(port, "DP IN resource unavailable\n");
		in = port;
		out = NULL;
	} else {
		tb_port_dbg(port, "DP OUT resource unavailable\n");
		in = NULL;
		out = port;
	}

	tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, out);
	tb_deactivate_and_free_tunnel(tunnel);
	list_del_init(&port->list);

	/*
	 * See if there is another DP OUT port that can be used to
	 * create another tunnel.
	 */
	tb_tunnel_dp(tb);
}

static void tb_dp_resource_available(struct tb *tb, struct tb_port *port)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *p;

	if (tb_port_is_enabled(port))
		return;

	list_for_each_entry(p, &tcm->dp_resources, list) {
		if (p == port)
			return;
	}

	tb_port_dbg(port, "DP %s resource available\n",
		    tb_port_is_dpin(port) ? "IN" : "OUT");
	list_add_tail(&port->list, &tcm->dp_resources);

	/* Look for suitable DP IN <-> DP OUT pairs now */
	tb_tunnel_dp(tb);
}

static void tb_disconnect_and_release_dp(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel, *n;

	/*
	 * Tear down all DP tunnels and release their resources. They
	 * will be re-established after resume based on plug events.
	 */
	list_for_each_entry_safe_reverse(tunnel, n, &tcm->tunnel_list, list) {
		if (tb_tunnel_is_dp(tunnel))
			tb_deactivate_and_free_tunnel(tunnel);
	}

	while (!list_empty(&tcm->dp_resources)) {
		struct tb_port *port;

		port = list_first_entry(&tcm->dp_resources,
					struct tb_port, list);
		list_del_init(&port->list);
	}
}

static int tb_disconnect_pci(struct tb *tb, struct tb_switch *sw)
{
	struct tb_tunnel *tunnel;
	struct tb_port *up;

	up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
	if (WARN_ON(!up))
		return -ENODEV;

	tunnel = tb_find_tunnel(tb, TB_TUNNEL_PCI, NULL, up);
	if (WARN_ON(!tunnel))
		return -ENODEV;

	tb_tunnel_deactivate(tunnel);
	list_del(&tunnel->list);
	tb_tunnel_free(tunnel);
	return 0;
}

static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
{
	struct tb_port *up, *down, *port;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_switch *parent_sw;
	struct tb_tunnel *tunnel;

	up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
	if (!up)
		return 0;

	/*
	 * Look up available down port. Since we are chaining it should
	 * be found right above this switch.
	 */
	parent_sw = tb_to_switch(sw->dev.parent);
	port = tb_port_at(tb_route(sw), parent_sw);
	down = tb_find_pcie_down(parent_sw, port);
	if (!down)
		return 0;

	tunnel = tb_tunnel_alloc_pci(tb, up, down);
	if (!tunnel)
		return -ENOMEM;

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(up,
			     "PCIe tunnel activation failed, aborting\n");
		tb_tunnel_free(tunnel);
		return -EIO;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	return 0;
}

static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
				    int transmit_path, int transmit_ring,
				    int receive_path, int receive_ring)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *nhi_port, *dst_port;
	struct tb_tunnel *tunnel;
	struct tb_switch *sw;

	sw = tb_to_switch(xd->dev.parent);
	dst_port = tb_port_at(xd->route, sw);
	nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);

	mutex_lock(&tb->lock);
	tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, transmit_path,
				     transmit_ring, receive_path, receive_ring);
	if (!tunnel) {
		mutex_unlock(&tb->lock);
		return -ENOMEM;
	}

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(nhi_port,
			     "DMA tunnel activation failed, aborting\n");
		tb_tunnel_free(tunnel);
		mutex_unlock(&tb->lock);
		return -EIO;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	mutex_unlock(&tb->lock);
	return 0;
}

static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
					  int transmit_path, int transmit_ring,
					  int receive_path, int receive_ring)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *nhi_port, *dst_port;
	struct tb_tunnel *tunnel, *n;
	struct tb_switch *sw;

	sw = tb_to_switch(xd->dev.parent);
	dst_port = tb_port_at(xd->route, sw);
	nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);

	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
		if (!tb_tunnel_is_dma(tunnel))
			continue;
		if (tunnel->src_port != nhi_port || tunnel->dst_port != dst_port)
			continue;

		if (tb_tunnel_match_dma(tunnel, transmit_path, transmit_ring,
					receive_path, receive_ring))
			tb_deactivate_and_free_tunnel(tunnel);
	}
}

static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
					int transmit_path, int transmit_ring,
					int receive_path, int receive_ring)
{
	if (!xd->is_unplugged) {
		mutex_lock(&tb->lock);
		__tb_disconnect_xdomain_paths(tb, xd, transmit_path,
					      transmit_ring, receive_path,
					      receive_ring);
		mutex_unlock(&tb->lock);
	}
	return 0;
}

/* hotplug handling */

/**
 * tb_handle_hotplug() - handle hotplug event
 *
 * Executes on tb->wq.
 */
static void tb_handle_hotplug(struct work_struct *work)
{
	struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
	struct tb *tb = ev->tb;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_switch *sw;
	struct tb_port *port;

	/* Bring the domain back from sleep if it was suspended */
	pm_runtime_get_sync(&tb->dev);

	mutex_lock(&tb->lock);
	if (!tcm->hotplug_active)
		goto out; /* during init, suspend or shutdown */

	sw = tb_switch_find_by_route(tb, ev->route);
	if (!sw) {
		tb_warn(tb,
			"hotplug event from non existent switch %llx:%x (unplug: %d)\n",
			ev->route, ev->port, ev->unplug);
		goto out;
	}
	if (ev->port > sw->config.max_port_number) {
		tb_warn(tb,
			"hotplug event from non existent port %llx:%x (unplug: %d)\n",
			ev->route, ev->port, ev->unplug);
		goto put_sw;
	}
	port = &sw->ports[ev->port];
	if (tb_is_upstream_port(port)) {
		tb_dbg(tb, "hotplug event for upstream port %llx:%x (unplug: %d)\n",
		       ev->route, ev->port, ev->unplug);
		goto put_sw;
	}

	pm_runtime_get_sync(&sw->dev);

	if (ev->unplug) {
		tb_retimer_remove_all(port);

		if (tb_port_has_remote(port)) {
			tb_port_dbg(port, "switch unplugged\n");
			tb_sw_set_unplugged(port->remote->sw);
			tb_free_invalid_tunnels(tb);
			tb_remove_dp_resources(port->remote->sw);
			tb_switch_tmu_disable(port->remote->sw);
			tb_switch_unconfigure_link(port->remote->sw);
			tb_switch_lane_bonding_disable(port->remote->sw);
			tb_switch_remove(port->remote->sw);
			port->remote = NULL;
			if (port->dual_link_port)
				port->dual_link_port->remote = NULL;
			/* Maybe we can create another DP tunnel */
			tb_tunnel_dp(tb);
		} else if (port->xdomain) {
			struct tb_xdomain *xd = tb_xdomain_get(port->xdomain);

			tb_port_dbg(port, "xdomain unplugged\n");
			/*
			 * Service drivers are unbound during
			 * tb_xdomain_remove() so setting XDomain as
			 * unplugged here prevents deadlock if they call
			 * tb_xdomain_disable_paths(). We will tear down
			 * all the tunnels below.
			 */
			xd->is_unplugged = true;
			tb_xdomain_remove(xd);
			port->xdomain = NULL;
			__tb_disconnect_xdomain_paths(tb, xd, -1, -1, -1, -1);
			tb_xdomain_put(xd);
			tb_port_unconfigure_xdomain(port);
		} else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
			tb_dp_resource_unavailable(tb, port);
		} else {
			tb_port_dbg(port,
				    "got unplug event for disconnected port, ignoring\n");
		}
	} else if (port->remote) {
		tb_port_dbg(port, "got plug event for connected port, ignoring\n");
	} else {
		if (tb_port_is_null(port)) {
			tb_port_dbg(port, "hotplug: scanning\n");
			tb_scan_port(port);
			if (!port->remote)
				tb_port_dbg(port, "hotplug: no switch found\n");
		} else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
			tb_dp_resource_available(tb, port);
		}
	}

	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);

put_sw:
	tb_switch_put(sw);
out:
	mutex_unlock(&tb->lock);

	pm_runtime_mark_last_busy(&tb->dev);
	pm_runtime_put_autosuspend(&tb->dev);

	kfree(ev);
}

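/*
 * To recap the plug event flow ending here: tb_handle_event() below
 * acks the event on the control channel and calls tb_queue_hotplug(),
 * which queues a tb_hotplug_event on tb->wq; this handler then runs
 * under tb->lock, dispatches to the scan, DP resource or XDomain
 * paths, and finally frees the event.
 */
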
/*
 * tb_handle_event() - callback function for the control channel
 *
 * Delegates to tb_handle_hotplug.
 */
static void tb_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
			    const void *buf, size_t size)
{
	const struct cfg_event_pkg *pkg = buf;
	u64 route;

	if (type != TB_CFG_PKG_EVENT) {
		tb_warn(tb, "unexpected event %#x, ignoring\n", type);
		return;
	}

	route = tb_cfg_get_route(&pkg->header);

	if (tb_cfg_ack_plug(tb->ctl, route, pkg->port, pkg->unplug)) {
		tb_warn(tb, "could not ack plug event on %llx:%x\n", route,
			pkg->port);
	}

	tb_queue_hotplug(tb, route, pkg->port, pkg->unplug);
}

static void tb_stop(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;
	struct tb_tunnel *n;

	cancel_delayed_work(&tcm->remove_work);
	/* tunnels are only present after everything has been initialized */
	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
		/*
		 * DMA tunnels require the driver to be functional so we
		 * tear them down. Other protocol tunnels can be left
		 * intact.
		 */
		if (tb_tunnel_is_dma(tunnel))
			tb_tunnel_deactivate(tunnel);
		tb_tunnel_free(tunnel);
	}
	tb_switch_remove(tb->root_switch);
	tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
}

static int tb_scan_finalize_switch(struct device *dev, void *data)
{
	if (tb_is_switch(dev)) {
		struct tb_switch *sw = tb_to_switch(dev);

		/*
		 * If we found that the switch was already setup by the
		 * boot firmware, mark it as authorized now before we
		 * send uevent to userspace.
		 */
		if (sw->boot)
			sw->authorized = 1;

		dev_set_uevent_suppress(dev, false);
		kobject_uevent(&dev->kobj, KOBJ_ADD);
		device_for_each_child(dev, NULL, tb_scan_finalize_switch);
	}

	return 0;
}

static int tb_start(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	int ret;

	tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
	if (IS_ERR(tb->root_switch))
		return PTR_ERR(tb->root_switch);

	/*
	 * ICM firmware upgrade needs running firmware and in native
	 * mode that is not available so disable firmware upgrade of the
	 * root switch.
	 */
	tb->root_switch->no_nvm_upgrade = true;
	/* All USB4 routers support runtime PM */
	tb->root_switch->rpm = tb_switch_is_usb4(tb->root_switch);

	ret = tb_switch_configure(tb->root_switch);
	if (ret) {
		tb_switch_put(tb->root_switch);
		return ret;
	}

	/* Announce the switch to the world */
	ret = tb_switch_add(tb->root_switch);
	if (ret) {
		tb_switch_put(tb->root_switch);
		return ret;
	}

	/* Enable TMU if it is off */
	tb_switch_tmu_enable(tb->root_switch);
	/* Full scan to discover devices added before the driver was loaded. */
	tb_scan_switch(tb->root_switch);
	/* Find out tunnels created by the boot firmware */
	tb_discover_tunnels(tb->root_switch);
	/*
	 * If the boot firmware did not create USB 3.x tunnels create them
	 * now for the whole topology.
	 */
	tb_create_usb3_tunnels(tb->root_switch);
	/* Add DP IN resources for the root switch */
	tb_add_dp_resources(tb->root_switch);
	/* Make the discovered switches available to the userspace */
	device_for_each_child(&tb->root_switch->dev, NULL,
			      tb_scan_finalize_switch);

	/* Allow tb_handle_hotplug to progress events */
	tcm->hotplug_active = true;
	return 0;
}

static int tb_suspend_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);

	tb_dbg(tb, "suspending...\n");
	tb_disconnect_and_release_dp(tb);
	tb_switch_suspend(tb->root_switch, false);
	tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
	tb_dbg(tb, "suspend finished\n");

	return 0;
}

static void tb_restore_children(struct tb_switch *sw)
{
	struct tb_port *port;

	/* No need to restore if the router is already unplugged */
	if (sw->is_unplugged)
		return;

	if (tb_enable_tmu(sw))
		tb_sw_warn(sw, "failed to restore TMU configuration\n");

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_has_remote(port) && !port->xdomain)
			continue;

		if (port->remote) {
			tb_switch_lane_bonding_enable(port->remote->sw);
			tb_switch_configure_link(port->remote->sw);

			tb_restore_children(port->remote->sw);
		} else if (port->xdomain) {
			tb_port_configure_xdomain(port);
		}
	}
}

static int tb_resume_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel, *n;

	tb_dbg(tb, "resuming...\n");

	/* remove any pci devices the firmware might have setup */
	tb_switch_reset(tb->root_switch);

	tb_switch_resume(tb->root_switch);
	tb_free_invalid_tunnels(tb);
	tb_free_unplugged_children(tb->root_switch);
	tb_restore_children(tb->root_switch);
	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
		tb_tunnel_restart(tunnel);
	if (!list_empty(&tcm->tunnel_list)) {
		/*
		 * the pcie links need some time to get going.
		 * 100ms works for me...
		 */
		tb_dbg(tb, "tunnels restarted, sleeping for 100ms\n");
		msleep(100);
	}
	/* Allow tb_handle_hotplug to progress events */
	tcm->hotplug_active = true;
	tb_dbg(tb, "resume finished\n");

	return 0;
}

static int tb_free_unplugged_xdomains(struct tb_switch *sw)
{
	struct tb_port *port;
	int ret = 0;

	tb_switch_for_each_port(sw, port) {
		if (tb_is_upstream_port(port))
			continue;
		if (port->xdomain && port->xdomain->is_unplugged) {
			tb_retimer_remove_all(port);
			tb_xdomain_remove(port->xdomain);
			tb_port_unconfigure_xdomain(port);
			port->xdomain = NULL;
			ret++;
		} else if (port->remote) {
			ret += tb_free_unplugged_xdomains(port->remote->sw);
		}
	}

	return ret;
}

static int tb_freeze_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);

	tcm->hotplug_active = false;
	return 0;
}

static int tb_thaw_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);

	tcm->hotplug_active = true;
	return 0;
}

static void tb_complete(struct tb *tb)
{
	/*
	 * Release any unplugged XDomains and if there is a case where
	 * another domain is swapped in place of unplugged XDomain we
	 * need to run another rescan.
	 */
	mutex_lock(&tb->lock);
	if (tb_free_unplugged_xdomains(tb->root_switch))
		tb_scan_switch(tb->root_switch);
	mutex_unlock(&tb->lock);
}

static int tb_runtime_suspend(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);

	mutex_lock(&tb->lock);
	tb_switch_suspend(tb->root_switch, true);
	tcm->hotplug_active = false;
	mutex_unlock(&tb->lock);

	return 0;
}

static void tb_remove_work(struct work_struct *work)
{
	struct tb_cm *tcm = container_of(work, struct tb_cm, remove_work.work);
	struct tb *tb = tcm_to_tb(tcm);

	mutex_lock(&tb->lock);
	if (tb->root_switch) {
		tb_free_unplugged_children(tb->root_switch);
		tb_free_unplugged_xdomains(tb->root_switch);
	}
	mutex_unlock(&tb->lock);
}

static int tb_runtime_resume(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel, *n;

	mutex_lock(&tb->lock);
	tb_switch_resume(tb->root_switch);
	tb_free_invalid_tunnels(tb);
	tb_restore_children(tb->root_switch);
	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
		tb_tunnel_restart(tunnel);
	tcm->hotplug_active = true;
	mutex_unlock(&tb->lock);

	/*
	 * Schedule cleanup of any unplugged devices. Run this in a
	 * separate thread to avoid possible deadlock if the device
	 * removal runtime resumes the unplugged device.
	 */
	queue_delayed_work(tb->wq, &tcm->remove_work, msecs_to_jiffies(50));
	return 0;
}

static const struct tb_cm_ops tb_cm_ops = {
	.start = tb_start,
	.stop = tb_stop,
	.suspend_noirq = tb_suspend_noirq,
	.resume_noirq = tb_resume_noirq,
	.freeze_noirq = tb_freeze_noirq,
	.thaw_noirq = tb_thaw_noirq,
	.complete = tb_complete,
	.runtime_suspend = tb_runtime_suspend,
	.runtime_resume = tb_runtime_resume,
	.handle_event = tb_handle_event,
	.disapprove_switch = tb_disconnect_pci,
	.approve_switch = tb_tunnel_pci,
	.approve_xdomain_paths = tb_approve_xdomain_paths,
	.disconnect_xdomain_paths = tb_disconnect_xdomain_paths,
};

struct tb *tb_probe(struct tb_nhi *nhi)
{
	struct tb_cm *tcm;
	struct tb *tb;

	tb = tb_domain_alloc(nhi, TB_TIMEOUT, sizeof(*tcm));
	if (!tb)
		return NULL;

	if (tb_acpi_may_tunnel_pcie())
		tb->security_level = TB_SECURITY_USER;
	else
		tb->security_level = TB_SECURITY_NOPCIE;

	tb->cm_ops = &tb_cm_ops;

	tcm = tb_priv(tb);
	INIT_LIST_HEAD(&tcm->tunnel_list);
	INIT_LIST_HEAD(&tcm->dp_resources);
	INIT_DELAYED_WORK(&tcm->remove_work, tb_remove_work);

	tb_dbg(tb, "using software connection manager\n");

	return tb;
}