// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - bus logic (NHI independent)
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2019, Intel Corporation
 */

#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/pm_runtime.h>

#include "tb.h"
#include "tb_regs.h"
#include "tunnel.h"
/**
 * struct tb_cm - Simple Thunderbolt connection manager
 * @tunnel_list: List of active tunnels
 * @dp_resources: List of available DP resources for DP tunneling
 * @hotplug_active: tb_handle_hotplug will stop progressing plug
 *		    events and exit if this is not set (it needs to
 *		    acquire the lock one more time). Used to drain wq
 *		    after cfg has been paused.
 * @remove_work: Work used to remove any unplugged routers after
 *		 runtime resume
 */
struct tb_cm {
	struct list_head tunnel_list;
	struct list_head dp_resources;
	bool hotplug_active;
	struct delayed_work remove_work;
};
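/*
 * Note: tb_domain_alloc() in tb_probe() below allocates struct tb and
 * this connection manager private data in one allocation, with the
 * tb_cm placed directly after struct tb. That layout is what lets
 * tcm_to_tb() below recover the domain pointer by stepping back over
 * sizeof(struct tb).
 */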
static inline struct tb *tcm_to_tb(struct tb_cm *tcm)
{
	return ((void *)tcm - sizeof(struct tb));
}
struct tb_hotplug_event {
	struct work_struct work;
	struct tb *tb;
	u64 route;
	u8 port;
	bool unplug;
};

static void tb_handle_hotplug(struct work_struct *work);
static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug)
{
	struct tb_hotplug_event *ev;

	ev = kmalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev)
		return;

	ev->tb = tb;
	ev->route = route;
	ev->port = port;
	ev->unplug = unplug;
	INIT_WORK(&ev->work, tb_handle_hotplug);
	queue_work(tb->wq, &ev->work);
}
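/*
 * Ownership note: the event is allocated here and freed at the end of
 * tb_handle_hotplug() once the work item has run.
 */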
/* enumeration & hot plug handling */
static void tb_add_dp_resources(struct tb_switch *sw)
{
	struct tb_cm *tcm = tb_priv(sw->tb);
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_is_dpin(port))
			continue;

		if (!tb_switch_query_dp_resource(sw, port))
			continue;

		list_add_tail(&port->list, &tcm->dp_resources);
		tb_port_dbg(port, "DP IN resource available\n");
	}
}
static void tb_remove_dp_resources(struct tb_switch *sw)
{
	struct tb_cm *tcm = tb_priv(sw->tb);
	struct tb_port *port, *tmp;

	/* Clear children resources first */
	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port))
			tb_remove_dp_resources(port->remote->sw);
	}

	list_for_each_entry_safe(port, tmp, &tcm->dp_resources, list) {
		if (port->sw == sw) {
			tb_port_dbg(port, "DP IN resource unavailable\n");
			list_del_init(&port->list);
		}
	}
}
static void tb_discover_tunnels(struct tb_switch *sw)
{
	struct tb *tb = sw->tb;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		struct tb_tunnel *tunnel = NULL;

		switch (port->config.type) {
		case TB_TYPE_DP_HDMI_IN:
			tunnel = tb_tunnel_discover_dp(tb, port);
			break;

		case TB_TYPE_PCIE_DOWN:
			tunnel = tb_tunnel_discover_pci(tb, port);
			break;

		case TB_TYPE_USB3_DOWN:
			tunnel = tb_tunnel_discover_usb3(tb, port);
			break;

		default:
			break;
		}

		if (!tunnel)
			continue;

		if (tb_tunnel_is_pci(tunnel)) {
			struct tb_switch *parent = tunnel->dst_port->sw;

			while (parent != tunnel->src_port->sw) {
				parent->boot = true;
				parent = tb_switch_parent(parent);
			}
		} else if (tb_tunnel_is_dp(tunnel)) {
			/* Keep the domain from powering down */
			pm_runtime_get_sync(&tunnel->src_port->sw->dev);
			pm_runtime_get_sync(&tunnel->dst_port->sw->dev);
		}

		list_add_tail(&tunnel->list, &tcm->tunnel_list);
	}

	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port))
			tb_discover_tunnels(port->remote->sw);
	}
}
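/*
 * The runtime PM references taken above for discovered DP tunnels are
 * dropped in tb_deactivate_and_free_tunnel() when the tunnel goes
 * away, so a display lit up by the boot firmware keeps the domain
 * resumed for as long as the tunnel exists.
 */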
static int tb_port_configure_xdomain(struct tb_port *port)
{
	/*
	 * XDomain paths currently only support single lane so we must
	 * disable the other lane according to USB4 spec.
	 */
	tb_port_disable(port->dual_link_port);

	if (tb_switch_is_usb4(port->sw))
		return usb4_port_configure_xdomain(port);
	return tb_lc_configure_xdomain(port);
}
static void tb_port_unconfigure_xdomain(struct tb_port *port)
{
	if (tb_switch_is_usb4(port->sw))
		usb4_port_unconfigure_xdomain(port);
	else
		tb_lc_unconfigure_xdomain(port);

	tb_port_enable(port->dual_link_port);
}
static void tb_scan_xdomain(struct tb_port *port)
{
	struct tb_switch *sw = port->sw;
	struct tb *tb = sw->tb;
	struct tb_xdomain *xd;
	u64 route;

	if (!tb_is_xdomain_enabled())
		return;

	route = tb_downstream_route(port);
	xd = tb_xdomain_find_by_route(tb, route);
	if (xd) {
		tb_xdomain_put(xd);
		return;
	}

	xd = tb_xdomain_alloc(tb, &sw->dev, route, tb->root_switch->uuid,
			      NULL);
	if (xd) {
		tb_port_at(route, sw)->xdomain = xd;
		tb_port_configure_xdomain(port);
		tb_xdomain_add(xd);
	}
}
static int tb_enable_tmu(struct tb_switch *sw)
{
	int ret;

	/* If it is already enabled in correct mode, don't touch it */
	if (tb_switch_tmu_is_enabled(sw))
		return 0;

	ret = tb_switch_tmu_disable(sw);
	if (ret)
		return ret;

	ret = tb_switch_tmu_post_time(sw);
	if (ret)
		return ret;

	return tb_switch_tmu_enable(sw);
}
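/*
 * Sketch of the sequence above: a TMU already running in the wrong
 * mode is not reconfigured in place. It is first disabled, the local
 * time is posted to the router, and only then is the TMU re-enabled
 * in the desired mode.
 */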
/**
 * tb_find_unused_port() - return the first inactive port on @sw
 * @sw: Switch to find the port on
 * @type: Port type to look for
 */
static struct tb_port *tb_find_unused_port(struct tb_switch *sw,
					   enum tb_port_type type)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (tb_is_upstream_port(port))
			continue;
		if (port->config.type != type)
			continue;
		if (!port->cap_adap)
			continue;
		if (tb_port_is_enabled(port))
			continue;
		return port;
	}
	return NULL;
}
static struct tb_port *tb_find_usb3_down(struct tb_switch *sw,
					 const struct tb_port *port)
{
	struct tb_port *down;

	down = usb4_switch_map_usb3_down(sw, port);
	if (down && !tb_usb3_port_is_enabled(down))
		return down;
	return NULL;
}
static struct tb_tunnel *tb_find_tunnel(struct tb *tb, enum tb_tunnel_type type,
					struct tb_port *src_port,
					struct tb_port *dst_port)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;

	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
		if (tunnel->type == type &&
		    ((src_port && src_port == tunnel->src_port) ||
		     (dst_port && dst_port == tunnel->dst_port))) {
			return tunnel;
		}
	}

	return NULL;
}
static struct tb_tunnel *tb_find_first_usb3_tunnel(struct tb *tb,
						   struct tb_port *src_port,
						   struct tb_port *dst_port)
{
	struct tb_port *port, *usb3_down;
	struct tb_switch *sw;

	/* Pick the router that is deepest in the topology */
	if (dst_port->sw->config.depth > src_port->sw->config.depth)
		sw = dst_port->sw;
	else
		sw = src_port->sw;

	/* Can't be the host router */
	if (sw == tb->root_switch)
		return NULL;

	/* Find the downstream USB4 port that leads to this router */
	port = tb_port_at(tb_route(sw), tb->root_switch);
	/* Find the corresponding host router USB3 downstream port */
	usb3_down = usb4_switch_map_usb3_down(tb->root_switch, port);
	if (!usb3_down)
		return NULL;

	return tb_find_tunnel(tb, TB_TUNNEL_USB3, usb3_down, NULL);
}
static int tb_available_bandwidth(struct tb *tb, struct tb_port *src_port,
	struct tb_port *dst_port, int *available_up, int *available_down)
{
	int usb3_consumed_up, usb3_consumed_down, ret;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;
	struct tb_port *port;

	tb_port_dbg(dst_port, "calculating available bandwidth\n");

	tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
	if (tunnel) {
		ret = tb_tunnel_consumed_bandwidth(tunnel, &usb3_consumed_up,
						   &usb3_consumed_down);
		if (ret)
			return ret;
	} else {
		usb3_consumed_up = 0;
		usb3_consumed_down = 0;
	}

	*available_up = *available_down = 40000;

	/* Find the minimum available bandwidth over all links */
	tb_for_each_port_on_path(src_port, dst_port, port) {
		int link_speed, link_width, up_bw, down_bw;

		if (!tb_port_is_null(port))
			continue;

		if (tb_is_upstream_port(port)) {
			link_speed = port->sw->link_speed;
		} else {
			link_speed = tb_port_get_link_speed(port);
			if (link_speed < 0)
				return link_speed;
		}

		link_width = port->bonded ? 2 : 1;

		up_bw = link_speed * link_width * 1000; /* Mb/s */
		/* Leave 10% guard band */
		up_bw -= up_bw / 10;
		down_bw = up_bw;

		tb_port_dbg(port, "link total bandwidth %d Mb/s\n", up_bw);

		/*
		 * Find all DP tunnels that cross the port and reduce
		 * their consumed bandwidth from the available.
		 */
		list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
			int dp_consumed_up, dp_consumed_down;

			if (!tb_tunnel_is_dp(tunnel))
				continue;

			if (!tb_tunnel_port_on_path(tunnel, port))
				continue;

			ret = tb_tunnel_consumed_bandwidth(tunnel,
							   &dp_consumed_up,
							   &dp_consumed_down);
			if (ret)
				return ret;

			up_bw -= dp_consumed_up;
			down_bw -= dp_consumed_down;
		}

		/*
		 * If USB3 is tunneled from the host router down to the
		 * branch leading to port we need to take USB3 consumed
		 * bandwidth into account regardless of whether it
		 * actually crosses the port.
		 */
		up_bw -= usb3_consumed_up;
		down_bw -= usb3_consumed_down;

		if (up_bw < *available_up)
			*available_up = up_bw;
		if (down_bw < *available_down)
			*available_down = down_bw;
	}

	if (*available_up < 0)
		*available_up = 0;
	if (*available_down < 0)
		*available_down = 0;

	return 0;
}
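/*
 * Worked example for the calculation above (illustrative numbers): a
 * bonded link running 10 Gb/s per lane gives up_bw = 10 * 2 * 1000 =
 * 20000 Mb/s, or 18000 Mb/s after the 10% guard band. If one DP
 * tunnel crossing that link consumes 8000/1000 Mb/s, the link
 * contributes 10000/17000 Mb/s to the minimum taken over the whole
 * path.
 */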
static int tb_release_unused_usb3_bandwidth(struct tb *tb,
					    struct tb_port *src_port,
					    struct tb_port *dst_port)
{
	struct tb_tunnel *tunnel;

	tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
	return tunnel ? tb_tunnel_release_unused_bandwidth(tunnel) : 0;
}
static void tb_reclaim_usb3_bandwidth(struct tb *tb, struct tb_port *src_port,
				      struct tb_port *dst_port)
{
	int ret, available_up, available_down;
	struct tb_tunnel *tunnel;

	tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
	if (!tunnel)
		return;

	tb_dbg(tb, "reclaiming unused bandwidth for USB3\n");

	/*
	 * Calculate available bandwidth for the first hop USB3 tunnel.
	 * That determines the whole USB3 bandwidth for this branch.
	 */
	ret = tb_available_bandwidth(tb, tunnel->src_port, tunnel->dst_port,
				     &available_up, &available_down);
	if (ret) {
		tb_warn(tb, "failed to calculate available bandwidth\n");
		return;
	}

	tb_dbg(tb, "available bandwidth for USB3 %d/%d Mb/s\n",
	       available_up, available_down);

	tb_tunnel_reclaim_available_bandwidth(tunnel, &available_up, &available_down);
}
static int tb_tunnel_usb3(struct tb *tb, struct tb_switch *sw)
{
	struct tb_switch *parent = tb_switch_parent(sw);
	int ret, available_up, available_down;
	struct tb_port *up, *down, *port;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;

	if (!tb_acpi_may_tunnel_usb3()) {
		tb_dbg(tb, "USB3 tunneling disabled, not creating tunnel\n");
		return 0;
	}

	up = tb_switch_find_port(sw, TB_TYPE_USB3_UP);
	if (!up)
		return 0;

	/*
	 * Look up available down port. Since we are chaining it should
	 * be found right above this switch.
	 */
	port = tb_port_at(tb_route(sw), parent);
	down = tb_find_usb3_down(parent, port);
	if (!down)
		return 0;

	if (tb_route(parent)) {
		struct tb_port *parent_up;
		/*
		 * Check first that the parent switch has its upstream USB3
		 * port enabled. Otherwise the chain is not complete and
		 * there is no point setting up a new tunnel.
		 */
		parent_up = tb_switch_find_port(parent, TB_TYPE_USB3_UP);
		if (!parent_up || !tb_port_is_enabled(parent_up))
			return 0;

		/* Make all unused bandwidth available for the new tunnel */
		ret = tb_release_unused_usb3_bandwidth(tb, down, up);
		if (ret)
			return ret;
	}

	ret = tb_available_bandwidth(tb, down, up, &available_up,
				     &available_down);
	if (ret)
		goto err_reclaim;

	tb_port_dbg(up, "available bandwidth for new USB3 tunnel %d/%d Mb/s\n",
		    available_up, available_down);

	tunnel = tb_tunnel_alloc_usb3(tb, up, down, available_up,
				      available_down);
	if (!tunnel) {
		ret = -ENOMEM;
		goto err_reclaim;
	}

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(up,
			     "USB3 tunnel activation failed, aborting\n");
		ret = -EIO;
		goto err_free;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	if (tb_route(parent))
		tb_reclaim_usb3_bandwidth(tb, down, up);

	return 0;

err_free:
	tb_tunnel_free(tunnel);
err_reclaim:
	if (tb_route(parent))
		tb_reclaim_usb3_bandwidth(tb, down, up);

	return ret;
}
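/*
 * The pattern above repeats for every tunnel type that consumes
 * guaranteed bandwidth (see tb_tunnel_dp() below): release all unused
 * USB3 bandwidth on the branch, measure what is actually available,
 * size the new tunnel to that, and finally let the USB3 tunnel
 * reclaim whatever is left.
 */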
static int tb_create_usb3_tunnels(struct tb_switch *sw)
{
	struct tb_port *port;
	int ret;

	if (!tb_acpi_may_tunnel_usb3())
		return 0;

	if (tb_route(sw)) {
		ret = tb_tunnel_usb3(sw->tb, sw);
		if (ret)
			return ret;
	}

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_has_remote(port))
			continue;
		ret = tb_create_usb3_tunnels(port->remote->sw);
		if (ret)
			return ret;
	}

	return 0;
}
static void tb_scan_port(struct tb_port *port);

/**
 * tb_scan_switch() - scan for and initialize downstream switches
 */
static void tb_scan_switch(struct tb_switch *sw)
{
	struct tb_port *port;

	pm_runtime_get_sync(&sw->dev);

	tb_switch_for_each_port(sw, port)
		tb_scan_port(port);

	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);
}
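/*
 * tb_scan_switch() and tb_scan_port() are mutually recursive: scanning
 * a switch scans each of its ports, and scanning a port that has a
 * switch behind it ends with tb_scan_switch() on the new switch, so a
 * single call on the root switch walks the whole topology depth-first.
 */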
/**
 * tb_scan_port() - check for and initialize switches below port
 */
static void tb_scan_port(struct tb_port *port)
{
	struct tb_cm *tcm = tb_priv(port->sw->tb);
	struct tb_port *upstream_port;
	struct tb_switch *sw;

	if (tb_is_upstream_port(port))
		return;

	if (tb_port_is_dpout(port) && tb_dp_port_hpd_is_active(port) == 1 &&
	    !tb_dp_port_is_enabled(port)) {
		tb_port_dbg(port, "DP adapter HPD set, queuing hotplug\n");
		tb_queue_hotplug(port->sw->tb, tb_route(port->sw), port->port,
				 false);
		return;
	}

	if (port->config.type != TB_TYPE_PORT)
		return;
	if (port->dual_link_port && port->link_nr)
		return; /*
			 * Downstream switch is reachable through two ports.
			 * Only scan on the primary port (link_nr == 0).
			 */
	if (tb_wait_for_port(port, false) <= 0)
		return;
	if (port->remote) {
		tb_port_dbg(port, "port already has a remote\n");
		return;
	}

	tb_retimer_scan(port);

	sw = tb_switch_alloc(port->sw->tb, &port->sw->dev,
			     tb_downstream_route(port));
	if (IS_ERR(sw)) {
		/*
		 * If there is an error accessing the connected switch
		 * it may be connected to another domain. Also we allow
		 * the other domain to be connected to a max depth switch.
		 */
		if (PTR_ERR(sw) == -EIO || PTR_ERR(sw) == -EADDRNOTAVAIL)
			tb_scan_xdomain(port);
		return;
	}

	if (tb_switch_configure(sw)) {
		tb_switch_put(sw);
		return;
	}

	/*
	 * If there was previously another domain connected remove it
	 * first.
	 */
	if (port->xdomain) {
		tb_xdomain_remove(port->xdomain);
		tb_port_unconfigure_xdomain(port);
		port->xdomain = NULL;
	}

	/*
	 * Do not send uevents until we have discovered all existing
	 * tunnels and know which switches were authorized already by
	 * the boot firmware.
	 */
	if (!tcm->hotplug_active)
		dev_set_uevent_suppress(&sw->dev, true);

	/*
	 * At the moment we can support runtime PM on Thunderbolt 2 and
	 * beyond (devices with LC).
	 */
	sw->rpm = sw->generation > 1;

	if (tb_switch_add(sw)) {
		tb_switch_put(sw);
		return;
	}

	/* Link the switches using both links if available */
	upstream_port = tb_upstream_port(sw);
	port->remote = upstream_port;
	upstream_port->remote = port;
	if (port->dual_link_port && upstream_port->dual_link_port) {
		port->dual_link_port->remote = upstream_port->dual_link_port;
		upstream_port->dual_link_port->remote = port->dual_link_port;
	}

	/* Enable lane bonding if supported */
	tb_switch_lane_bonding_enable(sw);
	/* Set the link configured */
	tb_switch_configure_link(sw);

	if (tb_enable_tmu(sw))
		tb_sw_warn(sw, "failed to enable TMU\n");

	/* Scan upstream retimers */
	tb_retimer_scan(upstream_port);

	/*
	 * Create USB 3.x tunnels only when the switch is plugged to the
	 * domain. This is because we scan the domain also during discovery
	 * and want to discover existing USB 3.x tunnels before we create
	 * new ones.
	 */
	if (tcm->hotplug_active && tb_tunnel_usb3(sw->tb, sw))
		tb_sw_warn(sw, "USB3 tunnel creation failed\n");

	tb_add_dp_resources(sw);
	tb_scan_switch(sw);
}
static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel)
{
	struct tb_port *src_port, *dst_port;
	struct tb *tb;

	if (!tunnel)
		return;

	tb_tunnel_deactivate(tunnel);
	list_del(&tunnel->list);

	tb = tunnel->tb;
	src_port = tunnel->src_port;
	dst_port = tunnel->dst_port;

	switch (tunnel->type) {
	case TB_TUNNEL_DP:
		/*
		 * In case of DP tunnel make sure the DP IN resource is
		 * deallocated properly.
		 */
		tb_switch_dealloc_dp_resource(src_port->sw, src_port);
		/* Now we can allow the domain to runtime suspend again */
		pm_runtime_mark_last_busy(&dst_port->sw->dev);
		pm_runtime_put_autosuspend(&dst_port->sw->dev);
		pm_runtime_mark_last_busy(&src_port->sw->dev);
		pm_runtime_put_autosuspend(&src_port->sw->dev);
		fallthrough;

	case TB_TUNNEL_USB3:
		tb_reclaim_usb3_bandwidth(tb, src_port, dst_port);
		break;

	default:
		/*
		 * PCIe and DMA tunnels do not consume guaranteed
		 * bandwidth.
		 */
		break;
	}

	tb_tunnel_free(tunnel);
}
/**
 * tb_free_invalid_tunnels() - destroy tunnels of devices that have gone away
 */
static void tb_free_invalid_tunnels(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;
	struct tb_tunnel *n;

	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
		if (tb_tunnel_is_invalid(tunnel))
			tb_deactivate_and_free_tunnel(tunnel);
	}
}
/**
 * tb_free_unplugged_children() - traverse hierarchy and free unplugged switches
 */
static void tb_free_unplugged_children(struct tb_switch *sw)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_has_remote(port))
			continue;

		if (port->remote->sw->is_unplugged) {
			tb_retimer_remove_all(port);
			tb_remove_dp_resources(port->remote->sw);
			tb_switch_unconfigure_link(port->remote->sw);
			tb_switch_lane_bonding_disable(port->remote->sw);
			tb_switch_remove(port->remote->sw);
			port->remote = NULL;
			if (port->dual_link_port)
				port->dual_link_port->remote = NULL;
		} else {
			tb_free_unplugged_children(port->remote->sw);
		}
	}
}
static struct tb_port *tb_find_pcie_down(struct tb_switch *sw,
					 const struct tb_port *port)
{
	struct tb_port *down = NULL;

	/*
	 * To keep plugging devices consistently in the same PCIe
	 * hierarchy, do mapping here for switch downstream PCIe ports.
	 */
	if (tb_switch_is_usb4(sw)) {
		down = usb4_switch_map_pcie_down(sw, port);
	} else if (!tb_route(sw)) {
		int phy_port = tb_phy_port_from_link(port->port);
		int index;

		/*
		 * Hard-coded Thunderbolt port to PCIe down port mapping
		 * per controller.
		 */
		if (tb_switch_is_cactus_ridge(sw) ||
		    tb_switch_is_alpine_ridge(sw))
			index = !phy_port ? 6 : 7;
		else if (tb_switch_is_falcon_ridge(sw))
			index = !phy_port ? 6 : 8;
		else if (tb_switch_is_titan_ridge(sw))
			index = !phy_port ? 8 : 9;
		else
			goto out;

		/* Validate the hard-coding */
		if (WARN_ON(index > sw->config.max_port_number))
			goto out;

		down = &sw->ports[index];
	}

	if (down) {
		if (WARN_ON(!tb_port_is_pcie_down(down)))
			goto out;
		if (tb_pci_port_is_enabled(down))
			goto out;

		return down;
	}

out:
	return tb_find_unused_port(sw, TB_TYPE_PCIE_DOWN);
}
static struct tb_port *tb_find_dp_out(struct tb *tb, struct tb_port *in)
{
	struct tb_port *host_port, *port;
	struct tb_cm *tcm = tb_priv(tb);

	host_port = tb_route(in->sw) ?
		tb_port_at(tb_route(in->sw), tb->root_switch) : NULL;

	list_for_each_entry(port, &tcm->dp_resources, list) {
		if (!tb_port_is_dpout(port))
			continue;

		if (tb_port_is_enabled(port)) {
			tb_port_dbg(port, "in use\n");
			continue;
		}

		tb_port_dbg(port, "DP OUT available\n");

		/*
		 * Keep the DP tunnel under the topology starting from
		 * the same host router downstream port.
		 */
		if (host_port && tb_route(port->sw)) {
			struct tb_port *p;

			p = tb_port_at(tb_route(port->sw), tb->root_switch);
			if (p != host_port)
				continue;
		}

		return port;
	}

	return NULL;
}
static void tb_tunnel_dp(struct tb *tb)
{
	int available_up, available_down, ret;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *port, *in, *out;
	struct tb_tunnel *tunnel;

	if (!tb_acpi_may_tunnel_dp()) {
		tb_dbg(tb, "DP tunneling disabled, not creating tunnel\n");
		return;
	}

	/*
	 * Find pair of inactive DP IN and DP OUT adapters and then
	 * establish a DP tunnel between them.
	 */
	tb_dbg(tb, "looking for DP IN <-> DP OUT pairs:\n");

	in = NULL;
	out = NULL;
	list_for_each_entry(port, &tcm->dp_resources, list) {
		if (!tb_port_is_dpin(port))
			continue;

		if (tb_port_is_enabled(port)) {
			tb_port_dbg(port, "in use\n");
			continue;
		}

		tb_port_dbg(port, "DP IN available\n");

		out = tb_find_dp_out(tb, port);
		if (out) {
			in = port;
			break;
		}
	}

	if (!in) {
		tb_dbg(tb, "no suitable DP IN adapter available, not tunneling\n");
		return;
	}
	if (!out) {
		tb_dbg(tb, "no suitable DP OUT adapter available, not tunneling\n");
		return;
	}

	/*
	 * DP stream needs the domain to be active so runtime resume
	 * both ends of the tunnel.
	 *
	 * This should bring the routers in the middle active as well
	 * and keeps the domain from runtime suspending while the DP
	 * tunnel is active.
	 */
	pm_runtime_get_sync(&in->sw->dev);
	pm_runtime_get_sync(&out->sw->dev);

	if (tb_switch_alloc_dp_resource(in->sw, in)) {
		tb_port_dbg(in, "no resource available for DP IN, not tunneling\n");
		goto err_rpm_put;
	}

	/* Make all unused USB3 bandwidth available for the new DP tunnel */
	ret = tb_release_unused_usb3_bandwidth(tb, in, out);
	if (ret) {
		tb_warn(tb, "failed to release unused bandwidth\n");
		goto err_dealloc_dp;
	}

	ret = tb_available_bandwidth(tb, in, out, &available_up,
				     &available_down);
	if (ret)
		goto err_reclaim;

	tb_dbg(tb, "available bandwidth for new DP tunnel %u/%u Mb/s\n",
	       available_up, available_down);

	tunnel = tb_tunnel_alloc_dp(tb, in, out, available_up, available_down);
	if (!tunnel) {
		tb_port_dbg(out, "could not allocate DP tunnel\n");
		goto err_reclaim;
	}

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(out, "DP tunnel activation failed, aborting\n");
		goto err_free;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	tb_reclaim_usb3_bandwidth(tb, in, out);
	return;

err_free:
	tb_tunnel_free(tunnel);
err_reclaim:
	tb_reclaim_usb3_bandwidth(tb, in, out);
err_dealloc_dp:
	tb_switch_dealloc_dp_resource(in->sw, in);
err_rpm_put:
	pm_runtime_mark_last_busy(&out->sw->dev);
	pm_runtime_put_autosuspend(&out->sw->dev);
	pm_runtime_mark_last_busy(&in->sw->dev);
	pm_runtime_put_autosuspend(&in->sw->dev);
}
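/*
 * The error unwind above releases resources in the reverse order they
 * were acquired: the tunnel allocation, the released USB3 bandwidth,
 * the DP IN resource, and finally the runtime PM references on both
 * ends.
 */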
static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port)
{
	struct tb_port *in, *out;
	struct tb_tunnel *tunnel;

	if (tb_port_is_dpin(port)) {
		tb_port_dbg(port, "DP IN resource unavailable\n");
		in = port;
		out = NULL;
	} else {
		tb_port_dbg(port, "DP OUT resource unavailable\n");
		in = NULL;
		out = port;
	}

	tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, out);
	tb_deactivate_and_free_tunnel(tunnel);
	list_del_init(&port->list);

	/*
	 * See if there is another DP OUT port that can be used to
	 * create another tunnel.
	 */
	tb_tunnel_dp(tb);
}
static void tb_dp_resource_available(struct tb *tb, struct tb_port *port)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *p;

	if (tb_port_is_enabled(port))
		return;

	list_for_each_entry(p, &tcm->dp_resources, list) {
		if (p == port)
			return;
	}

	tb_port_dbg(port, "DP %s resource available\n",
		    tb_port_is_dpin(port) ? "IN" : "OUT");
	list_add_tail(&port->list, &tcm->dp_resources);

	/* Look for suitable DP IN <-> DP OUT pairs now */
	tb_tunnel_dp(tb);
}
static void tb_disconnect_and_release_dp(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel, *n;

	/*
	 * Tear down all DP tunnels and release their resources. They
	 * will be re-established after resume based on plug events.
	 */
	list_for_each_entry_safe_reverse(tunnel, n, &tcm->tunnel_list, list) {
		if (tb_tunnel_is_dp(tunnel))
			tb_deactivate_and_free_tunnel(tunnel);
	}

	while (!list_empty(&tcm->dp_resources)) {
		struct tb_port *port;

		port = list_first_entry(&tcm->dp_resources,
					struct tb_port, list);
		list_del_init(&port->list);
	}
}
static int tb_disconnect_pci(struct tb *tb, struct tb_switch *sw)
{
	struct tb_tunnel *tunnel;
	struct tb_port *up;

	up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
	if (WARN_ON(!up))
		return -ENODEV;

	tunnel = tb_find_tunnel(tb, TB_TUNNEL_PCI, NULL, up);
	if (WARN_ON(!tunnel))
		return -ENODEV;

	tb_tunnel_deactivate(tunnel);
	list_del(&tunnel->list);
	tb_tunnel_free(tunnel);
	return 0;
}
static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
{
	struct tb_port *up, *down, *port;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_switch *parent_sw;
	struct tb_tunnel *tunnel;

	up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
	if (!up)
		return 0;

	/*
	 * Look up available down port. Since we are chaining it should
	 * be found right above this switch.
	 */
	parent_sw = tb_to_switch(sw->dev.parent);
	port = tb_port_at(tb_route(sw), parent_sw);
	down = tb_find_pcie_down(parent_sw, port);
	if (!down)
		return 0;

	tunnel = tb_tunnel_alloc_pci(tb, up, down);
	if (!tunnel)
		return -ENOMEM;

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(up,
			     "PCIe tunnel activation failed, aborting\n");
		tb_tunnel_free(tunnel);
		return -EIO;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	return 0;
}
static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *nhi_port, *dst_port;
	struct tb_tunnel *tunnel;
	struct tb_switch *sw;

	sw = tb_to_switch(xd->dev.parent);
	dst_port = tb_port_at(xd->route, sw);
	nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);

	mutex_lock(&tb->lock);
	tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, xd->transmit_ring,
				     xd->transmit_path, xd->receive_ring,
				     xd->receive_path);
	if (!tunnel) {
		mutex_unlock(&tb->lock);
		return -ENOMEM;
	}

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(nhi_port,
			     "DMA tunnel activation failed, aborting\n");
		tb_tunnel_free(tunnel);
		mutex_unlock(&tb->lock);
		return -EIO;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	mutex_unlock(&tb->lock);
	return 0;
}
static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
	struct tb_port *dst_port;
	struct tb_tunnel *tunnel;
	struct tb_switch *sw;

	sw = tb_to_switch(xd->dev.parent);
	dst_port = tb_port_at(xd->route, sw);

	/*
	 * It is possible that the tunnel was already torn down (in
	 * case of cable disconnect) so it is fine if we cannot find it
	 * here anymore.
	 */
	tunnel = tb_find_tunnel(tb, TB_TUNNEL_DMA, NULL, dst_port);
	tb_deactivate_and_free_tunnel(tunnel);
}
static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
	if (!xd->is_unplugged) {
		mutex_lock(&tb->lock);
		__tb_disconnect_xdomain_paths(tb, xd);
		mutex_unlock(&tb->lock);
	}
	return 0;
}
/* hotplug handling */

/*
 * tb_handle_hotplug() - handle hotplug event
 *
 * Executes on tb->wq.
 */
static void tb_handle_hotplug(struct work_struct *work)
{
	struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
	struct tb *tb = ev->tb;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_switch *sw;
	struct tb_port *port;

	/* Bring the domain back from sleep if it was suspended */
	pm_runtime_get_sync(&tb->dev);

	mutex_lock(&tb->lock);
	if (!tcm->hotplug_active)
		goto out; /* during init, suspend or shutdown */

	sw = tb_switch_find_by_route(tb, ev->route);
	if (!sw) {
		tb_warn(tb,
			"hotplug event from non existent switch %llx:%x (unplug: %d)\n",
			ev->route, ev->port, ev->unplug);
		goto out;
	}
	if (ev->port > sw->config.max_port_number) {
		tb_warn(tb,
			"hotplug event from non existent port %llx:%x (unplug: %d)\n",
			ev->route, ev->port, ev->unplug);
		goto put_sw;
	}
	port = &sw->ports[ev->port];
	if (tb_is_upstream_port(port)) {
		tb_dbg(tb, "hotplug event for upstream port %llx:%x (unplug: %d)\n",
		       ev->route, ev->port, ev->unplug);
		goto put_sw;
	}

	pm_runtime_get_sync(&sw->dev);

	if (ev->unplug) {
		tb_retimer_remove_all(port);

		if (tb_port_has_remote(port)) {
			tb_port_dbg(port, "switch unplugged\n");
			tb_sw_set_unplugged(port->remote->sw);
			tb_free_invalid_tunnels(tb);
			tb_remove_dp_resources(port->remote->sw);
			tb_switch_tmu_disable(port->remote->sw);
			tb_switch_unconfigure_link(port->remote->sw);
			tb_switch_lane_bonding_disable(port->remote->sw);
			tb_switch_remove(port->remote->sw);
			port->remote = NULL;
			if (port->dual_link_port)
				port->dual_link_port->remote = NULL;
			/* Maybe we can create another DP tunnel */
			tb_tunnel_dp(tb);
		} else if (port->xdomain) {
			struct tb_xdomain *xd = tb_xdomain_get(port->xdomain);

			tb_port_dbg(port, "xdomain unplugged\n");
			/*
			 * Service drivers are unbound during
			 * tb_xdomain_remove() so setting XDomain as
			 * unplugged here prevents deadlock if they call
			 * tb_xdomain_disable_paths(). We will tear down
			 * all the tunnels below.
			 */
			xd->is_unplugged = true;
			tb_xdomain_remove(xd);
			port->xdomain = NULL;
			__tb_disconnect_xdomain_paths(tb, xd);
			tb_xdomain_put(xd);
			tb_port_unconfigure_xdomain(port);
		} else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
			tb_dp_resource_unavailable(tb, port);
		} else {
			tb_port_dbg(port,
				    "got unplug event for disconnected port, ignoring\n");
		}
	} else if (port->remote) {
		tb_port_dbg(port, "got plug event for connected port, ignoring\n");
	} else {
		if (tb_port_is_null(port)) {
			tb_port_dbg(port, "hotplug: scanning\n");
			tb_scan_port(port);
			if (!port->remote)
				tb_port_dbg(port, "hotplug: no switch found\n");
		} else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
			tb_dp_resource_available(tb, port);
		}
	}

	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);

put_sw:
	tb_switch_put(sw);
out:
	mutex_unlock(&tb->lock);

	pm_runtime_mark_last_busy(&tb->dev);
	pm_runtime_put_autosuspend(&tb->dev);

	kfree(ev);
}
/*
 * tb_handle_event() - callback function for the control channel
 *
 * Delegates to tb_handle_hotplug.
 */
static void tb_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
			    const void *buf, size_t size)
{
	const struct cfg_event_pkg *pkg = buf;
	u64 route;

	if (type != TB_CFG_PKG_EVENT) {
		tb_warn(tb, "unexpected event %#x, ignoring\n", type);
		return;
	}

	route = tb_cfg_get_route(&pkg->header);

	if (tb_cfg_ack_plug(tb->ctl, route, pkg->port, pkg->unplug)) {
		tb_warn(tb, "could not ack plug event on %llx:%x\n", route,
			pkg->port);
	}

	tb_queue_hotplug(tb, route, pkg->port, pkg->unplug);
}
static void tb_stop(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;
	struct tb_tunnel *n;

	cancel_delayed_work(&tcm->remove_work);
	/* tunnels are only present after everything has been initialized */
	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
		/*
		 * DMA tunnels require the driver to be functional so we
		 * tear them down. Other protocol tunnels can be left
		 * intact.
		 */
		if (tb_tunnel_is_dma(tunnel))
			tb_tunnel_deactivate(tunnel);
		tb_tunnel_free(tunnel);
	}
	tb_switch_remove(tb->root_switch);
	tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
}
static int tb_scan_finalize_switch(struct device *dev, void *data)
{
	if (tb_is_switch(dev)) {
		struct tb_switch *sw = tb_to_switch(dev);

		/*
		 * If we found that the switch was already setup by the
		 * boot firmware, mark it as authorized now before we
		 * send uevent to userspace.
		 */
		if (sw->boot)
			sw->authorized = 1;

		dev_set_uevent_suppress(dev, false);
		kobject_uevent(&dev->kobj, KOBJ_ADD);
		device_for_each_child(dev, NULL, tb_scan_finalize_switch);
	}

	return 0;
}
static int tb_start(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	int ret;

	tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
	if (IS_ERR(tb->root_switch))
		return PTR_ERR(tb->root_switch);

	/*
	 * ICM firmware upgrade needs running firmware and in native
	 * mode that is not available so disable firmware upgrade of the
	 * root switch.
	 */
	tb->root_switch->no_nvm_upgrade = true;
	/* All USB4 routers support runtime PM */
	tb->root_switch->rpm = tb_switch_is_usb4(tb->root_switch);

	ret = tb_switch_configure(tb->root_switch);
	if (ret) {
		tb_switch_put(tb->root_switch);
		return ret;
	}

	/* Announce the switch to the world */
	ret = tb_switch_add(tb->root_switch);
	if (ret) {
		tb_switch_put(tb->root_switch);
		return ret;
	}

	/* Enable TMU if it is off */
	tb_switch_tmu_enable(tb->root_switch);
	/* Full scan to discover devices added before the driver was loaded. */
	tb_scan_switch(tb->root_switch);
	/* Find out tunnels created by the boot firmware */
	tb_discover_tunnels(tb->root_switch);
	/*
	 * If the boot firmware did not create USB 3.x tunnels create them
	 * now for the whole topology.
	 */
	tb_create_usb3_tunnels(tb->root_switch);
	/* Add DP IN resources for the root switch */
	tb_add_dp_resources(tb->root_switch);
	/* Make the discovered switches available to the userspace */
	device_for_each_child(&tb->root_switch->dev, NULL,
			      tb_scan_finalize_switch);

	/* Allow tb_handle_hotplug to progress events */
	tcm->hotplug_active = true;
	return 0;
}
static int tb_suspend_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);

	tb_dbg(tb, "suspending...\n");
	tb_disconnect_and_release_dp(tb);
	tb_switch_suspend(tb->root_switch, false);
	tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
	tb_dbg(tb, "suspend finished\n");

	return 0;
}
static void tb_restore_children(struct tb_switch *sw)
{
	struct tb_port *port;

	/* No need to restore if the router is already unplugged */
	if (sw->is_unplugged)
		return;

	if (tb_enable_tmu(sw))
		tb_sw_warn(sw, "failed to restore TMU configuration\n");

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_has_remote(port) && !port->xdomain)
			continue;

		if (port->remote) {
			tb_switch_lane_bonding_enable(port->remote->sw);
			tb_switch_configure_link(port->remote->sw);

			tb_restore_children(port->remote->sw);
		} else if (port->xdomain) {
			tb_port_configure_xdomain(port);
		}
	}
}
static int tb_resume_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel, *n;

	tb_dbg(tb, "resuming...\n");

	/* remove any pci devices the firmware might have setup */
	tb_switch_reset(tb->root_switch);

	tb_switch_resume(tb->root_switch);
	tb_free_invalid_tunnels(tb);
	tb_free_unplugged_children(tb->root_switch);
	tb_restore_children(tb->root_switch);
	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
		tb_tunnel_restart(tunnel);
	if (!list_empty(&tcm->tunnel_list)) {
		/*
		 * the pcie links need some time to get going.
		 * 100ms works for me...
		 */
		tb_dbg(tb, "tunnels restarted, sleeping for 100ms\n");
		msleep(100);
	}
	/* Allow tb_handle_hotplug to progress events */
	tcm->hotplug_active = true;
	tb_dbg(tb, "resume finished\n");

	return 0;
}
static int tb_free_unplugged_xdomains(struct tb_switch *sw)
{
	struct tb_port *port;
	int ret = 0;

	tb_switch_for_each_port(sw, port) {
		if (tb_is_upstream_port(port))
			continue;
		if (port->xdomain && port->xdomain->is_unplugged) {
			tb_retimer_remove_all(port);
			tb_xdomain_remove(port->xdomain);
			tb_port_unconfigure_xdomain(port);
			port->xdomain = NULL;
			ret++;
		} else if (port->remote) {
			ret += tb_free_unplugged_xdomains(port->remote->sw);
		}
	}

	return ret;
}
static int tb_freeze_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);

	tcm->hotplug_active = false;
	return 0;
}

static int tb_thaw_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);

	tcm->hotplug_active = true;
	return 0;
}
*tb
)
1485 * Release any unplugged XDomains and if there is a case where
1486 * another domain is swapped in place of unplugged XDomain we
1487 * need to run another rescan.
1489 mutex_lock(&tb
->lock
);
1490 if (tb_free_unplugged_xdomains(tb
->root_switch
))
1491 tb_scan_switch(tb
->root_switch
);
1492 mutex_unlock(&tb
->lock
);
static int tb_runtime_suspend(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);

	mutex_lock(&tb->lock);
	tb_switch_suspend(tb->root_switch, true);
	tcm->hotplug_active = false;
	mutex_unlock(&tb->lock);

	return 0;
}
static void tb_remove_work(struct work_struct *work)
{
	struct tb_cm *tcm = container_of(work, struct tb_cm, remove_work.work);
	struct tb *tb = tcm_to_tb(tcm);

	mutex_lock(&tb->lock);
	if (tb->root_switch) {
		tb_free_unplugged_children(tb->root_switch);
		tb_free_unplugged_xdomains(tb->root_switch);
	}
	mutex_unlock(&tb->lock);
}
static int tb_runtime_resume(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel, *n;

	mutex_lock(&tb->lock);
	tb_switch_resume(tb->root_switch);
	tb_free_invalid_tunnels(tb);
	tb_restore_children(tb->root_switch);
	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
		tb_tunnel_restart(tunnel);
	tcm->hotplug_active = true;
	mutex_unlock(&tb->lock);

	/*
	 * Schedule cleanup of any unplugged devices. Run this in a
	 * separate thread to avoid possible deadlock if the device
	 * removal runtime resumes the unplugged device.
	 */
	queue_delayed_work(tb->wq, &tcm->remove_work, msecs_to_jiffies(50));
	return 0;
}
static const struct tb_cm_ops tb_cm_ops = {
	.start = tb_start,
	.stop = tb_stop,
	.suspend_noirq = tb_suspend_noirq,
	.resume_noirq = tb_resume_noirq,
	.freeze_noirq = tb_freeze_noirq,
	.thaw_noirq = tb_thaw_noirq,
	.complete = tb_complete,
	.runtime_suspend = tb_runtime_suspend,
	.runtime_resume = tb_runtime_resume,
	.handle_event = tb_handle_event,
	.disapprove_switch = tb_disconnect_pci,
	.approve_switch = tb_tunnel_pci,
	.approve_xdomain_paths = tb_approve_xdomain_paths,
	.disconnect_xdomain_paths = tb_disconnect_xdomain_paths,
};
struct tb *tb_probe(struct tb_nhi *nhi)
{
	struct tb_cm *tcm;
	struct tb *tb;

	tb = tb_domain_alloc(nhi, sizeof(*tcm));
	if (!tb)
		return NULL;

	if (tb_acpi_may_tunnel_pcie())
		tb->security_level = TB_SECURITY_USER;
	else
		tb->security_level = TB_SECURITY_NOPCIE;

	tb->cm_ops = &tb_cm_ops;

	tcm = tb_priv(tb);
	INIT_LIST_HEAD(&tcm->tunnel_list);
	INIT_LIST_HEAD(&tcm->dp_resources);
	INIT_DELAYED_WORK(&tcm->remove_work, tb_remove_work);

	tb_dbg(tb, "using software connection manager\n");

	return tb;
}