/*
 * Internal Thunderbolt Connection Manager. This is a firmware running on
 * the Thunderbolt host controller performing most of the low-level
 * handling.
 *
 * Copyright (C) 2017, Intel Corporation
 * Authors: Michael Jamet <michael.jamet@intel.com>
 *          Mika Westerberg <mika.westerberg@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/platform_data/x86/apple.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

#include "ctl.h"
#include "nhi_regs.h"
#include "tb.h"
#define PCIE2CIO_CMD			0x30
#define PCIE2CIO_CMD_TIMEOUT		BIT(31)
#define PCIE2CIO_CMD_START		BIT(30)
#define PCIE2CIO_CMD_WRITE		BIT(21)
#define PCIE2CIO_CMD_CS_MASK		GENMASK(20, 19)
#define PCIE2CIO_CMD_CS_SHIFT		19
#define PCIE2CIO_CMD_PORT_MASK		GENMASK(18, 13)
#define PCIE2CIO_CMD_PORT_SHIFT		13

#define PCIE2CIO_WRDATA			0x34
#define PCIE2CIO_RDDATA			0x38

#define PHY_PORT_CS1			0x37
#define PHY_PORT_CS1_LINK_DISABLE	BIT(14)
#define PHY_PORT_CS1_LINK_STATE_MASK	GENMASK(29, 26)
#define PHY_PORT_CS1_LINK_STATE_SHIFT	26

#define ICM_TIMEOUT			5000	/* ms */
#define ICM_APPROVE_TIMEOUT		10000	/* ms */
#define ICM_MAX_LINK			4
#define ICM_MAX_DEPTH			6
/**
 * struct icm - Internal connection manager private data
 * @request_lock: Makes sure only one message is sent to ICM at a time
 * @rescan_work: Work used to rescan the surviving switches after resume
 * @upstream_port: Pointer to the PCIe upstream port this host
 *		   controller is connected to. This is only set for systems
 *		   where ICM needs to be started manually
 * @vnd_cap: Vendor defined capability where PCIe2CIO mailbox resides
 *	     (only set when @upstream_port is not %NULL)
 * @safe_mode: ICM is in safe mode
 * @max_boot_acl: Maximum number of preboot ACL entries (%0 if not supported)
 * @rpm: Does the controller support runtime PM (RTD3)
 * @is_supported: Checks if we can support ICM on this controller
 * @get_mode: Read and return the ICM firmware mode (optional)
 * @get_route: Find a route string for given switch
 * @save_devices: Ask ICM to save devices to ACL when suspending (optional)
 * @driver_ready: Send driver ready message to ICM
 * @device_connected: Handle device connected ICM message
 * @device_disconnected: Handle device disconnected ICM message
 * @xdomain_connected: Handle XDomain connected ICM message
 * @xdomain_disconnected: Handle XDomain disconnected ICM message
 */
struct icm {
	struct mutex request_lock;
	struct delayed_work rescan_work;
	struct pci_dev *upstream_port;
	size_t max_boot_acl;
	int vnd_cap;
	bool safe_mode;
	bool rpm;
	bool (*is_supported)(struct tb *tb);
	int (*get_mode)(struct tb *tb);
	int (*get_route)(struct tb *tb, u8 link, u8 depth, u64 *route);
	void (*save_devices)(struct tb *tb);
	int (*driver_ready)(struct tb *tb,
			    enum tb_security_level *security_level,
			    size_t *nboot_acl, bool *rpm);
	void (*device_connected)(struct tb *tb,
				 const struct icm_pkg_header *hdr);
	void (*device_disconnected)(struct tb *tb,
				    const struct icm_pkg_header *hdr);
	void (*xdomain_connected)(struct tb *tb,
				  const struct icm_pkg_header *hdr);
	void (*xdomain_disconnected)(struct tb *tb,
				     const struct icm_pkg_header *hdr);
};
struct icm_notification {
	struct work_struct work;
	struct icm_pkg_header *pkg;
	struct tb *tb;
};
struct ep_name_entry {
	u8 len;
	u8 type;
	u8 data[0];
};

#define EP_NAME_INTEL_VSS	0x10

/* Intel Vendor specific structure */
#define INTEL_VSS_FLAGS_RTD3	BIT(0)
static const struct intel_vss *parse_intel_vss(const void *ep_name, size_t size)
{
	const void *end = ep_name + size;

	while (ep_name < end) {
		const struct ep_name_entry *ep = ep_name;

		if (!ep->len)
			break;
		if (ep_name + ep->len > end)
			break;

		if (ep->type == EP_NAME_INTEL_VSS)
			return (const struct intel_vss *)ep->data;

		ep_name += ep->len;
	}

	return NULL;
}
static inline struct tb *icm_to_tb(struct icm *icm)
{
	return ((void *)icm - sizeof(struct tb));
}
static inline u8 phy_port_from_route(u64 route, u8 depth)
{
	u8 link;

	link = depth ? route >> ((depth - 1) * 8) : route;
	return tb_phy_port_from_link(link);
}
static inline u8 dual_link_from_link(u8 link)
{
	return link ? ((link - 1) ^ 0x01) + 1 : 0;
}
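/*
 * Illustrative note (not used by the driver): links come in dual-link
 * pairs, so the helper above maps 1 <-> 2 and 3 <-> 4. For example
 * dual_link_from_link(1) == 2 and dual_link_from_link(4) == 3, while
 * dual_link_from_link(0) == 0 meaning "no link".
 */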
static inline u64 get_route(u32 route_hi, u32 route_lo)
{
	return (u64)route_hi << 32 | route_lo;
}
static inline u64 get_parent_route(u64 route)
{
	int depth = tb_route_length(route);
	return depth ? route & ~(0xffULL << (depth - 1) * TB_ROUTE_SHIFT) : 0;
}
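/*
 * Illustrative sketch only (assumes TB_ROUTE_SHIFT == 8, matching the
 * hardcoded shift in phy_port_from_route() above): a route string packs
 * one downstream port number per hop, eight bits per hop, with the
 * deepest hop in the highest used byte. For a device reached through
 * port 3 of the root switch and then port 5 of the next switch:
 *
 *	u64 route = 0x0503;
 *	get_parent_route(route);	// == 0x3, route of the parent switch
 *	phy_port_from_route(route, 2);	// phy port behind port 5 at depth 2
 */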
static bool icm_match(const struct tb_cfg_request *req,
		      const struct ctl_pkg *pkg)
{
	const struct icm_pkg_header *res_hdr = pkg->buffer;
	const struct icm_pkg_header *req_hdr = req->request;

	if (pkg->frame.eof != req->response_type)
		return false;
	if (res_hdr->code != req_hdr->code)
		return false;

	return true;
}
static bool icm_copy(struct tb_cfg_request *req, const struct ctl_pkg *pkg)
{
	const struct icm_pkg_header *hdr = pkg->buffer;

	if (hdr->packet_id < req->npackets) {
		size_t offset = hdr->packet_id * req->response_size;

		memcpy(req->response + offset, pkg->buffer, req->response_size);
	}

	return hdr->packet_id == hdr->total_packets - 1;
}
static int icm_request(struct tb *tb, const void *request, size_t request_size,
		       void *response, size_t response_size, size_t npackets,
		       unsigned int timeout_msec)
{
	struct icm *icm = tb_priv(tb);
	int retries = 3;

	do {
		struct tb_cfg_request *req;
		struct tb_cfg_result res;

		req = tb_cfg_request_alloc();
		if (!req)
			return -ENOMEM;

		req->match = icm_match;
		req->copy = icm_copy;
		req->request = request;
		req->request_size = request_size;
		req->request_type = TB_CFG_PKG_ICM_CMD;
		req->response = response;
		req->npackets = npackets;
		req->response_size = response_size;
		req->response_type = TB_CFG_PKG_ICM_RESP;

		mutex_lock(&icm->request_lock);
		res = tb_cfg_request_sync(tb->ctl, req, timeout_msec);
		mutex_unlock(&icm->request_lock);

		tb_cfg_request_put(req);

		if (res.err != -ETIMEDOUT)
			return res.err == 1 ? -EIO : res.err;

		usleep_range(20, 50);
	} while (retries--);

	return -ETIMEDOUT;
}
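/*
 * Illustrative only: every firmware command below follows the same
 * pattern on top of icm_request() -- build the request packet, zero the
 * reply, send it with a suitable timeout and then inspect the reply
 * header flags. A minimal sketch (not part of the driver) mirroring
 * icm_fr_approve_switch():
 *
 *	struct icm_fr_pkg_approve_device reply, request = { };
 *	int ret;
 *
 *	request.hdr.code = ICM_APPROVE_DEVICE;
 *	memset(&reply, 0, sizeof(reply));
 *	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
 *			  1, ICM_APPROVE_TIMEOUT);
 *	if (!ret && (reply.hdr.flags & ICM_FLAGS_ERROR))
 *		ret = -EIO;
 */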
static bool icm_fr_is_supported(struct tb *tb)
{
	return !x86_apple_machine;
}
static inline int icm_fr_get_switch_index(u32 port)
{
	int index;

	if ((port & ICM_PORT_TYPE_MASK) != TB_TYPE_PORT)
		return 0;

	index = port >> ICM_PORT_INDEX_SHIFT;
	return index != 0xff ? index : 0;
}
static int icm_fr_get_route(struct tb *tb, u8 link, u8 depth, u64 *route)
{
	struct icm_fr_pkg_get_topology_response *switches, *sw;
	struct icm_fr_pkg_get_topology request = {
		.hdr = { .code = ICM_GET_TOPOLOGY },
	};
	size_t npackets = ICM_GET_TOPOLOGY_PACKETS;
	int ret, index;
	u8 i;

	switches = kcalloc(npackets, sizeof(*switches), GFP_KERNEL);
	if (!switches)
		return -ENOMEM;

	ret = icm_request(tb, &request, sizeof(request), switches,
			  sizeof(*switches), npackets, ICM_TIMEOUT);
	if (ret)
		goto err_free;

	sw = &switches[0];

	index = icm_fr_get_switch_index(sw->ports[link]);
	if (!index) {
		ret = -ENODEV;
		goto err_free;
	}

	sw = &switches[index];
	for (i = 1; i < depth; i++) {
		unsigned int j;

		if (!(sw->first_data & ICM_SWITCH_USED)) {
			ret = -ENODEV;
			goto err_free;
		}

		for (j = 0; j < ARRAY_SIZE(sw->ports); j++) {
			index = icm_fr_get_switch_index(sw->ports[j]);
			if (index > sw->switch_index) {
				sw = &switches[index];
				break;
			}
		}
	}

	*route = get_route(sw->route_hi, sw->route_lo);

err_free:
	kfree(switches);
	return ret;
}
static void icm_fr_save_devices(struct tb *tb)
{
	nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_SAVE_DEVS, 0);
}
static int
icm_fr_driver_ready(struct tb *tb, enum tb_security_level *security_level,
		    size_t *nboot_acl, bool *rpm)
{
	struct icm_fr_pkg_driver_ready_response reply;
	struct icm_pkg_driver_ready request = {
		.hdr.code = ICM_DRIVER_READY,
	};
	int ret;

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_TIMEOUT);
	if (ret)
		return ret;

	if (security_level)
		*security_level = reply.security_level & ICM_FR_SLEVEL_MASK;

	return 0;
}
static int icm_fr_approve_switch(struct tb *tb, struct tb_switch *sw)
{
	struct icm_fr_pkg_approve_device request;
	struct icm_fr_pkg_approve_device reply;
	int ret;

	memset(&request, 0, sizeof(request));
	memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
	request.hdr.code = ICM_APPROVE_DEVICE;
	request.connection_id = sw->connection_id;
	request.connection_key = sw->connection_key;

	memset(&reply, 0, sizeof(reply));
	/* Use larger timeout as establishing tunnels can take some time */
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_APPROVE_TIMEOUT);
	if (ret)
		return ret;

	if (reply.hdr.flags & ICM_FLAGS_ERROR) {
		tb_warn(tb, "PCIe tunnel creation failed\n");
		return -EIO;
	}

	return 0;
}
static int icm_fr_add_switch_key(struct tb *tb, struct tb_switch *sw)
{
	struct icm_fr_pkg_add_device_key request;
	struct icm_fr_pkg_add_device_key_response reply;
	int ret;

	memset(&request, 0, sizeof(request));
	memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
	request.hdr.code = ICM_ADD_DEVICE_KEY;
	request.connection_id = sw->connection_id;
	request.connection_key = sw->connection_key;
	memcpy(request.key, sw->key, TB_SWITCH_KEY_SIZE);

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_TIMEOUT);
	if (ret)
		return ret;

	if (reply.hdr.flags & ICM_FLAGS_ERROR) {
		tb_warn(tb, "Adding key to switch failed\n");
		return -EIO;
	}

	return 0;
}
static int icm_fr_challenge_switch_key(struct tb *tb, struct tb_switch *sw,
				       const u8 *challenge, u8 *response)
{
	struct icm_fr_pkg_challenge_device request;
	struct icm_fr_pkg_challenge_device_response reply;
	int ret;

	memset(&request, 0, sizeof(request));
	memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
	request.hdr.code = ICM_CHALLENGE_DEVICE;
	request.connection_id = sw->connection_id;
	request.connection_key = sw->connection_key;
	memcpy(request.challenge, challenge, TB_SWITCH_KEY_SIZE);

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_TIMEOUT);
	if (ret)
		return ret;

	if (reply.hdr.flags & ICM_FLAGS_ERROR)
		return -EKEYREJECTED;
	if (reply.hdr.flags & ICM_FLAGS_NO_KEY)
		return -ENOKEY;

	memcpy(response, reply.response, TB_SWITCH_KEY_SIZE);

	return 0;
}
static int icm_fr_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
	struct icm_fr_pkg_approve_xdomain_response reply;
	struct icm_fr_pkg_approve_xdomain request;
	int ret;

	memset(&request, 0, sizeof(request));
	request.hdr.code = ICM_APPROVE_XDOMAIN;
	request.link_info = xd->depth << ICM_LINK_INFO_DEPTH_SHIFT | xd->link;
	memcpy(&request.remote_uuid, xd->remote_uuid, sizeof(*xd->remote_uuid));

	request.transmit_path = xd->transmit_path;
	request.transmit_ring = xd->transmit_ring;
	request.receive_path = xd->receive_path;
	request.receive_ring = xd->receive_ring;

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_TIMEOUT);
	if (ret)
		return ret;

	if (reply.hdr.flags & ICM_FLAGS_ERROR)
		return -EIO;

	return 0;
}
static int icm_fr_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
	u8 phy_port;
	u8 cmd;

	phy_port = tb_phy_port_from_link(xd->link);
	if (phy_port == 0)
		cmd = NHI_MAILBOX_DISCONNECT_PA;
	else
		cmd = NHI_MAILBOX_DISCONNECT_PB;

	nhi_mailbox_cmd(tb->nhi, cmd, 1);
	usleep_range(10, 50);
	nhi_mailbox_cmd(tb->nhi, cmd, 2);
	return 0;
}
static void add_switch(struct tb_switch *parent_sw, u64 route,
		       const uuid_t *uuid, const u8 *ep_name,
		       size_t ep_name_size, u8 connection_id, u8 connection_key,
		       u8 link, u8 depth, enum tb_security_level security_level,
		       bool authorized, bool boot)
{
	const struct intel_vss *vss;
	struct tb_switch *sw;

	pm_runtime_get_sync(&parent_sw->dev);

	sw = tb_switch_alloc(parent_sw->tb, &parent_sw->dev, route);
	if (!sw)
		goto out;

	sw->uuid = kmemdup(uuid, sizeof(*uuid), GFP_KERNEL);
	sw->connection_id = connection_id;
	sw->connection_key = connection_key;
	sw->link = link;
	sw->depth = depth;
	sw->authorized = authorized;
	sw->security_level = security_level;
	sw->boot = boot;

	vss = parse_intel_vss(ep_name, ep_name_size);
	if (vss)
		sw->rpm = !!(vss->flags & INTEL_VSS_FLAGS_RTD3);

	/* Link the two switches now */
	tb_port_at(route, parent_sw)->remote = tb_upstream_port(sw);
	tb_upstream_port(sw)->remote = tb_port_at(route, parent_sw);

	if (tb_switch_add(sw)) {
		tb_port_at(tb_route(sw), parent_sw)->remote = NULL;
		tb_switch_put(sw);
	}

out:
	pm_runtime_mark_last_busy(&parent_sw->dev);
	pm_runtime_put_autosuspend(&parent_sw->dev);
}
static void update_switch(struct tb_switch *parent_sw, struct tb_switch *sw,
			  u64 route, u8 connection_id, u8 connection_key,
			  u8 link, u8 depth, bool boot)
{
	/* Disconnect from parent */
	tb_port_at(tb_route(sw), parent_sw)->remote = NULL;
	/* Re-connect via updated port */
	tb_port_at(route, parent_sw)->remote = tb_upstream_port(sw);

	/* Update with the new addressing information */
	sw->config.route_hi = upper_32_bits(route);
	sw->config.route_lo = lower_32_bits(route);
	sw->connection_id = connection_id;
	sw->connection_key = connection_key;
	sw->link = link;
	sw->depth = depth;
	sw->boot = boot;

	/* This switch still exists */
	sw->is_unplugged = false;
}
static void remove_switch(struct tb_switch *sw)
{
	struct tb_switch *parent_sw;

	parent_sw = tb_to_switch(sw->dev.parent);
	tb_port_at(tb_route(sw), parent_sw)->remote = NULL;
	tb_switch_remove(sw);
}
static void add_xdomain(struct tb_switch *sw, u64 route,
			const uuid_t *local_uuid, const uuid_t *remote_uuid,
			u8 link, u8 depth)
{
	struct tb_xdomain *xd;

	pm_runtime_get_sync(&sw->dev);

	xd = tb_xdomain_alloc(sw->tb, &sw->dev, route, local_uuid, remote_uuid);
	if (!xd)
		goto out;

	xd->link = link;
	xd->depth = depth;

	tb_port_at(route, sw)->xdomain = xd;

	tb_xdomain_add(xd);

out:
	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);
}
static void update_xdomain(struct tb_xdomain *xd, u64 route, u8 link)
{
	xd->link = link;
	xd->route = route;
	xd->is_unplugged = false;
}
static void remove_xdomain(struct tb_xdomain *xd)
{
	struct tb_switch *sw;

	sw = tb_to_switch(xd->dev.parent);
	tb_port_at(xd->route, sw)->xdomain = NULL;
	tb_xdomain_remove(xd);
}
static void
icm_fr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
{
	const struct icm_fr_event_device_connected *pkg =
		(const struct icm_fr_event_device_connected *)hdr;
	enum tb_security_level security_level;
	struct tb_switch *sw, *parent_sw;
	struct icm *icm = tb_priv(tb);
	bool authorized = false;
	struct tb_xdomain *xd;
	u8 link, depth;
	bool boot;
	u64 route;
	int ret;

	link = pkg->link_info & ICM_LINK_INFO_LINK_MASK;
	depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >>
		ICM_LINK_INFO_DEPTH_SHIFT;
	authorized = pkg->link_info & ICM_LINK_INFO_APPROVED;
	security_level = (pkg->hdr.flags & ICM_FLAGS_SLEVEL_MASK) >>
			 ICM_FLAGS_SLEVEL_SHIFT;
	boot = pkg->link_info & ICM_LINK_INFO_BOOT;

	if (pkg->link_info & ICM_LINK_INFO_REJECTED) {
		tb_info(tb, "switch at %u.%u was rejected by ICM firmware because topology limit exceeded\n",
			link, depth);
		return;
	}

	sw = tb_switch_find_by_uuid(tb, &pkg->ep_uuid);
	if (sw) {
		u8 phy_port, sw_phy_port;

		parent_sw = tb_to_switch(sw->dev.parent);
		sw_phy_port = tb_phy_port_from_link(sw->link);
		phy_port = tb_phy_port_from_link(link);

		/*
		 * On resume ICM will send us connected events for the
		 * devices that still are present. However, that
		 * information might have changed for example by the
		 * fact that a switch on a dual-link connection might
		 * have been enumerated using the other link now. Make
		 * sure our bookkeeping matches that.
		 */
		if (sw->depth == depth && sw_phy_port == phy_port &&
		    !!sw->authorized == authorized) {
			/*
			 * It was enumerated through another link so update
			 * route string accordingly.
			 */
			if (sw->link != link) {
				ret = icm->get_route(tb, link, depth, &route);
				if (ret) {
					tb_err(tb, "failed to update route string for switch at %u.%u\n",
					       link, depth);
					tb_switch_put(sw);
					return;
				}
			} else {
				route = tb_route(sw);
			}

			update_switch(parent_sw, sw, route, pkg->connection_id,
				      pkg->connection_key, link, depth, boot);
			tb_switch_put(sw);
			return;
		}

		/*
		 * User connected the same switch to another physical
		 * port or to another part of the topology. Remove the
		 * existing switch now before adding the new one.
		 */
		remove_switch(sw);
		tb_switch_put(sw);
	}

	/*
	 * If the switch was not found by UUID, look for a switch on
	 * same physical port (taking possible link aggregation into
	 * account) and depth. If we found one it is definitely a stale
	 * one so remove it first.
	 */
	sw = tb_switch_find_by_link_depth(tb, link, depth);
	if (!sw) {
		u8 dual_link;

		dual_link = dual_link_from_link(link);
		if (dual_link)
			sw = tb_switch_find_by_link_depth(tb, dual_link, depth);
	}
	if (sw) {
		remove_switch(sw);
		tb_switch_put(sw);
	}

	/* Remove existing XDomain connection if found */
	xd = tb_xdomain_find_by_link_depth(tb, link, depth);
	if (xd) {
		remove_xdomain(xd);
		tb_xdomain_put(xd);
	}

	parent_sw = tb_switch_find_by_link_depth(tb, link, depth - 1);
	if (!parent_sw) {
		tb_err(tb, "failed to find parent switch for %u.%u\n",
		       link, depth);
		return;
	}

	ret = icm->get_route(tb, link, depth, &route);
	if (ret) {
		tb_err(tb, "failed to find route string for switch at %u.%u\n",
		       link, depth);
		tb_switch_put(parent_sw);
		return;
	}

	add_switch(parent_sw, route, &pkg->ep_uuid, (const u8 *)pkg->ep_name,
		   sizeof(pkg->ep_name), pkg->connection_id,
		   pkg->connection_key, link, depth, security_level,
		   authorized, boot);

	tb_switch_put(parent_sw);
}
static void
icm_fr_device_disconnected(struct tb *tb, const struct icm_pkg_header *hdr)
{
	const struct icm_fr_event_device_disconnected *pkg =
		(const struct icm_fr_event_device_disconnected *)hdr;
	struct tb_switch *sw;
	u8 link, depth;

	link = pkg->link_info & ICM_LINK_INFO_LINK_MASK;
	depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >>
		ICM_LINK_INFO_DEPTH_SHIFT;

	if (link > ICM_MAX_LINK || depth > ICM_MAX_DEPTH) {
		tb_warn(tb, "invalid topology %u.%u, ignoring\n", link, depth);
		return;
	}

	sw = tb_switch_find_by_link_depth(tb, link, depth);
	if (!sw) {
		tb_warn(tb, "no switch exists at %u.%u, ignoring\n", link,
			depth);
		return;
	}

	remove_switch(sw);
	tb_switch_put(sw);
}
static void
icm_fr_xdomain_connected(struct tb *tb, const struct icm_pkg_header *hdr)
{
	const struct icm_fr_event_xdomain_connected *pkg =
		(const struct icm_fr_event_xdomain_connected *)hdr;
	struct tb_xdomain *xd;
	struct tb_switch *sw;
	u8 link, depth;
	bool approved;
	u64 route;

	/*
	 * After NVM upgrade adding root switch device fails because we
	 * initiated reset. During that time ICM might still send
	 * XDomain connected message which we ignore here.
	 */
	if (!tb->root_switch)
		return;

	link = pkg->link_info & ICM_LINK_INFO_LINK_MASK;
	depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >>
		ICM_LINK_INFO_DEPTH_SHIFT;
	approved = pkg->link_info & ICM_LINK_INFO_APPROVED;

	if (link > ICM_MAX_LINK || depth > ICM_MAX_DEPTH) {
		tb_warn(tb, "invalid topology %u.%u, ignoring\n", link, depth);
		return;
	}

	route = get_route(pkg->local_route_hi, pkg->local_route_lo);

	xd = tb_xdomain_find_by_uuid(tb, &pkg->remote_uuid);
	if (xd) {
		u8 xd_phy_port, phy_port;

		xd_phy_port = phy_port_from_route(xd->route, xd->depth);
		phy_port = phy_port_from_route(route, depth);

		if (xd->depth == depth && xd_phy_port == phy_port) {
			update_xdomain(xd, route, link);
			tb_xdomain_put(xd);
			return;
		}

		/*
		 * If we find an existing XDomain connection remove it
		 * now. We need to go through login handshake and
		 * everything anyway to be able to re-establish the
		 * connection.
		 */
		remove_xdomain(xd);
		tb_xdomain_put(xd);
	}

	/*
	 * Look if there already exists an XDomain in the same place
	 * as the new one and in that case remove it because it is
	 * most likely another host that got disconnected.
	 */
	xd = tb_xdomain_find_by_link_depth(tb, link, depth);
	if (!xd) {
		u8 dual_link;

		dual_link = dual_link_from_link(link);
		if (dual_link)
			xd = tb_xdomain_find_by_link_depth(tb, dual_link,
							   depth);
	}
	if (xd) {
		remove_xdomain(xd);
		tb_xdomain_put(xd);
	}

	/*
	 * If the user disconnected a switch during suspend and
	 * connected another host to the same port, remove the switch
	 * first.
	 */
	sw = get_switch_at_route(tb->root_switch, route);
	if (sw)
		remove_switch(sw);

	sw = tb_switch_find_by_link_depth(tb, link, depth);
	if (!sw) {
		tb_warn(tb, "no switch exists at %u.%u, ignoring\n", link,
			depth);
		return;
	}

	add_xdomain(sw, route, &pkg->local_uuid, &pkg->remote_uuid, link,
		    depth);
	tb_switch_put(sw);
}
static void
icm_fr_xdomain_disconnected(struct tb *tb, const struct icm_pkg_header *hdr)
{
	const struct icm_fr_event_xdomain_disconnected *pkg =
		(const struct icm_fr_event_xdomain_disconnected *)hdr;
	struct tb_xdomain *xd;

	/*
	 * If the connection is through one or multiple devices, the
	 * XDomain device is removed along with them so it is fine if we
	 * cannot find it here.
	 */
	xd = tb_xdomain_find_by_uuid(tb, &pkg->remote_uuid);
	if (xd) {
		remove_xdomain(xd);
		tb_xdomain_put(xd);
	}
}
static int
icm_tr_driver_ready(struct tb *tb, enum tb_security_level *security_level,
		    size_t *nboot_acl, bool *rpm)
{
	struct icm_tr_pkg_driver_ready_response reply;
	struct icm_pkg_driver_ready request = {
		.hdr.code = ICM_DRIVER_READY,
	};
	int ret;

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_TIMEOUT);
	if (ret)
		return ret;

	if (security_level)
		*security_level = reply.info & ICM_TR_INFO_SLEVEL_MASK;
	if (nboot_acl)
		*nboot_acl = (reply.info & ICM_TR_INFO_BOOT_ACL_MASK) >>
				ICM_TR_INFO_BOOT_ACL_SHIFT;
	if (rpm)
		*rpm = !!(reply.hdr.flags & ICM_TR_FLAGS_RTD3);

	return 0;
}
static int icm_tr_approve_switch(struct tb *tb, struct tb_switch *sw)
{
	struct icm_tr_pkg_approve_device request;
	struct icm_tr_pkg_approve_device reply;
	int ret;

	memset(&request, 0, sizeof(request));
	memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
	request.hdr.code = ICM_APPROVE_DEVICE;
	request.route_lo = sw->config.route_lo;
	request.route_hi = sw->config.route_hi;
	request.connection_id = sw->connection_id;

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_APPROVE_TIMEOUT);
	if (ret)
		return ret;

	if (reply.hdr.flags & ICM_FLAGS_ERROR) {
		tb_warn(tb, "PCIe tunnel creation failed\n");
		return -EIO;
	}

	return 0;
}
static int icm_tr_add_switch_key(struct tb *tb, struct tb_switch *sw)
{
	struct icm_tr_pkg_add_device_key_response reply;
	struct icm_tr_pkg_add_device_key request;
	int ret;

	memset(&request, 0, sizeof(request));
	memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
	request.hdr.code = ICM_ADD_DEVICE_KEY;
	request.route_lo = sw->config.route_lo;
	request.route_hi = sw->config.route_hi;
	request.connection_id = sw->connection_id;
	memcpy(request.key, sw->key, TB_SWITCH_KEY_SIZE);

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_TIMEOUT);
	if (ret)
		return ret;

	if (reply.hdr.flags & ICM_FLAGS_ERROR) {
		tb_warn(tb, "Adding key to switch failed\n");
		return -EIO;
	}

	return 0;
}
static int icm_tr_challenge_switch_key(struct tb *tb, struct tb_switch *sw,
				       const u8 *challenge, u8 *response)
{
	struct icm_tr_pkg_challenge_device_response reply;
	struct icm_tr_pkg_challenge_device request;
	int ret;

	memset(&request, 0, sizeof(request));
	memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
	request.hdr.code = ICM_CHALLENGE_DEVICE;
	request.route_lo = sw->config.route_lo;
	request.route_hi = sw->config.route_hi;
	request.connection_id = sw->connection_id;
	memcpy(request.challenge, challenge, TB_SWITCH_KEY_SIZE);

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_TIMEOUT);
	if (ret)
		return ret;

	if (reply.hdr.flags & ICM_FLAGS_ERROR)
		return -EKEYREJECTED;
	if (reply.hdr.flags & ICM_FLAGS_NO_KEY)
		return -ENOKEY;

	memcpy(response, reply.response, TB_SWITCH_KEY_SIZE);

	return 0;
}
static int icm_tr_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
	struct icm_tr_pkg_approve_xdomain_response reply;
	struct icm_tr_pkg_approve_xdomain request;
	int ret;

	memset(&request, 0, sizeof(request));
	request.hdr.code = ICM_APPROVE_XDOMAIN;
	request.route_hi = upper_32_bits(xd->route);
	request.route_lo = lower_32_bits(xd->route);
	request.transmit_path = xd->transmit_path;
	request.transmit_ring = xd->transmit_ring;
	request.receive_path = xd->receive_path;
	request.receive_ring = xd->receive_ring;
	memcpy(&request.remote_uuid, xd->remote_uuid, sizeof(*xd->remote_uuid));

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_TIMEOUT);
	if (ret)
		return ret;

	if (reply.hdr.flags & ICM_FLAGS_ERROR)
		return -EIO;

	return 0;
}
static int icm_tr_xdomain_tear_down(struct tb *tb, struct tb_xdomain *xd,
				    int stage)
{
	struct icm_tr_pkg_disconnect_xdomain_response reply;
	struct icm_tr_pkg_disconnect_xdomain request;
	int ret;

	memset(&request, 0, sizeof(request));
	request.hdr.code = ICM_DISCONNECT_XDOMAIN;
	request.stage = stage;
	request.route_hi = upper_32_bits(xd->route);
	request.route_lo = lower_32_bits(xd->route);
	memcpy(&request.remote_uuid, xd->remote_uuid, sizeof(*xd->remote_uuid));

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_TIMEOUT);
	if (ret)
		return ret;

	if (reply.hdr.flags & ICM_FLAGS_ERROR)
		return -EIO;

	return 0;
}
static int icm_tr_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
	int ret;

	ret = icm_tr_xdomain_tear_down(tb, xd, 1);
	if (ret)
		return ret;

	usleep_range(10, 50);
	return icm_tr_xdomain_tear_down(tb, xd, 2);
}
static void
icm_tr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
{
	const struct icm_tr_event_device_connected *pkg =
		(const struct icm_tr_event_device_connected *)hdr;
	enum tb_security_level security_level;
	struct tb_switch *sw, *parent_sw;
	struct tb_xdomain *xd;
	bool authorized, boot;
	u64 route;

	/*
	 * Currently we don't use the QoS information coming with the
	 * device connected message so simply just ignore that extra
	 * packet for now.
	 */
	if (pkg->hdr.packet_id)
		return;

	/*
	 * After NVM upgrade adding root switch device fails because we
	 * initiated reset. During that time ICM might still send device
	 * connected message which we ignore here.
	 */
	if (!tb->root_switch)
		return;

	route = get_route(pkg->route_hi, pkg->route_lo);
	authorized = pkg->link_info & ICM_LINK_INFO_APPROVED;
	security_level = (pkg->hdr.flags & ICM_FLAGS_SLEVEL_MASK) >>
			 ICM_FLAGS_SLEVEL_SHIFT;
	boot = pkg->link_info & ICM_LINK_INFO_BOOT;

	if (pkg->link_info & ICM_LINK_INFO_REJECTED) {
		tb_info(tb, "switch at %llx was rejected by ICM firmware because topology limit exceeded\n",
			route);
		return;
	}

	sw = tb_switch_find_by_uuid(tb, &pkg->ep_uuid);
	if (sw) {
		/* Update the switch if it is still in the same place */
		if (tb_route(sw) == route && !!sw->authorized == authorized) {
			parent_sw = tb_to_switch(sw->dev.parent);
			update_switch(parent_sw, sw, route, pkg->connection_id,
				      0, 0, 0, boot);
			tb_switch_put(sw);
			return;
		}

		remove_switch(sw);
		tb_switch_put(sw);
	}

	/* Another switch with the same address */
	sw = tb_switch_find_by_route(tb, route);
	if (sw) {
		remove_switch(sw);
		tb_switch_put(sw);
	}

	/* XDomain connection with the same address */
	xd = tb_xdomain_find_by_route(tb, route);
	if (xd) {
		remove_xdomain(xd);
		tb_xdomain_put(xd);
	}

	parent_sw = tb_switch_find_by_route(tb, get_parent_route(route));
	if (!parent_sw) {
		tb_err(tb, "failed to find parent switch for %llx\n", route);
		return;
	}

	add_switch(parent_sw, route, &pkg->ep_uuid, (const u8 *)pkg->ep_name,
		   sizeof(pkg->ep_name), pkg->connection_id,
		   0, 0, 0, security_level, authorized, boot);

	tb_switch_put(parent_sw);
}
static void
icm_tr_device_disconnected(struct tb *tb, const struct icm_pkg_header *hdr)
{
	const struct icm_tr_event_device_disconnected *pkg =
		(const struct icm_tr_event_device_disconnected *)hdr;
	struct tb_switch *sw;
	u64 route;

	route = get_route(pkg->route_hi, pkg->route_lo);

	sw = tb_switch_find_by_route(tb, route);
	if (!sw) {
		tb_warn(tb, "no switch exists at %llx, ignoring\n", route);
		return;
	}

	remove_switch(sw);
	tb_switch_put(sw);
}
static void
icm_tr_xdomain_connected(struct tb *tb, const struct icm_pkg_header *hdr)
{
	const struct icm_tr_event_xdomain_connected *pkg =
		(const struct icm_tr_event_xdomain_connected *)hdr;
	struct tb_xdomain *xd;
	struct tb_switch *sw;
	u64 route;

	if (!tb->root_switch)
		return;

	route = get_route(pkg->local_route_hi, pkg->local_route_lo);

	xd = tb_xdomain_find_by_uuid(tb, &pkg->remote_uuid);
	if (xd) {
		if (xd->route == route) {
			update_xdomain(xd, route, 0);
			tb_xdomain_put(xd);
			return;
		}

		remove_xdomain(xd);
		tb_xdomain_put(xd);
	}

	/* An existing xdomain with the same address */
	xd = tb_xdomain_find_by_route(tb, route);
	if (xd) {
		remove_xdomain(xd);
		tb_xdomain_put(xd);
	}

	/*
	 * If the user disconnected a switch during suspend and
	 * connected another host to the same port, remove the switch
	 * first.
	 */
	sw = get_switch_at_route(tb->root_switch, route);
	if (sw)
		remove_switch(sw);

	sw = tb_switch_find_by_route(tb, get_parent_route(route));
	if (!sw) {
		tb_warn(tb, "no switch exists at %llx, ignoring\n", route);
		return;
	}

	add_xdomain(sw, route, &pkg->local_uuid, &pkg->remote_uuid, 0, 0);
	tb_switch_put(sw);
}
static void
icm_tr_xdomain_disconnected(struct tb *tb, const struct icm_pkg_header *hdr)
{
	const struct icm_tr_event_xdomain_disconnected *pkg =
		(const struct icm_tr_event_xdomain_disconnected *)hdr;
	struct tb_xdomain *xd;
	u64 route;

	route = get_route(pkg->route_hi, pkg->route_lo);

	xd = tb_xdomain_find_by_route(tb, route);
	if (xd) {
		remove_xdomain(xd);
		tb_xdomain_put(xd);
	}
}
static struct pci_dev *get_upstream_port(struct pci_dev *pdev)
{
	struct pci_dev *parent;

	parent = pci_upstream_bridge(pdev);
	while (parent) {
		if (!pci_is_pcie(parent))
			return NULL;
		if (pci_pcie_type(parent) == PCI_EXP_TYPE_UPSTREAM)
			break;
		parent = pci_upstream_bridge(parent);
	}

	if (!parent)
		return NULL;

	switch (parent->device) {
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE:
		return parent;
	}

	return NULL;
}
static bool icm_ar_is_supported(struct tb *tb)
{
	struct pci_dev *upstream_port;
	struct icm *icm = tb_priv(tb);

	/*
	 * Starting from Alpine Ridge we can use ICM on Apple machines
	 * as well. We just need to reset and re-enable it first.
	 */
	if (!x86_apple_machine)
		return true;

	/*
	 * Find the upstream PCIe port in case we need to do reset
	 * through its vendor specific registers.
	 */
	upstream_port = get_upstream_port(tb->nhi->pdev);
	if (upstream_port) {
		int cap;

		cap = pci_find_ext_capability(upstream_port,
					      PCI_EXT_CAP_ID_VNDR);
		if (cap > 0) {
			icm->upstream_port = upstream_port;
			icm->vnd_cap = cap;

			return true;
		}
	}

	return false;
}
static int icm_ar_get_mode(struct tb *tb)
{
	struct tb_nhi *nhi = tb->nhi;
	int retries = 60;
	u32 val;

	do {
		val = ioread32(nhi->iobase + REG_FW_STS);
		if (val & REG_FW_STS_NVM_AUTH_DONE)
			break;
		msleep(50);
	} while (--retries);

	if (!retries) {
		dev_err(&nhi->pdev->dev, "ICM firmware not authenticated\n");
		return -ENODEV;
	}

	return nhi_mailbox_mode(nhi);
}
static int
icm_ar_driver_ready(struct tb *tb, enum tb_security_level *security_level,
		    size_t *nboot_acl, bool *rpm)
{
	struct icm_ar_pkg_driver_ready_response reply;
	struct icm_pkg_driver_ready request = {
		.hdr.code = ICM_DRIVER_READY,
	};
	int ret;

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_TIMEOUT);
	if (ret)
		return ret;

	if (security_level)
		*security_level = reply.info & ICM_AR_INFO_SLEVEL_MASK;
	if (nboot_acl && (reply.info & ICM_AR_INFO_BOOT_ACL_SUPPORTED))
		*nboot_acl = (reply.info & ICM_AR_INFO_BOOT_ACL_MASK) >>
				ICM_AR_INFO_BOOT_ACL_SHIFT;
	if (rpm)
		*rpm = !!(reply.hdr.flags & ICM_AR_FLAGS_RTD3);

	return 0;
}
static int icm_ar_get_route(struct tb *tb, u8 link, u8 depth, u64 *route)
{
	struct icm_ar_pkg_get_route_response reply;
	struct icm_ar_pkg_get_route request = {
		.hdr = { .code = ICM_GET_ROUTE },
		.link_info = depth << ICM_LINK_INFO_DEPTH_SHIFT | link,
	};
	int ret;

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_TIMEOUT);
	if (ret)
		return ret;

	if (reply.hdr.flags & ICM_FLAGS_ERROR)
		return -EIO;

	*route = get_route(reply.route_hi, reply.route_lo);
	return 0;
}
static int icm_ar_get_boot_acl(struct tb *tb, uuid_t *uuids, size_t nuuids)
{
	struct icm_ar_pkg_preboot_acl_response reply;
	struct icm_ar_pkg_preboot_acl request = {
		.hdr = { .code = ICM_PREBOOT_ACL },
	};
	int ret, i;

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_TIMEOUT);
	if (ret)
		return ret;

	if (reply.hdr.flags & ICM_FLAGS_ERROR)
		return -EIO;

	for (i = 0; i < nuuids; i++) {
		u32 *uuid = (u32 *)&uuids[i];

		uuid[0] = reply.acl[i].uuid_lo;
		uuid[1] = reply.acl[i].uuid_hi;

		if (uuid[0] == 0xffffffff && uuid[1] == 0xffffffff) {
			/* Map empty entries to null UUID */
			uuid[0] = 0;
			uuid[1] = 0;
		} else if (uuid[0] != 0 || uuid[1] != 0) {
			/* Upper two DWs are always one's */
			uuid[2] = 0xffffffff;
			uuid[3] = 0xffffffff;
		}
	}

	return ret;
}
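/*
 * Illustrative note on the mapping above (not used by the driver): an
 * unused preboot ACL slot reads back as uuid_lo == uuid_hi == 0xffffffff
 * and is presented to the rest of the stack as the null UUID; a used
 * slot only carries the low two DWs of the UUID while the high two DWs
 * are defined to be all ones, which is why icm_ar_set_boot_acl() below
 * rejects any UUID whose upper DWs are not 0xffffffff.
 */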
static int icm_ar_set_boot_acl(struct tb *tb, const uuid_t *uuids,
			       size_t nuuids)
{
	struct icm_ar_pkg_preboot_acl_response reply;
	struct icm_ar_pkg_preboot_acl request = {
		.hdr = {
			.code = ICM_PREBOOT_ACL,
			.flags = ICM_FLAGS_WRITE,
		},
	};
	int ret, i;

	for (i = 0; i < nuuids; i++) {
		const u32 *uuid = (const u32 *)&uuids[i];

		if (uuid_is_null(&uuids[i])) {
			/*
			 * Map null UUID to the empty (all one) entries
			 * for ICM.
			 */
			request.acl[i].uuid_lo = 0xffffffff;
			request.acl[i].uuid_hi = 0xffffffff;
		} else {
			/* Two high DWs need to be set to all one */
			if (uuid[2] != 0xffffffff || uuid[3] != 0xffffffff)
				return -EINVAL;

			request.acl[i].uuid_lo = uuid[0];
			request.acl[i].uuid_hi = uuid[1];
		}
	}

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_TIMEOUT);
	if (ret)
		return ret;

	if (reply.hdr.flags & ICM_FLAGS_ERROR)
		return -EIO;

	return 0;
}
static void icm_handle_notification(struct work_struct *work)
{
	struct icm_notification *n = container_of(work, typeof(*n), work);
	struct tb *tb = n->tb;
	struct icm *icm = tb_priv(tb);

	mutex_lock(&tb->lock);

	switch (n->pkg->code) {
	case ICM_EVENT_DEVICE_CONNECTED:
		icm->device_connected(tb, n->pkg);
		break;
	case ICM_EVENT_DEVICE_DISCONNECTED:
		icm->device_disconnected(tb, n->pkg);
		break;
	case ICM_EVENT_XDOMAIN_CONNECTED:
		icm->xdomain_connected(tb, n->pkg);
		break;
	case ICM_EVENT_XDOMAIN_DISCONNECTED:
		icm->xdomain_disconnected(tb, n->pkg);
		break;
	}

	mutex_unlock(&tb->lock);

	kfree(n->pkg);
	kfree(n);
}
static void icm_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
			     const void *buf, size_t size)
{
	struct icm_notification *n;

	n = kmalloc(sizeof(*n), GFP_KERNEL);
	if (!n)
		return;

	INIT_WORK(&n->work, icm_handle_notification);
	n->pkg = kmemdup(buf, size, GFP_KERNEL);
	n->tb = tb;

	queue_work(tb->wq, &n->work);
}
static int
__icm_driver_ready(struct tb *tb, enum tb_security_level *security_level,
		   size_t *nboot_acl, bool *rpm)
{
	struct icm *icm = tb_priv(tb);
	unsigned int retries = 50;
	int ret;

	ret = icm->driver_ready(tb, security_level, nboot_acl, rpm);
	if (ret) {
		tb_err(tb, "failed to send driver ready to ICM\n");
		return ret;
	}

	/*
	 * Hold on here until the switch config space is accessible so
	 * that we can read root switch config successfully.
	 */
	do {
		struct tb_cfg_result res;
		u32 tmp;

		res = tb_cfg_read_raw(tb->ctl, &tmp, 0, 0, TB_CFG_SWITCH,
				      0, 1, 100);
		if (!res.err)
			return 0;

		msleep(50);
	} while (--retries);

	tb_err(tb, "failed to read root switch config space, giving up\n");
	return -ETIMEDOUT;
}
static int pci2cio_wait_completion(struct icm *icm, unsigned long timeout_msec)
{
	unsigned long end = jiffies + msecs_to_jiffies(timeout_msec);
	u32 cmd;

	do {
		pci_read_config_dword(icm->upstream_port,
				      icm->vnd_cap + PCIE2CIO_CMD, &cmd);
		if (!(cmd & PCIE2CIO_CMD_START)) {
			if (cmd & PCIE2CIO_CMD_TIMEOUT)
				break;
			return 0;
		}

		msleep(50);
	} while (time_before(jiffies, end));

	return -ETIMEDOUT;
}
static int pcie2cio_read(struct icm *icm, enum tb_cfg_space cs,
			 unsigned int port, unsigned int index, u32 *data)
{
	struct pci_dev *pdev = icm->upstream_port;
	int ret, vnd_cap = icm->vnd_cap;
	u32 cmd;

	cmd = index;
	cmd |= (port << PCIE2CIO_CMD_PORT_SHIFT) & PCIE2CIO_CMD_PORT_MASK;
	cmd |= (cs << PCIE2CIO_CMD_CS_SHIFT) & PCIE2CIO_CMD_CS_MASK;
	cmd |= PCIE2CIO_CMD_START;
	pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_CMD, cmd);

	ret = pci2cio_wait_completion(icm, 5000);
	if (ret)
		return ret;

	pci_read_config_dword(pdev, vnd_cap + PCIE2CIO_RDDATA, data);
	return 0;
}
static int pcie2cio_write(struct icm *icm, enum tb_cfg_space cs,
			  unsigned int port, unsigned int index, u32 data)
{
	struct pci_dev *pdev = icm->upstream_port;
	int vnd_cap = icm->vnd_cap;
	u32 cmd;

	pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_WRDATA, data);

	cmd = index;
	cmd |= (port << PCIE2CIO_CMD_PORT_SHIFT) & PCIE2CIO_CMD_PORT_MASK;
	cmd |= (cs << PCIE2CIO_CMD_CS_SHIFT) & PCIE2CIO_CMD_CS_MASK;
	cmd |= PCIE2CIO_CMD_WRITE | PCIE2CIO_CMD_START;
	pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_CMD, cmd);

	return pci2cio_wait_completion(icm, 5000);
}
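/*
 * Illustrative only: the two helpers above drive the PCIe2CIO mailbox
 * found in the vendor specific capability of the upstream bridge. A CIO
 * config space register is accessed by composing the command dword
 * (port, config space, index, START) and polling until START clears,
 * e.g. reading PHY_PORT_CS1 of port 1 as done in icm_reset_phy_port():
 *
 *	u32 val;
 *	int ret;
 *
 *	ret = pcie2cio_read(icm, TB_CFG_PORT, 1, PHY_PORT_CS1, &val);
 */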
static int icm_firmware_reset(struct tb *tb, struct tb_nhi *nhi)
{
	struct icm *icm = tb_priv(tb);
	u32 val;

	if (!icm->upstream_port)
		return -ENODEV;

	/* Put ARC to wait for CIO reset event to happen */
	val = ioread32(nhi->iobase + REG_FW_STS);
	val |= REG_FW_STS_CIO_RESET_REQ;
	iowrite32(val, nhi->iobase + REG_FW_STS);

	/* Re-start ARC */
	val = ioread32(nhi->iobase + REG_FW_STS);
	val |= REG_FW_STS_ICM_EN_INVERT;
	val |= REG_FW_STS_ICM_EN_CPU;
	iowrite32(val, nhi->iobase + REG_FW_STS);

	/* Trigger CIO reset now */
	return pcie2cio_write(icm, TB_CFG_SWITCH, 0, 0x50, BIT(9));
}
static int icm_firmware_start(struct tb *tb, struct tb_nhi *nhi)
{
	unsigned int retries = 10;
	int ret;
	u32 val;

	/* Check if the ICM firmware is already running */
	val = ioread32(nhi->iobase + REG_FW_STS);
	if (val & REG_FW_STS_ICM_EN)
		return 0;

	dev_info(&nhi->pdev->dev, "starting ICM firmware\n");

	ret = icm_firmware_reset(tb, nhi);
	if (ret)
		return ret;

	/* Wait until the ICM firmware tells us it is up and running */
	do {
		/* Check that the ICM firmware is running */
		val = ioread32(nhi->iobase + REG_FW_STS);
		if (val & REG_FW_STS_NVM_AUTH_DONE)
			return 0;

		msleep(300);
	} while (--retries);

	return -ETIMEDOUT;
}
static int icm_reset_phy_port(struct tb *tb, int phy_port)
{
	struct icm *icm = tb_priv(tb);
	u32 state0, state1;
	int port0, port1;
	u32 val0, val1;
	int ret;

	if (!icm->upstream_port)
		return 0;

	if (phy_port) {
		port0 = 3;
		port1 = 4;
	} else {
		port0 = 1;
		port1 = 2;
	}

	/*
	 * Read link status of both null ports belonging to a single
	 * physical port.
	 */
	ret = pcie2cio_read(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, &val0);
	if (ret)
		return ret;
	ret = pcie2cio_read(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, &val1);
	if (ret)
		return ret;

	state0 = val0 & PHY_PORT_CS1_LINK_STATE_MASK;
	state0 >>= PHY_PORT_CS1_LINK_STATE_SHIFT;
	state1 = val1 & PHY_PORT_CS1_LINK_STATE_MASK;
	state1 >>= PHY_PORT_CS1_LINK_STATE_SHIFT;

	/* If they are both up we need to reset them now */
	if (state0 != TB_PORT_UP || state1 != TB_PORT_UP)
		return 0;

	val0 |= PHY_PORT_CS1_LINK_DISABLE;
	ret = pcie2cio_write(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, val0);
	if (ret)
		return ret;

	val1 |= PHY_PORT_CS1_LINK_DISABLE;
	ret = pcie2cio_write(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, val1);
	if (ret)
		return ret;

	/* Wait a bit and then re-enable both ports */
	usleep_range(10, 100);

	ret = pcie2cio_read(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, &val0);
	if (ret)
		return ret;
	ret = pcie2cio_read(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, &val1);
	if (ret)
		return ret;

	val0 &= ~PHY_PORT_CS1_LINK_DISABLE;
	ret = pcie2cio_write(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, val0);
	if (ret)
		return ret;

	val1 &= ~PHY_PORT_CS1_LINK_DISABLE;
	return pcie2cio_write(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, val1);
}
static int icm_firmware_init(struct tb *tb)
{
	struct icm *icm = tb_priv(tb);
	struct tb_nhi *nhi = tb->nhi;
	int ret;

	ret = icm_firmware_start(tb, nhi);
	if (ret) {
		dev_err(&nhi->pdev->dev, "could not start ICM firmware\n");
		return ret;
	}

	if (icm->get_mode) {
		ret = icm->get_mode(tb);

		switch (ret) {
		case NHI_FW_SAFE_MODE:
			icm->safe_mode = true;
			break;

		case NHI_FW_CM_MODE:
			/* Ask ICM to accept all Thunderbolt devices */
			nhi_mailbox_cmd(nhi, NHI_MAILBOX_ALLOW_ALL_DEVS, 0);
			break;

		default:
			if (ret < 0)
				return ret;

			tb_err(tb, "ICM firmware is in wrong mode: %u\n", ret);
			return -ENODEV;
		}
	}

	/*
	 * Reset both physical ports if there is anything connected to
	 * them already.
	 */
	ret = icm_reset_phy_port(tb, 0);
	if (ret)
		dev_warn(&nhi->pdev->dev, "failed to reset links on port0\n");
	ret = icm_reset_phy_port(tb, 1);
	if (ret)
		dev_warn(&nhi->pdev->dev, "failed to reset links on port1\n");

	return 0;
}
static int icm_driver_ready(struct tb *tb)
{
	struct icm *icm = tb_priv(tb);
	int ret;

	ret = icm_firmware_init(tb);
	if (ret)
		return ret;

	if (icm->safe_mode) {
		tb_info(tb, "Thunderbolt host controller is in safe mode.\n");
		tb_info(tb, "You need to update NVM firmware of the controller before it can be used.\n");
		tb_info(tb, "For latest updates check https://thunderbolttechnology.net/updates.\n");
		return 0;
	}

	ret = __icm_driver_ready(tb, &tb->security_level, &tb->nboot_acl,
				 &icm->rpm);
	if (ret)
		return ret;

	/*
	 * Make sure the number of supported preboot ACL matches what we
	 * expect or disable the whole feature.
	 */
	if (tb->nboot_acl > icm->max_boot_acl)
		tb->nboot_acl = 0;

	return 0;
}
static int icm_suspend(struct tb *tb)
{
	struct icm *icm = tb_priv(tb);

	if (icm->save_devices)
		icm->save_devices(tb);

	nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DRV_UNLOADS, 0);
	return 0;
}
/*
 * Mark all switches (except root switch) below this one unplugged. ICM
 * firmware will send us an updated list of switches after we have sent
 * it the driver ready command. If a switch is not in that list it will
 * be removed when we perform rescan.
 */
static void icm_unplug_children(struct tb_switch *sw)
{
	unsigned int i;

	if (tb_route(sw))
		sw->is_unplugged = true;

	for (i = 1; i <= sw->config.max_port_number; i++) {
		struct tb_port *port = &sw->ports[i];

		if (tb_is_upstream_port(port))
			continue;
		if (port->xdomain) {
			port->xdomain->is_unplugged = true;
			continue;
		}
		if (!port->remote)
			continue;

		icm_unplug_children(port->remote->sw);
	}
}
static void icm_free_unplugged_children(struct tb_switch *sw)
{
	unsigned int i;

	for (i = 1; i <= sw->config.max_port_number; i++) {
		struct tb_port *port = &sw->ports[i];

		if (tb_is_upstream_port(port))
			continue;

		if (port->xdomain && port->xdomain->is_unplugged) {
			tb_xdomain_remove(port->xdomain);
			port->xdomain = NULL;
			continue;
		}

		if (!port->remote)
			continue;

		if (port->remote->sw->is_unplugged) {
			tb_switch_remove(port->remote->sw);
			port->remote = NULL;
		} else {
			icm_free_unplugged_children(port->remote->sw);
		}
	}
}
static void icm_rescan_work(struct work_struct *work)
{
	struct icm *icm = container_of(work, struct icm, rescan_work.work);
	struct tb *tb = icm_to_tb(icm);

	mutex_lock(&tb->lock);
	if (tb->root_switch)
		icm_free_unplugged_children(tb->root_switch);
	mutex_unlock(&tb->lock);
}
static void icm_complete(struct tb *tb)
{
	struct icm *icm = tb_priv(tb);

	if (tb->nhi->going_away)
		return;

	icm_unplug_children(tb->root_switch);

	/*
	 * Now all existing children should be resumed, start events
	 * from ICM to get updated status.
	 */
	__icm_driver_ready(tb, NULL, NULL, NULL);

	/*
	 * We do not get notifications of devices that have been
	 * unplugged during suspend so schedule rescan to clean them up
	 * if any.
	 */
	queue_delayed_work(tb->wq, &icm->rescan_work, msecs_to_jiffies(500));
}
static int icm_runtime_suspend(struct tb *tb)
{
	nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DRV_UNLOADS, 0);
	return 0;
}

static int icm_runtime_resume(struct tb *tb)
{
	/*
	 * We can reuse the same resume functionality as with system
	 * suspend.
	 */
	icm_complete(tb);
	return 0;
}
static int icm_start(struct tb *tb)
{
	struct icm *icm = tb_priv(tb);
	int ret;

	if (icm->safe_mode)
		tb->root_switch = tb_switch_alloc_safe_mode(tb, &tb->dev, 0);
	else
		tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
	if (!tb->root_switch)
		return -ENODEV;

	/*
	 * NVM upgrade has not been tested on Apple systems and they
	 * don't provide images publicly either. To be on the safe side
	 * prevent root switch NVM upgrade on Macs for now.
	 */
	tb->root_switch->no_nvm_upgrade = x86_apple_machine;
	tb->root_switch->rpm = icm->rpm;

	ret = tb_switch_add(tb->root_switch);
	if (ret) {
		tb_switch_put(tb->root_switch);
		tb->root_switch = NULL;
	}

	return ret;
}
static void icm_stop(struct tb *tb)
{
	struct icm *icm = tb_priv(tb);

	cancel_delayed_work(&icm->rescan_work);
	tb_switch_remove(tb->root_switch);
	tb->root_switch = NULL;
	nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DRV_UNLOADS, 0);
}
static int icm_disconnect_pcie_paths(struct tb *tb)
{
	return nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DISCONNECT_PCIE_PATHS, 0);
}
static const struct tb_cm_ops icm_fr_ops = {
	.driver_ready = icm_driver_ready,
	.start = icm_start,
	.stop = icm_stop,
	.suspend = icm_suspend,
	.complete = icm_complete,
	.handle_event = icm_handle_event,
	.approve_switch = icm_fr_approve_switch,
	.add_switch_key = icm_fr_add_switch_key,
	.challenge_switch_key = icm_fr_challenge_switch_key,
	.disconnect_pcie_paths = icm_disconnect_pcie_paths,
	.approve_xdomain_paths = icm_fr_approve_xdomain_paths,
	.disconnect_xdomain_paths = icm_fr_disconnect_xdomain_paths,
};
static const struct tb_cm_ops icm_ar_ops = {
	.driver_ready = icm_driver_ready,
	.start = icm_start,
	.stop = icm_stop,
	.suspend = icm_suspend,
	.complete = icm_complete,
	.runtime_suspend = icm_runtime_suspend,
	.runtime_resume = icm_runtime_resume,
	.handle_event = icm_handle_event,
	.get_boot_acl = icm_ar_get_boot_acl,
	.set_boot_acl = icm_ar_set_boot_acl,
	.approve_switch = icm_fr_approve_switch,
	.add_switch_key = icm_fr_add_switch_key,
	.challenge_switch_key = icm_fr_challenge_switch_key,
	.disconnect_pcie_paths = icm_disconnect_pcie_paths,
	.approve_xdomain_paths = icm_fr_approve_xdomain_paths,
	.disconnect_xdomain_paths = icm_fr_disconnect_xdomain_paths,
};
static const struct tb_cm_ops icm_tr_ops = {
	.driver_ready = icm_driver_ready,
	.start = icm_start,
	.stop = icm_stop,
	.suspend = icm_suspend,
	.complete = icm_complete,
	.runtime_suspend = icm_runtime_suspend,
	.runtime_resume = icm_runtime_resume,
	.handle_event = icm_handle_event,
	.get_boot_acl = icm_ar_get_boot_acl,
	.set_boot_acl = icm_ar_set_boot_acl,
	.approve_switch = icm_tr_approve_switch,
	.add_switch_key = icm_tr_add_switch_key,
	.challenge_switch_key = icm_tr_challenge_switch_key,
	.disconnect_pcie_paths = icm_disconnect_pcie_paths,
	.approve_xdomain_paths = icm_tr_approve_xdomain_paths,
	.disconnect_xdomain_paths = icm_tr_disconnect_xdomain_paths,
};
struct tb *icm_probe(struct tb_nhi *nhi)
{
	struct icm *icm;
	struct tb *tb;

	tb = tb_domain_alloc(nhi, sizeof(struct icm));
	if (!tb)
		return NULL;

	icm = tb_priv(tb);
	INIT_DELAYED_WORK(&icm->rescan_work, icm_rescan_work);
	mutex_init(&icm->request_lock);

	switch (nhi->pdev->device) {
	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI:
	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI:
		icm->is_supported = icm_fr_is_supported;
		icm->get_route = icm_fr_get_route;
		icm->save_devices = icm_fr_save_devices;
		icm->driver_ready = icm_fr_driver_ready;
		icm->device_connected = icm_fr_device_connected;
		icm->device_disconnected = icm_fr_device_disconnected;
		icm->xdomain_connected = icm_fr_xdomain_connected;
		icm->xdomain_disconnected = icm_fr_xdomain_disconnected;
		tb->cm_ops = &icm_fr_ops;
		break;

	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_NHI:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_NHI:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_NHI:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_NHI:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_NHI:
		icm->max_boot_acl = ICM_AR_PREBOOT_ACL_ENTRIES;
		icm->is_supported = icm_ar_is_supported;
		icm->get_mode = icm_ar_get_mode;
		icm->get_route = icm_ar_get_route;
		icm->save_devices = icm_fr_save_devices;
		icm->driver_ready = icm_ar_driver_ready;
		icm->device_connected = icm_fr_device_connected;
		icm->device_disconnected = icm_fr_device_disconnected;
		icm->xdomain_connected = icm_fr_xdomain_connected;
		icm->xdomain_disconnected = icm_fr_xdomain_disconnected;
		tb->cm_ops = &icm_ar_ops;
		break;

	case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_NHI:
	case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_NHI:
		icm->max_boot_acl = ICM_AR_PREBOOT_ACL_ENTRIES;
		icm->is_supported = icm_ar_is_supported;
		icm->get_mode = icm_ar_get_mode;
		icm->driver_ready = icm_tr_driver_ready;
		icm->device_connected = icm_tr_device_connected;
		icm->device_disconnected = icm_tr_device_disconnected;
		icm->xdomain_connected = icm_tr_xdomain_connected;
		icm->xdomain_disconnected = icm_tr_xdomain_disconnected;
		tb->cm_ops = &icm_tr_ops;
		break;
	}

	if (!icm->is_supported || !icm->is_supported(tb)) {
		dev_dbg(&nhi->pdev->dev, "ICM not supported on this controller\n");
		tb_domain_put(tb);
		return NULL;
	}

	return tb;
}