/*
 * Internal Thunderbolt Connection Manager. This is a firmware running on
 * the Thunderbolt host controller performing most of the low-level
 * handling.
 *
 * Copyright (C) 2017, Intel Corporation
 * Authors: Michael Jamet <michael.jamet@intel.com>
 *          Mika Westerberg <mika.westerberg@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/platform_data/x86/apple.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

#include "ctl.h"
#include "nhi_regs.h"
#include "tb.h"

#define PCIE2CIO_CMD			0x30
#define PCIE2CIO_CMD_TIMEOUT		BIT(31)
#define PCIE2CIO_CMD_START		BIT(30)
#define PCIE2CIO_CMD_WRITE		BIT(21)
#define PCIE2CIO_CMD_CS_MASK		GENMASK(20, 19)
#define PCIE2CIO_CMD_CS_SHIFT		19
#define PCIE2CIO_CMD_PORT_MASK		GENMASK(18, 13)
#define PCIE2CIO_CMD_PORT_SHIFT		13

#define PCIE2CIO_WRDATA			0x34
#define PCIE2CIO_RDDATA			0x38

#define PHY_PORT_CS1			0x37
#define PHY_PORT_CS1_LINK_DISABLE	BIT(14)
#define PHY_PORT_CS1_LINK_STATE_MASK	GENMASK(29, 26)
#define PHY_PORT_CS1_LINK_STATE_SHIFT	26

#define ICM_TIMEOUT			5000	/* ms */
#define ICM_APPROVE_TIMEOUT		10000	/* ms */
#define ICM_MAX_LINK			4
#define ICM_MAX_DEPTH			6

/**
 * struct icm - Internal connection manager private data
 * @request_lock: Makes sure only one message is sent to ICM at a time
 * @rescan_work: Work used to rescan the surviving switches after resume
 * @upstream_port: Pointer to the PCIe upstream port this host
 *		   controller is connected to. This is only set for systems
 *		   where ICM needs to be started manually
 * @vnd_cap: Vendor defined capability where PCIe2CIO mailbox resides
 *	     (only set when @upstream_port is not %NULL)
 * @safe_mode: ICM is in safe mode
 * @max_boot_acl: Maximum number of preboot ACL entries (%0 if not supported)
 * @rpm: Does the controller support runtime PM (RTD3)
 * @is_supported: Checks if we can support ICM on this controller
 * @get_mode: Read and return the ICM firmware mode (optional)
 * @get_route: Find a route string for given switch
 * @save_devices: Ask ICM to save devices to ACL when suspending (optional)
 * @driver_ready: Send driver ready message to ICM
 * @device_connected: Handle device connected ICM message
 * @device_disconnected: Handle device disconnected ICM message
 * @xdomain_connected: Handle XDomain connected ICM message
 * @xdomain_disconnected: Handle XDomain disconnected ICM message
 */
struct icm {
	struct mutex request_lock;
	struct delayed_work rescan_work;
	struct pci_dev *upstream_port;
	size_t max_boot_acl;
	int vnd_cap;
	bool safe_mode;
	bool rpm;
	bool (*is_supported)(struct tb *tb);
	int (*get_mode)(struct tb *tb);
	int (*get_route)(struct tb *tb, u8 link, u8 depth, u64 *route);
	void (*save_devices)(struct tb *tb);
	int (*driver_ready)(struct tb *tb,
			    enum tb_security_level *security_level,
			    size_t *nboot_acl, bool *rpm);
	void (*device_connected)(struct tb *tb,
				 const struct icm_pkg_header *hdr);
	void (*device_disconnected)(struct tb *tb,
				    const struct icm_pkg_header *hdr);
	void (*xdomain_connected)(struct tb *tb,
				  const struct icm_pkg_header *hdr);
	void (*xdomain_disconnected)(struct tb *tb,
				     const struct icm_pkg_header *hdr);
};

struct icm_notification {
	struct work_struct work;
	struct icm_pkg_header *pkg;
	struct tb *tb;
};

struct ep_name_entry {
	u8 len;
	u8 type;
	u8 data[0];
};

#define EP_NAME_INTEL_VSS	0x10

/* Intel Vendor specific structure */
struct intel_vss {
	u16 vendor;
	u16 model;
	u8 mc;
	u8 flags;
	u16 pci_devid;
	u32 nvm_version;
};

#define INTEL_VSS_FLAGS_RTD3	BIT(0)

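/*
 * The EP name is a list of length-prefixed entries; walk it until an
 * Intel vendor specific entry is found or the buffer is exhausted.
 */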
static const struct intel_vss *parse_intel_vss(const void *ep_name, size_t size)
{
	const void *end = ep_name + size;

	while (ep_name < end) {
		const struct ep_name_entry *ep = ep_name;

		if (!ep->len)
			break;
		if (ep_name + ep->len > end)
			break;

		if (ep->type == EP_NAME_INTEL_VSS)
			return (const struct intel_vss *)ep->data;

		ep_name += ep->len;
	}

	return NULL;
}

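/*
 * The ICM private data is allocated right after struct tb in the same
 * allocation (see tb_priv()), so stepping back over the struct tb
 * header recovers the domain.
 */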
static inline struct tb *icm_to_tb(struct icm *icm)
{
	return ((void *)icm - sizeof(struct tb));
}

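/*
 * A route string stores one 8-bit hop (link number) per depth level;
 * pick the topmost hop for the given depth to find the physical port.
 */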
static inline u8 phy_port_from_route(u64 route, u8 depth)
{
	u8 link;

	link = depth ? route >> ((depth - 1) * 8) : route;
	return tb_phy_port_from_link(link);
}

static inline u8 dual_link_from_link(u8 link)
{
	return link ? ((link - 1) ^ 0x01) + 1 : 0;
}

static inline u64 get_route(u32 route_hi, u32 route_lo)
{
	return (u64)route_hi << 32 | route_lo;
}

static inline u64 get_parent_route(u64 route)
{
	int depth = tb_route_length(route);
	return depth ? route & ~(0xffULL << (depth - 1) * TB_ROUTE_SHIFT) : 0;
}

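/*
 * ICM responses are matched to their requests by EOF PDF and message
 * code; responses spanning multiple packets are reassembled in
 * icm_copy() using the packet_id/total_packets fields of the header.
 */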
static bool icm_match(const struct tb_cfg_request *req,
		      const struct ctl_pkg *pkg)
{
	const struct icm_pkg_header *res_hdr = pkg->buffer;
	const struct icm_pkg_header *req_hdr = req->request;

	if (pkg->frame.eof != req->response_type)
		return false;
	if (res_hdr->code != req_hdr->code)
		return false;

	return true;
}

static bool icm_copy(struct tb_cfg_request *req, const struct ctl_pkg *pkg)
{
	const struct icm_pkg_header *hdr = pkg->buffer;

	if (hdr->packet_id < req->npackets) {
		size_t offset = hdr->packet_id * req->response_size;

		memcpy(req->response + offset, pkg->buffer, req->response_size);
	}

	return hdr->packet_id == hdr->total_packets - 1;
}

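/*
 * Sends a message to ICM and waits for the response. On timeout the
 * request is retried a few times before giving up; only one request is
 * in flight at a time (serialized by icm->request_lock).
 */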
static int icm_request(struct tb *tb, const void *request, size_t request_size,
		       void *response, size_t response_size, size_t npackets,
		       unsigned int timeout_msec)
{
	struct icm *icm = tb_priv(tb);
	int retries = 3;

	do {
		struct tb_cfg_request *req;
		struct tb_cfg_result res;

		req = tb_cfg_request_alloc();
		if (!req)
			return -ENOMEM;

		req->match = icm_match;
		req->copy = icm_copy;
		req->request = request;
		req->request_size = request_size;
		req->request_type = TB_CFG_PKG_ICM_CMD;
		req->response = response;
		req->npackets = npackets;
		req->response_size = response_size;
		req->response_type = TB_CFG_PKG_ICM_RESP;

		mutex_lock(&icm->request_lock);
		res = tb_cfg_request_sync(tb->ctl, req, timeout_msec);
		mutex_unlock(&icm->request_lock);

		tb_cfg_request_put(req);

		if (res.err != -ETIMEDOUT)
			return res.err == 1 ? -EIO : res.err;

		usleep_range(20, 50);
	} while (retries--);

	return -ETIMEDOUT;
}

static bool icm_fr_is_supported(struct tb *tb)
{
	return !x86_apple_machine;
}

static inline int icm_fr_get_switch_index(u32 port)
{
	int index;

	if ((port & ICM_PORT_TYPE_MASK) != TB_TYPE_PORT)
		return 0;

	index = port >> ICM_PORT_INDEX_SHIFT;
	return index != 0xff ? index : 0;
}

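/*
 * Falcon Ridge firmware does not report route strings directly;
 * reconstruct one by fetching the full topology from ICM and walking it
 * from the root switch down to the given link and depth.
 */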
static int icm_fr_get_route(struct tb *tb, u8 link, u8 depth, u64 *route)
{
	struct icm_fr_pkg_get_topology_response *switches, *sw;
	struct icm_fr_pkg_get_topology request = {
		.hdr = { .code = ICM_GET_TOPOLOGY },
	};
	size_t npackets = ICM_GET_TOPOLOGY_PACKETS;
	int ret, index;
	u8 i;

	switches = kcalloc(npackets, sizeof(*switches), GFP_KERNEL);
	if (!switches)
		return -ENOMEM;

	ret = icm_request(tb, &request, sizeof(request), switches,
			  sizeof(*switches), npackets, ICM_TIMEOUT);
	if (ret)
		goto err_free;

	sw = &switches[0];
	index = icm_fr_get_switch_index(sw->ports[link]);
	if (!index) {
		ret = -ENODEV;
		goto err_free;
	}

	sw = &switches[index];
	for (i = 1; i < depth; i++) {
		unsigned int j;

		if (!(sw->first_data & ICM_SWITCH_USED)) {
			ret = -ENODEV;
			goto err_free;
		}

		for (j = 0; j < ARRAY_SIZE(sw->ports); j++) {
			index = icm_fr_get_switch_index(sw->ports[j]);
			if (index > sw->switch_index) {
				sw = &switches[index];
				break;
			}
		}
	}

	*route = get_route(sw->route_hi, sw->route_lo);

err_free:
	kfree(switches);
	return ret;
}

static void icm_fr_save_devices(struct tb *tb)
{
	nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_SAVE_DEVS, 0);
}

static int
icm_fr_driver_ready(struct tb *tb, enum tb_security_level *security_level,
		    size_t *nboot_acl, bool *rpm)
{
	struct icm_fr_pkg_driver_ready_response reply;
	struct icm_pkg_driver_ready request = {
		.hdr.code = ICM_DRIVER_READY,
	};
	int ret;

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_TIMEOUT);
	if (ret)
		return ret;

	if (security_level)
		*security_level = reply.security_level & ICM_FR_SLEVEL_MASK;

	return 0;
}

static int icm_fr_approve_switch(struct tb *tb, struct tb_switch *sw)
{
	struct icm_fr_pkg_approve_device request;
	struct icm_fr_pkg_approve_device reply;
	int ret;

	memset(&request, 0, sizeof(request));
	memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
	request.hdr.code = ICM_APPROVE_DEVICE;
	request.connection_id = sw->connection_id;
	request.connection_key = sw->connection_key;

	memset(&reply, 0, sizeof(reply));
	/* Use larger timeout as establishing tunnels can take some time */
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_APPROVE_TIMEOUT);
	if (ret)
		return ret;

	if (reply.hdr.flags & ICM_FLAGS_ERROR) {
		tb_warn(tb, "PCIe tunnel creation failed\n");
		return -EIO;
	}

	return 0;
}

static int icm_fr_add_switch_key(struct tb *tb, struct tb_switch *sw)
{
	struct icm_fr_pkg_add_device_key request;
	struct icm_fr_pkg_add_device_key_response reply;
	int ret;

	memset(&request, 0, sizeof(request));
	memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
	request.hdr.code = ICM_ADD_DEVICE_KEY;
	request.connection_id = sw->connection_id;
	request.connection_key = sw->connection_key;
	memcpy(request.key, sw->key, TB_SWITCH_KEY_SIZE);

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_TIMEOUT);
	if (ret)
		return ret;

	if (reply.hdr.flags & ICM_FLAGS_ERROR) {
		tb_warn(tb, "Adding key to switch failed\n");
		return -EIO;
	}

	return 0;
}

static int icm_fr_challenge_switch_key(struct tb *tb, struct tb_switch *sw,
				       const u8 *challenge, u8 *response)
{
	struct icm_fr_pkg_challenge_device request;
	struct icm_fr_pkg_challenge_device_response reply;
	int ret;

	memset(&request, 0, sizeof(request));
	memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
	request.hdr.code = ICM_CHALLENGE_DEVICE;
	request.connection_id = sw->connection_id;
	request.connection_key = sw->connection_key;
	memcpy(request.challenge, challenge, TB_SWITCH_KEY_SIZE);

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_TIMEOUT);
	if (ret)
		return ret;

	if (reply.hdr.flags & ICM_FLAGS_ERROR)
		return -EKEYREJECTED;
	if (reply.hdr.flags & ICM_FLAGS_NO_KEY)
		return -ENOKEY;

	memcpy(response, reply.response, TB_SWITCH_KEY_SIZE);

	return 0;
}

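/*
 * Asks ICM to establish the DMA (XDomain) paths using the transmit and
 * receive paths and rings negotiated with the remote host.
 */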
static int icm_fr_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
	struct icm_fr_pkg_approve_xdomain_response reply;
	struct icm_fr_pkg_approve_xdomain request;
	int ret;

	memset(&request, 0, sizeof(request));
	request.hdr.code = ICM_APPROVE_XDOMAIN;
	request.link_info = xd->depth << ICM_LINK_INFO_DEPTH_SHIFT | xd->link;
	memcpy(&request.remote_uuid, xd->remote_uuid, sizeof(*xd->remote_uuid));

	request.transmit_path = xd->transmit_path;
	request.transmit_ring = xd->transmit_ring;
	request.receive_path = xd->receive_path;
	request.receive_ring = xd->receive_ring;

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_TIMEOUT);
	if (ret)
		return ret;

	if (reply.hdr.flags & ICM_FLAGS_ERROR)
		return -EIO;

	return 0;
}

static int icm_fr_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
	u8 phy_port;
	u8 cmd;

	phy_port = tb_phy_port_from_link(xd->link);
	if (phy_port == 0)
		cmd = NHI_MAILBOX_DISCONNECT_PA;
	else
		cmd = NHI_MAILBOX_DISCONNECT_PB;

	nhi_mailbox_cmd(tb->nhi, cmd, 1);
	usleep_range(10, 50);
	nhi_mailbox_cmd(tb->nhi, cmd, 2);
	return 0;
}
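
/*
 * Keep the parent runtime resumed while the new switch is added; the
 * reference is dropped once tb_switch_add() has finished.
 */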
462 | ||
210f2eb5 | 463 | static void add_switch(struct tb_switch *parent_sw, u64 route, |
2de98e05 MW |
464 | const uuid_t *uuid, const u8 *ep_name, |
465 | size_t ep_name_size, u8 connection_id, u8 connection_key, | |
210f2eb5 | 466 | u8 link, u8 depth, enum tb_security_level security_level, |
269f6def | 467 | bool authorized, bool boot) |
210f2eb5 | 468 | { |
2de98e05 | 469 | const struct intel_vss *vss; |
210f2eb5 MW |
470 | struct tb_switch *sw; |
471 | ||
2de98e05 MW |
472 | pm_runtime_get_sync(&parent_sw->dev); |
473 | ||
210f2eb5 MW |
474 | sw = tb_switch_alloc(parent_sw->tb, &parent_sw->dev, route); |
475 | if (!sw) | |
2de98e05 | 476 | goto out; |
210f2eb5 MW |
477 | |
478 | sw->uuid = kmemdup(uuid, sizeof(*uuid), GFP_KERNEL); | |
57fa410e AP |
479 | if (!sw->uuid) { |
480 | tb_sw_warn(sw, "cannot allocate memory for switch\n"); | |
481 | tb_switch_put(sw); | |
482 | goto out; | |
483 | } | |
210f2eb5 MW |
484 | sw->connection_id = connection_id; |
485 | sw->connection_key = connection_key; | |
486 | sw->link = link; | |
487 | sw->depth = depth; | |
488 | sw->authorized = authorized; | |
489 | sw->security_level = security_level; | |
269f6def | 490 | sw->boot = boot; |
210f2eb5 | 491 | |
2de98e05 MW |
492 | vss = parse_intel_vss(ep_name, ep_name_size); |
493 | if (vss) | |
494 | sw->rpm = !!(vss->flags & INTEL_VSS_FLAGS_RTD3); | |
495 | ||
210f2eb5 MW |
496 | /* Link the two switches now */ |
497 | tb_port_at(route, parent_sw)->remote = tb_upstream_port(sw); | |
498 | tb_upstream_port(sw)->remote = tb_port_at(route, parent_sw); | |
499 | ||
500 | if (tb_switch_add(sw)) { | |
501 | tb_port_at(tb_route(sw), parent_sw)->remote = NULL; | |
502 | tb_switch_put(sw); | |
210f2eb5 | 503 | } |
2de98e05 MW |
504 | |
505 | out: | |
506 | pm_runtime_mark_last_busy(&parent_sw->dev); | |
507 | pm_runtime_put_autosuspend(&parent_sw->dev); | |
210f2eb5 MW |
508 | } |
509 | ||
static void update_switch(struct tb_switch *parent_sw, struct tb_switch *sw,
			  u64 route, u8 connection_id, u8 connection_key,
			  u8 link, u8 depth, bool boot)
{
	/* Disconnect from parent */
	tb_port_at(tb_route(sw), parent_sw)->remote = NULL;
	/* Re-connect via updated port */
	tb_port_at(route, parent_sw)->remote = tb_upstream_port(sw);

	/* Update with the new addressing information */
	sw->config.route_hi = upper_32_bits(route);
	sw->config.route_lo = lower_32_bits(route);
	sw->connection_id = connection_id;
	sw->connection_key = connection_key;
	sw->link = link;
	sw->depth = depth;
	sw->boot = boot;

	/* This switch still exists */
	sw->is_unplugged = false;
}

static void remove_switch(struct tb_switch *sw)
{
	struct tb_switch *parent_sw;

	parent_sw = tb_to_switch(sw->dev.parent);
	tb_port_at(tb_route(sw), parent_sw)->remote = NULL;
	tb_switch_remove(sw);
}

static void add_xdomain(struct tb_switch *sw, u64 route,
			const uuid_t *local_uuid, const uuid_t *remote_uuid,
			u8 link, u8 depth)
{
	struct tb_xdomain *xd;

	pm_runtime_get_sync(&sw->dev);

	xd = tb_xdomain_alloc(sw->tb, &sw->dev, route, local_uuid, remote_uuid);
	if (!xd)
		goto out;

	xd->link = link;
	xd->depth = depth;

	tb_port_at(route, sw)->xdomain = xd;

	tb_xdomain_add(xd);

out:
	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);
}

static void update_xdomain(struct tb_xdomain *xd, u64 route, u8 link)
{
	xd->link = link;
	xd->route = route;
	xd->is_unplugged = false;
}

static void remove_xdomain(struct tb_xdomain *xd)
{
	struct tb_switch *sw;

	sw = tb_to_switch(xd->dev.parent);
	tb_port_at(xd->route, sw)->xdomain = NULL;
	tb_xdomain_remove(xd);
}

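/*
 * Falcon Ridge device connected events address switches by link and
 * depth; resolve those into a route string and reconcile against any
 * switch or XDomain we already know about before adding a new one.
 */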
static void
icm_fr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
{
	const struct icm_fr_event_device_connected *pkg =
		(const struct icm_fr_event_device_connected *)hdr;
	enum tb_security_level security_level;
	struct tb_switch *sw, *parent_sw;
	struct icm *icm = tb_priv(tb);
	bool authorized = false;
	struct tb_xdomain *xd;
	u8 link, depth;
	bool boot;
	u64 route;
	int ret;

	link = pkg->link_info & ICM_LINK_INFO_LINK_MASK;
	depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >>
		ICM_LINK_INFO_DEPTH_SHIFT;
	authorized = pkg->link_info & ICM_LINK_INFO_APPROVED;
	security_level = (pkg->hdr.flags & ICM_FLAGS_SLEVEL_MASK) >>
			 ICM_FLAGS_SLEVEL_SHIFT;
	boot = pkg->link_info & ICM_LINK_INFO_BOOT;

	if (pkg->link_info & ICM_LINK_INFO_REJECTED) {
		tb_info(tb, "switch at %u.%u was rejected by ICM firmware because topology limit exceeded\n",
			link, depth);
		return;
	}

	sw = tb_switch_find_by_uuid(tb, &pkg->ep_uuid);
	if (sw) {
		u8 phy_port, sw_phy_port;

		parent_sw = tb_to_switch(sw->dev.parent);
		sw_phy_port = tb_phy_port_from_link(sw->link);
		phy_port = tb_phy_port_from_link(link);

		/*
		 * On resume ICM will send us connected events for the
		 * devices that still are present. However, that
		 * information might have changed for example by the
		 * fact that a switch on a dual-link connection might
		 * have been enumerated using the other link now. Make
		 * sure our bookkeeping matches that.
		 */
		if (sw->depth == depth && sw_phy_port == phy_port &&
		    !!sw->authorized == authorized) {
			/*
			 * It was enumerated through another link so update
			 * route string accordingly.
			 */
			if (sw->link != link) {
				ret = icm->get_route(tb, link, depth, &route);
				if (ret) {
					tb_err(tb, "failed to update route string for switch at %u.%u\n",
					       link, depth);
					tb_switch_put(sw);
					return;
				}
			} else {
				route = tb_route(sw);
			}

			update_switch(parent_sw, sw, route, pkg->connection_id,
				      pkg->connection_key, link, depth, boot);
			tb_switch_put(sw);
			return;
		}

		/*
		 * User connected the same switch to another physical
		 * port or to another part of the topology. Remove the
		 * existing switch now before adding the new one.
		 */
		remove_switch(sw);
		tb_switch_put(sw);
	}

	/*
	 * If the switch was not found by UUID, look for a switch on the
	 * same physical port (taking possible link aggregation into
	 * account) and depth. If we find one it is definitely a stale
	 * one so remove it first.
	 */
	sw = tb_switch_find_by_link_depth(tb, link, depth);
	if (!sw) {
		u8 dual_link;

		dual_link = dual_link_from_link(link);
		if (dual_link)
			sw = tb_switch_find_by_link_depth(tb, dual_link, depth);
	}
	if (sw) {
		remove_switch(sw);
		tb_switch_put(sw);
	}

	/* Remove existing XDomain connection if found */
	xd = tb_xdomain_find_by_link_depth(tb, link, depth);
	if (xd) {
		remove_xdomain(xd);
		tb_xdomain_put(xd);
	}

	parent_sw = tb_switch_find_by_link_depth(tb, link, depth - 1);
	if (!parent_sw) {
		tb_err(tb, "failed to find parent switch for %u.%u\n",
		       link, depth);
		return;
	}

	ret = icm->get_route(tb, link, depth, &route);
	if (ret) {
		tb_err(tb, "failed to find route string for switch at %u.%u\n",
		       link, depth);
		tb_switch_put(parent_sw);
		return;
	}

	add_switch(parent_sw, route, &pkg->ep_uuid, (const u8 *)pkg->ep_name,
		   sizeof(pkg->ep_name), pkg->connection_id,
		   pkg->connection_key, link, depth, security_level,
		   authorized, boot);

	tb_switch_put(parent_sw);
}

static void
icm_fr_device_disconnected(struct tb *tb, const struct icm_pkg_header *hdr)
{
	const struct icm_fr_event_device_disconnected *pkg =
		(const struct icm_fr_event_device_disconnected *)hdr;
	struct tb_switch *sw;
	u8 link, depth;

	link = pkg->link_info & ICM_LINK_INFO_LINK_MASK;
	depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >>
		ICM_LINK_INFO_DEPTH_SHIFT;

	if (link > ICM_MAX_LINK || depth > ICM_MAX_DEPTH) {
		tb_warn(tb, "invalid topology %u.%u, ignoring\n", link, depth);
		return;
	}

	sw = tb_switch_find_by_link_depth(tb, link, depth);
	if (!sw) {
		tb_warn(tb, "no switch exists at %u.%u, ignoring\n", link,
			depth);
		return;
	}

	remove_switch(sw);
	tb_switch_put(sw);
}

static void
icm_fr_xdomain_connected(struct tb *tb, const struct icm_pkg_header *hdr)
{
	const struct icm_fr_event_xdomain_connected *pkg =
		(const struct icm_fr_event_xdomain_connected *)hdr;
	struct tb_xdomain *xd;
	struct tb_switch *sw;
	u8 link, depth;
	bool approved;
	u64 route;

	link = pkg->link_info & ICM_LINK_INFO_LINK_MASK;
	depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >>
		ICM_LINK_INFO_DEPTH_SHIFT;
	approved = pkg->link_info & ICM_LINK_INFO_APPROVED;

	if (link > ICM_MAX_LINK || depth > ICM_MAX_DEPTH) {
		tb_warn(tb, "invalid topology %u.%u, ignoring\n", link, depth);
		return;
	}

	route = get_route(pkg->local_route_hi, pkg->local_route_lo);

	xd = tb_xdomain_find_by_uuid(tb, &pkg->remote_uuid);
	if (xd) {
		u8 xd_phy_port, phy_port;

		xd_phy_port = phy_port_from_route(xd->route, xd->depth);
		phy_port = phy_port_from_route(route, depth);

		if (xd->depth == depth && xd_phy_port == phy_port) {
			update_xdomain(xd, route, link);
			tb_xdomain_put(xd);
			return;
		}

		/*
		 * If we find an existing XDomain connection remove it
		 * now. We need to go through login handshake and
		 * everything anyway to be able to re-establish the
		 * connection.
		 */
		remove_xdomain(xd);
		tb_xdomain_put(xd);
	}

	/*
	 * Look if there already exists an XDomain in the same place as
	 * the new one and in that case remove it because it is most
	 * likely another host that got disconnected.
	 */
	xd = tb_xdomain_find_by_link_depth(tb, link, depth);
	if (!xd) {
		u8 dual_link;

		dual_link = dual_link_from_link(link);
		if (dual_link)
			xd = tb_xdomain_find_by_link_depth(tb, dual_link,
							   depth);
	}
	if (xd) {
		remove_xdomain(xd);
		tb_xdomain_put(xd);
	}

	/*
	 * If the user disconnected a switch during suspend and
	 * connected another host to the same port, remove the switch
	 * first.
	 */
	sw = get_switch_at_route(tb->root_switch, route);
	if (sw)
		remove_switch(sw);

	sw = tb_switch_find_by_link_depth(tb, link, depth);
	if (!sw) {
		tb_warn(tb, "no switch exists at %u.%u, ignoring\n", link,
			depth);
		return;
	}

	add_xdomain(sw, route, &pkg->local_uuid, &pkg->remote_uuid, link,
		    depth);
	tb_switch_put(sw);
}

static void
icm_fr_xdomain_disconnected(struct tb *tb, const struct icm_pkg_header *hdr)
{
	const struct icm_fr_event_xdomain_disconnected *pkg =
		(const struct icm_fr_event_xdomain_disconnected *)hdr;
	struct tb_xdomain *xd;

	/*
	 * If the connection is through one or multiple devices, the
	 * XDomain device is removed along with them so it is fine if we
	 * cannot find it here.
	 */
	xd = tb_xdomain_find_by_uuid(tb, &pkg->remote_uuid);
	if (xd) {
		remove_xdomain(xd);
		tb_xdomain_put(xd);
	}
}

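/*
 * The icm_tr_* handlers below serve newer firmware (the "tr" prefix
 * refers to Titan Ridge class controllers) that addresses devices
 * directly by route string instead of link and depth, so no separate
 * route lookup is needed.
 */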
static int
icm_tr_driver_ready(struct tb *tb, enum tb_security_level *security_level,
		    size_t *nboot_acl, bool *rpm)
{
	struct icm_tr_pkg_driver_ready_response reply;
	struct icm_pkg_driver_ready request = {
		.hdr.code = ICM_DRIVER_READY,
	};
	int ret;

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, 20000);
	if (ret)
		return ret;

	if (security_level)
		*security_level = reply.info & ICM_TR_INFO_SLEVEL_MASK;
	if (nboot_acl)
		*nboot_acl = (reply.info & ICM_TR_INFO_BOOT_ACL_MASK) >>
				ICM_TR_INFO_BOOT_ACL_SHIFT;
	if (rpm)
		*rpm = !!(reply.hdr.flags & ICM_TR_FLAGS_RTD3);

	return 0;
}

static int icm_tr_approve_switch(struct tb *tb, struct tb_switch *sw)
{
	struct icm_tr_pkg_approve_device request;
	struct icm_tr_pkg_approve_device reply;
	int ret;

	memset(&request, 0, sizeof(request));
	memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
	request.hdr.code = ICM_APPROVE_DEVICE;
	request.route_lo = sw->config.route_lo;
	request.route_hi = sw->config.route_hi;
	request.connection_id = sw->connection_id;

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_APPROVE_TIMEOUT);
	if (ret)
		return ret;

	if (reply.hdr.flags & ICM_FLAGS_ERROR) {
		tb_warn(tb, "PCIe tunnel creation failed\n");
		return -EIO;
	}

	return 0;
}

static int icm_tr_add_switch_key(struct tb *tb, struct tb_switch *sw)
{
	struct icm_tr_pkg_add_device_key_response reply;
	struct icm_tr_pkg_add_device_key request;
	int ret;

	memset(&request, 0, sizeof(request));
	memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
	request.hdr.code = ICM_ADD_DEVICE_KEY;
	request.route_lo = sw->config.route_lo;
	request.route_hi = sw->config.route_hi;
	request.connection_id = sw->connection_id;
	memcpy(request.key, sw->key, TB_SWITCH_KEY_SIZE);

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_TIMEOUT);
	if (ret)
		return ret;

	if (reply.hdr.flags & ICM_FLAGS_ERROR) {
		tb_warn(tb, "Adding key to switch failed\n");
		return -EIO;
	}

	return 0;
}

static int icm_tr_challenge_switch_key(struct tb *tb, struct tb_switch *sw,
				       const u8 *challenge, u8 *response)
{
	struct icm_tr_pkg_challenge_device_response reply;
	struct icm_tr_pkg_challenge_device request;
	int ret;

	memset(&request, 0, sizeof(request));
	memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
	request.hdr.code = ICM_CHALLENGE_DEVICE;
	request.route_lo = sw->config.route_lo;
	request.route_hi = sw->config.route_hi;
	request.connection_id = sw->connection_id;
	memcpy(request.challenge, challenge, TB_SWITCH_KEY_SIZE);

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_TIMEOUT);
	if (ret)
		return ret;

	if (reply.hdr.flags & ICM_FLAGS_ERROR)
		return -EKEYREJECTED;
	if (reply.hdr.flags & ICM_FLAGS_NO_KEY)
		return -ENOKEY;

	memcpy(response, reply.response, TB_SWITCH_KEY_SIZE);

	return 0;
}

static int icm_tr_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
	struct icm_tr_pkg_approve_xdomain_response reply;
	struct icm_tr_pkg_approve_xdomain request;
	int ret;

	memset(&request, 0, sizeof(request));
	request.hdr.code = ICM_APPROVE_XDOMAIN;
	request.route_hi = upper_32_bits(xd->route);
	request.route_lo = lower_32_bits(xd->route);
	request.transmit_path = xd->transmit_path;
	request.transmit_ring = xd->transmit_ring;
	request.receive_path = xd->receive_path;
	request.receive_ring = xd->receive_ring;
	memcpy(&request.remote_uuid, xd->remote_uuid, sizeof(*xd->remote_uuid));

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_TIMEOUT);
	if (ret)
		return ret;

	if (reply.hdr.flags & ICM_FLAGS_ERROR)
		return -EIO;

	return 0;
}

static int icm_tr_xdomain_tear_down(struct tb *tb, struct tb_xdomain *xd,
				    int stage)
{
	struct icm_tr_pkg_disconnect_xdomain_response reply;
	struct icm_tr_pkg_disconnect_xdomain request;
	int ret;

	memset(&request, 0, sizeof(request));
	request.hdr.code = ICM_DISCONNECT_XDOMAIN;
	request.stage = stage;
	request.route_hi = upper_32_bits(xd->route);
	request.route_lo = lower_32_bits(xd->route);
	memcpy(&request.remote_uuid, xd->remote_uuid, sizeof(*xd->remote_uuid));

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_TIMEOUT);
	if (ret)
		return ret;

	if (reply.hdr.flags & ICM_FLAGS_ERROR)
		return -EIO;

	return 0;
}

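/*
 * XDomain paths are torn down in two stages with a short delay in
 * between, mirroring the two NHI mailbox commands used on Falcon Ridge.
 */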
static int icm_tr_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
	int ret;

	ret = icm_tr_xdomain_tear_down(tb, xd, 1);
	if (ret)
		return ret;

	usleep_range(10, 50);
	return icm_tr_xdomain_tear_down(tb, xd, 2);
}

static void
icm_tr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
{
	const struct icm_tr_event_device_connected *pkg =
		(const struct icm_tr_event_device_connected *)hdr;
	enum tb_security_level security_level;
	struct tb_switch *sw, *parent_sw;
	struct tb_xdomain *xd;
	bool authorized, boot;
	u64 route;

	/*
	 * Currently we don't use the QoS information coming with the
	 * device connected message so simply just ignore that extra
	 * packet for now.
	 */
	if (pkg->hdr.packet_id)
		return;

	route = get_route(pkg->route_hi, pkg->route_lo);
	authorized = pkg->link_info & ICM_LINK_INFO_APPROVED;
	security_level = (pkg->hdr.flags & ICM_FLAGS_SLEVEL_MASK) >>
			 ICM_FLAGS_SLEVEL_SHIFT;
	boot = pkg->link_info & ICM_LINK_INFO_BOOT;

	if (pkg->link_info & ICM_LINK_INFO_REJECTED) {
		tb_info(tb, "switch at %llx was rejected by ICM firmware because topology limit exceeded\n",
			route);
		return;
	}

	sw = tb_switch_find_by_uuid(tb, &pkg->ep_uuid);
	if (sw) {
		/* Update the switch if it is still in the same place */
		if (tb_route(sw) == route && !!sw->authorized == authorized) {
			parent_sw = tb_to_switch(sw->dev.parent);
			update_switch(parent_sw, sw, route, pkg->connection_id,
				      0, 0, 0, boot);
			tb_switch_put(sw);
			return;
		}

		remove_switch(sw);
		tb_switch_put(sw);
	}

	/* Another switch with the same address */
	sw = tb_switch_find_by_route(tb, route);
	if (sw) {
		remove_switch(sw);
		tb_switch_put(sw);
	}

	/* XDomain connection with the same address */
	xd = tb_xdomain_find_by_route(tb, route);
	if (xd) {
		remove_xdomain(xd);
		tb_xdomain_put(xd);
	}

	parent_sw = tb_switch_find_by_route(tb, get_parent_route(route));
	if (!parent_sw) {
		tb_err(tb, "failed to find parent switch for %llx\n", route);
		return;
	}

	add_switch(parent_sw, route, &pkg->ep_uuid, (const u8 *)pkg->ep_name,
		   sizeof(pkg->ep_name), pkg->connection_id,
		   0, 0, 0, security_level, authorized, boot);

	tb_switch_put(parent_sw);
}

static void
icm_tr_device_disconnected(struct tb *tb, const struct icm_pkg_header *hdr)
{
	const struct icm_tr_event_device_disconnected *pkg =
		(const struct icm_tr_event_device_disconnected *)hdr;
	struct tb_switch *sw;
	u64 route;

	route = get_route(pkg->route_hi, pkg->route_lo);

	sw = tb_switch_find_by_route(tb, route);
	if (!sw) {
		tb_warn(tb, "no switch exists at %llx, ignoring\n", route);
		return;
	}

	remove_switch(sw);
	tb_switch_put(sw);
}

static void
icm_tr_xdomain_connected(struct tb *tb, const struct icm_pkg_header *hdr)
{
	const struct icm_tr_event_xdomain_connected *pkg =
		(const struct icm_tr_event_xdomain_connected *)hdr;
	struct tb_xdomain *xd;
	struct tb_switch *sw;
	u64 route;

	if (!tb->root_switch)
		return;

	route = get_route(pkg->local_route_hi, pkg->local_route_lo);

	xd = tb_xdomain_find_by_uuid(tb, &pkg->remote_uuid);
	if (xd) {
		if (xd->route == route) {
			update_xdomain(xd, route, 0);
			tb_xdomain_put(xd);
			return;
		}

		remove_xdomain(xd);
		tb_xdomain_put(xd);
	}

	/* An existing xdomain with the same address */
	xd = tb_xdomain_find_by_route(tb, route);
	if (xd) {
		remove_xdomain(xd);
		tb_xdomain_put(xd);
	}

	/*
	 * If the user disconnected a switch during suspend and
	 * connected another host to the same port, remove the switch
	 * first.
	 */
	sw = get_switch_at_route(tb->root_switch, route);
	if (sw)
		remove_switch(sw);

	sw = tb_switch_find_by_route(tb, get_parent_route(route));
	if (!sw) {
		tb_warn(tb, "no switch exists at %llx, ignoring\n", route);
		return;
	}

	add_xdomain(sw, route, &pkg->local_uuid, &pkg->remote_uuid, 0, 0);
	tb_switch_put(sw);
}

static void
icm_tr_xdomain_disconnected(struct tb *tb, const struct icm_pkg_header *hdr)
{
	const struct icm_tr_event_xdomain_disconnected *pkg =
		(const struct icm_tr_event_xdomain_disconnected *)hdr;
	struct tb_xdomain *xd;
	u64 route;

	route = get_route(pkg->route_hi, pkg->route_lo);

	xd = tb_xdomain_find_by_route(tb, route);
	if (xd) {
		remove_xdomain(xd);
		tb_xdomain_put(xd);
	}
}

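/*
 * Walk up the PCI hierarchy from the NHI to find the PCIe upstream port
 * of the Thunderbolt controller itself; only Alpine Ridge bridges are
 * recognized since those expose the vendor specific PCIe2CIO registers.
 */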
static struct pci_dev *get_upstream_port(struct pci_dev *pdev)
{
	struct pci_dev *parent;

	parent = pci_upstream_bridge(pdev);
	while (parent) {
		if (!pci_is_pcie(parent))
			return NULL;
		if (pci_pcie_type(parent) == PCI_EXP_TYPE_UPSTREAM)
			break;
		parent = pci_upstream_bridge(parent);
	}

	if (!parent)
		return NULL;

	switch (parent->device) {
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE:
		return parent;
	}

	return NULL;
}

static bool icm_ar_is_supported(struct tb *tb)
{
	struct pci_dev *upstream_port;
	struct icm *icm = tb_priv(tb);

	/*
	 * Starting from Alpine Ridge we can use ICM on Apple machines
	 * as well. We just need to reset and re-enable it first.
	 */
	if (!x86_apple_machine)
		return true;

	/*
	 * Find the upstream PCIe port in case we need to do reset
	 * through its vendor specific registers.
	 */
	upstream_port = get_upstream_port(tb->nhi->pdev);
	if (upstream_port) {
		int cap;

		cap = pci_find_ext_capability(upstream_port,
					      PCI_EXT_CAP_ID_VNDR);
		if (cap > 0) {
			icm->upstream_port = upstream_port;
			icm->vnd_cap = cap;

			return true;
		}
	}

	return false;
}

static int icm_ar_get_mode(struct tb *tb)
{
	struct tb_nhi *nhi = tb->nhi;
	int retries = 60;
	u32 val;

	do {
		val = ioread32(nhi->iobase + REG_FW_STS);
		if (val & REG_FW_STS_NVM_AUTH_DONE)
			break;
		msleep(50);
	} while (--retries);

	if (!retries) {
		dev_err(&nhi->pdev->dev, "ICM firmware not authenticated\n");
		return -ENODEV;
	}

	return nhi_mailbox_mode(nhi);
}

static int
icm_ar_driver_ready(struct tb *tb, enum tb_security_level *security_level,
		    size_t *nboot_acl, bool *rpm)
{
	struct icm_ar_pkg_driver_ready_response reply;
	struct icm_pkg_driver_ready request = {
		.hdr.code = ICM_DRIVER_READY,
	};
	int ret;

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_TIMEOUT);
	if (ret)
		return ret;

	if (security_level)
		*security_level = reply.info & ICM_AR_INFO_SLEVEL_MASK;
	if (nboot_acl && (reply.info & ICM_AR_INFO_BOOT_ACL_SUPPORTED))
		*nboot_acl = (reply.info & ICM_AR_INFO_BOOT_ACL_MASK) >>
				ICM_AR_INFO_BOOT_ACL_SHIFT;
	if (rpm)
		*rpm = !!(reply.hdr.flags & ICM_AR_FLAGS_RTD3);

	return 0;
}

static int icm_ar_get_route(struct tb *tb, u8 link, u8 depth, u64 *route)
{
	struct icm_ar_pkg_get_route_response reply;
	struct icm_ar_pkg_get_route request = {
		.hdr = { .code = ICM_GET_ROUTE },
		.link_info = depth << ICM_LINK_INFO_DEPTH_SHIFT | link,
	};
	int ret;

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_TIMEOUT);
	if (ret)
		return ret;

	if (reply.hdr.flags & ICM_FLAGS_ERROR)
		return -EIO;

	*route = get_route(reply.route_hi, reply.route_lo);
	return 0;
}

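/*
 * Preboot ACL entries carry only the lower two DWs of each UUID; ICM
 * marks an empty slot with both DWs set to all ones, and valid entries
 * are expected to have the upper two DWs all ones as well.
 */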
static int icm_ar_get_boot_acl(struct tb *tb, uuid_t *uuids, size_t nuuids)
{
	struct icm_ar_pkg_preboot_acl_response reply;
	struct icm_ar_pkg_preboot_acl request = {
		.hdr = { .code = ICM_PREBOOT_ACL },
	};
	int ret, i;

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_TIMEOUT);
	if (ret)
		return ret;

	if (reply.hdr.flags & ICM_FLAGS_ERROR)
		return -EIO;

	for (i = 0; i < nuuids; i++) {
		u32 *uuid = (u32 *)&uuids[i];

		uuid[0] = reply.acl[i].uuid_lo;
		uuid[1] = reply.acl[i].uuid_hi;

		if (uuid[0] == 0xffffffff && uuid[1] == 0xffffffff) {
			/* Map empty entries to null UUID */
			uuid[0] = 0;
			uuid[1] = 0;
		} else if (uuid[0] != 0 || uuid[1] != 0) {
			/* Upper two DWs are always ones */
			uuid[2] = 0xffffffff;
			uuid[3] = 0xffffffff;
		}
	}

	return ret;
}

static int icm_ar_set_boot_acl(struct tb *tb, const uuid_t *uuids,
			       size_t nuuids)
{
	struct icm_ar_pkg_preboot_acl_response reply;
	struct icm_ar_pkg_preboot_acl request = {
		.hdr = {
			.code = ICM_PREBOOT_ACL,
			.flags = ICM_FLAGS_WRITE,
		},
	};
	int ret, i;

	for (i = 0; i < nuuids; i++) {
		const u32 *uuid = (const u32 *)&uuids[i];

		if (uuid_is_null(&uuids[i])) {
			/*
			 * Map null UUID to the empty (all ones) entries
			 * for ICM.
			 */
			request.acl[i].uuid_lo = 0xffffffff;
			request.acl[i].uuid_hi = 0xffffffff;
		} else {
			/* Two high DWs need to be set to all ones */
			if (uuid[2] != 0xffffffff || uuid[3] != 0xffffffff)
				return -EINVAL;

			request.acl[i].uuid_lo = uuid[0];
			request.acl[i].uuid_hi = uuid[1];
		}
	}

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_TIMEOUT);
	if (ret)
		return ret;

	if (reply.hdr.flags & ICM_FLAGS_ERROR)
		return -EIO;

	return 0;
}

static void icm_handle_notification(struct work_struct *work)
{
	struct icm_notification *n = container_of(work, typeof(*n), work);
	struct tb *tb = n->tb;
	struct icm *icm = tb_priv(tb);

	mutex_lock(&tb->lock);

	/*
	 * When the domain is stopped we flush its workqueue but before
	 * that the root switch is removed. In that case we should treat
	 * the queued events as being canceled.
	 */
	if (tb->root_switch) {
		switch (n->pkg->code) {
		case ICM_EVENT_DEVICE_CONNECTED:
			icm->device_connected(tb, n->pkg);
			break;
		case ICM_EVENT_DEVICE_DISCONNECTED:
			icm->device_disconnected(tb, n->pkg);
			break;
		case ICM_EVENT_XDOMAIN_CONNECTED:
			icm->xdomain_connected(tb, n->pkg);
			break;
		case ICM_EVENT_XDOMAIN_DISCONNECTED:
			icm->xdomain_disconnected(tb, n->pkg);
			break;
		}
	}

	mutex_unlock(&tb->lock);

	kfree(n->pkg);
	kfree(n);
}

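/*
 * Event packets are copied and deferred to the domain workqueue so that
 * they can be handled with tb->lock held.
 */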
static void icm_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
			     const void *buf, size_t size)
{
	struct icm_notification *n;

	n = kmalloc(sizeof(*n), GFP_KERNEL);
	if (!n)
		return;

	INIT_WORK(&n->work, icm_handle_notification);
	n->pkg = kmemdup(buf, size, GFP_KERNEL);
	if (!n->pkg) {
		kfree(n);
		return;
	}
	n->tb = tb;

	queue_work(tb->wq, &n->work);
}

static int
__icm_driver_ready(struct tb *tb, enum tb_security_level *security_level,
		   size_t *nboot_acl, bool *rpm)
{
	struct icm *icm = tb_priv(tb);
	unsigned int retries = 50;
	int ret;

	ret = icm->driver_ready(tb, security_level, nboot_acl, rpm);
	if (ret) {
		tb_err(tb, "failed to send driver ready to ICM\n");
		return ret;
	}

	/*
	 * Hold on here until the switch config space is accessible so
	 * that we can read root switch config successfully.
	 */
	do {
		struct tb_cfg_result res;
		u32 tmp;

		res = tb_cfg_read_raw(tb->ctl, &tmp, 0, 0, TB_CFG_SWITCH,
				      0, 1, 100);
		if (!res.err)
			return 0;

		msleep(50);
	} while (--retries);

	tb_err(tb, "failed to read root switch config space, giving up\n");
	return -ETIMEDOUT;
}

static int pci2cio_wait_completion(struct icm *icm, unsigned long timeout_msec)
{
	unsigned long end = jiffies + msecs_to_jiffies(timeout_msec);
	u32 cmd;

	do {
		pci_read_config_dword(icm->upstream_port,
				      icm->vnd_cap + PCIE2CIO_CMD, &cmd);
		if (!(cmd & PCIE2CIO_CMD_START)) {
			if (cmd & PCIE2CIO_CMD_TIMEOUT)
				break;
			return 0;
		}

		msleep(50);
	} while (time_before(jiffies, end));

	return -ETIMEDOUT;
}

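/*
 * PCIe2CIO is a vendor specific mailbox in PCIe config space: the
 * config space type, port and dword index are packed into the CMD
 * register together with the START bit, which the hardware clears once
 * the access has completed.
 */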
static int pcie2cio_read(struct icm *icm, enum tb_cfg_space cs,
			 unsigned int port, unsigned int index, u32 *data)
{
	struct pci_dev *pdev = icm->upstream_port;
	int ret, vnd_cap = icm->vnd_cap;
	u32 cmd;

	cmd = index;
	cmd |= (port << PCIE2CIO_CMD_PORT_SHIFT) & PCIE2CIO_CMD_PORT_MASK;
	cmd |= (cs << PCIE2CIO_CMD_CS_SHIFT) & PCIE2CIO_CMD_CS_MASK;
	cmd |= PCIE2CIO_CMD_START;
	pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_CMD, cmd);

	ret = pci2cio_wait_completion(icm, 5000);
	if (ret)
		return ret;

	pci_read_config_dword(pdev, vnd_cap + PCIE2CIO_RDDATA, data);
	return 0;
}

static int pcie2cio_write(struct icm *icm, enum tb_cfg_space cs,
			  unsigned int port, unsigned int index, u32 data)
{
	struct pci_dev *pdev = icm->upstream_port;
	int vnd_cap = icm->vnd_cap;
	u32 cmd;

	pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_WRDATA, data);

	cmd = index;
	cmd |= (port << PCIE2CIO_CMD_PORT_SHIFT) & PCIE2CIO_CMD_PORT_MASK;
	cmd |= (cs << PCIE2CIO_CMD_CS_SHIFT) & PCIE2CIO_CMD_CS_MASK;
	cmd |= PCIE2CIO_CMD_WRITE | PCIE2CIO_CMD_START;
	pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_CMD, cmd);

	return pci2cio_wait_completion(icm, 5000);
}

static int icm_firmware_reset(struct tb *tb, struct tb_nhi *nhi)
{
	struct icm *icm = tb_priv(tb);
	u32 val;

	if (!icm->upstream_port)
		return -ENODEV;

	/* Put ARC to wait for CIO reset event to happen */
	val = ioread32(nhi->iobase + REG_FW_STS);
	val |= REG_FW_STS_CIO_RESET_REQ;
	iowrite32(val, nhi->iobase + REG_FW_STS);

	/* Re-start ARC */
	val = ioread32(nhi->iobase + REG_FW_STS);
	val |= REG_FW_STS_ICM_EN_INVERT;
	val |= REG_FW_STS_ICM_EN_CPU;
	iowrite32(val, nhi->iobase + REG_FW_STS);

	/* Trigger CIO reset now */
	return pcie2cio_write(icm, TB_CFG_SWITCH, 0, 0x50, BIT(9));
}

static int icm_firmware_start(struct tb *tb, struct tb_nhi *nhi)
{
	unsigned int retries = 10;
	int ret;
	u32 val;

	/* Check if the ICM firmware is already running */
	val = ioread32(nhi->iobase + REG_FW_STS);
	if (val & REG_FW_STS_ICM_EN)
		return 0;

	dev_info(&nhi->pdev->dev, "starting ICM firmware\n");

	ret = icm_firmware_reset(tb, nhi);
	if (ret)
		return ret;

	/* Wait until the ICM firmware tells us it is up and running */
	do {
		/* Check that the ICM firmware is running */
		val = ioread32(nhi->iobase + REG_FW_STS);
		if (val & REG_FW_STS_NVM_AUTH_DONE)
			return 0;

		msleep(300);
	} while (--retries);

	return -ETIMEDOUT;
}

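/*
 * Each physical port consists of two lanes (null ports); phy port 0
 * maps to CIO ports 1 and 2 and phy port 1 to ports 3 and 4. If both
 * lanes are already up, they are disabled and re-enabled together so
 * that ICM gets to enumerate whatever is connected.
 */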
1592 | static int icm_reset_phy_port(struct tb *tb, int phy_port) | |
1593 | { | |
1594 | struct icm *icm = tb_priv(tb); | |
1595 | u32 state0, state1; | |
1596 | int port0, port1; | |
1597 | u32 val0, val1; | |
1598 | int ret; | |
1599 | ||
1600 | if (!icm->upstream_port) | |
1601 | return 0; | |
1602 | ||
        if (phy_port) {
                port0 = 3;
                port1 = 4;
        } else {
                port0 = 1;
                port1 = 2;
        }

        /*
         * Read link status of both null ports belonging to a single
         * physical port.
         */
        ret = pcie2cio_read(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, &val0);
        if (ret)
                return ret;
        ret = pcie2cio_read(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, &val1);
        if (ret)
                return ret;

        state0 = val0 & PHY_PORT_CS1_LINK_STATE_MASK;
        state0 >>= PHY_PORT_CS1_LINK_STATE_SHIFT;
        state1 = val1 & PHY_PORT_CS1_LINK_STATE_MASK;
        state1 >>= PHY_PORT_CS1_LINK_STATE_SHIFT;

        /* If they are both up we need to reset them now */
        if (state0 != TB_PORT_UP || state1 != TB_PORT_UP)
                return 0;

        val0 |= PHY_PORT_CS1_LINK_DISABLE;
        ret = pcie2cio_write(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, val0);
        if (ret)
                return ret;

        val1 |= PHY_PORT_CS1_LINK_DISABLE;
        ret = pcie2cio_write(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, val1);
        if (ret)
                return ret;

        /* Wait a bit and then re-enable both ports */
        usleep_range(10, 100);

        ret = pcie2cio_read(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, &val0);
        if (ret)
                return ret;
        ret = pcie2cio_read(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, &val1);
        if (ret)
                return ret;

        val0 &= ~PHY_PORT_CS1_LINK_DISABLE;
        ret = pcie2cio_write(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, val0);
        if (ret)
                return ret;

        val1 &= ~PHY_PORT_CS1_LINK_DISABLE;
        return pcie2cio_write(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, val1);
}

static int icm_firmware_init(struct tb *tb)
{
        struct icm *icm = tb_priv(tb);
        struct tb_nhi *nhi = tb->nhi;
        int ret;

        ret = icm_firmware_start(tb, nhi);
        if (ret) {
                dev_err(&nhi->pdev->dev, "could not start ICM firmware\n");
                return ret;
        }

        if (icm->get_mode) {
                ret = icm->get_mode(tb);

                switch (ret) {
                case NHI_FW_SAFE_MODE:
                        icm->safe_mode = true;
                        break;

                case NHI_FW_CM_MODE:
                        /* Ask ICM to accept all Thunderbolt devices */
                        nhi_mailbox_cmd(nhi, NHI_MAILBOX_ALLOW_ALL_DEVS, 0);
                        break;

                default:
                        if (ret < 0)
                                return ret;

                        tb_err(tb, "ICM firmware is in wrong mode: %u\n", ret);
                        return -ENODEV;
                }
        }

        /*
         * Reset both physical ports if there is anything connected to
         * them already.
         */
        ret = icm_reset_phy_port(tb, 0);
        if (ret)
                dev_warn(&nhi->pdev->dev, "failed to reset links on port0\n");
        ret = icm_reset_phy_port(tb, 1);
        if (ret)
                dev_warn(&nhi->pdev->dev, "failed to reset links on port1\n");

        return 0;
}

static int icm_driver_ready(struct tb *tb)
{
        struct icm *icm = tb_priv(tb);
        int ret;

        ret = icm_firmware_init(tb);
        if (ret)
                return ret;

        if (icm->safe_mode) {
                tb_info(tb, "Thunderbolt host controller is in safe mode.\n");
                tb_info(tb, "You need to update the NVM firmware of the controller before it can be used.\n");
                tb_info(tb, "For the latest updates check https://thunderbolttechnology.net/updates.\n");
                return 0;
        }

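        /*
         * Send the driver ready message and read back the connection
         * manager properties: security level, number of preboot ACL
         * entries and whether runtime PM is supported.
         */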
        ret = __icm_driver_ready(tb, &tb->security_level, &tb->nboot_acl,
                                 &icm->rpm);
        if (ret)
                return ret;

        /*
         * Make sure the number of supported preboot ACL entries matches
         * what we expect, or disable the whole feature.
         */
        if (tb->nboot_acl > icm->max_boot_acl)
                tb->nboot_acl = 0;

        return 0;
}

static int icm_suspend(struct tb *tb)
{
        struct icm *icm = tb_priv(tb);

        if (icm->save_devices)
                icm->save_devices(tb);

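        /* Let the firmware know that the driver is going down */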
        nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DRV_UNLOADS, 0);
        return 0;
}

/*
 * Mark all switches (except the root switch) below this one unplugged.
 * The ICM firmware will send us an updated list of switches after we
 * have sent it the driver ready command. If a switch is not in that
 * list it will be removed when we perform the rescan.
 */
static void icm_unplug_children(struct tb_switch *sw)
{
        unsigned int i;

        if (tb_route(sw))
                sw->is_unplugged = true;

        for (i = 1; i <= sw->config.max_port_number; i++) {
                struct tb_port *port = &sw->ports[i];

                if (tb_is_upstream_port(port))
                        continue;
                if (port->xdomain) {
                        port->xdomain->is_unplugged = true;
                        continue;
                }
                if (!port->remote)
                        continue;

                icm_unplug_children(port->remote->sw);
        }
}

static void icm_free_unplugged_children(struct tb_switch *sw)
{
        unsigned int i;

        for (i = 1; i <= sw->config.max_port_number; i++) {
                struct tb_port *port = &sw->ports[i];

                if (tb_is_upstream_port(port))
                        continue;

                if (port->xdomain && port->xdomain->is_unplugged) {
                        tb_xdomain_remove(port->xdomain);
                        port->xdomain = NULL;
                        continue;
                }

                if (!port->remote)
                        continue;

                if (port->remote->sw->is_unplugged) {
                        tb_switch_remove(port->remote->sw);
                        port->remote = NULL;
                } else {
                        icm_free_unplugged_children(port->remote->sw);
                }
        }
}

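/*
 * Runs after resume to drop the switches and XDomain connections that
 * the ICM firmware did not report back as connected.
 */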
static void icm_rescan_work(struct work_struct *work)
{
        struct icm *icm = container_of(work, struct icm, rescan_work.work);
        struct tb *tb = icm_to_tb(icm);

        mutex_lock(&tb->lock);
        if (tb->root_switch)
                icm_free_unplugged_children(tb->root_switch);
        mutex_unlock(&tb->lock);
}

static void icm_complete(struct tb *tb)
{
        struct icm *icm = tb_priv(tb);

        if (tb->nhi->going_away)
                return;

        icm_unplug_children(tb->root_switch);

        /*
         * Now that all existing children should be resumed, start
         * events from ICM to get updated status.
         */
        __icm_driver_ready(tb, NULL, NULL, NULL);

        /*
         * We do not get notifications of devices that have been
         * unplugged during suspend so schedule a rescan to clean them
         * up, if any.
         */
        queue_delayed_work(tb->wq, &icm->rescan_work, msecs_to_jiffies(500));
}

static int icm_runtime_suspend(struct tb *tb)
{
        nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DRV_UNLOADS, 0);
        return 0;
}

static int icm_runtime_resume(struct tb *tb)
{
        /*
         * We can reuse the same resume functionality as with system
         * suspend.
         */
        icm_complete(tb);
        return 0;
}

static int icm_start(struct tb *tb)
{
        struct icm *icm = tb_priv(tb);
        int ret;

        if (icm->safe_mode)
                tb->root_switch = tb_switch_alloc_safe_mode(tb, &tb->dev, 0);
        else
                tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
        if (!tb->root_switch)
                return -ENODEV;

        /*
         * NVM upgrade has not been tested on Apple systems and they
         * don't provide images publicly either. To be on the safe side
         * prevent root switch NVM upgrade on Macs for now.
         */
        tb->root_switch->no_nvm_upgrade = x86_apple_machine;
        tb->root_switch->rpm = icm->rpm;

        ret = tb_switch_add(tb->root_switch);
        if (ret) {
                tb_switch_put(tb->root_switch);
                tb->root_switch = NULL;
        }

        return ret;
}

static void icm_stop(struct tb *tb)
{
        struct icm *icm = tb_priv(tb);

        cancel_delayed_work(&icm->rescan_work);
        tb_switch_remove(tb->root_switch);
        tb->root_switch = NULL;
        nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DRV_UNLOADS, 0);
}

static int icm_disconnect_pcie_paths(struct tb *tb)
{
        return nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DISCONNECT_PCIE_PATHS, 0);
}

/* Falcon Ridge */
static const struct tb_cm_ops icm_fr_ops = {
        .driver_ready = icm_driver_ready,
        .start = icm_start,
        .stop = icm_stop,
        .suspend = icm_suspend,
        .complete = icm_complete,
        .handle_event = icm_handle_event,
        .approve_switch = icm_fr_approve_switch,
        .add_switch_key = icm_fr_add_switch_key,
        .challenge_switch_key = icm_fr_challenge_switch_key,
        .disconnect_pcie_paths = icm_disconnect_pcie_paths,
        .approve_xdomain_paths = icm_fr_approve_xdomain_paths,
        .disconnect_xdomain_paths = icm_fr_disconnect_xdomain_paths,
};

/* Alpine Ridge */
static const struct tb_cm_ops icm_ar_ops = {
        .driver_ready = icm_driver_ready,
        .start = icm_start,
        .stop = icm_stop,
        .suspend = icm_suspend,
        .complete = icm_complete,
        .runtime_suspend = icm_runtime_suspend,
        .runtime_resume = icm_runtime_resume,
        .handle_event = icm_handle_event,
        .get_boot_acl = icm_ar_get_boot_acl,
        .set_boot_acl = icm_ar_set_boot_acl,
        .approve_switch = icm_fr_approve_switch,
        .add_switch_key = icm_fr_add_switch_key,
        .challenge_switch_key = icm_fr_challenge_switch_key,
        .disconnect_pcie_paths = icm_disconnect_pcie_paths,
        .approve_xdomain_paths = icm_fr_approve_xdomain_paths,
        .disconnect_xdomain_paths = icm_fr_disconnect_xdomain_paths,
};

/* Titan Ridge */
static const struct tb_cm_ops icm_tr_ops = {
        .driver_ready = icm_driver_ready,
        .start = icm_start,
        .stop = icm_stop,
        .suspend = icm_suspend,
        .complete = icm_complete,
        .runtime_suspend = icm_runtime_suspend,
        .runtime_resume = icm_runtime_resume,
        .handle_event = icm_handle_event,
        .get_boot_acl = icm_ar_get_boot_acl,
        .set_boot_acl = icm_ar_set_boot_acl,
        .approve_switch = icm_tr_approve_switch,
        .add_switch_key = icm_tr_add_switch_key,
        .challenge_switch_key = icm_tr_challenge_switch_key,
        .disconnect_pcie_paths = icm_disconnect_pcie_paths,
        .approve_xdomain_paths = icm_tr_approve_xdomain_paths,
        .disconnect_xdomain_paths = icm_tr_disconnect_xdomain_paths,
};

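/*
 * A minimal usage sketch (hypothetical caller; assumes the returned
 * domain is registered with tb_domain_add() and dropped with
 * tb_domain_put() on failure, as done elsewhere in the driver):
 *
 *      tb = icm_probe(nhi);
 *      if (tb && tb_domain_add(tb) < 0)
 *              tb_domain_put(tb);
 */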
struct tb *icm_probe(struct tb_nhi *nhi)
{
        struct icm *icm;
        struct tb *tb;

        tb = tb_domain_alloc(nhi, sizeof(struct icm));
        if (!tb)
                return NULL;

        icm = tb_priv(tb);
        INIT_DELAYED_WORK(&icm->rescan_work, icm_rescan_work);
        mutex_init(&icm->request_lock);

        switch (nhi->pdev->device) {
        case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI:
        case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI:
                icm->is_supported = icm_fr_is_supported;
                icm->get_route = icm_fr_get_route;
                icm->save_devices = icm_fr_save_devices;
                icm->driver_ready = icm_fr_driver_ready;
                icm->device_connected = icm_fr_device_connected;
                icm->device_disconnected = icm_fr_device_disconnected;
                icm->xdomain_connected = icm_fr_xdomain_connected;
                icm->xdomain_disconnected = icm_fr_xdomain_disconnected;
                tb->cm_ops = &icm_fr_ops;
                break;

        case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_NHI:
        case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_NHI:
        case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_NHI:
        case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_NHI:
        case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_NHI:
                icm->max_boot_acl = ICM_AR_PREBOOT_ACL_ENTRIES;
                icm->is_supported = icm_ar_is_supported;
                icm->get_mode = icm_ar_get_mode;
                icm->get_route = icm_ar_get_route;
                icm->save_devices = icm_fr_save_devices;
                icm->driver_ready = icm_ar_driver_ready;
                icm->device_connected = icm_fr_device_connected;
                icm->device_disconnected = icm_fr_device_disconnected;
                icm->xdomain_connected = icm_fr_xdomain_connected;
                icm->xdomain_disconnected = icm_fr_xdomain_disconnected;
                tb->cm_ops = &icm_ar_ops;
                break;

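        /*
         * Titan Ridge reuses the Alpine Ridge preboot ACL size and
         * mode/support checks but has its own message handlers.
         */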
        case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_NHI:
        case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_NHI:
                icm->max_boot_acl = ICM_AR_PREBOOT_ACL_ENTRIES;
                icm->is_supported = icm_ar_is_supported;
                icm->get_mode = icm_ar_get_mode;
                icm->driver_ready = icm_tr_driver_ready;
                icm->device_connected = icm_tr_device_connected;
                icm->device_disconnected = icm_tr_device_disconnected;
                icm->xdomain_connected = icm_tr_xdomain_connected;
                icm->xdomain_disconnected = icm_tr_xdomain_disconnected;
                tb->cm_ops = &icm_tr_ops;
                break;
        }

        if (!icm->is_supported || !icm->is_supported(tb)) {
                dev_dbg(&nhi->pdev->dev, "ICM not supported on this controller\n");
                tb_domain_put(tb);
                return NULL;
        }

        return tb;
}