/*
 * Thunderbolt Cactus Ridge driver - bus logic (NHI independent)
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 */

#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/platform_data/x86/apple.h>

#include "tb.h"
#include "tb_regs.h"
#include "tunnel_pci.h"

/**
 * struct tb_cm - Simple Thunderbolt connection manager
 * @tunnel_list: List of active tunnels
 * @hotplug_active: tb_handle_hotplug will stop progressing plug
 *		    events and exit if this is not set (it needs to
 *		    acquire the lock one more time). Used to drain wq
 *		    after cfg has been paused.
 */
struct tb_cm {
	struct list_head tunnel_list;
	bool hotplug_active;
};

/* enumeration & hot plug handling */


static void tb_scan_port(struct tb_port *port);

/**
 * tb_scan_switch() - scan for and initialize downstream switches
 */
static void tb_scan_switch(struct tb_switch *sw)
{
	int i;
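	/* Ports are numbered from 1; port 0 addresses the switch itself. */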
	for (i = 1; i <= sw->config.max_port_number; i++)
		tb_scan_port(&sw->ports[i]);
}

/**
 * tb_scan_port() - check for and initialize switches below port
 */
static void tb_scan_port(struct tb_port *port)
{
	struct tb_switch *sw;
	if (tb_is_upstream_port(port))
		return;
	if (port->config.type != TB_TYPE_PORT)
		return;
	if (port->dual_link_port && port->link_nr)
		return; /*
			 * Downstream switch is reachable through two ports.
			 * Only scan on the primary port (link_nr == 0).
			 */
	if (tb_wait_for_port(port, false) <= 0)
		return;
	if (port->remote) {
		tb_port_WARN(port, "port already has a remote!\n");
		return;
	}
	sw = tb_switch_alloc(port->sw->tb, &port->sw->dev,
			     tb_downstream_route(port));
	if (!sw)
		return;

	if (tb_switch_configure(sw)) {
		tb_switch_put(sw);
		return;
	}

	sw->authorized = true;

	if (tb_switch_add(sw)) {
		tb_switch_put(sw);
		return;
	}

	port->remote = tb_upstream_port(sw);
	tb_upstream_port(sw)->remote = port;
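	/* Recurse to discover any switches below the newly added switch. */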
	tb_scan_switch(sw);
}

/**
 * tb_free_invalid_tunnels() - destroy tunnels of devices that have gone away
 */
static void tb_free_invalid_tunnels(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_pci_tunnel *tunnel;
	struct tb_pci_tunnel *n;

	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
		if (tb_pci_is_invalid(tunnel)) {
			tb_pci_deactivate(tunnel);
			list_del(&tunnel->list);
			tb_pci_free(tunnel);
		}
	}
}

/**
 * tb_free_unplugged_children() - traverse hierarchy and free unplugged switches
 */
static void tb_free_unplugged_children(struct tb_switch *sw)
{
	int i;
	for (i = 1; i <= sw->config.max_port_number; i++) {
		struct tb_port *port = &sw->ports[i];
		if (tb_is_upstream_port(port))
			continue;
		if (!port->remote)
			continue;
		if (port->remote->sw->is_unplugged) {
			tb_switch_remove(port->remote->sw);
			port->remote = NULL;
		} else {
			tb_free_unplugged_children(port->remote->sw);
		}
	}
}

/**
 * tb_find_pci_up_port() - return the first PCIe up port on @sw or NULL
 */
static struct tb_port *tb_find_pci_up_port(struct tb_switch *sw)
{
	int i;
	for (i = 1; i <= sw->config.max_port_number; i++)
		if (sw->ports[i].config.type == TB_TYPE_PCIE_UP)
			return &sw->ports[i];
	return NULL;
}

/**
 * tb_find_unused_down_port() - return the first inactive PCIe down port on @sw
 */
static struct tb_port *tb_find_unused_down_port(struct tb_switch *sw)
{
	int i;
	int cap;
	int res;
	int data;
	for (i = 1; i <= sw->config.max_port_number; i++) {
		if (tb_is_upstream_port(&sw->ports[i]))
			continue;
		if (sw->ports[i].config.type != TB_TYPE_PCIE_DOWN)
			continue;
		cap = tb_port_find_cap(&sw->ports[i], TB_PORT_CAP_ADAP);
		if (cap < 0)
			continue;
		res = tb_port_read(&sw->ports[i], &data, TB_CFG_PORT, cap, 1);
		if (res < 0)
			continue;
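		/*
		 * A set bit 31 means the down port is already in use (cf. the
		 * "already activated" check in tb_activate_pcie_devices()).
		 */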
		if (data & 0x80000000)
			continue;
		return &sw->ports[i];
	}
	return NULL;
}

/**
 * tb_activate_pcie_devices() - scan for and activate PCIe devices
 *
 * This method is somewhat ad hoc. For now it only supports one device
 * per port and only devices at depth 1.
 */
static void tb_activate_pcie_devices(struct tb *tb)
{
	int i;
	int cap;
	u32 data;
	struct tb_switch *sw;
	struct tb_port *up_port;
	struct tb_port *down_port;
	struct tb_pci_tunnel *tunnel;
	struct tb_cm *tcm = tb_priv(tb);

	/* scan for pcie devices at depth 1 */
	for (i = 1; i <= tb->root_switch->config.max_port_number; i++) {
		if (tb_is_upstream_port(&tb->root_switch->ports[i]))
			continue;
		if (tb->root_switch->ports[i].config.type != TB_TYPE_PORT)
			continue;
		if (!tb->root_switch->ports[i].remote)
			continue;
		sw = tb->root_switch->ports[i].remote->sw;
		up_port = tb_find_pci_up_port(sw);
		if (!up_port) {
			tb_sw_info(sw, "no PCIe devices found, aborting\n");
			continue;
		}

		/* check whether port is already activated */
		cap = tb_port_find_cap(up_port, TB_PORT_CAP_ADAP);
		if (cap < 0)
			continue;
		if (tb_port_read(up_port, &data, TB_CFG_PORT, cap, 1))
			continue;
		if (data & 0x80000000) {
			tb_port_info(up_port,
				     "PCIe port already activated, aborting\n");
			continue;
		}

		down_port = tb_find_unused_down_port(tb->root_switch);
		if (!down_port) {
			tb_port_info(up_port,
				     "All PCIe down ports are occupied, aborting\n");
			continue;
		}
		tunnel = tb_pci_alloc(tb, up_port, down_port);
		if (!tunnel) {
			tb_port_info(up_port,
				     "PCIe tunnel allocation failed, aborting\n");
			continue;
		}

		if (tb_pci_activate(tunnel)) {
			tb_port_info(up_port,
				     "PCIe tunnel activation failed, aborting\n");
			tb_pci_free(tunnel);
			continue;
		}

		list_add(&tunnel->list, &tcm->tunnel_list);
	}
}

/* hotplug handling */

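/*
 * One tb_hotplug_event is allocated per plug event in tb_handle_event(),
 * queued on tb->wq and freed at the end of tb_handle_hotplug().
 */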
struct tb_hotplug_event {
	struct work_struct work;
	struct tb *tb;
	u64 route;
	u8 port;
	bool unplug;
};

/**
 * tb_handle_hotplug() - handle hotplug event
 *
 * Executes on tb->wq.
 */
static void tb_handle_hotplug(struct work_struct *work)
{
	struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
	struct tb *tb = ev->tb;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_switch *sw;
	struct tb_port *port;
	mutex_lock(&tb->lock);
	if (!tcm->hotplug_active)
		goto out; /* during init, suspend or shutdown */

	sw = get_switch_at_route(tb->root_switch, ev->route);
	if (!sw) {
		tb_warn(tb,
			"hotplug event from non existent switch %llx:%x (unplug: %d)\n",
			ev->route, ev->port, ev->unplug);
		goto out;
	}
	if (ev->port > sw->config.max_port_number) {
		tb_warn(tb,
			"hotplug event from non existent port %llx:%x (unplug: %d)\n",
			ev->route, ev->port, ev->unplug);
		goto out;
	}
	port = &sw->ports[ev->port];
	if (tb_is_upstream_port(port)) {
		tb_warn(tb,
			"hotplug event for upstream port %llx:%x (unplug: %d)\n",
			ev->route, ev->port, ev->unplug);
		goto out;
	}
	if (ev->unplug) {
		if (port->remote) {
			tb_port_info(port, "unplugged\n");
			tb_sw_set_unplugged(port->remote->sw);
			tb_free_invalid_tunnels(tb);
			tb_switch_remove(port->remote->sw);
			port->remote = NULL;
		} else {
			tb_port_info(port,
				     "got unplug event for disconnected port, ignoring\n");
		}
	} else if (port->remote) {
		tb_port_info(port,
			     "got plug event for connected port, ignoring\n");
	} else {
		tb_port_info(port, "hotplug: scanning\n");
		tb_scan_port(port);
		if (!port->remote) {
			tb_port_info(port, "hotplug: no switch found\n");
		} else if (port->remote->sw->config.depth > 1) {
			tb_sw_warn(port->remote->sw,
				   "hotplug: chaining not supported\n");
		} else {
			tb_sw_info(port->remote->sw,
				   "hotplug: activating pcie devices\n");
			tb_activate_pcie_devices(tb);
		}
	}
out:
	mutex_unlock(&tb->lock);
	kfree(ev);
}

/**
 * tb_handle_event() - callback function for the control channel
 *
 * Delegates to tb_handle_hotplug.
 */
static void tb_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
			    const void *buf, size_t size)
{
	const struct cfg_event_pkg *pkg = buf;
	struct tb_hotplug_event *ev;
	u64 route;

	if (type != TB_CFG_PKG_EVENT) {
		tb_warn(tb, "unexpected event %#x, ignoring\n", type);
		return;
	}

	route = tb_cfg_get_route(&pkg->header);

	if (tb_cfg_error(tb->ctl, route, pkg->port,
			 TB_CFG_ERROR_ACK_PLUG_EVENT)) {
		tb_warn(tb, "could not ack plug event on %llx:%x\n", route,
			pkg->port);
	}

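	/*
	 * Defer the actual handling to the workqueue; if the allocation
	 * fails the (already acked) plug event is dropped.
	 */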
	ev = kmalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev)
		return;
	INIT_WORK(&ev->work, tb_handle_hotplug);
	ev->tb = tb;
	ev->route = route;
	ev->port = pkg->port;
	ev->unplug = pkg->unplug;
	queue_work(tb->wq, &ev->work);
}

static void tb_stop(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_pci_tunnel *tunnel;
	struct tb_pci_tunnel *n;

	/* tunnels are only present after everything has been initialized */
	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
		tb_pci_deactivate(tunnel);
		tb_pci_free(tunnel);
	}
	tb_switch_remove(tb->root_switch);
	tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
}

static int tb_start(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	int ret;

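	/* The root switch is the host controller itself and lives at route 0. */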
	tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
	if (!tb->root_switch)
		return -ENOMEM;

	/*
	 * ICM firmware upgrade needs running firmware and in native
	 * mode that is not available so disable firmware upgrade of the
	 * root switch.
	 */
	tb->root_switch->no_nvm_upgrade = true;

	ret = tb_switch_configure(tb->root_switch);
	if (ret) {
		tb_switch_put(tb->root_switch);
		return ret;
	}

	/* Announce the switch to the world */
	ret = tb_switch_add(tb->root_switch);
	if (ret) {
		tb_switch_put(tb->root_switch);
		return ret;
	}

	/* Full scan to discover devices added before the driver was loaded. */
	tb_scan_switch(tb->root_switch);
	tb_activate_pcie_devices(tb);

	/* Allow tb_handle_hotplug to progress events */
	tcm->hotplug_active = true;
	return 0;
}

static int tb_suspend_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);

	tb_info(tb, "suspending...\n");
	tb_switch_suspend(tb->root_switch);
	tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
	tb_info(tb, "suspend finished\n");

	return 0;
}

static int tb_resume_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_pci_tunnel *tunnel, *n;

	tb_info(tb, "resuming...\n");

	/* remove any pci devices the firmware might have setup */
	tb_switch_reset(tb, 0);

	tb_switch_resume(tb->root_switch);
	tb_free_invalid_tunnels(tb);
	tb_free_unplugged_children(tb->root_switch);
	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
		tb_pci_restart(tunnel);
	if (!list_empty(&tcm->tunnel_list)) {
		/*
		 * the pcie links need some time to get going.
		 * 100ms works for me...
		 */
		tb_info(tb, "tunnels restarted, sleeping for 100ms\n");
		msleep(100);
	}
	/* Allow tb_handle_hotplug to progress events */
	tcm->hotplug_active = true;
	tb_info(tb, "resume finished\n");

	return 0;
}

static const struct tb_cm_ops tb_cm_ops = {
	.start = tb_start,
	.stop = tb_stop,
	.suspend_noirq = tb_suspend_noirq,
	.resume_noirq = tb_resume_noirq,
	.handle_event = tb_handle_event,
};

struct tb *tb_probe(struct tb_nhi *nhi)
{
	struct tb_cm *tcm;
	struct tb *tb;

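	/*
	 * This native connection manager is only used on Apple hardware;
	 * on other systems tunnel management is expected to be handled by
	 * the ICM firmware connection manager.
	 */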
	if (!x86_apple_machine)
		return NULL;

	tb = tb_domain_alloc(nhi, sizeof(*tcm));
	if (!tb)
		return NULL;

	tb->security_level = TB_SECURITY_NONE;
	tb->cm_ops = &tb_cm_ops;

	tcm = tb_priv(tb);
	INIT_LIST_HEAD(&tcm->tunnel_list);

	return tb;
}