// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt Cactus Ridge driver - switch/port utility functions
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 */

#include <linux/delay.h>
#include <linux/idr.h>
#include <linux/nvmem-provider.h>
#include <linux/pm_runtime.h>
#include <linux/sched/signal.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include "tb.h"

/* Switch NVM support */

#define NVM_DEVID		0x05
#define NVM_VERSION		0x08
#define NVM_CSS			0x10
#define NVM_FLASH_SIZE		0x45

#define NVM_MIN_SIZE		SZ_32K
#define NVM_MAX_SIZE		SZ_512K

static DEFINE_IDA(nvm_ida);

struct nvm_auth_status {
	struct list_head list;
	uuid_t uuid;
	u32 status;
};

/*
 * Hold NVM authentication failure status per switch. This information
 * needs to stay around even when the switch gets power cycled so we
 * keep it separately.
 */
static LIST_HEAD(nvm_auth_status_cache);
static DEFINE_MUTEX(nvm_auth_status_lock);

static struct nvm_auth_status *__nvm_get_auth_status(const struct tb_switch *sw)
{
	struct nvm_auth_status *st;

	list_for_each_entry(st, &nvm_auth_status_cache, list) {
		if (uuid_equal(&st->uuid, sw->uuid))
			return st;
	}

	return NULL;
}

static void nvm_get_auth_status(const struct tb_switch *sw, u32 *status)
{
	struct nvm_auth_status *st;

	mutex_lock(&nvm_auth_status_lock);
	st = __nvm_get_auth_status(sw);
	mutex_unlock(&nvm_auth_status_lock);

	*status = st ? st->status : 0;
}

static void nvm_set_auth_status(const struct tb_switch *sw, u32 status)
{
	struct nvm_auth_status *st;

	if (WARN_ON(!sw->uuid))
		return;

	mutex_lock(&nvm_auth_status_lock);
	st = __nvm_get_auth_status(sw);

	if (!st) {
		st = kzalloc(sizeof(*st), GFP_KERNEL);
		if (!st)
			goto unlock;

		memcpy(&st->uuid, sw->uuid, sizeof(st->uuid));
		INIT_LIST_HEAD(&st->list);
		list_add_tail(&st->list, &nvm_auth_status_cache);
	}

	st->status = status;
unlock:
	mutex_unlock(&nvm_auth_status_lock);
}

static void nvm_clear_auth_status(const struct tb_switch *sw)
{
	struct nvm_auth_status *st;

	mutex_lock(&nvm_auth_status_lock);
	st = __nvm_get_auth_status(sw);
	if (st) {
		list_del(&st->list);
		kfree(st);
	}
	mutex_unlock(&nvm_auth_status_lock);
}

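/*
 * Validate the NVM image written to the non-active NVMem device and
 * flash it to the switch over the DMA configuration port.
 */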
static int nvm_validate_and_write(struct tb_switch *sw)
{
	unsigned int image_size, hdr_size;
	const u8 *buf = sw->nvm->buf;
	u16 ds_size;
	int ret;

	if (!buf)
		return -EINVAL;

	image_size = sw->nvm->buf_data_size;
	if (image_size < NVM_MIN_SIZE || image_size > NVM_MAX_SIZE)
		return -EINVAL;

	/*
	 * FARB pointer must point inside the image and must at least
	 * contain parts of the digital section we will be reading here.
	 */
	hdr_size = (*(u32 *)buf) & 0xffffff;
	if (hdr_size + NVM_DEVID + 2 >= image_size)
		return -EINVAL;

	/* Digital section start should be aligned to 4k page */
	if (!IS_ALIGNED(hdr_size, SZ_4K))
		return -EINVAL;

	/*
	 * Read digital section size and check that it also fits inside
	 * the image.
	 */
	ds_size = *(u16 *)(buf + hdr_size);
	if (ds_size >= image_size)
		return -EINVAL;

	if (!sw->safe_mode) {
		u16 device_id;

		/*
		 * Make sure the device ID in the image matches the one
		 * we read from the switch config space.
		 */
		device_id = *(u16 *)(buf + hdr_size + NVM_DEVID);
		if (device_id != sw->config.device_id)
			return -EINVAL;

		if (sw->generation < 3) {
			/* Write CSS headers first */
			ret = dma_port_flash_write(sw->dma_port,
				DMA_PORT_CSS_ADDRESS, buf + NVM_CSS,
				DMA_PORT_CSS_MAX_SIZE);
			if (ret)
				return ret;
		}

		/* Skip headers in the image */
		buf += hdr_size;
		image_size -= hdr_size;
	}

	return dma_port_flash_write(sw->dma_port, 0, buf, image_size);
}

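/*
 * Start NVM authentication on the host router. The host controller
 * normally disappears once the new image is taken into use, so a DMA
 * port timeout here is treated as success.
 */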
static int nvm_authenticate_host(struct tb_switch *sw)
{
	int ret;

	/*
	 * Root switch NVM upgrade requires that we disconnect the
	 * existing paths first (in case it is not in safe mode
	 * already).
	 */
	if (!sw->safe_mode) {
		ret = tb_domain_disconnect_all_paths(sw->tb);
		if (ret)
			return ret;
		/*
		 * The host controller goes away pretty soon after this if
		 * everything goes well so getting a timeout is expected.
		 */
		ret = dma_port_flash_update_auth(sw->dma_port);
		return ret == -ETIMEDOUT ? 0 : ret;
	}

	/*
	 * From safe mode we can get out by just power cycling the
	 * switch.
	 */
	dma_port_power_cycle(sw->dma_port);
	return 0;
}

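/*
 * Start NVM authentication on a connected device and poll for the
 * result before power cycling it so the new image is taken into use.
 */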
static int nvm_authenticate_device(struct tb_switch *sw)
{
	int ret, retries = 10;

	ret = dma_port_flash_update_auth(sw->dma_port);
	if (ret && ret != -ETIMEDOUT)
		return ret;

	/*
	 * Poll here for the authentication status. It takes some time
	 * for the device to respond (we get timeout for a while). Once
	 * we get a response the device needs to be power cycled in
	 * order for the new NVM to be taken into use.
	 */
	do {
		u32 status;

		ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
		if (ret < 0 && ret != -ETIMEDOUT)
			return ret;
		if (ret > 0) {
			if (status) {
				tb_sw_warn(sw, "failed to authenticate NVM\n");
				nvm_set_auth_status(sw, status);
			}

			tb_sw_info(sw, "power cycling the switch now\n");
			dma_port_power_cycle(sw->dma_port);
			return 0;
		}

		msleep(500);
	} while (--retries);

	return -ETIMEDOUT;
}

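/*
 * NVMem read callback for the active NVM. Reads go straight to the
 * switch flash through the DMA configuration port, so the switch is
 * kept runtime resumed for the duration of the read.
 */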
static int tb_switch_nvm_read(void *priv, unsigned int offset, void *val,
			      size_t bytes)
{
	struct tb_switch *sw = priv;
	int ret;

	pm_runtime_get_sync(&sw->dev);
	ret = dma_port_flash_read(sw->dma_port, offset, val, bytes);
	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);

	return ret;
}

static int tb_switch_nvm_write(void *priv, unsigned int offset, void *val,
			       size_t bytes)
{
	struct tb_switch *sw = priv;
	int ret = 0;

	if (!mutex_trylock(&sw->tb->lock))
		return restart_syscall();

	/*
	 * Since writing the NVM image might require some special steps,
	 * for example when CSS headers are written, we cache the image
	 * locally here and handle the special cases when the user asks
	 * us to authenticate the image.
	 */
	if (!sw->nvm->buf) {
		sw->nvm->buf = vmalloc(NVM_MAX_SIZE);
		if (!sw->nvm->buf) {
			ret = -ENOMEM;
			goto unlock;
		}
	}

	sw->nvm->buf_data_size = offset + bytes;
	memcpy(sw->nvm->buf + offset, val, bytes);

unlock:
	mutex_unlock(&sw->tb->lock);

	return ret;
}

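/*
 * Register an NVMem device for the switch. The active NVM is exposed
 * read-only and the non-active NVM is writable by root only so that a
 * new image can be uploaded.
 */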
static struct nvmem_device *register_nvmem(struct tb_switch *sw, int id,
					   size_t size, bool active)
{
	struct nvmem_config config;

	memset(&config, 0, sizeof(config));

	if (active) {
		config.name = "nvm_active";
		config.reg_read = tb_switch_nvm_read;
		config.read_only = true;
	} else {
		config.name = "nvm_non_active";
		config.reg_write = tb_switch_nvm_write;
		config.root_only = true;
	}

	config.id = id;
	config.stride = 4;
	config.word_size = 4;
	config.size = size;
	config.dev = &sw->dev;
	config.owner = THIS_MODULE;
	config.priv = sw;

	return nvmem_register(&config);
}

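/*
 * Read the NVM size and version over the DMA configuration port and
 * register the active and non-active NVMem devices for the switch.
 */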
static int tb_switch_nvm_add(struct tb_switch *sw)
{
	struct nvmem_device *nvm_dev;
	struct tb_switch_nvm *nvm;
	u32 val;
	int ret;

	if (!sw->dma_port)
		return 0;

	nvm = kzalloc(sizeof(*nvm), GFP_KERNEL);
	if (!nvm)
		return -ENOMEM;

	nvm->id = ida_simple_get(&nvm_ida, 0, 0, GFP_KERNEL);

	/*
	 * If the switch is in safe-mode the only accessible portion of
	 * the NVM is the non-active one where userspace is expected to
	 * write new functional NVM.
	 */
	if (!sw->safe_mode) {
		u32 nvm_size, hdr_size;

		ret = dma_port_flash_read(sw->dma_port, NVM_FLASH_SIZE, &val,
					  sizeof(val));
		if (ret)
			goto err_ida;

		hdr_size = sw->generation < 3 ? SZ_8K : SZ_16K;
		nvm_size = (SZ_1M << (val & 7)) / 8;
		nvm_size = (nvm_size - hdr_size) / 2;

		ret = dma_port_flash_read(sw->dma_port, NVM_VERSION, &val,
					  sizeof(val));
		if (ret)
			goto err_ida;

		nvm->major = val >> 16;
		nvm->minor = val >> 8;

		nvm_dev = register_nvmem(sw, nvm->id, nvm_size, true);
		if (IS_ERR(nvm_dev)) {
			ret = PTR_ERR(nvm_dev);
			goto err_ida;
		}
		nvm->active = nvm_dev;
	}

	nvm_dev = register_nvmem(sw, nvm->id, NVM_MAX_SIZE, false);
	if (IS_ERR(nvm_dev)) {
		ret = PTR_ERR(nvm_dev);
		goto err_nvm_active;
	}
	nvm->non_active = nvm_dev;

	sw->nvm = nvm;
	return 0;

err_nvm_active:
	if (nvm->active)
		nvmem_unregister(nvm->active);
err_ida:
	ida_simple_remove(&nvm_ida, nvm->id);
	kfree(nvm);

	return ret;
}

static void tb_switch_nvm_remove(struct tb_switch *sw)
{
	struct tb_switch_nvm *nvm;

	nvm = sw->nvm;
	sw->nvm = NULL;

	if (!nvm)
		return;

	/* Remove authentication status in case the switch is unplugged */
	if (!nvm->authenticating)
		nvm_clear_auth_status(sw);

	nvmem_unregister(nvm->non_active);
	if (nvm->active)
		nvmem_unregister(nvm->active);
	ida_simple_remove(&nvm_ida, nvm->id);
	vfree(nvm->buf);
	kfree(nvm);
}

/* port utility functions */

static const char *tb_port_type(struct tb_regs_port_header *port)
{
	switch (port->type >> 16) {
	case 0:
		switch ((u8) port->type) {
		case 0:
			return "Inactive";
		case 1:
			return "Port";
		case 2:
			return "NHI";
		default:
			return "unknown";
		}
	case 0x2:
		return "Ethernet";
	case 0x8:
		return "SATA";
	case 0xe:
		return "DP/HDMI";
	case 0x10:
		return "PCIe";
	case 0x20:
		return "USB";
	default:
		return "unknown";
	}
}

static void tb_dump_port(struct tb *tb, struct tb_regs_port_header *port)
{
	tb_info(tb,
		" Port %d: %x:%x (Revision: %d, TB Version: %d, Type: %s (%#x))\n",
		port->port_number, port->vendor_id, port->device_id,
		port->revision, port->thunderbolt_version, tb_port_type(port),
		port->type);
	tb_info(tb, "  Max hop id (in/out): %d/%d\n",
		port->max_in_hop_id, port->max_out_hop_id);
	tb_info(tb, "  Max counters: %d\n", port->max_counters);
	tb_info(tb, "  NFC Credits: %#x\n", port->nfc_credits);
}

/**
 * tb_port_state() - get connectedness state of a port
 *
 * The port must have a TB_CAP_PHY (i.e. it should be a real port).
 *
 * Return: Returns an enum tb_port_state on success or an error code on failure.
 */
static int tb_port_state(struct tb_port *port)
{
	struct tb_cap_phy phy;
	int res;
	if (port->cap_phy == 0) {
		tb_port_WARN(port, "does not have a PHY\n");
		return -EINVAL;
	}
	res = tb_port_read(port, &phy, TB_CFG_PORT, port->cap_phy, 2);
	if (res)
		return res;
	return phy.state;
}

/**
 * tb_wait_for_port() - wait for a port to become ready
 *
 * Wait up to 1 second for a port to reach state TB_PORT_UP. If
 * wait_if_unplugged is set then we also wait if the port is in state
 * TB_PORT_UNPLUGGED (it takes a while for the device to be registered after
 * switch resume). Otherwise we only wait if a device is registered but the link
 * has not yet been established.
 *
 * Return: Returns an error code on failure. Returns 0 if the port is not
 * connected or failed to reach state TB_PORT_UP within one second. Returns 1
 * if the port is connected and in state TB_PORT_UP.
 */
int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged)
{
	int retries = 10;
	int state;
	if (!port->cap_phy) {
		tb_port_WARN(port, "does not have PHY\n");
		return -EINVAL;
	}
	if (tb_is_upstream_port(port)) {
		tb_port_WARN(port, "is the upstream port\n");
		return -EINVAL;
	}

	while (retries--) {
		state = tb_port_state(port);
		if (state < 0)
			return state;
		if (state == TB_PORT_DISABLED) {
			tb_port_info(port, "is disabled (state: 0)\n");
			return 0;
		}
		if (state == TB_PORT_UNPLUGGED) {
			if (wait_if_unplugged) {
				/* used during resume */
				tb_port_info(port,
					     "is unplugged (state: 7), retrying...\n");
				msleep(100);
				continue;
			}
			tb_port_info(port, "is unplugged (state: 7)\n");
			return 0;
		}
		if (state == TB_PORT_UP) {
			tb_port_info(port,
				     "is connected, link is up (state: 2)\n");
			return 1;
		}

		/*
		 * After plug-in the state is TB_PORT_CONNECTING. Give it some
		 * time.
		 */
		tb_port_info(port,
			     "is connected, link is not up (state: %d), retrying...\n",
			     state);
		msleep(100);
	}
	tb_port_warn(port,
		     "failed to reach state TB_PORT_UP. Ignoring port...\n");
	return 0;
}

/**
 * tb_port_add_nfc_credits() - add/remove non flow controlled credits to port
 *
 * Change the number of NFC credits allocated to @port by @credits. To remove
 * NFC credits pass a negative amount of credits.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
int tb_port_add_nfc_credits(struct tb_port *port, int credits)
{
	if (credits == 0)
		return 0;
	tb_port_info(port,
		     "adding %#x NFC credits (%#x -> %#x)",
		     credits,
		     port->config.nfc_credits,
		     port->config.nfc_credits + credits);
	port->config.nfc_credits += credits;
	return tb_port_write(port, &port->config.nfc_credits,
			     TB_CFG_PORT, 4, 1);
}

/**
 * tb_port_clear_counter() - clear a counter in TB_CFG_COUNTER
 *
 * Return: Returns 0 on success or an error code on failure.
 */
int tb_port_clear_counter(struct tb_port *port, int counter)
{
	u32 zero[3] = { 0, 0, 0 };
	tb_port_info(port, "clearing counter %d\n", counter);
	return tb_port_write(port, zero, TB_CFG_COUNTERS, 3 * counter, 3);
}

/**
 * tb_init_port() - initialize a port
 *
 * This is a helper method for tb_switch_alloc. Does not check or initialize
 * any downstream switches.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
static int tb_init_port(struct tb_port *port)
{
	int res;
	int cap;

	res = tb_port_read(port, &port->config, TB_CFG_PORT, 0, 8);
	if (res)
		return res;

	/* Port 0 is the switch itself and has no PHY. */
	if (port->config.type == TB_TYPE_PORT && port->port != 0) {
		cap = tb_port_find_cap(port, TB_PORT_CAP_PHY);

		if (cap > 0)
			port->cap_phy = cap;
		else
			tb_port_WARN(port, "non switch port without a PHY\n");
	}

	tb_dump_port(port->sw->tb, &port->config);

	/* TODO: Read dual link port, DP port and more from EEPROM. */
	return 0;
}

/* switch utility functions */

static void tb_dump_switch(struct tb *tb, struct tb_regs_switch_header *sw)
{
	tb_info(tb,
		" Switch: %x:%x (Revision: %d, TB Version: %d)\n",
		sw->vendor_id, sw->device_id, sw->revision,
		sw->thunderbolt_version);
	tb_info(tb, " Max Port Number: %d\n", sw->max_port_number);
	tb_info(tb, " Config:\n");
	tb_info(tb,
		"  Upstream Port Number: %d Depth: %d Route String: %#llx Enabled: %d, PlugEventsDelay: %dms\n",
		sw->upstream_port_number, sw->depth,
		(((u64) sw->route_hi) << 32) | sw->route_lo,
		sw->enabled, sw->plug_events_delay);
	tb_info(tb,
		"  unknown1: %#x unknown4: %#x\n",
		sw->__unknown1, sw->__unknown4);
}

/**
 * tb_switch_reset() - reconfigure route, enable and send TB_CFG_PKG_RESET
 *
 * Return: Returns 0 on success or an error code on failure.
 */
int tb_switch_reset(struct tb *tb, u64 route)
{
	struct tb_cfg_result res;
	struct tb_regs_switch_header header = {
		header.route_hi = route >> 32,
		header.route_lo = route,
		header.enabled = true,
	};
	tb_info(tb, "resetting switch at %llx\n", route);
	res.err = tb_cfg_write(tb->ctl, ((u32 *) &header) + 2, route,
			       0, 2, 2, 2);
	if (res.err)
		return res.err;
	res = tb_cfg_reset(tb->ctl, route, TB_CFG_DEFAULT_TIMEOUT);
	if (res.err > 0)
		return -EIO;
	return res.err;
}

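/**
 * get_switch_at_route() - walk down a route string to find a switch
 *
 * Follows @route one hop at a time starting from @sw and returns the
 * switch at the end of the route, or NULL if a hop points to an
 * invalid, upstream or disconnected port.
 */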
struct tb_switch *get_switch_at_route(struct tb_switch *sw, u64 route)
{
	u8 next_port = route; /*
			       * Routes use a stride of 8 bits,
			       * even though a port index has 6 bits at most.
			       */
	if (route == 0)
		return sw;
	if (next_port > sw->config.max_port_number)
		return NULL;
	if (tb_is_upstream_port(&sw->ports[next_port]))
		return NULL;
	if (!sw->ports[next_port].remote)
		return NULL;
	return get_switch_at_route(sw->ports[next_port].remote->sw,
				   route >> TB_ROUTE_SHIFT);
}

/**
 * tb_plug_events_active() - enable/disable plug events on a switch
 *
 * Also configures a sane plug_events_delay of 255ms.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
static int tb_plug_events_active(struct tb_switch *sw, bool active)
{
	u32 data;
	int res;

	if (!sw->config.enabled)
		return 0;

	sw->config.plug_events_delay = 0xff;
	res = tb_sw_write(sw, ((u32 *) &sw->config) + 4, TB_CFG_SWITCH, 4, 1);
	if (res)
		return res;

	res = tb_sw_read(sw, &data, TB_CFG_SWITCH, sw->cap_plug_events + 1, 1);
	if (res)
		return res;

	if (active) {
		data = data & 0xFFFFFF83;
		switch (sw->config.device_id) {
		case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
		case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE:
		case PCI_DEVICE_ID_INTEL_PORT_RIDGE:
			break;
		default:
			data |= 4;
		}
	} else {
		data = data | 0x7c;
	}
	return tb_sw_write(sw, &data, TB_CFG_SWITCH,
			   sw->cap_plug_events + 1, 1);
}

static ssize_t authorized_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%u\n", sw->authorized);
}

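/*
 * Handle writes to the authorized attribute: 1 approves the switch
 * (using the stored key if there is one), 2 challenges the switch
 * with the stored key.
 */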
static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val)
{
	int ret = -EINVAL;

	if (!mutex_trylock(&sw->tb->lock))
		return restart_syscall();

	if (sw->authorized)
		goto unlock;

	/*
	 * Make sure there is no PCIe rescan ongoing when a new PCIe
	 * tunnel is created. Otherwise the PCIe rescan code might find
	 * the new tunnel too early.
	 */
	pci_lock_rescan_remove();
	pm_runtime_get_sync(&sw->dev);

	switch (val) {
	/* Approve switch */
	case 1:
		if (sw->key)
			ret = tb_domain_approve_switch_key(sw->tb, sw);
		else
			ret = tb_domain_approve_switch(sw->tb, sw);
		break;

	/* Challenge switch */
	case 2:
		if (sw->key)
			ret = tb_domain_challenge_switch_key(sw->tb, sw);
		break;

	default:
		break;
	}

	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);
	pci_unlock_rescan_remove();

	if (!ret) {
		sw->authorized = val;
		/* Notify status change to the userspace */
		kobject_uevent(&sw->dev.kobj, KOBJ_CHANGE);
	}

unlock:
	mutex_unlock(&sw->tb->lock);
	return ret;
}

static ssize_t authorized_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct tb_switch *sw = tb_to_switch(dev);
	unsigned int val;
	ssize_t ret;

	ret = kstrtouint(buf, 0, &val);
	if (ret)
		return ret;
	if (val > 2)
		return -EINVAL;

	ret = tb_switch_set_authorized(sw, val);

	return ret ? ret : count;
}
static DEVICE_ATTR_RW(authorized);

static ssize_t boot_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%u\n", sw->boot);
}
static DEVICE_ATTR_RO(boot);

static ssize_t device_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%#x\n", sw->device);
}
static DEVICE_ATTR_RO(device);

static ssize_t
device_name_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%s\n", sw->device_name ? sw->device_name : "");
}
static DEVICE_ATTR_RO(device_name);

static ssize_t key_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);
	ssize_t ret;

	if (!mutex_trylock(&sw->tb->lock))
		return restart_syscall();

	if (sw->key)
		ret = sprintf(buf, "%*phN\n", TB_SWITCH_KEY_SIZE, sw->key);
	else
		ret = sprintf(buf, "\n");

	mutex_unlock(&sw->tb->lock);
	return ret;
}

static ssize_t key_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count)
{
	struct tb_switch *sw = tb_to_switch(dev);
	u8 key[TB_SWITCH_KEY_SIZE];
	ssize_t ret = count;
	bool clear = false;

	if (!strcmp(buf, "\n"))
		clear = true;
	else if (hex2bin(key, buf, sizeof(key)))
		return -EINVAL;

	if (!mutex_trylock(&sw->tb->lock))
		return restart_syscall();

	if (sw->authorized) {
		ret = -EBUSY;
	} else {
		kfree(sw->key);
		if (clear) {
			sw->key = NULL;
		} else {
			sw->key = kmemdup(key, sizeof(key), GFP_KERNEL);
			if (!sw->key)
				ret = -ENOMEM;
		}
	}

	mutex_unlock(&sw->tb->lock);
	return ret;
}
static DEVICE_ATTR(key, 0600, key_show, key_store);

static void nvm_authenticate_start(struct tb_switch *sw)
{
	struct pci_dev *root_port;

	/*
	 * During host router NVM upgrade we should not allow root port to
	 * go into D3cold because some root ports cannot trigger PME
	 * themselves. To be on the safe side keep the root port in D0 during
	 * the whole upgrade process.
	 */
	root_port = pci_find_pcie_root_port(sw->tb->nhi->pdev);
	if (root_port)
		pm_runtime_get_noresume(&root_port->dev);
}

static void nvm_authenticate_complete(struct tb_switch *sw)
{
	struct pci_dev *root_port;

	root_port = pci_find_pcie_root_port(sw->tb->nhi->pdev);
	if (root_port)
		pm_runtime_put(&root_port->dev);
}

static ssize_t nvm_authenticate_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);
	u32 status;

	nvm_get_auth_status(sw, &status);
	return sprintf(buf, "%#x\n", status);
}

static ssize_t nvm_authenticate_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	struct tb_switch *sw = tb_to_switch(dev);
	bool val;
	int ret;

	if (!mutex_trylock(&sw->tb->lock))
		return restart_syscall();

	/* If NVMem devices are not yet added */
	if (!sw->nvm) {
		ret = -EAGAIN;
		goto exit_unlock;
	}

	ret = kstrtobool(buf, &val);
	if (ret)
		goto exit_unlock;

	/* Always clear the authentication status */
	nvm_clear_auth_status(sw);

	if (val) {
		if (!sw->nvm->buf) {
			ret = -EINVAL;
			goto exit_unlock;
		}

		pm_runtime_get_sync(&sw->dev);
		ret = nvm_validate_and_write(sw);
		if (ret) {
			pm_runtime_mark_last_busy(&sw->dev);
			pm_runtime_put_autosuspend(&sw->dev);
			goto exit_unlock;
		}

		sw->nvm->authenticating = true;

		if (!tb_route(sw)) {
			/*
			 * Keep root port from suspending as long as the
			 * NVM upgrade process is running.
			 */
			nvm_authenticate_start(sw);
			ret = nvm_authenticate_host(sw);
			if (ret)
				nvm_authenticate_complete(sw);
		} else {
			ret = nvm_authenticate_device(sw);
		}
		pm_runtime_mark_last_busy(&sw->dev);
		pm_runtime_put_autosuspend(&sw->dev);
	}

exit_unlock:
	mutex_unlock(&sw->tb->lock);

	if (ret)
		return ret;
	return count;
}
static DEVICE_ATTR_RW(nvm_authenticate);

static ssize_t nvm_version_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);
	int ret;

	if (!mutex_trylock(&sw->tb->lock))
		return restart_syscall();

	if (sw->safe_mode)
		ret = -ENODATA;
	else if (!sw->nvm)
		ret = -EAGAIN;
	else
		ret = sprintf(buf, "%x.%x\n", sw->nvm->major, sw->nvm->minor);

	mutex_unlock(&sw->tb->lock);

	return ret;
}
static DEVICE_ATTR_RO(nvm_version);

static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%#x\n", sw->vendor);
}
static DEVICE_ATTR_RO(vendor);

static ssize_t
vendor_name_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%s\n", sw->vendor_name ? sw->vendor_name : "");
}
static DEVICE_ATTR_RO(vendor_name);

static ssize_t unique_id_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%pUb\n", sw->uuid);
}
static DEVICE_ATTR_RO(unique_id);

static struct attribute *switch_attrs[] = {
	&dev_attr_authorized.attr,
	&dev_attr_boot.attr,
	&dev_attr_device.attr,
	&dev_attr_device_name.attr,
	&dev_attr_key.attr,
	&dev_attr_nvm_authenticate.attr,
	&dev_attr_nvm_version.attr,
	&dev_attr_vendor.attr,
	&dev_attr_vendor_name.attr,
	&dev_attr_unique_id.attr,
	NULL,
};

static umode_t switch_attr_is_visible(struct kobject *kobj,
				      struct attribute *attr, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct tb_switch *sw = tb_to_switch(dev);

	if (attr == &dev_attr_key.attr) {
		if (tb_route(sw) &&
		    sw->tb->security_level == TB_SECURITY_SECURE &&
		    sw->security_level == TB_SECURITY_SECURE)
			return attr->mode;
		return 0;
	} else if (attr == &dev_attr_nvm_authenticate.attr ||
		   attr == &dev_attr_nvm_version.attr) {
		if (sw->dma_port)
			return attr->mode;
		return 0;
	} else if (attr == &dev_attr_boot.attr) {
		if (tb_route(sw))
			return attr->mode;
		return 0;
	}

	return sw->safe_mode ? 0 : attr->mode;
}

static struct attribute_group switch_group = {
	.is_visible = switch_attr_is_visible,
	.attrs = switch_attrs,
};

static const struct attribute_group *switch_groups[] = {
	&switch_group,
	NULL,
};

static void tb_switch_release(struct device *dev)
{
	struct tb_switch *sw = tb_to_switch(dev);

	dma_port_free(sw->dma_port);

	kfree(sw->uuid);
	kfree(sw->device_name);
	kfree(sw->vendor_name);
	kfree(sw->ports);
	kfree(sw->drom);
	kfree(sw->key);
	kfree(sw);
}

/*
 * Currently only need to provide the callbacks. Everything else is handled
 * in the connection manager.
 */
static int __maybe_unused tb_switch_runtime_suspend(struct device *dev)
{
	return 0;
}

static int __maybe_unused tb_switch_runtime_resume(struct device *dev)
{
	return 0;
}

static const struct dev_pm_ops tb_switch_pm_ops = {
	SET_RUNTIME_PM_OPS(tb_switch_runtime_suspend, tb_switch_runtime_resume,
			   NULL)
};

struct device_type tb_switch_type = {
	.name = "thunderbolt_device",
	.release = tb_switch_release,
	.pm = &tb_switch_pm_ops,
};

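/*
 * Map the device ID to the Thunderbolt hardware generation (1-3).
 * Unknown IDs are treated as generation 1 to be on the safe side.
 */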
static int tb_switch_get_generation(struct tb_switch *sw)
{
	switch (sw->config.device_id) {
	case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
	case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE:
	case PCI_DEVICE_ID_INTEL_LIGHT_PEAK:
	case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_2C:
	case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
	case PCI_DEVICE_ID_INTEL_PORT_RIDGE:
	case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_4C_BRIDGE:
		return 1;

	case PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE:
		return 2;

	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE:
		return 3;

	default:
		/*
		 * For unknown switches assume generation to be 1 to be
		 * on the safe side.
		 */
		tb_sw_warn(sw, "unsupported switch device id %#x\n",
			   sw->config.device_id);
		return 1;
	}
}

/**
 * tb_switch_alloc() - allocate a switch
 * @tb: Pointer to the owning domain
 * @parent: Parent device for this switch
 * @route: Route string for this switch
 *
 * Allocates and initializes a switch. Will not upload configuration to
 * the switch. For that you need to call tb_switch_configure()
 * separately. The returned switch should be released by calling
 * tb_switch_put().
 *
 * Return: Pointer to the allocated switch or %NULL in case of failure
 */
struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent,
				  u64 route)
{
	int i;
	int cap;
	struct tb_switch *sw;
	int upstream_port = tb_cfg_get_upstream_port(tb->ctl, route);
	if (upstream_port < 0)
		return NULL;

	sw = kzalloc(sizeof(*sw), GFP_KERNEL);
	if (!sw)
		return NULL;

	sw->tb = tb;
	if (tb_cfg_read(tb->ctl, &sw->config, route, 0, TB_CFG_SWITCH, 0, 5))
		goto err_free_sw_ports;

	tb_info(tb, "current switch config:\n");
	tb_dump_switch(tb, &sw->config);

	/* configure switch */
	sw->config.upstream_port_number = upstream_port;
	sw->config.depth = tb_route_length(route);
	sw->config.route_lo = route;
	sw->config.route_hi = route >> 32;
	sw->config.enabled = 0;

	/* initialize ports */
	sw->ports = kcalloc(sw->config.max_port_number + 1, sizeof(*sw->ports),
			    GFP_KERNEL);
	if (!sw->ports)
		goto err_free_sw_ports;

	for (i = 0; i <= sw->config.max_port_number; i++) {
		/* minimum setup for tb_find_cap and tb_drom_read to work */
		sw->ports[i].sw = sw;
		sw->ports[i].port = i;
	}

	sw->generation = tb_switch_get_generation(sw);

	cap = tb_switch_find_vse_cap(sw, TB_VSE_CAP_PLUG_EVENTS);
	if (cap < 0) {
		tb_sw_warn(sw, "cannot find TB_VSE_CAP_PLUG_EVENTS aborting\n");
		goto err_free_sw_ports;
	}
	sw->cap_plug_events = cap;

	/* Root switch is always authorized */
	if (!route)
		sw->authorized = true;

	device_initialize(&sw->dev);
	sw->dev.parent = parent;
	sw->dev.bus = &tb_bus_type;
	sw->dev.type = &tb_switch_type;
	sw->dev.groups = switch_groups;
	dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));

	return sw;

err_free_sw_ports:
	kfree(sw->ports);
	kfree(sw);

	return NULL;
}

/**
 * tb_switch_alloc_safe_mode() - allocate a switch that is in safe mode
 * @tb: Pointer to the owning domain
 * @parent: Parent device for this switch
 * @route: Route string for this switch
 *
 * This creates a switch in safe mode. This means the switch pretty much
 * lacks all capabilities except the DMA configuration port, until it
 * has been flashed with a valid NVM firmware.
 *
 * The returned switch must be released by calling tb_switch_put().
 *
 * Return: Pointer to the allocated switch or %NULL in case of failure
 */
struct tb_switch *
tb_switch_alloc_safe_mode(struct tb *tb, struct device *parent, u64 route)
{
	struct tb_switch *sw;

	sw = kzalloc(sizeof(*sw), GFP_KERNEL);
	if (!sw)
		return NULL;

	sw->tb = tb;
	sw->config.depth = tb_route_length(route);
	sw->config.route_hi = upper_32_bits(route);
	sw->config.route_lo = lower_32_bits(route);
	sw->safe_mode = true;

	device_initialize(&sw->dev);
	sw->dev.parent = parent;
	sw->dev.bus = &tb_bus_type;
	sw->dev.type = &tb_switch_type;
	sw->dev.groups = switch_groups;
	dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));

	return sw;
}

/**
 * tb_switch_configure() - Uploads configuration to the switch
 * @sw: Switch to configure
 *
 * Call this function before the switch is added to the system. It will
 * upload the configuration to the switch and make it available for the
 * connection manager to use.
 *
 * Return: %0 in case of success and negative errno in case of failure
 */
int tb_switch_configure(struct tb_switch *sw)
{
	struct tb *tb = sw->tb;
	u64 route;
	int ret;

	route = tb_route(sw);
	tb_info(tb,
		"initializing Switch at %#llx (depth: %d, up port: %d)\n",
		route, tb_route_length(route), sw->config.upstream_port_number);

	if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL)
		tb_sw_warn(sw, "unknown switch vendor id %#x\n",
			   sw->config.vendor_id);

	sw->config.enabled = 1;

	/* upload configuration */
	ret = tb_sw_write(sw, 1 + (u32 *)&sw->config, TB_CFG_SWITCH, 1, 3);
	if (ret)
		return ret;

	return tb_plug_events_active(sw, true);
}

static int tb_switch_set_uuid(struct tb_switch *sw)
{
	u32 uuid[4];
	int cap, ret;

	ret = 0;
	if (sw->uuid)
		return ret;

	/*
	 * The newer controllers include fused UUID as part of link
	 * controller specific registers
	 */
	cap = tb_switch_find_vse_cap(sw, TB_VSE_CAP_LINK_CONTROLLER);
	if (cap > 0) {
		ret = tb_sw_read(sw, uuid, TB_CFG_SWITCH, cap + 3, 4);
		if (ret)
			return ret;
	} else {
		/*
		 * ICM generates UUID based on UID and fills the upper
		 * two words with ones. This is not strictly following
		 * UUID format but we want to be compatible with it so
		 * we do the same here.
		 */
		uuid[0] = sw->uid & 0xffffffff;
		uuid[1] = (sw->uid >> 32) & 0xffffffff;
		uuid[2] = 0xffffffff;
		uuid[3] = 0xffffffff;
	}

	sw->uuid = kmemdup(uuid, sizeof(uuid), GFP_KERNEL);
	if (!sw->uuid)
		ret = -ENOMEM;
	return ret;
}

static int tb_switch_add_dma_port(struct tb_switch *sw)
{
	u32 status;
	int ret;

	switch (sw->generation) {
	case 3:
		break;

	case 2:
		/* Only root switch can be upgraded */
		if (tb_route(sw))
			return 0;
		break;

	default:
		/*
		 * DMA port is the only thing available when the switch
		 * is in safe mode.
		 */
		if (!sw->safe_mode)
			return 0;
		break;
	}

	if (sw->no_nvm_upgrade)
		return 0;

	sw->dma_port = dma_port_alloc(sw);
	if (!sw->dma_port)
		return 0;

	/*
	 * Check status of the previous flash authentication. If there
	 * is one we need to power cycle the switch in any case to make
	 * it functional again.
	 */
	ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
	if (ret <= 0)
		return ret;

	/* Now we can allow root port to suspend again */
	if (!tb_route(sw))
		nvm_authenticate_complete(sw);

	if (status) {
		tb_sw_info(sw, "switch flash authentication failed\n");
		ret = tb_switch_set_uuid(sw);
		if (ret)
			return ret;
		nvm_set_auth_status(sw, status);
	}

	tb_sw_info(sw, "power cycling the switch now\n");
	dma_port_power_cycle(sw->dma_port);

	/*
	 * We return error here which causes the switch adding failure.
	 * It should appear back after power cycle is complete.
	 */
	return -ESHUTDOWN;
}

/**
 * tb_switch_add() - Add a switch to the domain
 * @sw: Switch to add
 *
 * This is the last step in adding switch to the domain. It will read
 * identification information from DROM and initialize ports so that
 * they can be used to connect other switches. The switch will be
 * exposed to the userspace when this function successfully returns. To
 * remove and release the switch, call tb_switch_remove().
 *
 * Return: %0 in case of success and negative errno in case of failure
 */
int tb_switch_add(struct tb_switch *sw)
{
	int i, ret;

	/*
	 * Initialize DMA control port now before we read DROM. Recent
	 * host controllers have more complete DROM on NVM that includes
	 * vendor and model identification strings which we then expose
	 * to the userspace. NVM can be accessed through DMA
	 * configuration based mailbox.
	 */
	ret = tb_switch_add_dma_port(sw);
	if (ret)
		return ret;

	if (!sw->safe_mode) {
		/* read drom */
		ret = tb_drom_read(sw);
		if (ret) {
			tb_sw_warn(sw, "tb_eeprom_read_rom failed\n");
			return ret;
		}
		tb_sw_info(sw, "uid: %#llx\n", sw->uid);

		ret = tb_switch_set_uuid(sw);
		if (ret)
			return ret;

		for (i = 0; i <= sw->config.max_port_number; i++) {
			if (sw->ports[i].disabled) {
				tb_port_info(&sw->ports[i], "disabled by eeprom\n");
				continue;
			}
			ret = tb_init_port(&sw->ports[i]);
			if (ret)
				return ret;
		}
	}

	ret = device_add(&sw->dev);
	if (ret)
		return ret;

	ret = tb_switch_nvm_add(sw);
	if (ret) {
		device_del(&sw->dev);
		return ret;
	}

	pm_runtime_set_active(&sw->dev);
	if (sw->rpm) {
		pm_runtime_set_autosuspend_delay(&sw->dev, TB_AUTOSUSPEND_DELAY);
		pm_runtime_use_autosuspend(&sw->dev);
		pm_runtime_mark_last_busy(&sw->dev);
		pm_runtime_enable(&sw->dev);
		pm_request_autosuspend(&sw->dev);
	}

	return 0;
}

/**
 * tb_switch_remove() - Remove and release a switch
 * @sw: Switch to remove
 *
 * This will remove the switch from the domain and release it after last
 * reference count drops to zero. If there are switches connected below
 * this switch, they will be removed as well.
 */
void tb_switch_remove(struct tb_switch *sw)
{
	int i;

	if (sw->rpm) {
		pm_runtime_get_sync(&sw->dev);
		pm_runtime_disable(&sw->dev);
	}

	/* port 0 is the switch itself and never has a remote */
	for (i = 1; i <= sw->config.max_port_number; i++) {
		if (tb_is_upstream_port(&sw->ports[i]))
			continue;
		if (sw->ports[i].remote)
			tb_switch_remove(sw->ports[i].remote->sw);
		sw->ports[i].remote = NULL;
		if (sw->ports[i].xdomain)
			tb_xdomain_remove(sw->ports[i].xdomain);
		sw->ports[i].xdomain = NULL;
	}

	if (!sw->is_unplugged)
		tb_plug_events_active(sw, false);

	tb_switch_nvm_remove(sw);
	device_unregister(&sw->dev);
}

/**
 * tb_sw_set_unplugged() - set is_unplugged on switch and downstream switches
 */
void tb_sw_set_unplugged(struct tb_switch *sw)
{
	int i;
	if (sw == sw->tb->root_switch) {
		tb_sw_WARN(sw, "cannot unplug root switch\n");
		return;
	}
	if (sw->is_unplugged) {
		tb_sw_WARN(sw, "is_unplugged already set\n");
		return;
	}
	sw->is_unplugged = true;
	for (i = 0; i <= sw->config.max_port_number; i++) {
		if (!tb_is_upstream_port(&sw->ports[i]) && sw->ports[i].remote)
			tb_sw_set_unplugged(sw->ports[i].remote->sw);
	}
}

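/*
 * Re-upload the switch configuration after resume and verify that the
 * connected switches are still the same (by UID) as before suspend.
 * Anything that changed or got lost is marked unplugged.
 */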
int tb_switch_resume(struct tb_switch *sw)
{
	int i, err;
	tb_sw_info(sw, "resuming switch\n");

	/*
	 * Check for UID of the connected switches except for root
	 * switch which we assume cannot be removed.
	 */
	if (tb_route(sw)) {
		u64 uid;

		err = tb_drom_read_uid_only(sw, &uid);
		if (err) {
			tb_sw_warn(sw, "uid read failed\n");
			return err;
		}
		if (sw->uid != uid) {
			tb_sw_info(sw,
				"changed while suspended (uid %#llx -> %#llx)\n",
				sw->uid, uid);
			return -ENODEV;
		}
	}

	/* upload configuration */
	err = tb_sw_write(sw, 1 + (u32 *) &sw->config, TB_CFG_SWITCH, 1, 3);
	if (err)
		return err;

	err = tb_plug_events_active(sw, true);
	if (err)
		return err;

	/* check for surviving downstream switches */
	for (i = 1; i <= sw->config.max_port_number; i++) {
		struct tb_port *port = &sw->ports[i];
		if (tb_is_upstream_port(port))
			continue;
		if (!port->remote)
			continue;
		if (tb_wait_for_port(port, true) <= 0
			|| tb_switch_resume(port->remote->sw)) {
			tb_port_warn(port,
				     "lost during suspend, disconnecting\n");
			tb_sw_set_unplugged(port->remote->sw);
		}
	}
	return 0;
}

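/*
 * Disable plug events on this switch and, recursively, on every switch
 * below it before the domain suspends.
 */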
void tb_switch_suspend(struct tb_switch *sw)
{
	int i, err;
	err = tb_plug_events_active(sw, false);
	if (err)
		return;

	for (i = 1; i <= sw->config.max_port_number; i++) {
		if (!tb_is_upstream_port(&sw->ports[i]) && sw->ports[i].remote)
			tb_switch_suspend(sw->ports[i].remote->sw);
	}
	/*
	 * TODO: invoke tb_cfg_prepare_to_sleep here? does not seem to have any
	 * effect?
	 */
}

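/*
 * Lookup key used with tb_switch_match() when searching the bus for a
 * switch by UUID, route string or link/depth pair.
 */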
struct tb_sw_lookup {
	struct tb *tb;
	u8 link;
	u8 depth;
	const uuid_t *uuid;
	u64 route;
};

static int tb_switch_match(struct device *dev, void *data)
{
	struct tb_switch *sw = tb_to_switch(dev);
	struct tb_sw_lookup *lookup = data;

	if (!sw)
		return 0;
	if (sw->tb != lookup->tb)
		return 0;

	if (lookup->uuid)
		return !memcmp(sw->uuid, lookup->uuid, sizeof(*lookup->uuid));

	if (lookup->route) {
		return sw->config.route_lo == lower_32_bits(lookup->route) &&
		       sw->config.route_hi == upper_32_bits(lookup->route);
	}

	/* Root switch is matched only by depth */
	if (!lookup->depth)
		return !sw->depth;

	return sw->link == lookup->link && sw->depth == lookup->depth;
}

/**
 * tb_switch_find_by_link_depth() - Find switch by link and depth
 * @tb: Domain the switch belongs
 * @link: Link number the switch is connected
 * @depth: Depth of the switch in link
 *
 * Returned switch has reference count increased so the caller needs to
 * call tb_switch_put() when done with the switch.
 */
struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link, u8 depth)
{
	struct tb_sw_lookup lookup;
	struct device *dev;

	memset(&lookup, 0, sizeof(lookup));
	lookup.tb = tb;
	lookup.link = link;
	lookup.depth = depth;

	dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
	if (dev)
		return tb_to_switch(dev);

	return NULL;
}

/**
 * tb_switch_find_by_uuid() - Find switch by UUID
 * @tb: Domain the switch belongs
 * @uuid: UUID to look for
 *
 * Returned switch has reference count increased so the caller needs to
 * call tb_switch_put() when done with the switch.
 */
struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid)
{
	struct tb_sw_lookup lookup;
	struct device *dev;

	memset(&lookup, 0, sizeof(lookup));
	lookup.tb = tb;
	lookup.uuid = uuid;

	dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
	if (dev)
		return tb_to_switch(dev);

	return NULL;
}

/**
 * tb_switch_find_by_route() - Find switch by route string
 * @tb: Domain the switch belongs
 * @route: Route string to look for
 *
 * Returned switch has reference count increased so the caller needs to
 * call tb_switch_put() when done with the switch.
 */
struct tb_switch *tb_switch_find_by_route(struct tb *tb, u64 route)
{
	struct tb_sw_lookup lookup;
	struct device *dev;

	if (!route)
		return tb_switch_get(tb->root_switch);

	memset(&lookup, 0, sizeof(lookup));
	lookup.tb = tb;
	lookup.route = route;

	dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
	if (dev)
		return tb_to_switch(dev);

	return NULL;
}

void tb_switch_exit(void)
{
	ida_destroy(&nvm_ida);
}