drivers/thunderbolt/switch.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Thunderbolt driver - switch/port utility functions
4 *
5 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
6 * Copyright (C) 2018, Intel Corporation
7 */
8
9 #include <linux/delay.h>
10 #include <linux/idr.h>
11 #include <linux/nvmem-provider.h>
12 #include <linux/pm_runtime.h>
13 #include <linux/sched/signal.h>
14 #include <linux/sizes.h>
15 #include <linux/slab.h>
16
17 #include "tb.h"
18
19 /* Switch NVM support */
20
21 #define NVM_CSS 0x10
22
23 struct nvm_auth_status {
24 struct list_head list;
25 uuid_t uuid;
26 u32 status;
27 };
28
29 enum nvm_write_ops {
30 WRITE_AND_AUTHENTICATE = 1,
31 WRITE_ONLY = 2,
32 };
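/*
 * These values match what userspace writes to the nvm_authenticate
 * sysfs attribute below: writing 1 flushes the cached image and then
 * starts the authentication, writing 2 stops after the flush.
 */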
33
34 /*
35  * Hold NVM authentication failure status per switch. This information
36 * needs to stay around even when the switch gets power cycled so we
37 * keep it separately.
38 */
39 static LIST_HEAD(nvm_auth_status_cache);
40 static DEFINE_MUTEX(nvm_auth_status_lock);
41
42 static struct nvm_auth_status *__nvm_get_auth_status(const struct tb_switch *sw)
43 {
44 struct nvm_auth_status *st;
45
46 list_for_each_entry(st, &nvm_auth_status_cache, list) {
47 if (uuid_equal(&st->uuid, sw->uuid))
48 return st;
49 }
50
51 return NULL;
52 }
53
54 static void nvm_get_auth_status(const struct tb_switch *sw, u32 *status)
55 {
56 struct nvm_auth_status *st;
57
58 mutex_lock(&nvm_auth_status_lock);
59 st = __nvm_get_auth_status(sw);
60 mutex_unlock(&nvm_auth_status_lock);
61
62 *status = st ? st->status : 0;
63 }
64
65 static void nvm_set_auth_status(const struct tb_switch *sw, u32 status)
66 {
67 struct nvm_auth_status *st;
68
69 if (WARN_ON(!sw->uuid))
70 return;
71
72 mutex_lock(&nvm_auth_status_lock);
73 st = __nvm_get_auth_status(sw);
74
75 if (!st) {
76 st = kzalloc(sizeof(*st), GFP_KERNEL);
77 if (!st)
78 goto unlock;
79
80 memcpy(&st->uuid, sw->uuid, sizeof(st->uuid));
81 INIT_LIST_HEAD(&st->list);
82 list_add_tail(&st->list, &nvm_auth_status_cache);
83 }
84
85 st->status = status;
86 unlock:
87 mutex_unlock(&nvm_auth_status_lock);
88 }
89
90 static void nvm_clear_auth_status(const struct tb_switch *sw)
91 {
92 struct nvm_auth_status *st;
93
94 mutex_lock(&nvm_auth_status_lock);
95 st = __nvm_get_auth_status(sw);
96 if (st) {
97 list_del(&st->list);
98 kfree(st);
99 }
100 mutex_unlock(&nvm_auth_status_lock);
101 }
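/*
 * Illustrative lifetime of a cache entry above: a failed authentication
 * stores the status keyed by the router UUID, the entry survives the
 * power cycle that follows (the struct tb_switch itself is freed and
 * re-created), and it is dropped either when a new authentication is
 * started or when the router is unplugged for good.
 */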
102
103 static int nvm_validate_and_write(struct tb_switch *sw)
104 {
105 unsigned int image_size, hdr_size;
106 const u8 *buf = sw->nvm->buf;
107 u16 ds_size;
108 int ret;
109
110 if (!buf)
111 return -EINVAL;
112
113 image_size = sw->nvm->buf_data_size;
114 if (image_size < NVM_MIN_SIZE || image_size > NVM_MAX_SIZE)
115 return -EINVAL;
116
117 /*
118 * FARB pointer must point inside the image and must at least
119 * contain parts of the digital section we will be reading here.
120 */
121 hdr_size = (*(u32 *)buf) & 0xffffff;
122 if (hdr_size + NVM_DEVID + 2 >= image_size)
123 return -EINVAL;
124
125 /* Digital section start should be aligned to 4k page */
126 if (!IS_ALIGNED(hdr_size, SZ_4K))
127 return -EINVAL;
128
129 /*
130 * Read digital section size and check that it also fits inside
131 * the image.
132 */
133 ds_size = *(u16 *)(buf + hdr_size);
134 if (ds_size >= image_size)
135 return -EINVAL;
136
137 if (!sw->safe_mode) {
138 u16 device_id;
139
140 /*
141 * Make sure the device ID in the image matches the one
142 * we read from the switch config space.
143 */
144 device_id = *(u16 *)(buf + hdr_size + NVM_DEVID);
145 if (device_id != sw->config.device_id)
146 return -EINVAL;
147
148 if (sw->generation < 3) {
149 /* Write CSS headers first */
150 ret = dma_port_flash_write(sw->dma_port,
151 DMA_PORT_CSS_ADDRESS, buf + NVM_CSS,
152 DMA_PORT_CSS_MAX_SIZE);
153 if (ret)
154 return ret;
155 }
156
157 /* Skip headers in the image */
158 buf += hdr_size;
159 image_size -= hdr_size;
160 }
161
162 if (tb_switch_is_usb4(sw))
163 ret = usb4_switch_nvm_write(sw, 0, buf, image_size);
164 else
165 ret = dma_port_flash_write(sw->dma_port, 0, buf, image_size);
166 if (!ret)
167 sw->nvm->flushed = true;
168 return ret;
169 }
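/*
 * Sketch of the Intel NVM image layout checked above (hdr_size and
 * ds_size are read from the image itself; the 0x4000 below is just an
 * example value):
 *
 *	0x0		FARB pointer, low 24 bits give hdr_size
 *	NVM_CSS (0x10)	CSS headers, written separately on gen < 3
 *	hdr_size	digital section, 4k aligned (e.g. 0x4000),
 *			starts with the 16-bit ds_size
 *	hdr_size +
 *	NVM_DEVID	16-bit device ID, must match config space
 */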
170
171 static int nvm_authenticate_host_dma_port(struct tb_switch *sw)
172 {
173 int ret = 0;
174
175 /*
176 * Root switch NVM upgrade requires that we disconnect the
177 * existing paths first (in case it is not in safe mode
178 * already).
179 */
180 if (!sw->safe_mode) {
181 u32 status;
182
183 ret = tb_domain_disconnect_all_paths(sw->tb);
184 if (ret)
185 return ret;
186 /*
187  * The host controller goes away pretty soon after this if
188  * everything goes well, so getting a timeout is expected.
189 */
190 ret = dma_port_flash_update_auth(sw->dma_port);
191 if (!ret || ret == -ETIMEDOUT)
192 return 0;
193
194 /*
195 * Any error from update auth operation requires power
196 * cycling of the host router.
197 */
198 tb_sw_warn(sw, "failed to authenticate NVM, power cycling\n");
199 if (dma_port_flash_update_auth_status(sw->dma_port, &status) > 0)
200 nvm_set_auth_status(sw, status);
201 }
202
203 /*
204 * From safe mode we can get out by just power cycling the
205 * switch.
206 */
207 dma_port_power_cycle(sw->dma_port);
208 return ret;
209 }
210
211 static int nvm_authenticate_device_dma_port(struct tb_switch *sw)
212 {
213 int ret, retries = 10;
214
215 ret = dma_port_flash_update_auth(sw->dma_port);
216 switch (ret) {
217 case 0:
218 case -ETIMEDOUT:
219 case -EACCES:
220 case -EINVAL:
221 /* Power cycle is required */
222 break;
223 default:
224 return ret;
225 }
226
227 /*
228 * Poll here for the authentication status. It takes some time
229  * for the device to respond (we get a timeout for a while). Once
230  * we get a response the device needs to be power cycled in order
231  * for the new NVM to be taken into use.
232 */
233 do {
234 u32 status;
235
236 ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
237 if (ret < 0 && ret != -ETIMEDOUT)
238 return ret;
239 if (ret > 0) {
240 if (status) {
241 tb_sw_warn(sw, "failed to authenticate NVM\n");
242 nvm_set_auth_status(sw, status);
243 }
244
245 tb_sw_info(sw, "power cycling the switch now\n");
246 dma_port_power_cycle(sw->dma_port);
247 return 0;
248 }
249
250 msleep(500);
251 } while (--retries);
252
253 return -ETIMEDOUT;
254 }
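/*
 * The loop above gives the device roughly 10 * 500ms = 5s, on top of
 * the dma_port operation timeouts, to report an authentication status
 * before we give up with -ETIMEDOUT.
 */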
255
256 static void nvm_authenticate_start_dma_port(struct tb_switch *sw)
257 {
258 struct pci_dev *root_port;
259
260 /*
261  * During host router NVM upgrade we should not allow the root port to
262  * go into D3cold because some root ports cannot trigger PME by
263  * themselves. To be on the safe side keep the root port in D0 during
264 * the whole upgrade process.
265 */
266 root_port = pcie_find_root_port(sw->tb->nhi->pdev);
267 if (root_port)
268 pm_runtime_get_noresume(&root_port->dev);
269 }
270
271 static void nvm_authenticate_complete_dma_port(struct tb_switch *sw)
272 {
273 struct pci_dev *root_port;
274
275 root_port = pcie_find_root_port(sw->tb->nhi->pdev);
276 if (root_port)
277 pm_runtime_put(&root_port->dev);
278 }
279
280 static inline bool nvm_readable(struct tb_switch *sw)
281 {
282 if (tb_switch_is_usb4(sw)) {
283 /*
284  * USB4 devices must support NVM operations but they are
285  * optional for hosts. Therefore we query the NVM sector
286  * size here and if it is supported, assume NVM
287 * operations are implemented.
288 */
289 return usb4_switch_nvm_sector_size(sw) > 0;
290 }
291
292 /* Thunderbolt 2 and 3 devices support NVM through DMA port */
293 return !!sw->dma_port;
294 }
295
296 static inline bool nvm_upgradeable(struct tb_switch *sw)
297 {
298 if (sw->no_nvm_upgrade)
299 return false;
300 return nvm_readable(sw);
301 }
302
303 static inline int nvm_read(struct tb_switch *sw, unsigned int address,
304 void *buf, size_t size)
305 {
306 if (tb_switch_is_usb4(sw))
307 return usb4_switch_nvm_read(sw, address, buf, size);
308 return dma_port_flash_read(sw->dma_port, address, buf, size);
309 }
310
311 static int nvm_authenticate(struct tb_switch *sw)
312 {
313 int ret;
314
315 if (tb_switch_is_usb4(sw))
316 return usb4_switch_nvm_authenticate(sw);
317
318 if (!tb_route(sw)) {
319 nvm_authenticate_start_dma_port(sw);
320 ret = nvm_authenticate_host_dma_port(sw);
321 } else {
322 ret = nvm_authenticate_device_dma_port(sw);
323 }
324
325 return ret;
326 }
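/*
 * For host routers (!tb_route()) the runtime PM reference taken in
 * nvm_authenticate_start_dma_port() is intentionally not dropped here;
 * nvm_authenticate_complete_dma_port() releases it once the router
 * comes back after the power cycle.
 */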
327
328 static int tb_switch_nvm_read(void *priv, unsigned int offset, void *val,
329 size_t bytes)
330 {
331 struct tb_nvm *nvm = priv;
332 struct tb_switch *sw = tb_to_switch(nvm->dev);
333 int ret;
334
335 pm_runtime_get_sync(&sw->dev);
336
337 if (!mutex_trylock(&sw->tb->lock)) {
338 ret = restart_syscall();
339 goto out;
340 }
341
342 ret = nvm_read(sw, offset, val, bytes);
343 mutex_unlock(&sw->tb->lock);
344
345 out:
346 pm_runtime_mark_last_busy(&sw->dev);
347 pm_runtime_put_autosuspend(&sw->dev);
348
349 return ret;
350 }
351
352 static int tb_switch_nvm_write(void *priv, unsigned int offset, void *val,
353 size_t bytes)
354 {
355 struct tb_nvm *nvm = priv;
356 struct tb_switch *sw = tb_to_switch(nvm->dev);
357 int ret;
358
359 if (!mutex_trylock(&sw->tb->lock))
360 return restart_syscall();
361
362 /*
363 * Since writing the NVM image might require some special steps,
364 * for example when CSS headers are written, we cache the image
365 * locally here and handle the special cases when the user asks
366 * us to authenticate the image.
367 */
368 ret = tb_nvm_write_buf(nvm, offset, val, bytes);
369 mutex_unlock(&sw->tb->lock);
370
371 return ret;
372 }
373
374 static int tb_switch_nvm_add(struct tb_switch *sw)
375 {
376 struct tb_nvm *nvm;
377 u32 val;
378 int ret;
379
380 if (!nvm_readable(sw))
381 return 0;
382
383 /*
384 * The NVM format of non-Intel hardware is not known so
385 * currently restrict NVM upgrade for Intel hardware. We may
386 * relax this in the future when we learn other NVM formats.
387 */
388 if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL &&
389 sw->config.vendor_id != 0x8087) {
390 dev_info(&sw->dev,
391 "NVM format of vendor %#x is not known, disabling NVM upgrade\n",
392 sw->config.vendor_id);
393 return 0;
394 }
395
396 nvm = tb_nvm_alloc(&sw->dev);
397 if (IS_ERR(nvm))
398 return PTR_ERR(nvm);
399
400 /*
401 * If the switch is in safe-mode the only accessible portion of
402 * the NVM is the non-active one where userspace is expected to
403 * write new functional NVM.
404 */
405 if (!sw->safe_mode) {
406 u32 nvm_size, hdr_size;
407
408 ret = nvm_read(sw, NVM_FLASH_SIZE, &val, sizeof(val));
409 if (ret)
410 goto err_nvm;
411
412 hdr_size = sw->generation < 3 ? SZ_8K : SZ_16K;
413 nvm_size = (SZ_1M << (val & 7)) / 8;
414 nvm_size = (nvm_size - hdr_size) / 2;
415
416 ret = nvm_read(sw, NVM_VERSION, &val, sizeof(val));
417 if (ret)
418 goto err_nvm;
419
420 nvm->major = val >> 16;
421 nvm->minor = val >> 8;
422
423 ret = tb_nvm_add_active(nvm, nvm_size, tb_switch_nvm_read);
424 if (ret)
425 goto err_nvm;
426 }
427
428 if (!sw->no_nvm_upgrade) {
429 ret = tb_nvm_add_non_active(nvm, NVM_MAX_SIZE,
430 tb_switch_nvm_write);
431 if (ret)
432 goto err_nvm;
433 }
434
435 sw->nvm = nvm;
436 return 0;
437
438 err_nvm:
439 tb_nvm_free(nvm);
440 return ret;
441 }
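/*
 * Worked example of the size arithmetic above: if the low three bits of
 * the NVM_FLASH_SIZE dword read 6, the flash is (SZ_1M << 6) / 8 = 8 MiB
 * in total. On a generation 3 router that leaves (8 MiB - SZ_16K) / 2,
 * slightly under 4 MiB, for each of the active and non-active regions.
 */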
442
443 static void tb_switch_nvm_remove(struct tb_switch *sw)
444 {
445 struct tb_nvm *nvm;
446
447 nvm = sw->nvm;
448 sw->nvm = NULL;
449
450 if (!nvm)
451 return;
452
453 /* Remove authentication status in case the switch is unplugged */
454 if (!nvm->authenticating)
455 nvm_clear_auth_status(sw);
456
457 tb_nvm_free(nvm);
458 }
459
460 /* port utility functions */
461
462 static const char *tb_port_type(struct tb_regs_port_header *port)
463 {
464 switch (port->type >> 16) {
465 case 0:
466 switch ((u8) port->type) {
467 case 0:
468 return "Inactive";
469 case 1:
470 return "Port";
471 case 2:
472 return "NHI";
473 default:
474 return "unknown";
475 }
476 case 0x2:
477 return "Ethernet";
478 case 0x8:
479 return "SATA";
480 case 0xe:
481 return "DP/HDMI";
482 case 0x10:
483 return "PCIe";
484 case 0x20:
485 return "USB";
486 default:
487 return "unknown";
488 }
489 }
490
491 static void tb_dump_port(struct tb *tb, struct tb_regs_port_header *port)
492 {
493 tb_dbg(tb,
494 " Port %d: %x:%x (Revision: %d, TB Version: %d, Type: %s (%#x))\n",
495 port->port_number, port->vendor_id, port->device_id,
496 port->revision, port->thunderbolt_version, tb_port_type(port),
497 port->type);
498 tb_dbg(tb, " Max hop id (in/out): %d/%d\n",
499 port->max_in_hop_id, port->max_out_hop_id);
500 tb_dbg(tb, " Max counters: %d\n", port->max_counters);
501 tb_dbg(tb, " NFC Credits: %#x\n", port->nfc_credits);
502 }
503
504 /**
505 * tb_port_state() - get connectedness state of a port
506 *
507 * The port must have a TB_CAP_PHY (i.e. it should be a real port).
508 *
509 * Return: Returns an enum tb_port_state on success or an error code on failure.
510 */
511 static int tb_port_state(struct tb_port *port)
512 {
513 struct tb_cap_phy phy;
514 int res;
515 if (port->cap_phy == 0) {
516 tb_port_WARN(port, "does not have a PHY\n");
517 return -EINVAL;
518 }
519 res = tb_port_read(port, &phy, TB_CFG_PORT, port->cap_phy, 2);
520 if (res)
521 return res;
522 return phy.state;
523 }
524
525 /**
526 * tb_wait_for_port() - wait for a port to become ready
527 *
528 * Wait up to 1 second for a port to reach state TB_PORT_UP. If
529 * wait_if_unplugged is set then we also wait if the port is in state
530 * TB_PORT_UNPLUGGED (it takes a while for the device to be registered after
531 * switch resume). Otherwise we only wait if a device is registered but the link
532 * has not yet been established.
533 *
534 * Return: Returns an error code on failure. Returns 0 if the port is not
535 * connected or failed to reach state TB_PORT_UP within one second. Returns 1
536 * if the port is connected and in state TB_PORT_UP.
537 */
538 int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged)
539 {
540 int retries = 10;
541 int state;
542 if (!port->cap_phy) {
543 tb_port_WARN(port, "does not have PHY\n");
544 return -EINVAL;
545 }
546 if (tb_is_upstream_port(port)) {
547 tb_port_WARN(port, "is the upstream port\n");
548 return -EINVAL;
549 }
550
551 while (retries--) {
552 state = tb_port_state(port);
553 if (state < 0)
554 return state;
555 if (state == TB_PORT_DISABLED) {
556 tb_port_dbg(port, "is disabled (state: 0)\n");
557 return 0;
558 }
559 if (state == TB_PORT_UNPLUGGED) {
560 if (wait_if_unplugged) {
561 /* used during resume */
562 tb_port_dbg(port,
563 "is unplugged (state: 7), retrying...\n");
564 msleep(100);
565 continue;
566 }
567 tb_port_dbg(port, "is unplugged (state: 7)\n");
568 return 0;
569 }
570 if (state == TB_PORT_UP) {
571 tb_port_dbg(port, "is connected, link is up (state: 2)\n");
572 return 1;
573 }
574
575 /*
576 * After plug-in the state is TB_PORT_CONNECTING. Give it some
577 * time.
578 */
579 tb_port_dbg(port,
580 "is connected, link is not up (state: %d), retrying...\n",
581 state);
582 msleep(100);
583 }
584 tb_port_warn(port,
585 "failed to reach state TB_PORT_UP. Ignoring port...\n");
586 return 0;
587 }
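/*
 * The loop above retries at most 10 times with a 100ms sleep in
 * between, which is where the one second bound in the kernel-doc
 * comes from.
 */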
588
589 /**
590 * tb_port_add_nfc_credits() - add/remove non flow controlled credits to port
591 *
592 * Change the number of NFC credits allocated to @port by @credits. To remove
593 * NFC credits pass a negative amount of credits.
594 *
595 * Return: Returns 0 on success or an error code on failure.
596 */
597 int tb_port_add_nfc_credits(struct tb_port *port, int credits)
598 {
599 u32 nfc_credits;
600
601 if (credits == 0 || port->sw->is_unplugged)
602 return 0;
603
604 nfc_credits = port->config.nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK;
605 nfc_credits += credits;
606
607         tb_port_dbg(port, "adding %d NFC credits to %lu\n", credits,
608 port->config.nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK);
609
610 port->config.nfc_credits &= ~ADP_CS_4_NFC_BUFFERS_MASK;
611 port->config.nfc_credits |= nfc_credits;
612
613 return tb_port_write(port, &port->config.nfc_credits,
614 TB_CFG_PORT, ADP_CS_4, 1);
615 }
616
617 /**
618 * tb_port_set_initial_credits() - Set initial port link credits allocated
619 * @port: Port to set the initial credits
620  * @credits: Number of credits to allocate
621 *
622 * Set initial credits value to be used for ingress shared buffering.
623 */
624 int tb_port_set_initial_credits(struct tb_port *port, u32 credits)
625 {
626 u32 data;
627 int ret;
628
629 ret = tb_port_read(port, &data, TB_CFG_PORT, ADP_CS_5, 1);
630 if (ret)
631 return ret;
632
633 data &= ~ADP_CS_5_LCA_MASK;
634 data |= (credits << ADP_CS_5_LCA_SHIFT) & ADP_CS_5_LCA_MASK;
635
636 return tb_port_write(port, &data, TB_CFG_PORT, ADP_CS_5, 1);
637 }
638
639 /**
640 * tb_port_clear_counter() - clear a counter in TB_CFG_COUNTER
641 *
642 * Return: Returns 0 on success or an error code on failure.
643 */
644 int tb_port_clear_counter(struct tb_port *port, int counter)
645 {
646 u32 zero[3] = { 0, 0, 0 };
647 tb_port_dbg(port, "clearing counter %d\n", counter);
648 return tb_port_write(port, zero, TB_CFG_COUNTERS, 3 * counter, 3);
649 }
650
651 /**
652 * tb_port_unlock() - Unlock downstream port
653 * @port: Port to unlock
654 *
655 * Needed for USB4 but can be called for any CIO/USB4 ports. Makes the
656 * downstream router accessible for CM.
657 */
658 int tb_port_unlock(struct tb_port *port)
659 {
660 if (tb_switch_is_icm(port->sw))
661 return 0;
662 if (!tb_port_is_null(port))
663 return -EINVAL;
664 if (tb_switch_is_usb4(port->sw))
665 return usb4_port_unlock(port);
666 return 0;
667 }
668
669 /**
670 * tb_init_port() - initialize a port
671 *
672 * This is a helper method for tb_switch_alloc. Does not check or initialize
673 * any downstream switches.
674 *
675 * Return: Returns 0 on success or an error code on failure.
676 */
677 static int tb_init_port(struct tb_port *port)
678 {
679 int res;
680 int cap;
681
682 res = tb_port_read(port, &port->config, TB_CFG_PORT, 0, 8);
683 if (res) {
684 if (res == -ENODEV) {
685 tb_dbg(port->sw->tb, " Port %d: not implemented\n",
686 port->port);
687 return 0;
688 }
689 return res;
690 }
691
692 /* Port 0 is the switch itself and has no PHY. */
693 if (port->config.type == TB_TYPE_PORT && port->port != 0) {
694 cap = tb_port_find_cap(port, TB_PORT_CAP_PHY);
695
696 if (cap > 0)
697 port->cap_phy = cap;
698 else
699 tb_port_WARN(port, "non switch port without a PHY\n");
700
701 cap = tb_port_find_cap(port, TB_PORT_CAP_USB4);
702 if (cap > 0)
703 port->cap_usb4 = cap;
704 } else if (port->port != 0) {
705 cap = tb_port_find_cap(port, TB_PORT_CAP_ADAP);
706 if (cap > 0)
707 port->cap_adap = cap;
708 }
709
710 tb_dump_port(port->sw->tb, &port->config);
711
712 /* Control port does not need HopID allocation */
713 if (port->port) {
714 ida_init(&port->in_hopids);
715 ida_init(&port->out_hopids);
716 }
717
718 INIT_LIST_HEAD(&port->list);
719 return 0;
720
721 }
722
723 static int tb_port_alloc_hopid(struct tb_port *port, bool in, int min_hopid,
724 int max_hopid)
725 {
726 int port_max_hopid;
727 struct ida *ida;
728
729 if (in) {
730 port_max_hopid = port->config.max_in_hop_id;
731 ida = &port->in_hopids;
732 } else {
733 port_max_hopid = port->config.max_out_hop_id;
734 ida = &port->out_hopids;
735 }
736
737 /*
738          * NHI can use HopIDs 1-max. For other adapters HopIDs 0-7 are
739          * reserved.
740 */
741 if (port->config.type != TB_TYPE_NHI && min_hopid < TB_PATH_MIN_HOPID)
742 min_hopid = TB_PATH_MIN_HOPID;
743
744 if (max_hopid < 0 || max_hopid > port_max_hopid)
745 max_hopid = port_max_hopid;
746
747 return ida_simple_get(ida, min_hopid, max_hopid + 1, GFP_KERNEL);
748 }
749
750 /**
751 * tb_port_alloc_in_hopid() - Allocate input HopID from port
752 * @port: Port to allocate HopID for
753 * @min_hopid: Minimum acceptable input HopID
754 * @max_hopid: Maximum acceptable input HopID
755 *
756 * Return: HopID between @min_hopid and @max_hopid or negative errno in
757 * case of error.
758 */
759 int tb_port_alloc_in_hopid(struct tb_port *port, int min_hopid, int max_hopid)
760 {
761 return tb_port_alloc_hopid(port, true, min_hopid, max_hopid);
762 }
763
764 /**
765 * tb_port_alloc_out_hopid() - Allocate output HopID from port
766 * @port: Port to allocate HopID for
767 * @min_hopid: Minimum acceptable output HopID
768 * @max_hopid: Maximum acceptable output HopID
769 *
770 * Return: HopID between @min_hopid and @max_hopid or negative errno in
771 * case of error.
772 */
773 int tb_port_alloc_out_hopid(struct tb_port *port, int min_hopid, int max_hopid)
774 {
775 return tb_port_alloc_hopid(port, false, min_hopid, max_hopid);
776 }
777
778 /**
779 * tb_port_release_in_hopid() - Release allocated input HopID from port
780 * @port: Port whose HopID to release
781 * @hopid: HopID to release
782 */
783 void tb_port_release_in_hopid(struct tb_port *port, int hopid)
784 {
785 ida_simple_remove(&port->in_hopids, hopid);
786 }
787
788 /**
789 * tb_port_release_out_hopid() - Release allocated output HopID from port
790 * @port: Port whose HopID to release
791 * @hopid: HopID to release
792 */
793 void tb_port_release_out_hopid(struct tb_port *port, int hopid)
794 {
795 ida_simple_remove(&port->out_hopids, hopid);
796 }
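/*
 * Minimal usage sketch for the HopID helpers above (illustrative only;
 * the real callers live in the path code). A negative @max_hopid means
 * "use the port maximum" as the upper bound:
 *
 *	int hopid = tb_port_alloc_in_hopid(port, TB_PATH_MIN_HOPID, -1);
 *
 *	if (hopid < 0)
 *		return hopid;
 *	...
 *	tb_port_release_in_hopid(port, hopid);
 */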
797
798 static inline bool tb_switch_is_reachable(const struct tb_switch *parent,
799 const struct tb_switch *sw)
800 {
801 u64 mask = (1ULL << parent->config.depth * 8) - 1;
802 return (tb_route(parent) & mask) == (tb_route(sw) & mask);
803 }
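/*
 * The mask arithmetic above relies on each hop of a route string taking
 * one byte: a parent at depth 1 with route 0x3 yields mask 0xff, so any
 * switch whose route string ends in byte 0x03 (0x503, for example) is
 * considered reachable through it.
 */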
804
805 /**
806 * tb_next_port_on_path() - Return next port for given port on a path
807 * @start: Start port of the walk
808 * @end: End port of the walk
809 * @prev: Previous port (%NULL if this is the first)
810 *
811 * This function can be used to walk from one port to another if they
812  * are connected through zero or more switches. If @prev is a dual
813  * link port, the function follows that link and returns the other end of
814 * that same link.
815 *
816 * If the @end port has been reached, return %NULL.
817 *
818 * Domain tb->lock must be held when this function is called.
819 */
820 struct tb_port *tb_next_port_on_path(struct tb_port *start, struct tb_port *end,
821 struct tb_port *prev)
822 {
823 struct tb_port *next;
824
825 if (!prev)
826 return start;
827
828 if (prev->sw == end->sw) {
829 if (prev == end)
830 return NULL;
831 return end;
832 }
833
834 if (tb_switch_is_reachable(prev->sw, end->sw)) {
835 next = tb_port_at(tb_route(end->sw), prev->sw);
836 /* Walk down the topology if next == prev */
837 if (prev->remote &&
838 (next == prev || next->dual_link_port == prev))
839 next = prev->remote;
840 } else {
841 if (tb_is_upstream_port(prev)) {
842 next = prev->remote;
843 } else {
844 next = tb_upstream_port(prev->sw);
845 /*
846 * Keep the same link if prev and next are both
847 * dual link ports.
848 */
849 if (next->dual_link_port &&
850 next->link_nr != prev->link_nr) {
851 next = next->dual_link_port;
852 }
853 }
854 }
855
856 return next != prev ? next : NULL;
857 }
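/*
 * Illustrative loop visiting every port between two endpoints with the
 * helper above (src and dst are placeholder ports; start with
 * @prev == NULL and stop when the helper returns %NULL):
 *
 *	struct tb_port *p = NULL;
 *
 *	while ((p = tb_next_port_on_path(src, dst, p))) {
 *		...
 *	}
 */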
858
859 /**
860 * tb_port_get_link_speed() - Get current link speed
861 * @port: Port to check (USB4 or CIO)
862 *
863 * Returns link speed in Gb/s or negative errno in case of failure.
864 */
865 int tb_port_get_link_speed(struct tb_port *port)
866 {
867 u32 val, speed;
868 int ret;
869
870 if (!port->cap_phy)
871 return -EINVAL;
872
873 ret = tb_port_read(port, &val, TB_CFG_PORT,
874 port->cap_phy + LANE_ADP_CS_1, 1);
875 if (ret)
876 return ret;
877
878 speed = (val & LANE_ADP_CS_1_CURRENT_SPEED_MASK) >>
879 LANE_ADP_CS_1_CURRENT_SPEED_SHIFT;
880 return speed == LANE_ADP_CS_1_CURRENT_SPEED_GEN3 ? 20 : 10;
881 }
882
883 static int tb_port_get_link_width(struct tb_port *port)
884 {
885 u32 val;
886 int ret;
887
888 if (!port->cap_phy)
889 return -EINVAL;
890
891 ret = tb_port_read(port, &val, TB_CFG_PORT,
892 port->cap_phy + LANE_ADP_CS_1, 1);
893 if (ret)
894 return ret;
895
896 return (val & LANE_ADP_CS_1_CURRENT_WIDTH_MASK) >>
897 LANE_ADP_CS_1_CURRENT_WIDTH_SHIFT;
898 }
899
900 static bool tb_port_is_width_supported(struct tb_port *port, int width)
901 {
902 u32 phy, widths;
903 int ret;
904
905 if (!port->cap_phy)
906 return false;
907
908 ret = tb_port_read(port, &phy, TB_CFG_PORT,
909 port->cap_phy + LANE_ADP_CS_0, 1);
910 if (ret)
911 return false;
912
913 widths = (phy & LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK) >>
914 LANE_ADP_CS_0_SUPPORTED_WIDTH_SHIFT;
915
916 return !!(widths & width);
917 }
918
919 static int tb_port_set_link_width(struct tb_port *port, unsigned int width)
920 {
921 u32 val;
922 int ret;
923
924 if (!port->cap_phy)
925 return -EINVAL;
926
927 ret = tb_port_read(port, &val, TB_CFG_PORT,
928 port->cap_phy + LANE_ADP_CS_1, 1);
929 if (ret)
930 return ret;
931
932 val &= ~LANE_ADP_CS_1_TARGET_WIDTH_MASK;
933 switch (width) {
934 case 1:
935 val |= LANE_ADP_CS_1_TARGET_WIDTH_SINGLE <<
936 LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;
937 break;
938 case 2:
939 val |= LANE_ADP_CS_1_TARGET_WIDTH_DUAL <<
940 LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;
941 break;
942 default:
943 return -EINVAL;
944 }
945
946 val |= LANE_ADP_CS_1_LB;
947
948 return tb_port_write(port, &val, TB_CFG_PORT,
949 port->cap_phy + LANE_ADP_CS_1, 1);
950 }
951
952 static int tb_port_lane_bonding_enable(struct tb_port *port)
953 {
954 int ret;
955
956 /*
957          * Enable lane bonding for both links if not already enabled,
958          * for example, by the boot firmware.
959 */
960 ret = tb_port_get_link_width(port);
961 if (ret == 1) {
962 ret = tb_port_set_link_width(port, 2);
963 if (ret)
964 return ret;
965 }
966
967 ret = tb_port_get_link_width(port->dual_link_port);
968 if (ret == 1) {
969 ret = tb_port_set_link_width(port->dual_link_port, 2);
970 if (ret) {
971 tb_port_set_link_width(port, 1);
972 return ret;
973 }
974 }
975
976 port->bonded = true;
977 port->dual_link_port->bonded = true;
978
979 return 0;
980 }
981
982 static void tb_port_lane_bonding_disable(struct tb_port *port)
983 {
984 port->dual_link_port->bonded = false;
985 port->bonded = false;
986
987 tb_port_set_link_width(port->dual_link_port, 1);
988 tb_port_set_link_width(port, 1);
989 }
990
991 /**
992 * tb_port_is_enabled() - Is the adapter port enabled
993 * @port: Port to check
994 */
995 bool tb_port_is_enabled(struct tb_port *port)
996 {
997 switch (port->config.type) {
998 case TB_TYPE_PCIE_UP:
999 case TB_TYPE_PCIE_DOWN:
1000 return tb_pci_port_is_enabled(port);
1001
1002 case TB_TYPE_DP_HDMI_IN:
1003 case TB_TYPE_DP_HDMI_OUT:
1004 return tb_dp_port_is_enabled(port);
1005
1006 case TB_TYPE_USB3_UP:
1007 case TB_TYPE_USB3_DOWN:
1008 return tb_usb3_port_is_enabled(port);
1009
1010 default:
1011 return false;
1012 }
1013 }
1014
1015 /**
1016 * tb_usb3_port_is_enabled() - Is the USB3 adapter port enabled
1017 * @port: USB3 adapter port to check
1018 */
1019 bool tb_usb3_port_is_enabled(struct tb_port *port)
1020 {
1021 u32 data;
1022
1023 if (tb_port_read(port, &data, TB_CFG_PORT,
1024 port->cap_adap + ADP_USB3_CS_0, 1))
1025 return false;
1026
1027 return !!(data & ADP_USB3_CS_0_PE);
1028 }
1029
1030 /**
1031 * tb_usb3_port_enable() - Enable USB3 adapter port
1032 * @port: USB3 adapter port to enable
1033 * @enable: Enable/disable the USB3 adapter
1034 */
1035 int tb_usb3_port_enable(struct tb_port *port, bool enable)
1036 {
1037 u32 word = enable ? (ADP_USB3_CS_0_PE | ADP_USB3_CS_0_V)
1038 : ADP_USB3_CS_0_V;
1039
1040 if (!port->cap_adap)
1041 return -ENXIO;
1042 return tb_port_write(port, &word, TB_CFG_PORT,
1043 port->cap_adap + ADP_USB3_CS_0, 1);
1044 }
1045
1046 /**
1047 * tb_pci_port_is_enabled() - Is the PCIe adapter port enabled
1048 * @port: PCIe port to check
1049 */
1050 bool tb_pci_port_is_enabled(struct tb_port *port)
1051 {
1052 u32 data;
1053
1054 if (tb_port_read(port, &data, TB_CFG_PORT,
1055 port->cap_adap + ADP_PCIE_CS_0, 1))
1056 return false;
1057
1058 return !!(data & ADP_PCIE_CS_0_PE);
1059 }
1060
1061 /**
1062 * tb_pci_port_enable() - Enable PCIe adapter port
1063 * @port: PCIe port to enable
1064 * @enable: Enable/disable the PCIe adapter
1065 */
1066 int tb_pci_port_enable(struct tb_port *port, bool enable)
1067 {
1068 u32 word = enable ? ADP_PCIE_CS_0_PE : 0x0;
1069 if (!port->cap_adap)
1070 return -ENXIO;
1071 return tb_port_write(port, &word, TB_CFG_PORT,
1072 port->cap_adap + ADP_PCIE_CS_0, 1);
1073 }
1074
1075 /**
1076 * tb_dp_port_hpd_is_active() - Is HPD already active
1077 * @port: DP out port to check
1078 *
1079  * Checks if the DP OUT adapter port has the HDP bit already set.
1080 */
1081 int tb_dp_port_hpd_is_active(struct tb_port *port)
1082 {
1083 u32 data;
1084 int ret;
1085
1086 ret = tb_port_read(port, &data, TB_CFG_PORT,
1087 port->cap_adap + ADP_DP_CS_2, 1);
1088 if (ret)
1089 return ret;
1090
1091 return !!(data & ADP_DP_CS_2_HDP);
1092 }
1093
1094 /**
1095 * tb_dp_port_hpd_clear() - Clear HPD from DP IN port
1096 * @port: Port to clear HPD
1097 *
1098 * If the DP IN port has HDP set, this function can be used to clear it.
1099 */
1100 int tb_dp_port_hpd_clear(struct tb_port *port)
1101 {
1102 u32 data;
1103 int ret;
1104
1105 ret = tb_port_read(port, &data, TB_CFG_PORT,
1106 port->cap_adap + ADP_DP_CS_3, 1);
1107 if (ret)
1108 return ret;
1109
1110 data |= ADP_DP_CS_3_HDPC;
1111 return tb_port_write(port, &data, TB_CFG_PORT,
1112 port->cap_adap + ADP_DP_CS_3, 1);
1113 }
1114
1115 /**
1116 * tb_dp_port_set_hops() - Set video/aux Hop IDs for DP port
1117 * @port: DP IN/OUT port to set hops
1118 * @video: Video Hop ID
1119 * @aux_tx: AUX TX Hop ID
1120 * @aux_rx: AUX RX Hop ID
1121 *
1122 * Programs specified Hop IDs for DP IN/OUT port.
1123 */
1124 int tb_dp_port_set_hops(struct tb_port *port, unsigned int video,
1125 unsigned int aux_tx, unsigned int aux_rx)
1126 {
1127 u32 data[2];
1128 int ret;
1129
1130 ret = tb_port_read(port, data, TB_CFG_PORT,
1131 port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
1132 if (ret)
1133 return ret;
1134
1135 data[0] &= ~ADP_DP_CS_0_VIDEO_HOPID_MASK;
1136 data[1] &= ~ADP_DP_CS_1_AUX_RX_HOPID_MASK;
1137         data[1] &= ~ADP_DP_CS_1_AUX_TX_HOPID_MASK;
1138
1139 data[0] |= (video << ADP_DP_CS_0_VIDEO_HOPID_SHIFT) &
1140 ADP_DP_CS_0_VIDEO_HOPID_MASK;
1141 data[1] |= aux_tx & ADP_DP_CS_1_AUX_TX_HOPID_MASK;
1142 data[1] |= (aux_rx << ADP_DP_CS_1_AUX_RX_HOPID_SHIFT) &
1143 ADP_DP_CS_1_AUX_RX_HOPID_MASK;
1144
1145 return tb_port_write(port, data, TB_CFG_PORT,
1146 port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
1147 }
1148
1149 /**
1150 * tb_dp_port_is_enabled() - Is DP adapter port enabled
1151 * @port: DP adapter port to check
1152 */
1153 bool tb_dp_port_is_enabled(struct tb_port *port)
1154 {
1155 u32 data[2];
1156
1157 if (tb_port_read(port, data, TB_CFG_PORT, port->cap_adap + ADP_DP_CS_0,
1158 ARRAY_SIZE(data)))
1159 return false;
1160
1161 return !!(data[0] & (ADP_DP_CS_0_VE | ADP_DP_CS_0_AE));
1162 }
1163
1164 /**
1165 * tb_dp_port_enable() - Enables/disables DP paths of a port
1166 * @port: DP IN/OUT port
1167 * @enable: Enable/disable DP path
1168 *
1169 * Once Hop IDs are programmed DP paths can be enabled or disabled by
1170 * calling this function.
1171 */
1172 int tb_dp_port_enable(struct tb_port *port, bool enable)
1173 {
1174 u32 data[2];
1175 int ret;
1176
1177 ret = tb_port_read(port, data, TB_CFG_PORT,
1178 port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
1179 if (ret)
1180 return ret;
1181
1182 if (enable)
1183 data[0] |= ADP_DP_CS_0_VE | ADP_DP_CS_0_AE;
1184 else
1185 data[0] &= ~(ADP_DP_CS_0_VE | ADP_DP_CS_0_AE);
1186
1187 return tb_port_write(port, data, TB_CFG_PORT,
1188 port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
1189 }
1190
1191 /* switch utility functions */
1192
1193 static const char *tb_switch_generation_name(const struct tb_switch *sw)
1194 {
1195 switch (sw->generation) {
1196 case 1:
1197 return "Thunderbolt 1";
1198 case 2:
1199 return "Thunderbolt 2";
1200 case 3:
1201 return "Thunderbolt 3";
1202 case 4:
1203 return "USB4";
1204 default:
1205 return "Unknown";
1206 }
1207 }
1208
1209 static void tb_dump_switch(const struct tb *tb, const struct tb_switch *sw)
1210 {
1211 const struct tb_regs_switch_header *regs = &sw->config;
1212
1213 tb_dbg(tb, " %s Switch: %x:%x (Revision: %d, TB Version: %d)\n",
1214 tb_switch_generation_name(sw), regs->vendor_id, regs->device_id,
1215 regs->revision, regs->thunderbolt_version);
1216 tb_dbg(tb, " Max Port Number: %d\n", regs->max_port_number);
1217 tb_dbg(tb, " Config:\n");
1218 tb_dbg(tb,
1219 " Upstream Port Number: %d Depth: %d Route String: %#llx Enabled: %d, PlugEventsDelay: %dms\n",
1220 regs->upstream_port_number, regs->depth,
1221 (((u64) regs->route_hi) << 32) | regs->route_lo,
1222 regs->enabled, regs->plug_events_delay);
1223 tb_dbg(tb, " unknown1: %#x unknown4: %#x\n",
1224 regs->__unknown1, regs->__unknown4);
1225 }
1226
1227 /**
1228  * tb_switch_reset() - reconfigure route, enable and send TB_CFG_PKG_RESET
1229 *
1230 * Return: Returns 0 on success or an error code on failure.
1231 */
1232 int tb_switch_reset(struct tb *tb, u64 route)
1233 {
1234 struct tb_cfg_result res;
1235 struct tb_regs_switch_header header = {
1236 header.route_hi = route >> 32,
1237 header.route_lo = route,
1238 header.enabled = true,
1239 };
1240 tb_dbg(tb, "resetting switch at %llx\n", route);
1241 res.err = tb_cfg_write(tb->ctl, ((u32 *) &header) + 2, route,
1242 0, 2, 2, 2);
1243 if (res.err)
1244 return res.err;
1245 res = tb_cfg_reset(tb->ctl, route, TB_CFG_DEFAULT_TIMEOUT);
1246 if (res.err > 0)
1247 return -EIO;
1248 return res.err;
1249 }
1250
1251 /**
1252 * tb_plug_events_active() - enable/disable plug events on a switch
1253 *
1254 * Also configures a sane plug_events_delay of 255ms.
1255 *
1256 * Return: Returns 0 on success or an error code on failure.
1257 */
1258 static int tb_plug_events_active(struct tb_switch *sw, bool active)
1259 {
1260 u32 data;
1261 int res;
1262
1263 if (tb_switch_is_icm(sw))
1264 return 0;
1265
1266 sw->config.plug_events_delay = 0xff;
1267 res = tb_sw_write(sw, ((u32 *) &sw->config) + 4, TB_CFG_SWITCH, 4, 1);
1268 if (res)
1269 return res;
1270
1271 /* Plug events are always enabled in USB4 */
1272 if (tb_switch_is_usb4(sw))
1273 return 0;
1274
1275 res = tb_sw_read(sw, &data, TB_CFG_SWITCH, sw->cap_plug_events + 1, 1);
1276 if (res)
1277 return res;
1278
1279 if (active) {
1280 data = data & 0xFFFFFF83;
1281 switch (sw->config.device_id) {
1282 case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
1283 case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE:
1284 case PCI_DEVICE_ID_INTEL_PORT_RIDGE:
1285 break;
1286 default:
1287 data |= 4;
1288 }
1289 } else {
1290 data = data | 0x7c;
1291 }
1292 return tb_sw_write(sw, &data, TB_CFG_SWITCH,
1293 sw->cap_plug_events + 1, 1);
1294 }
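/*
 * For reference, 0xFFFFFF83 above is ~0x7c: enabling clears bits 2-6 of
 * the plug events register (setting bit 2 back for anything that is not
 * one of the listed legacy devices) and disabling sets all of them.
 */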
1295
1296 static ssize_t authorized_show(struct device *dev,
1297 struct device_attribute *attr,
1298 char *buf)
1299 {
1300 struct tb_switch *sw = tb_to_switch(dev);
1301
1302 return sprintf(buf, "%u\n", sw->authorized);
1303 }
1304
1305 static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val)
1306 {
1307 int ret = -EINVAL;
1308
1309 if (!mutex_trylock(&sw->tb->lock))
1310 return restart_syscall();
1311
1312 if (sw->authorized)
1313 goto unlock;
1314
1315 switch (val) {
1316 /* Approve switch */
1317 case 1:
1318 if (sw->key)
1319 ret = tb_domain_approve_switch_key(sw->tb, sw);
1320 else
1321 ret = tb_domain_approve_switch(sw->tb, sw);
1322 break;
1323
1324 /* Challenge switch */
1325 case 2:
1326 if (sw->key)
1327 ret = tb_domain_challenge_switch_key(sw->tb, sw);
1328 break;
1329
1330 default:
1331 break;
1332 }
1333
1334 if (!ret) {
1335 sw->authorized = val;
1336 /* Notify status change to the userspace */
1337 kobject_uevent(&sw->dev.kobj, KOBJ_CHANGE);
1338 }
1339
1340 unlock:
1341 mutex_unlock(&sw->tb->lock);
1342 return ret;
1343 }
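/*
 * The values accepted above mirror the authorized sysfs ABI: writing 1
 * approves the switch (using the stored key if one has been set) and
 * writing 2 sends a key challenge, which only succeeds when a key is
 * present.
 */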
1344
1345 static ssize_t authorized_store(struct device *dev,
1346 struct device_attribute *attr,
1347 const char *buf, size_t count)
1348 {
1349 struct tb_switch *sw = tb_to_switch(dev);
1350 unsigned int val;
1351 ssize_t ret;
1352
1353 ret = kstrtouint(buf, 0, &val);
1354 if (ret)
1355 return ret;
1356 if (val > 2)
1357 return -EINVAL;
1358
1359 pm_runtime_get_sync(&sw->dev);
1360 ret = tb_switch_set_authorized(sw, val);
1361 pm_runtime_mark_last_busy(&sw->dev);
1362 pm_runtime_put_autosuspend(&sw->dev);
1363
1364 return ret ? ret : count;
1365 }
1366 static DEVICE_ATTR_RW(authorized);
1367
1368 static ssize_t boot_show(struct device *dev, struct device_attribute *attr,
1369 char *buf)
1370 {
1371 struct tb_switch *sw = tb_to_switch(dev);
1372
1373 return sprintf(buf, "%u\n", sw->boot);
1374 }
1375 static DEVICE_ATTR_RO(boot);
1376
1377 static ssize_t device_show(struct device *dev, struct device_attribute *attr,
1378 char *buf)
1379 {
1380 struct tb_switch *sw = tb_to_switch(dev);
1381
1382 return sprintf(buf, "%#x\n", sw->device);
1383 }
1384 static DEVICE_ATTR_RO(device);
1385
1386 static ssize_t
1387 device_name_show(struct device *dev, struct device_attribute *attr, char *buf)
1388 {
1389 struct tb_switch *sw = tb_to_switch(dev);
1390
1391 return sprintf(buf, "%s\n", sw->device_name ? sw->device_name : "");
1392 }
1393 static DEVICE_ATTR_RO(device_name);
1394
1395 static ssize_t
1396 generation_show(struct device *dev, struct device_attribute *attr, char *buf)
1397 {
1398 struct tb_switch *sw = tb_to_switch(dev);
1399
1400 return sprintf(buf, "%u\n", sw->generation);
1401 }
1402 static DEVICE_ATTR_RO(generation);
1403
1404 static ssize_t key_show(struct device *dev, struct device_attribute *attr,
1405 char *buf)
1406 {
1407 struct tb_switch *sw = tb_to_switch(dev);
1408 ssize_t ret;
1409
1410 if (!mutex_trylock(&sw->tb->lock))
1411 return restart_syscall();
1412
1413 if (sw->key)
1414 ret = sprintf(buf, "%*phN\n", TB_SWITCH_KEY_SIZE, sw->key);
1415 else
1416 ret = sprintf(buf, "\n");
1417
1418 mutex_unlock(&sw->tb->lock);
1419 return ret;
1420 }
1421
1422 static ssize_t key_store(struct device *dev, struct device_attribute *attr,
1423 const char *buf, size_t count)
1424 {
1425 struct tb_switch *sw = tb_to_switch(dev);
1426 u8 key[TB_SWITCH_KEY_SIZE];
1427 ssize_t ret = count;
1428 bool clear = false;
1429
1430 if (!strcmp(buf, "\n"))
1431 clear = true;
1432 else if (hex2bin(key, buf, sizeof(key)))
1433 return -EINVAL;
1434
1435 if (!mutex_trylock(&sw->tb->lock))
1436 return restart_syscall();
1437
1438 if (sw->authorized) {
1439 ret = -EBUSY;
1440 } else {
1441 kfree(sw->key);
1442 if (clear) {
1443 sw->key = NULL;
1444 } else {
1445 sw->key = kmemdup(key, sizeof(key), GFP_KERNEL);
1446 if (!sw->key)
1447 ret = -ENOMEM;
1448 }
1449 }
1450
1451 mutex_unlock(&sw->tb->lock);
1452 return ret;
1453 }
1454 static DEVICE_ATTR(key, 0600, key_show, key_store);
1455
1456 static ssize_t speed_show(struct device *dev, struct device_attribute *attr,
1457 char *buf)
1458 {
1459 struct tb_switch *sw = tb_to_switch(dev);
1460
1461 return sprintf(buf, "%u.0 Gb/s\n", sw->link_speed);
1462 }
1463
1464 /*
1465  * Currently all lanes must run at the same speed but we expose both
1466  * directions here to allow possible asymmetric links in the future.
1467 */
1468 static DEVICE_ATTR(rx_speed, 0444, speed_show, NULL);
1469 static DEVICE_ATTR(tx_speed, 0444, speed_show, NULL);
1470
1471 static ssize_t lanes_show(struct device *dev, struct device_attribute *attr,
1472 char *buf)
1473 {
1474 struct tb_switch *sw = tb_to_switch(dev);
1475
1476 return sprintf(buf, "%u\n", sw->link_width);
1477 }
1478
1479 /*
1480  * Currently the link has the same number of lanes in both directions (1 or 2) but
1481 * expose them separately to allow possible asymmetric links in the future.
1482 */
1483 static DEVICE_ATTR(rx_lanes, 0444, lanes_show, NULL);
1484 static DEVICE_ATTR(tx_lanes, 0444, lanes_show, NULL);
1485
1486 static ssize_t nvm_authenticate_show(struct device *dev,
1487 struct device_attribute *attr, char *buf)
1488 {
1489 struct tb_switch *sw = tb_to_switch(dev);
1490 u32 status;
1491
1492 nvm_get_auth_status(sw, &status);
1493 return sprintf(buf, "%#x\n", status);
1494 }
1495
1496 static ssize_t nvm_authenticate_sysfs(struct device *dev, const char *buf,
1497 bool disconnect)
1498 {
1499 struct tb_switch *sw = tb_to_switch(dev);
1500 int val;
1501 int ret;
1502
1503 pm_runtime_get_sync(&sw->dev);
1504
1505 if (!mutex_trylock(&sw->tb->lock)) {
1506 ret = restart_syscall();
1507 goto exit_rpm;
1508 }
1509
1510 /* If NVMem devices are not yet added */
1511 if (!sw->nvm) {
1512 ret = -EAGAIN;
1513 goto exit_unlock;
1514 }
1515
1516 ret = kstrtoint(buf, 10, &val);
1517 if (ret)
1518 goto exit_unlock;
1519
1520 /* Always clear the authentication status */
1521 nvm_clear_auth_status(sw);
1522
1523 if (val > 0) {
1524 if (!sw->nvm->flushed) {
1525 if (!sw->nvm->buf) {
1526 ret = -EINVAL;
1527 goto exit_unlock;
1528 }
1529
1530 ret = nvm_validate_and_write(sw);
1531 if (ret || val == WRITE_ONLY)
1532 goto exit_unlock;
1533 }
1534 if (val == WRITE_AND_AUTHENTICATE) {
1535 if (disconnect) {
1536 ret = tb_lc_force_power(sw);
1537 } else {
1538 sw->nvm->authenticating = true;
1539 ret = nvm_authenticate(sw);
1540 }
1541 }
1542 }
1543
1544 exit_unlock:
1545 mutex_unlock(&sw->tb->lock);
1546 exit_rpm:
1547 pm_runtime_mark_last_busy(&sw->dev);
1548 pm_runtime_put_autosuspend(&sw->dev);
1549
1550 return ret;
1551 }
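/*
 * Typical flow from userspace (illustrative): copy the new image to the
 * non-active NVMem device and write 1 to nvm_authenticate. That ends up
 * here with @disconnect false, flushes the cached image through
 * nvm_validate_and_write() and starts the authentication. Writing 2
 * (WRITE_ONLY) stops after the flush.
 */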
1552
1553 static ssize_t nvm_authenticate_store(struct device *dev,
1554 struct device_attribute *attr, const char *buf, size_t count)
1555 {
1556 int ret = nvm_authenticate_sysfs(dev, buf, false);
1557 if (ret)
1558 return ret;
1559 return count;
1560 }
1561 static DEVICE_ATTR_RW(nvm_authenticate);
1562
1563 static ssize_t nvm_authenticate_on_disconnect_show(struct device *dev,
1564 struct device_attribute *attr, char *buf)
1565 {
1566 return nvm_authenticate_show(dev, attr, buf);
1567 }
1568
1569 static ssize_t nvm_authenticate_on_disconnect_store(struct device *dev,
1570 struct device_attribute *attr, const char *buf, size_t count)
1571 {
1572 int ret;
1573
1574 ret = nvm_authenticate_sysfs(dev, buf, true);
1575 return ret ? ret : count;
1576 }
1577 static DEVICE_ATTR_RW(nvm_authenticate_on_disconnect);
1578
1579 static ssize_t nvm_version_show(struct device *dev,
1580 struct device_attribute *attr, char *buf)
1581 {
1582 struct tb_switch *sw = tb_to_switch(dev);
1583 int ret;
1584
1585 if (!mutex_trylock(&sw->tb->lock))
1586 return restart_syscall();
1587
1588 if (sw->safe_mode)
1589 ret = -ENODATA;
1590 else if (!sw->nvm)
1591 ret = -EAGAIN;
1592 else
1593 ret = sprintf(buf, "%x.%x\n", sw->nvm->major, sw->nvm->minor);
1594
1595 mutex_unlock(&sw->tb->lock);
1596
1597 return ret;
1598 }
1599 static DEVICE_ATTR_RO(nvm_version);
1600
1601 static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
1602 char *buf)
1603 {
1604 struct tb_switch *sw = tb_to_switch(dev);
1605
1606 return sprintf(buf, "%#x\n", sw->vendor);
1607 }
1608 static DEVICE_ATTR_RO(vendor);
1609
1610 static ssize_t
1611 vendor_name_show(struct device *dev, struct device_attribute *attr, char *buf)
1612 {
1613 struct tb_switch *sw = tb_to_switch(dev);
1614
1615 return sprintf(buf, "%s\n", sw->vendor_name ? sw->vendor_name : "");
1616 }
1617 static DEVICE_ATTR_RO(vendor_name);
1618
1619 static ssize_t unique_id_show(struct device *dev, struct device_attribute *attr,
1620 char *buf)
1621 {
1622 struct tb_switch *sw = tb_to_switch(dev);
1623
1624 return sprintf(buf, "%pUb\n", sw->uuid);
1625 }
1626 static DEVICE_ATTR_RO(unique_id);
1627
1628 static struct attribute *switch_attrs[] = {
1629 &dev_attr_authorized.attr,
1630 &dev_attr_boot.attr,
1631 &dev_attr_device.attr,
1632 &dev_attr_device_name.attr,
1633 &dev_attr_generation.attr,
1634 &dev_attr_key.attr,
1635 &dev_attr_nvm_authenticate.attr,
1636 &dev_attr_nvm_authenticate_on_disconnect.attr,
1637 &dev_attr_nvm_version.attr,
1638 &dev_attr_rx_speed.attr,
1639 &dev_attr_rx_lanes.attr,
1640 &dev_attr_tx_speed.attr,
1641 &dev_attr_tx_lanes.attr,
1642 &dev_attr_vendor.attr,
1643 &dev_attr_vendor_name.attr,
1644 &dev_attr_unique_id.attr,
1645 NULL,
1646 };
1647
1648 static umode_t switch_attr_is_visible(struct kobject *kobj,
1649 struct attribute *attr, int n)
1650 {
1651 struct device *dev = container_of(kobj, struct device, kobj);
1652 struct tb_switch *sw = tb_to_switch(dev);
1653
1654 if (attr == &dev_attr_device.attr) {
1655 if (!sw->device)
1656 return 0;
1657 } else if (attr == &dev_attr_device_name.attr) {
1658 if (!sw->device_name)
1659 return 0;
1660 } else if (attr == &dev_attr_vendor.attr) {
1661 if (!sw->vendor)
1662 return 0;
1663 } else if (attr == &dev_attr_vendor_name.attr) {
1664 if (!sw->vendor_name)
1665 return 0;
1666 } else if (attr == &dev_attr_key.attr) {
1667 if (tb_route(sw) &&
1668 sw->tb->security_level == TB_SECURITY_SECURE &&
1669 sw->security_level == TB_SECURITY_SECURE)
1670 return attr->mode;
1671 return 0;
1672 } else if (attr == &dev_attr_rx_speed.attr ||
1673 attr == &dev_attr_rx_lanes.attr ||
1674 attr == &dev_attr_tx_speed.attr ||
1675 attr == &dev_attr_tx_lanes.attr) {
1676 if (tb_route(sw))
1677 return attr->mode;
1678 return 0;
1679 } else if (attr == &dev_attr_nvm_authenticate.attr) {
1680 if (nvm_upgradeable(sw))
1681 return attr->mode;
1682 return 0;
1683 } else if (attr == &dev_attr_nvm_version.attr) {
1684 if (nvm_readable(sw))
1685 return attr->mode;
1686 return 0;
1687 } else if (attr == &dev_attr_boot.attr) {
1688 if (tb_route(sw))
1689 return attr->mode;
1690 return 0;
1691 } else if (attr == &dev_attr_nvm_authenticate_on_disconnect.attr) {
1692 if (sw->quirks & QUIRK_FORCE_POWER_LINK_CONTROLLER)
1693 return attr->mode;
1694 return 0;
1695 }
1696
1697 return sw->safe_mode ? 0 : attr->mode;
1698 }
1699
1700 static struct attribute_group switch_group = {
1701 .is_visible = switch_attr_is_visible,
1702 .attrs = switch_attrs,
1703 };
1704
1705 static const struct attribute_group *switch_groups[] = {
1706 &switch_group,
1707 NULL,
1708 };
1709
1710 static void tb_switch_release(struct device *dev)
1711 {
1712 struct tb_switch *sw = tb_to_switch(dev);
1713 struct tb_port *port;
1714
1715 dma_port_free(sw->dma_port);
1716
1717 tb_switch_for_each_port(sw, port) {
1718 if (!port->disabled) {
1719 ida_destroy(&port->in_hopids);
1720 ida_destroy(&port->out_hopids);
1721 }
1722 }
1723
1724 kfree(sw->uuid);
1725 kfree(sw->device_name);
1726 kfree(sw->vendor_name);
1727 kfree(sw->ports);
1728 kfree(sw->drom);
1729 kfree(sw->key);
1730 kfree(sw);
1731 }
1732
1733 /*
1734  * Currently we only need to provide the callbacks. Everything else is handled
1735 * in the connection manager.
1736 */
1737 static int __maybe_unused tb_switch_runtime_suspend(struct device *dev)
1738 {
1739 struct tb_switch *sw = tb_to_switch(dev);
1740 const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;
1741
1742 if (cm_ops->runtime_suspend_switch)
1743 return cm_ops->runtime_suspend_switch(sw);
1744
1745 return 0;
1746 }
1747
1748 static int __maybe_unused tb_switch_runtime_resume(struct device *dev)
1749 {
1750 struct tb_switch *sw = tb_to_switch(dev);
1751 const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;
1752
1753 if (cm_ops->runtime_resume_switch)
1754 return cm_ops->runtime_resume_switch(sw);
1755 return 0;
1756 }
1757
1758 static const struct dev_pm_ops tb_switch_pm_ops = {
1759 SET_RUNTIME_PM_OPS(tb_switch_runtime_suspend, tb_switch_runtime_resume,
1760 NULL)
1761 };
1762
1763 struct device_type tb_switch_type = {
1764 .name = "thunderbolt_device",
1765 .release = tb_switch_release,
1766 .pm = &tb_switch_pm_ops,
1767 };
1768
1769 static int tb_switch_get_generation(struct tb_switch *sw)
1770 {
1771 switch (sw->config.device_id) {
1772 case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
1773 case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE:
1774 case PCI_DEVICE_ID_INTEL_LIGHT_PEAK:
1775 case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_2C:
1776 case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
1777 case PCI_DEVICE_ID_INTEL_PORT_RIDGE:
1778 case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_2C_BRIDGE:
1779 case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_4C_BRIDGE:
1780 return 1;
1781
1782 case PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_BRIDGE:
1783 case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE:
1784 case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE:
1785 return 2;
1786
1787 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE:
1788 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE:
1789 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE:
1790 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE:
1791 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE:
1792 case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE:
1793 case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE:
1794 case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE:
1795 case PCI_DEVICE_ID_INTEL_ICL_NHI0:
1796 case PCI_DEVICE_ID_INTEL_ICL_NHI1:
1797 return 3;
1798
1799 default:
1800 if (tb_switch_is_usb4(sw))
1801 return 4;
1802
1803 /*
1804 * For unknown switches assume generation to be 1 to be
1805 * on the safe side.
1806 */
1807 tb_sw_warn(sw, "unsupported switch device id %#x\n",
1808 sw->config.device_id);
1809 return 1;
1810 }
1811 }
1812
1813 static bool tb_switch_exceeds_max_depth(const struct tb_switch *sw, int depth)
1814 {
1815 int max_depth;
1816
1817 if (tb_switch_is_usb4(sw) ||
1818 (sw->tb->root_switch && tb_switch_is_usb4(sw->tb->root_switch)))
1819 max_depth = USB4_SWITCH_MAX_DEPTH;
1820 else
1821 max_depth = TB_SWITCH_MAX_DEPTH;
1822
1823 return depth > max_depth;
1824 }
1825
1826 /**
1827 * tb_switch_alloc() - allocate a switch
1828 * @tb: Pointer to the owning domain
1829 * @parent: Parent device for this switch
1830 * @route: Route string for this switch
1831 *
1832 * Allocates and initializes a switch. Will not upload configuration to
1833 * the switch. For that you need to call tb_switch_configure()
1834 * separately. The returned switch should be released by calling
1835 * tb_switch_put().
1836 *
1837 * Return: Pointer to the allocated switch or ERR_PTR() in case of
1838 * failure.
1839 */
1840 struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent,
1841 u64 route)
1842 {
1843 struct tb_switch *sw;
1844 int upstream_port;
1845 int i, ret, depth;
1846
1847 /* Unlock the downstream port so we can access the switch below */
1848 if (route) {
1849 struct tb_switch *parent_sw = tb_to_switch(parent);
1850 struct tb_port *down;
1851
1852 down = tb_port_at(route, parent_sw);
1853 tb_port_unlock(down);
1854 }
1855
1856 depth = tb_route_length(route);
1857
1858 upstream_port = tb_cfg_get_upstream_port(tb->ctl, route);
1859 if (upstream_port < 0)
1860 return ERR_PTR(upstream_port);
1861
1862 sw = kzalloc(sizeof(*sw), GFP_KERNEL);
1863 if (!sw)
1864 return ERR_PTR(-ENOMEM);
1865
1866 sw->tb = tb;
1867 ret = tb_cfg_read(tb->ctl, &sw->config, route, 0, TB_CFG_SWITCH, 0, 5);
1868 if (ret)
1869 goto err_free_sw_ports;
1870
1871 sw->generation = tb_switch_get_generation(sw);
1872
1873 tb_dbg(tb, "current switch config:\n");
1874 tb_dump_switch(tb, sw);
1875
1876 /* configure switch */
1877 sw->config.upstream_port_number = upstream_port;
1878 sw->config.depth = depth;
1879 sw->config.route_hi = upper_32_bits(route);
1880 sw->config.route_lo = lower_32_bits(route);
1881 sw->config.enabled = 0;
1882
1883 /* Make sure we do not exceed maximum topology limit */
1884 if (tb_switch_exceeds_max_depth(sw, depth)) {
1885 ret = -EADDRNOTAVAIL;
1886 goto err_free_sw_ports;
1887 }
1888
1889 /* initialize ports */
1890 sw->ports = kcalloc(sw->config.max_port_number + 1, sizeof(*sw->ports),
1891 GFP_KERNEL);
1892 if (!sw->ports) {
1893 ret = -ENOMEM;
1894 goto err_free_sw_ports;
1895 }
1896
1897 for (i = 0; i <= sw->config.max_port_number; i++) {
1898 /* minimum setup for tb_find_cap and tb_drom_read to work */
1899 sw->ports[i].sw = sw;
1900 sw->ports[i].port = i;
1901 }
1902
1903 ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_PLUG_EVENTS);
1904 if (ret > 0)
1905 sw->cap_plug_events = ret;
1906
1907 ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_LINK_CONTROLLER);
1908 if (ret > 0)
1909 sw->cap_lc = ret;
1910
1911 /* Root switch is always authorized */
1912 if (!route)
1913 sw->authorized = true;
1914
1915 device_initialize(&sw->dev);
1916 sw->dev.parent = parent;
1917 sw->dev.bus = &tb_bus_type;
1918 sw->dev.type = &tb_switch_type;
1919 sw->dev.groups = switch_groups;
1920 dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));
1921
1922 return sw;
1923
1924 err_free_sw_ports:
1925 kfree(sw->ports);
1926 kfree(sw);
1927
1928 return ERR_PTR(ret);
1929 }
1930
1931 /**
1932 * tb_switch_alloc_safe_mode() - allocate a switch that is in safe mode
1933 * @tb: Pointer to the owning domain
1934 * @parent: Parent device for this switch
1935 * @route: Route string for this switch
1936 *
1937 * This creates a switch in safe mode. This means the switch pretty much
1938  * lacks all capabilities except the DMA configuration port before it is
1939 * flashed with a valid NVM firmware.
1940 *
1941 * The returned switch must be released by calling tb_switch_put().
1942 *
1943 * Return: Pointer to the allocated switch or ERR_PTR() in case of failure
1944 */
1945 struct tb_switch *
1946 tb_switch_alloc_safe_mode(struct tb *tb, struct device *parent, u64 route)
1947 {
1948 struct tb_switch *sw;
1949
1950 sw = kzalloc(sizeof(*sw), GFP_KERNEL);
1951 if (!sw)
1952 return ERR_PTR(-ENOMEM);
1953
1954 sw->tb = tb;
1955 sw->config.depth = tb_route_length(route);
1956 sw->config.route_hi = upper_32_bits(route);
1957 sw->config.route_lo = lower_32_bits(route);
1958 sw->safe_mode = true;
1959
1960 device_initialize(&sw->dev);
1961 sw->dev.parent = parent;
1962 sw->dev.bus = &tb_bus_type;
1963 sw->dev.type = &tb_switch_type;
1964 sw->dev.groups = switch_groups;
1965 dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));
1966
1967 return sw;
1968 }
1969
1970 /**
1971 * tb_switch_configure() - Uploads configuration to the switch
1972 * @sw: Switch to configure
1973 *
1974 * Call this function before the switch is added to the system. It will
1975  * upload configuration to the switch and make it available for the
1976  * connection manager to use. Can be called for the switch again after
1977 * resume from low power states to re-initialize it.
1978 *
1979 * Return: %0 in case of success and negative errno in case of failure
1980 */
1981 int tb_switch_configure(struct tb_switch *sw)
1982 {
1983 struct tb *tb = sw->tb;
1984 u64 route;
1985 int ret;
1986
1987 route = tb_route(sw);
1988
1989 tb_dbg(tb, "%s Switch at %#llx (depth: %d, up port: %d)\n",
1990 sw->config.enabled ? "restoring " : "initializing", route,
1991 tb_route_length(route), sw->config.upstream_port_number);
1992
1993 sw->config.enabled = 1;
1994
1995 if (tb_switch_is_usb4(sw)) {
1996 /*
1997 * For USB4 devices, we need to program the CM version
1998 * accordingly so that it knows to expose all the
1999 * additional capabilities.
2000 */
2001 sw->config.cmuv = USB4_VERSION_1_0;
2002
2003 /* Enumerate the switch */
2004 ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH,
2005 ROUTER_CS_1, 4);
2006 if (ret)
2007 return ret;
2008
2009 ret = usb4_switch_setup(sw);
2010 if (ret)
2011 return ret;
2012
2013 ret = usb4_switch_configure_link(sw);
2014 } else {
2015 if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL)
2016 tb_sw_warn(sw, "unknown switch vendor id %#x\n",
2017 sw->config.vendor_id);
2018
2019 if (!sw->cap_plug_events) {
2020 tb_sw_warn(sw, "cannot find TB_VSE_CAP_PLUG_EVENTS aborting\n");
2021 return -ENODEV;
2022 }
2023
2024 /* Enumerate the switch */
2025 ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH,
2026 ROUTER_CS_1, 3);
2027 if (ret)
2028 return ret;
2029
2030 ret = tb_lc_configure_link(sw);
2031 }
2032 if (ret)
2033 return ret;
2034
2035 return tb_plug_events_active(sw, true);
2036 }
2037
2038 static int tb_switch_set_uuid(struct tb_switch *sw)
2039 {
2040 bool uid = false;
2041 u32 uuid[4];
2042 int ret;
2043
2044 if (sw->uuid)
2045 return 0;
2046
2047 if (tb_switch_is_usb4(sw)) {
2048 ret = usb4_switch_read_uid(sw, &sw->uid);
2049 if (ret)
2050 return ret;
2051 uid = true;
2052 } else {
2053 /*
2054 * The newer controllers include fused UUID as part of
2055          * link controller specific registers.
2056 */
2057 ret = tb_lc_read_uuid(sw, uuid);
2058 if (ret) {
2059 if (ret != -EINVAL)
2060 return ret;
2061 uid = true;
2062 }
2063 }
2064
2065 if (uid) {
2066 /*
2067 * ICM generates UUID based on UID and fills the upper
2068 * two words with ones. This is not strictly following
2069 * UUID format but we want to be compatible with it so
2070 * we do the same here.
2071 */
2072 uuid[0] = sw->uid & 0xffffffff;
2073 uuid[1] = (sw->uid >> 32) & 0xffffffff;
2074 uuid[2] = 0xffffffff;
2075 uuid[3] = 0xffffffff;
2076 }
2077
2078 sw->uuid = kmemdup(uuid, sizeof(uuid), GFP_KERNEL);
2079 if (!sw->uuid)
2080 return -ENOMEM;
2081 return 0;
2082 }
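/*
 * Example of the UID expansion above: uid 0x0123456789abcdef becomes
 * the uuid dwords { 0x89abcdef, 0x01234567, 0xffffffff, 0xffffffff },
 * matching what the ICM would generate for the same router.
 */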
2083
2084 static int tb_switch_add_dma_port(struct tb_switch *sw)
2085 {
2086 u32 status;
2087 int ret;
2088
2089 switch (sw->generation) {
2090 case 2:
2091 /* Only root switch can be upgraded */
2092 if (tb_route(sw))
2093 return 0;
2094
2095 fallthrough;
2096 case 3:
2097 ret = tb_switch_set_uuid(sw);
2098 if (ret)
2099 return ret;
2100 break;
2101
2102 default:
2103 /*
2104 * DMA port is the only thing available when the switch
2105 * is in safe mode.
2106 */
2107 if (!sw->safe_mode)
2108 return 0;
2109 break;
2110 }
2111
2112 /* Root switch DMA port requires running firmware */
2113 if (!tb_route(sw) && !tb_switch_is_icm(sw))
2114 return 0;
2115
2116 sw->dma_port = dma_port_alloc(sw);
2117 if (!sw->dma_port)
2118 return 0;
2119
2120 if (sw->no_nvm_upgrade)
2121 return 0;
2122
2123 /*
2124 * If there is a status already set then authentication failed
2125 * when dma_port_flash_update_auth() returned. Power cycling
2126 * is not needed (it was done already) so the only thing we do
2127 * here is unblock runtime PM of the root port.
2128 */
2129 nvm_get_auth_status(sw, &status);
2130 if (status) {
2131 if (!tb_route(sw))
2132 nvm_authenticate_complete_dma_port(sw);
2133 return 0;
2134 }
2135
2136 /*
2137 * Check the status of the previous flash authentication. If
2138 * there is one pending, we need to power cycle the switch in
2139 * any case to make it functional again.
2140 */
2141 ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
2142 if (ret <= 0)
2143 return ret;
2144
2145 /* Now we can allow the root port to suspend again */
2146 if (!tb_route(sw))
2147 nvm_authenticate_complete_dma_port(sw);
2148
2149 if (status) {
2150 tb_sw_info(sw, "switch flash authentication failed\n");
2151 nvm_set_auth_status(sw, status);
2152 }
2153
2154 tb_sw_info(sw, "power cycling the switch now\n");
2155 dma_port_power_cycle(sw->dma_port);
2156
2157 /*
2158 * We return an error here, which causes adding the switch to fail.
2159 * It should appear back after the power cycle is complete.
2160 */
2161 return -ESHUTDOWN;
2162 }
2163
2164 static void tb_switch_default_link_ports(struct tb_switch *sw)
2165 {
2166 int i;
2167
2168 for (i = 1; i <= sw->config.max_port_number; i += 2) {
2169 struct tb_port *port = &sw->ports[i];
2170 struct tb_port *subordinate;
2171
2172 if (!tb_port_is_null(port))
2173 continue;
2174
2175 /* Check for the subordinate port */
2176 if (i == sw->config.max_port_number ||
2177 !tb_port_is_null(&sw->ports[i + 1]))
2178 continue;
2179
2180 /* Link them if not already done (by the DROM) */
2181 subordinate = &sw->ports[i + 1];
2182 if (!port->dual_link_port && !subordinate->dual_link_port) {
2183 port->link_nr = 0;
2184 port->dual_link_port = subordinate;
2185 subordinate->link_nr = 1;
2186 subordinate->dual_link_port = port;
2187
2188 tb_sw_dbg(sw, "linked ports %d <-> %d\n",
2189 port->port, subordinate->port);
2190 }
2191 }
2192 }
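/*
 * Worked example: on a router with four lane adapters the loop above
 * pairs ports (1, 2) and (3, 4), giving each odd port link_nr 0 and
 * its subordinate link_nr 1, just as a complete DROM would have
 * described them.
 */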
2193
2194 static bool tb_switch_lane_bonding_possible(struct tb_switch *sw)
2195 {
2196 const struct tb_port *up = tb_upstream_port(sw);
2197
2198 if (!up->dual_link_port || !up->dual_link_port->remote)
2199 return false;
2200
2201 if (tb_switch_is_usb4(sw))
2202 return usb4_switch_lane_bonding_possible(sw);
2203 return tb_lc_lane_bonding_possible(sw);
2204 }
2205
2206 static int tb_switch_update_link_attributes(struct tb_switch *sw)
2207 {
2208 struct tb_port *up;
2209 bool change = false;
2210 int ret;
2211
2212 if (!tb_route(sw) || tb_switch_is_icm(sw))
2213 return 0;
2214
2215 up = tb_upstream_port(sw);
2216
2217 ret = tb_port_get_link_speed(up);
2218 if (ret < 0)
2219 return ret;
2220 if (sw->link_speed != ret)
2221 change = true;
2222 sw->link_speed = ret;
2223
2224 ret = tb_port_get_link_width(up);
2225 if (ret < 0)
2226 return ret;
2227 if (sw->link_width != ret)
2228 change = true;
2229 sw->link_width = ret;
2230
2231 /* Notify userspace that there is a possible link attribute change */
2232 if (device_is_registered(&sw->dev) && change)
2233 kobject_uevent(&sw->dev.kobj, KOBJ_CHANGE);
2234
2235 return 0;
2236 }
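/*
 * The KOBJ_CHANGE uevent gives userspace a chance to re-read the
 * link related attributes the driver exposes in sysfs whenever the
 * speed or width of the upstream link changes (for example after
 * lane bonding).
 */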
2237
2238 /**
2239 * tb_switch_lane_bonding_enable() - Enable lane bonding
2240 * @sw: Switch to enable lane bonding
2241 *
2242 * Connection manager can call this function to enable lane bonding of a
2243 * switch. If conditions are correct and both switches support the feature,
2244 * lanes are bonded. It is safe to call this for any switch.
2245 */
2246 int tb_switch_lane_bonding_enable(struct tb_switch *sw)
2247 {
2248 struct tb_switch *parent = tb_to_switch(sw->dev.parent);
2249 struct tb_port *up, *down;
2250 u64 route = tb_route(sw);
2251 int ret;
2252
2253 if (!route)
2254 return 0;
2255
2256 if (!tb_switch_lane_bonding_possible(sw))
2257 return 0;
2258
2259 up = tb_upstream_port(sw);
2260 down = tb_port_at(route, parent);
2261
2262 if (!tb_port_is_width_supported(up, 2) ||
2263 !tb_port_is_width_supported(down, 2))
2264 return 0;
2265
2266 ret = tb_port_lane_bonding_enable(up);
2267 if (ret) {
2268 tb_port_warn(up, "failed to enable lane bonding\n");
2269 return ret;
2270 }
2271
2272 ret = tb_port_lane_bonding_enable(down);
2273 if (ret) {
2274 tb_port_warn(down, "failed to enable lane bonding\n");
2275 tb_port_lane_bonding_disable(up);
2276 return ret;
2277 }
2278
2279 tb_switch_update_link_attributes(sw);
2280
2281 tb_sw_dbg(sw, "lane bonding enabled\n");
2282 return ret;
2283 }
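/*
 * Usage sketch (hypothetical caller; example_bond() is not part of
 * this file): lane bonding is best effort, so a connection manager
 * can attempt it right after the switch has been added and simply
 * log a failure.
 */
static void example_bond(struct tb_switch *sw)
{
	if (tb_switch_lane_bonding_enable(sw))
		tb_sw_warn(sw, "failed to enable lane bonding\n");
}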
2284
2285 /**
2286 * tb_switch_lane_bonding_disable() - Disable lane bonding
2287 * @sw: Switch whose lane bonding to disable
2288 *
2289 * Disables lane bonding between @sw and its parent. This can be
2290 * called even if the lanes were not bonded originally.
2291 */
2292 void tb_switch_lane_bonding_disable(struct tb_switch *sw)
2293 {
2294 struct tb_switch *parent = tb_to_switch(sw->dev.parent);
2295 struct tb_port *up, *down;
2296
2297 if (!tb_route(sw))
2298 return;
2299
2300 up = tb_upstream_port(sw);
2301 if (!up->bonded)
2302 return;
2303
2304 down = tb_port_at(tb_route(sw), parent);
2305
2306 tb_port_lane_bonding_disable(up);
2307 tb_port_lane_bonding_disable(down);
2308
2309 tb_switch_update_link_attributes(sw);
2310 tb_sw_dbg(sw, "lane bonding disabled\n");
2311 }
2312
2313 /**
2314 * tb_switch_add() - Add a switch to the domain
2315 * @sw: Switch to add
2316 *
2317 * This is the last step in adding a switch to the domain. It reads
2318 * identification information from the DROM and initializes the ports
2319 * so that they can be used to connect other switches. The switch is
2320 * exposed to userspace when this function returns successfully. To
2321 * remove and release the switch, call tb_switch_remove().
2322 *
2323 * Return: %0 in case of success and negative errno in case of failure
2324 */
2325 int tb_switch_add(struct tb_switch *sw)
2326 {
2327 int i, ret;
2328
2329 /*
2330 * Initialize the DMA control port now, before we read the DROM.
2331 * Recent host controllers have a more complete DROM in NVM that
2332 * includes vendor and model identification strings which we then
2333 * expose to userspace. The NVM can be accessed through the DMA
2334 * configuration based mailbox.
2335 */
2336 ret = tb_switch_add_dma_port(sw);
2337 if (ret) {
2338 dev_err(&sw->dev, "failed to add DMA port\n");
2339 return ret;
2340 }
2341
2342 if (!sw->safe_mode) {
2343 /* Read the DROM */
2344 ret = tb_drom_read(sw);
2345 if (ret) {
2346 dev_err(&sw->dev, "reading DROM failed\n");
2347 return ret;
2348 }
2349 tb_sw_dbg(sw, "uid: %#llx\n", sw->uid);
2350
2351 ret = tb_switch_set_uuid(sw);
2352 if (ret) {
2353 dev_err(&sw->dev, "failed to set UUID\n");
2354 return ret;
2355 }
2356
2357 for (i = 0; i <= sw->config.max_port_number; i++) {
2358 if (sw->ports[i].disabled) {
2359 tb_port_dbg(&sw->ports[i], "disabled by eeprom\n");
2360 continue;
2361 }
2362 ret = tb_init_port(&sw->ports[i]);
2363 if (ret) {
2364 dev_err(&sw->dev, "failed to initialize port %d\n", i);
2365 return ret;
2366 }
2367 }
2368
2369 tb_switch_default_link_ports(sw);
2370
2371 ret = tb_switch_update_link_attributes(sw);
2372 if (ret)
2373 return ret;
2374
2375 ret = tb_switch_tmu_init(sw);
2376 if (ret)
2377 return ret;
2378 }
2379
2380 ret = device_add(&sw->dev);
2381 if (ret) {
2382 dev_err(&sw->dev, "failed to add device: %d\n", ret);
2383 return ret;
2384 }
2385
2386 if (tb_route(sw)) {
2387 dev_info(&sw->dev, "new device found, vendor=%#x device=%#x\n",
2388 sw->vendor, sw->device);
2389 if (sw->vendor_name && sw->device_name)
2390 dev_info(&sw->dev, "%s %s\n", sw->vendor_name,
2391 sw->device_name);
2392 }
2393
2394 ret = tb_switch_nvm_add(sw);
2395 if (ret) {
2396 dev_err(&sw->dev, "failed to add NVM devices\n");
2397 device_del(&sw->dev);
2398 return ret;
2399 }
2400
2401 pm_runtime_set_active(&sw->dev);
2402 if (sw->rpm) {
2403 pm_runtime_set_autosuspend_delay(&sw->dev, TB_AUTOSUSPEND_DELAY);
2404 pm_runtime_use_autosuspend(&sw->dev);
2405 pm_runtime_mark_last_busy(&sw->dev);
2406 pm_runtime_enable(&sw->dev);
2407 pm_request_autosuspend(&sw->dev);
2408 }
2409
2410 return 0;
2411 }
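/*
 * Continuing the hypothetical bring-up sketch from above: once
 * tb_switch_configure() has succeeded this is the final step. On
 * failure the caller just drops its reference to release the switch.
 */
static int example_publish(struct tb_switch *sw)
{
	int ret;

	ret = tb_switch_add(sw);
	if (ret)
		tb_switch_put(sw);
	return ret;
}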
2412
2413 /**
2414 * tb_switch_remove() - Remove and release a switch
2415 * @sw: Switch to remove
2416 *
2417 * This will remove the switch from the domain and release it once the
2418 * last reference to it drops to zero. If there are switches connected
2419 * below this switch, they will be removed as well.
2420 */
2421 void tb_switch_remove(struct tb_switch *sw)
2422 {
2423 struct tb_port *port;
2424
2425 if (sw->rpm) {
2426 pm_runtime_get_sync(&sw->dev);
2427 pm_runtime_disable(&sw->dev);
2428 }
2429
2430 /* port 0 is the switch itself and never has a remote */
2431 tb_switch_for_each_port(sw, port) {
2432 if (tb_port_has_remote(port)) {
2433 tb_switch_remove(port->remote->sw);
2434 port->remote = NULL;
2435 } else if (port->xdomain) {
2436 tb_xdomain_remove(port->xdomain);
2437 port->xdomain = NULL;
2438 }
2439
2440 /* Remove any downstream retimers */
2441 tb_retimer_remove_all(port);
2442 }
2443
2444 if (!sw->is_unplugged)
2445 tb_plug_events_active(sw, false);
2446
2447 if (tb_switch_is_usb4(sw))
2448 usb4_switch_unconfigure_link(sw);
2449 else
2450 tb_lc_unconfigure_link(sw);
2451
2452 tb_switch_nvm_remove(sw);
2453
2454 if (tb_route(sw))
2455 dev_info(&sw->dev, "device disconnected\n");
2456 device_unregister(&sw->dev);
2457 }
2458
2459 /**
2460 * tb_sw_set_unplugged() - set is_unplugged on switch and downstream switches
2461 */
2462 void tb_sw_set_unplugged(struct tb_switch *sw)
2463 {
2464 struct tb_port *port;
2465
2466 if (sw == sw->tb->root_switch) {
2467 tb_sw_WARN(sw, "cannot unplug root switch\n");
2468 return;
2469 }
2470 if (sw->is_unplugged) {
2471 tb_sw_WARN(sw, "is_unplugged already set\n");
2472 return;
2473 }
2474 sw->is_unplugged = true;
2475 tb_switch_for_each_port(sw, port) {
2476 if (tb_port_has_remote(port))
2477 tb_sw_set_unplugged(port->remote->sw);
2478 else if (port->xdomain)
2479 port->xdomain->is_unplugged = true;
2480 }
2481 }
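/*
 * Hot-unplug sketch (hypothetical; example_unplug() is not part of
 * this file): the subtree is first marked unplugged so that teardown
 * skips hardware that is no longer there, then removed, which
 * recurses to everything below.
 */
static void example_unplug(struct tb_port *port)
{
	tb_sw_set_unplugged(port->remote->sw);
	tb_switch_remove(port->remote->sw);
	port->remote = NULL;
}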
2482
2483 int tb_switch_resume(struct tb_switch *sw)
2484 {
2485 struct tb_port *port;
2486 int err;
2487
2488 tb_sw_dbg(sw, "resuming switch\n");
2489
2490 /*
2491 * Check the UID of the connected switches, except for the
2492 * root switch, which we assume cannot be removed.
2493 */
2494 if (tb_route(sw)) {
2495 u64 uid;
2496
2497 /*
2498 * Check first that we can still read the switch config
2499 * space. It may be that there is now another domain
2500 * connected.
2501 */
2502 err = tb_cfg_get_upstream_port(sw->tb->ctl, tb_route(sw));
2503 if (err < 0) {
2504 tb_sw_info(sw, "switch not present anymore\n");
2505 return err;
2506 }
2507
2508 if (tb_switch_is_usb4(sw))
2509 err = usb4_switch_read_uid(sw, &uid);
2510 else
2511 err = tb_drom_read_uid_only(sw, &uid);
2512 if (err) {
2513 tb_sw_warn(sw, "uid read failed\n");
2514 return err;
2515 }
2516 if (sw->uid != uid) {
2517 tb_sw_info(sw,
2518 "changed while suspended (uid %#llx -> %#llx)\n",
2519 sw->uid, uid);
2520 return -ENODEV;
2521 }
2522 }
2523
2524 err = tb_switch_configure(sw);
2525 if (err)
2526 return err;
2527
2528 /* check for surviving downstream switches */
2529 tb_switch_for_each_port(sw, port) {
2530 if (!tb_port_has_remote(port) && !port->xdomain)
2531 continue;
2532
2533 if (tb_wait_for_port(port, true) <= 0) {
2534 tb_port_warn(port,
2535 "lost during suspend, disconnecting\n");
2536 if (tb_port_has_remote(port))
2537 tb_sw_set_unplugged(port->remote->sw);
2538 else if (port->xdomain)
2539 port->xdomain->is_unplugged = true;
2540 } else if (tb_port_has_remote(port) || port->xdomain) {
2541 /*
2542 * Always unlock the port so the downstream
2543 * switch/domain is accessible.
2544 */
2545 if (tb_port_unlock(port))
2546 tb_port_warn(port, "failed to unlock port\n");
2547 if (port->remote && tb_switch_resume(port->remote->sw)) {
2548 tb_port_warn(port,
2549 "lost during suspend, disconnecting\n");
2550 tb_sw_set_unplugged(port->remote->sw);
2551 }
2552 }
2553 }
2554 return 0;
2555 }
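/*
 * Domain-level resume sketch (hypothetical): resume typically starts
 * from the root switch and recurses downstream; anything that did not
 * survive the low power state is marked unplugged along the way.
 */
static void example_resume(struct tb *tb)
{
	if (tb_switch_resume(tb->root_switch))
		tb_sw_warn(tb->root_switch, "failed to resume\n");
}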
2556
2557 void tb_switch_suspend(struct tb_switch *sw)
2558 {
2559 struct tb_port *port;
2560 int err;
2561
2562 err = tb_plug_events_active(sw, false);
2563 if (err)
2564 return;
2565
2566 tb_switch_for_each_port(sw, port) {
2567 if (tb_port_has_remote(port))
2568 tb_switch_suspend(port->remote->sw);
2569 }
2570
2571 if (tb_switch_is_usb4(sw))
2572 usb4_switch_set_sleep(sw);
2573 else
2574 tb_lc_set_sleep(sw);
2575 }
2576
2577 /**
2578 * tb_switch_query_dp_resource() - Query availability of DP resource
2579 * @sw: Switch whose DP resource is queried
2580 * @in: DP IN port
2581 *
2582 * Queries availability of a DP resource for DP tunneling using switch
2583 * specific means. Returns %true if the resource is available.
2584 */
2585 bool tb_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in)
2586 {
2587 if (tb_switch_is_usb4(sw))
2588 return usb4_switch_query_dp_resource(sw, in);
2589 return tb_lc_dp_sink_query(sw, in);
2590 }
2591
2592 /**
2593 * tb_switch_alloc_dp_resource() - Allocate available DP resource
2594 * @sw: Switch whose DP resource is allocated
2595 * @in: DP IN port
2596 *
2597 * Allocates DP resource for DP tunneling. The resource must be
2598 * available for this to succeed (see tb_switch_query_dp_resource()).
2599 * Returns %0 on success and negative errno otherwise.
2600 */
2601 int tb_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
2602 {
2603 if (tb_switch_is_usb4(sw))
2604 return usb4_switch_alloc_dp_resource(sw, in);
2605 return tb_lc_dp_sink_alloc(sw, in);
2606 }
2607
2608 /**
2609 * tb_switch_dealloc_dp_resource() - De-allocate DP resource
2610 * @sw: Switch whose DP resource is de-allocated
2611 * @in: DP IN port
2612 *
2613 * De-allocates DP resource that was previously allocated for DP
2614 * tunneling.
2615 */
2616 void tb_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
2617 {
2618 int ret;
2619
2620 if (tb_switch_is_usb4(sw))
2621 ret = usb4_switch_dealloc_dp_resource(sw, in);
2622 else
2623 ret = tb_lc_dp_sink_dealloc(sw, in);
2624
2625 if (ret)
2626 tb_sw_warn(sw, "failed to de-allocate DP resource for port %d\n",
2627 in->port);
2628 }
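/*
 * DP resource lifecycle sketch (hypothetical caller; example_claim_dp()
 * is not part of this file): query first, allocate for the lifetime of
 * the tunnel and release it later with tb_switch_dealloc_dp_resource()
 * when the tunnel is torn down.
 */
static int example_claim_dp(struct tb_switch *sw, struct tb_port *in)
{
	if (!tb_switch_query_dp_resource(sw, in))
		return -EBUSY;

	return tb_switch_alloc_dp_resource(sw, in);
}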
2629
2630 struct tb_sw_lookup {
2631 struct tb *tb;
2632 u8 link;
2633 u8 depth;
2634 const uuid_t *uuid;
2635 u64 route;
2636 };
2637
2638 static int tb_switch_match(struct device *dev, const void *data)
2639 {
2640 struct tb_switch *sw = tb_to_switch(dev);
2641 const struct tb_sw_lookup *lookup = data;
2642
2643 if (!sw)
2644 return 0;
2645 if (sw->tb != lookup->tb)
2646 return 0;
2647
2648 if (lookup->uuid)
2649 return !memcmp(sw->uuid, lookup->uuid, sizeof(*lookup->uuid));
2650
2651 if (lookup->route) {
2652 return sw->config.route_lo == lower_32_bits(lookup->route) &&
2653 sw->config.route_hi == upper_32_bits(lookup->route);
2654 }
2655
2656 /* Root switch is matched only by depth */
2657 if (!lookup->depth)
2658 return !sw->depth;
2659
2660 return sw->link == lookup->link && sw->depth == lookup->depth;
2661 }
2662
2663 /**
2664 * tb_switch_find_by_link_depth() - Find switch by link and depth
2665 * @tb: Domain the switch belongs to
2666 * @link: Link number the switch is connected to
2667 * @depth: Depth of the switch in the link
2668 *
2669 * Returned switch has reference count increased so the caller needs to
2670 * call tb_switch_put() when done with the switch.
2671 */
2672 struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link, u8 depth)
2673 {
2674 struct tb_sw_lookup lookup;
2675 struct device *dev;
2676
2677 memset(&lookup, 0, sizeof(lookup));
2678 lookup.tb = tb;
2679 lookup.link = link;
2680 lookup.depth = depth;
2681
2682 dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
2683 if (dev)
2684 return tb_to_switch(dev);
2685
2686 return NULL;
2687 }
2688
2689 /**
2690 * tb_switch_find_by_uuid() - Find switch by UUID
2691 * @tb: Domain the switch belongs to
2692 * @uuid: UUID to look for
2693 *
2694 * Returned switch has reference count increased so the caller needs to
2695 * call tb_switch_put() when done with the switch.
2696 */
2697 struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid)
2698 {
2699 struct tb_sw_lookup lookup;
2700 struct device *dev;
2701
2702 memset(&lookup, 0, sizeof(lookup));
2703 lookup.tb = tb;
2704 lookup.uuid = uuid;
2705
2706 dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
2707 if (dev)
2708 return tb_to_switch(dev);
2709
2710 return NULL;
2711 }
2712
2713 /**
2714 * tb_switch_find_by_route() - Find switch by route string
2715 * @tb: Domain the switch belongs to
2716 * @route: Route string to look for
2717 *
2718 * Returned switch has reference count increased so the caller needs to
2719 * call tb_switch_put() when done with the switch.
2720 */
2721 struct tb_switch *tb_switch_find_by_route(struct tb *tb, u64 route)
2722 {
2723 struct tb_sw_lookup lookup;
2724 struct device *dev;
2725
2726 if (!route)
2727 return tb_switch_get(tb->root_switch);
2728
2729 memset(&lookup, 0, sizeof(lookup));
2730 lookup.tb = tb;
2731 lookup.route = route;
2732
2733 dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
2734 if (dev)
2735 return tb_to_switch(dev);
2736
2737 return NULL;
2738 }
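/*
 * Lookup sketch (hypothetical; example_switch_exists() is not part of
 * this file): every tb_switch_find_*() helper returns the switch with
 * its reference count increased, so the result must be released with
 * tb_switch_put().
 */
static bool example_switch_exists(struct tb *tb, u64 route)
{
	struct tb_switch *sw = tb_switch_find_by_route(tb, route);

	if (!sw)
		return false;

	tb_switch_put(sw);
	return true;
}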
2739
2740 /**
2741 * tb_switch_find_port() - return the first port of @type on @sw or NULL
2742 * @sw: Switch to find the port from
2743 * @type: Port type to look for
2744 */
2745 struct tb_port *tb_switch_find_port(struct tb_switch *sw,
2746 enum tb_port_type type)
2747 {
2748 struct tb_port *port;
2749
2750 tb_switch_for_each_port(sw, port) {
2751 if (port->config.type == type)
2752 return port;
2753 }
2754
2755 return NULL;
2756 }
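/*
 * For example, tb_switch_find_port(sw, TB_TYPE_DP_HDMI_IN) returns
 * the first DP IN adapter of @sw, or %NULL if there is none.
 */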