/*
 * Thunderbolt Cactus Ridge driver - switch/port utility functions
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 */

#include <linux/delay.h>
#include <linux/idr.h>
#include <linux/nvmem-provider.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include "tb.h"

/* Switch authorization from userspace is serialized by this lock */
static DEFINE_MUTEX(switch_lock);

/* Switch NVM support */

#define NVM_DEVID		0x05
#define NVM_VERSION		0x08
#define NVM_CSS			0x10
#define NVM_FLASH_SIZE		0x45

#define NVM_MIN_SIZE		SZ_32K
#define NVM_MAX_SIZE		SZ_512K

static DEFINE_IDA(nvm_ida);

struct nvm_auth_status {
        struct list_head list;
        uuid_be uuid;
        u32 status;
};

/*
 * Hold NVM authentication failure status per switch. This information
 * needs to stay around even when the switch gets power cycled so we
 * keep it separately.
 */
static LIST_HEAD(nvm_auth_status_cache);
static DEFINE_MUTEX(nvm_auth_status_lock);

static struct nvm_auth_status *__nvm_get_auth_status(const struct tb_switch *sw)
{
        struct nvm_auth_status *st;

        list_for_each_entry(st, &nvm_auth_status_cache, list) {
                if (!uuid_be_cmp(st->uuid, *sw->uuid))
                        return st;
        }

        return NULL;
}

static void nvm_get_auth_status(const struct tb_switch *sw, u32 *status)
{
        struct nvm_auth_status *st;

        mutex_lock(&nvm_auth_status_lock);
        st = __nvm_get_auth_status(sw);
        mutex_unlock(&nvm_auth_status_lock);

        *status = st ? st->status : 0;
}

static void nvm_set_auth_status(const struct tb_switch *sw, u32 status)
{
        struct nvm_auth_status *st;

        if (WARN_ON(!sw->uuid))
                return;

        mutex_lock(&nvm_auth_status_lock);
        st = __nvm_get_auth_status(sw);

        if (!st) {
                st = kzalloc(sizeof(*st), GFP_KERNEL);
                if (!st)
                        goto unlock;

                memcpy(&st->uuid, sw->uuid, sizeof(st->uuid));
                INIT_LIST_HEAD(&st->list);
                list_add_tail(&st->list, &nvm_auth_status_cache);
        }

        st->status = status;
unlock:
        mutex_unlock(&nvm_auth_status_lock);
}

static void nvm_clear_auth_status(const struct tb_switch *sw)
{
        struct nvm_auth_status *st;

        mutex_lock(&nvm_auth_status_lock);
        st = __nvm_get_auth_status(sw);
        if (st) {
                list_del(&st->list);
                kfree(st);
        }
        mutex_unlock(&nvm_auth_status_lock);
}

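/*
 * Validate the layout of the buffered NVM image (FARB pointer, digital
 * section size and, outside of safe mode, the device ID) and write it
 * to the non-active NVM through the DMA configuration port. On
 * generation 1 and 2 hardware the CSS headers are written separately
 * first.
 */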
static int nvm_validate_and_write(struct tb_switch *sw)
{
        unsigned int image_size, hdr_size;
        const u8 *buf = sw->nvm->buf;
        u16 ds_size;
        int ret;

        if (!buf)
                return -EINVAL;

        image_size = sw->nvm->buf_data_size;
        if (image_size < NVM_MIN_SIZE || image_size > NVM_MAX_SIZE)
                return -EINVAL;

        /*
         * FARB pointer must point inside the image and must at least
         * contain parts of the digital section we will be reading here.
         */
        hdr_size = (*(u32 *)buf) & 0xffffff;
        if (hdr_size + NVM_DEVID + 2 >= image_size)
                return -EINVAL;

        /* Digital section start should be aligned to 4k page */
        if (!IS_ALIGNED(hdr_size, SZ_4K))
                return -EINVAL;

        /*
         * Read digital section size and check that it also fits inside
         * the image.
         */
        ds_size = *(u16 *)(buf + hdr_size);
        if (ds_size >= image_size)
                return -EINVAL;

        if (!sw->safe_mode) {
                u16 device_id;

                /*
                 * Make sure the device ID in the image matches the one
                 * we read from the switch config space.
                 */
                device_id = *(u16 *)(buf + hdr_size + NVM_DEVID);
                if (device_id != sw->config.device_id)
                        return -EINVAL;

                if (sw->generation < 3) {
                        /* Write CSS headers first */
                        ret = dma_port_flash_write(sw->dma_port,
                                DMA_PORT_CSS_ADDRESS, buf + NVM_CSS,
                                DMA_PORT_CSS_MAX_SIZE);
                        if (ret)
                                return ret;
                }

                /* Skip headers in the image */
                buf += hdr_size;
                image_size -= hdr_size;
        }

        return dma_port_flash_write(sw->dma_port, 0, buf, image_size);
}

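/*
 * Start NVM authentication on the host (root) switch. Existing PCIe
 * paths are disconnected first and the controller is expected to drop
 * off the bus, so a timeout from the DMA port is treated as success.
 * If the switch is already in safe mode a power cycle is enough.
 */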
static int nvm_authenticate_host(struct tb_switch *sw)
{
        int ret;

        /*
         * Root switch NVM upgrade requires that we disconnect the
         * existing PCIe paths first (in case it is not in safe mode
         * already).
         */
        if (!sw->safe_mode) {
                ret = tb_domain_disconnect_pcie_paths(sw->tb);
                if (ret)
                        return ret;
                /*
                 * The host controller goes away pretty soon after this
                 * if everything goes well, so getting a timeout is
                 * expected.
                 */
                ret = dma_port_flash_update_auth(sw->dma_port);
                return ret == -ETIMEDOUT ? 0 : ret;
        }

        /*
         * From safe mode we can get out by just power cycling the
         * switch.
         */
        dma_port_power_cycle(sw->dma_port);
        return 0;
}

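/*
 * Start NVM authentication on a connected device and poll the DMA port
 * for the result. A failure status is cached for userspace, and the
 * device is power cycled in both cases once a result is available.
 */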
static int nvm_authenticate_device(struct tb_switch *sw)
{
        int ret, retries = 10;

        ret = dma_port_flash_update_auth(sw->dma_port);
        if (ret && ret != -ETIMEDOUT)
                return ret;

        /*
         * Poll here for the authentication status. It takes some time
         * for the device to respond (we get timeouts for a while). Once
         * we get a response the device needs to be power cycled in
         * order for the new NVM to be taken into use.
         */
        do {
                u32 status;

                ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
                if (ret < 0 && ret != -ETIMEDOUT)
                        return ret;
                if (ret > 0) {
                        if (status) {
                                tb_sw_warn(sw, "failed to authenticate NVM\n");
                                nvm_set_auth_status(sw, status);
                        }

                        tb_sw_info(sw, "power cycling the switch now\n");
                        dma_port_power_cycle(sw->dma_port);
                        return 0;
                }

                msleep(500);
        } while (--retries);

        return -ETIMEDOUT;
}

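/*
 * nvmem access hooks: reads go straight to the active NVM through the
 * DMA port, whereas writes are buffered locally and only pushed to the
 * hardware when the user asks us to authenticate the new image.
 */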
static int tb_switch_nvm_read(void *priv, unsigned int offset, void *val,
                              size_t bytes)
{
        struct tb_switch *sw = priv;

        return dma_port_flash_read(sw->dma_port, offset, val, bytes);
}

static int tb_switch_nvm_write(void *priv, unsigned int offset, void *val,
                               size_t bytes)
{
        struct tb_switch *sw = priv;
        int ret = 0;

        if (mutex_lock_interruptible(&switch_lock))
                return -ERESTARTSYS;

        /*
         * Since writing the NVM image might require some special steps,
         * for example when CSS headers are written, we cache the image
         * locally here and handle the special cases when the user asks
         * us to authenticate the image.
         */
        if (!sw->nvm->buf) {
                sw->nvm->buf = vmalloc(NVM_MAX_SIZE);
                if (!sw->nvm->buf) {
                        ret = -ENOMEM;
                        goto unlock;
                }
        }

        sw->nvm->buf_data_size = offset + bytes;
        memcpy(sw->nvm->buf + offset, val, bytes);

unlock:
        mutex_unlock(&switch_lock);

        return ret;
}

static struct nvmem_device *register_nvmem(struct tb_switch *sw, int id,
                                           size_t size, bool active)
{
        struct nvmem_config config;

        memset(&config, 0, sizeof(config));

        if (active) {
                config.name = "nvm_active";
                config.reg_read = tb_switch_nvm_read;
        } else {
                config.name = "nvm_non_active";
                config.reg_write = tb_switch_nvm_write;
        }

        config.id = id;
        config.stride = 4;
        config.word_size = 4;
        config.size = size;
        config.dev = &sw->dev;
        config.owner = THIS_MODULE;
        config.root_only = true;
        config.priv = sw;

        return nvmem_register(&config);
}

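/*
 * Expose the active and non-active NVM portions as nvmem devices. In
 * safe mode only the non-active (writable) portion is registered.
 */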
static int tb_switch_nvm_add(struct tb_switch *sw)
{
        struct nvmem_device *nvm_dev;
        struct tb_switch_nvm *nvm;
        u32 val;
        int ret;

        if (!sw->dma_port)
                return 0;

        nvm = kzalloc(sizeof(*nvm), GFP_KERNEL);
        if (!nvm)
                return -ENOMEM;

        nvm->id = ida_simple_get(&nvm_ida, 0, 0, GFP_KERNEL);

        /*
         * If the switch is in safe mode the only accessible portion of
         * the NVM is the non-active one where userspace is expected to
         * write a new functional NVM image.
         */
        if (!sw->safe_mode) {
                u32 nvm_size, hdr_size;

                ret = dma_port_flash_read(sw->dma_port, NVM_FLASH_SIZE, &val,
                                          sizeof(val));
                if (ret)
                        goto err_ida;

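                /*
                 * The flash size field appears to encode the total size
                 * as a power of two in Mbit (e.g. val & 7 == 2 means
                 * 4 Mbit, i.e. 512 kB); the header is subtracted and the
                 * rest split between the active and non-active regions.
                 */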
                hdr_size = sw->generation < 3 ? SZ_8K : SZ_16K;
                nvm_size = (SZ_1M << (val & 7)) / 8;
                nvm_size = (nvm_size - hdr_size) / 2;

                ret = dma_port_flash_read(sw->dma_port, NVM_VERSION, &val,
                                          sizeof(val));
                if (ret)
                        goto err_ida;

                nvm->major = val >> 16;
                nvm->minor = val >> 8;

                nvm_dev = register_nvmem(sw, nvm->id, nvm_size, true);
                if (IS_ERR(nvm_dev)) {
                        ret = PTR_ERR(nvm_dev);
                        goto err_ida;
                }
                nvm->active = nvm_dev;
        }

        nvm_dev = register_nvmem(sw, nvm->id, NVM_MAX_SIZE, false);
        if (IS_ERR(nvm_dev)) {
                ret = PTR_ERR(nvm_dev);
                goto err_nvm_active;
        }
        nvm->non_active = nvm_dev;

        mutex_lock(&switch_lock);
        sw->nvm = nvm;
        mutex_unlock(&switch_lock);

        return 0;

err_nvm_active:
        if (nvm->active)
                nvmem_unregister(nvm->active);
err_ida:
        ida_simple_remove(&nvm_ida, nvm->id);
        kfree(nvm);

        return ret;
}

static void tb_switch_nvm_remove(struct tb_switch *sw)
{
        struct tb_switch_nvm *nvm;

        mutex_lock(&switch_lock);
        nvm = sw->nvm;
        sw->nvm = NULL;
        mutex_unlock(&switch_lock);

        if (!nvm)
                return;

        /* Remove authentication status in case the switch is unplugged */
        if (!nvm->authenticating)
                nvm_clear_auth_status(sw);

        nvmem_unregister(nvm->non_active);
        if (nvm->active)
                nvmem_unregister(nvm->active);
        ida_simple_remove(&nvm_ida, nvm->id);
        vfree(nvm->buf);
        kfree(nvm);
}

/* port utility functions */

static const char *tb_port_type(struct tb_regs_port_header *port)
{
        switch (port->type >> 16) {
        case 0:
                switch ((u8) port->type) {
                case 0:
                        return "Inactive";
                case 1:
                        return "Port";
                case 2:
                        return "NHI";
                default:
                        return "unknown";
                }
        case 0x2:
                return "Ethernet";
        case 0x8:
                return "SATA";
        case 0xe:
                return "DP/HDMI";
        case 0x10:
                return "PCIe";
        case 0x20:
                return "USB";
        default:
                return "unknown";
        }
}

static void tb_dump_port(struct tb *tb, struct tb_regs_port_header *port)
{
        tb_info(tb,
                " Port %d: %x:%x (Revision: %d, TB Version: %d, Type: %s (%#x))\n",
                port->port_number, port->vendor_id, port->device_id,
                port->revision, port->thunderbolt_version, tb_port_type(port),
                port->type);
        tb_info(tb, " Max hop id (in/out): %d/%d\n",
                port->max_in_hop_id, port->max_out_hop_id);
        tb_info(tb, " Max counters: %d\n", port->max_counters);
        tb_info(tb, " NFC Credits: %#x\n", port->nfc_credits);
}

/**
 * tb_port_state() - get connectedness state of a port
 *
 * The port must have a TB_CAP_PHY (i.e. it should be a real port).
 *
 * Return: Returns an enum tb_port_state on success or an error code on failure.
 */
static int tb_port_state(struct tb_port *port)
{
        struct tb_cap_phy phy;
        int res;
        if (port->cap_phy == 0) {
                tb_port_WARN(port, "does not have a PHY\n");
                return -EINVAL;
        }
        res = tb_port_read(port, &phy, TB_CFG_PORT, port->cap_phy, 2);
        if (res)
                return res;
        return phy.state;
}

/**
 * tb_wait_for_port() - wait for a port to become ready
 *
 * Wait up to 1 second for a port to reach state TB_PORT_UP. If
 * wait_if_unplugged is set then we also wait if the port is in state
 * TB_PORT_UNPLUGGED (it takes a while for the device to be registered after
 * switch resume). Otherwise we only wait if a device is registered but the
 * link has not yet been established.
 *
 * Return: Returns an error code on failure. Returns 0 if the port is not
 * connected or failed to reach state TB_PORT_UP within one second. Returns 1
 * if the port is connected and in state TB_PORT_UP.
 */
int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged)
{
        int retries = 10;
        int state;
        if (!port->cap_phy) {
                tb_port_WARN(port, "does not have PHY\n");
                return -EINVAL;
        }
        if (tb_is_upstream_port(port)) {
                tb_port_WARN(port, "is the upstream port\n");
                return -EINVAL;
        }

        while (retries--) {
                state = tb_port_state(port);
                if (state < 0)
                        return state;
                if (state == TB_PORT_DISABLED) {
                        tb_port_info(port, "is disabled (state: 0)\n");
                        return 0;
                }
                if (state == TB_PORT_UNPLUGGED) {
                        if (wait_if_unplugged) {
                                /* used during resume */
                                tb_port_info(port,
                                             "is unplugged (state: 7), retrying...\n");
                                msleep(100);
                                continue;
                        }
                        tb_port_info(port, "is unplugged (state: 7)\n");
                        return 0;
                }
                if (state == TB_PORT_UP) {
                        tb_port_info(port,
                                     "is connected, link is up (state: 2)\n");
                        return 1;
                }

                /*
                 * After plug-in the state is TB_PORT_CONNECTING. Give it some
                 * time.
                 */
                tb_port_info(port,
                             "is connected, link is not up (state: %d), retrying...\n",
                             state);
                msleep(100);
        }
        tb_port_warn(port,
                     "failed to reach state TB_PORT_UP. Ignoring port...\n");
        return 0;
}

/**
 * tb_port_add_nfc_credits() - add/remove non-flow-controlled credits to port
 *
 * Change the number of NFC credits allocated to @port by @credits. To remove
 * NFC credits pass a negative amount of credits.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
int tb_port_add_nfc_credits(struct tb_port *port, int credits)
{
        if (credits == 0)
                return 0;
        tb_port_info(port,
                     "adding %#x NFC credits (%#x -> %#x)",
                     credits,
                     port->config.nfc_credits,
                     port->config.nfc_credits + credits);
        port->config.nfc_credits += credits;
        return tb_port_write(port, &port->config.nfc_credits,
                             TB_CFG_PORT, 4, 1);
}

/**
 * tb_port_clear_counter() - clear a counter in TB_CFG_COUNTER
 *
 * Return: Returns 0 on success or an error code on failure.
 */
int tb_port_clear_counter(struct tb_port *port, int counter)
{
        u32 zero[3] = { 0, 0, 0 };
        tb_port_info(port, "clearing counter %d\n", counter);
        return tb_port_write(port, zero, TB_CFG_COUNTERS, 3 * counter, 3);
}

/**
 * tb_init_port() - initialize a port
 *
 * This is a helper method for tb_switch_alloc. Does not check or initialize
 * any downstream switches.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
static int tb_init_port(struct tb_port *port)
{
        int res;
        int cap;

        res = tb_port_read(port, &port->config, TB_CFG_PORT, 0, 8);
        if (res)
                return res;

        /* Port 0 is the switch itself and has no PHY. */
        if (port->config.type == TB_TYPE_PORT && port->port != 0) {
                cap = tb_port_find_cap(port, TB_PORT_CAP_PHY);

                if (cap > 0)
                        port->cap_phy = cap;
                else
                        tb_port_WARN(port, "non switch port without a PHY\n");
        }

        tb_dump_port(port->sw->tb, &port->config);

        /* TODO: Read dual link port, DP port and more from EEPROM. */
        return 0;

}

/* switch utility functions */

static void tb_dump_switch(struct tb *tb, struct tb_regs_switch_header *sw)
{
        tb_info(tb,
                " Switch: %x:%x (Revision: %d, TB Version: %d)\n",
                sw->vendor_id, sw->device_id, sw->revision,
                sw->thunderbolt_version);
        tb_info(tb, " Max Port Number: %d\n", sw->max_port_number);
        tb_info(tb, " Config:\n");
        tb_info(tb,
                " Upstream Port Number: %d Depth: %d Route String: %#llx Enabled: %d, PlugEventsDelay: %dms\n",
                sw->upstream_port_number, sw->depth,
                (((u64) sw->route_hi) << 32) | sw->route_lo,
                sw->enabled, sw->plug_events_delay);
        tb_info(tb,
                " unknown1: %#x unknown4: %#x\n",
                sw->__unknown1, sw->__unknown4);
}

/**
 * tb_switch_reset() - reconfigure route, enable and send TB_CFG_PKG_RESET
 *
 * Return: Returns 0 on success or an error code on failure.
 */
int tb_switch_reset(struct tb *tb, u64 route)
{
        struct tb_cfg_result res;
        struct tb_regs_switch_header header = {
                header.route_hi = route >> 32,
                header.route_lo = route,
                header.enabled = true,
        };
        tb_info(tb, "resetting switch at %llx\n", route);
        res.err = tb_cfg_write(tb->ctl, ((u32 *) &header) + 2, route,
                               0, 2, 2, 2);
        if (res.err)
                return res.err;
        res = tb_cfg_reset(tb->ctl, route, TB_CFG_DEFAULT_TIMEOUT);
        if (res.err > 0)
                return -EIO;
        return res.err;
}

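/*
 * Walk a route string one hop (8 bits) at a time and return the switch
 * it ends at, or NULL if some hop is invalid or not connected. For
 * example, route 0x0302 means port 2 of the current switch, then port 3
 * of the switch behind it.
 */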
struct tb_switch *get_switch_at_route(struct tb_switch *sw, u64 route)
{
        /*
         * Routes use a stride of 8 bits, even though a port index has
         * 6 bits at most.
         */
        u8 next_port = route;

        if (route == 0)
                return sw;
        if (next_port > sw->config.max_port_number)
                return NULL;
        if (tb_is_upstream_port(&sw->ports[next_port]))
                return NULL;
        if (!sw->ports[next_port].remote)
                return NULL;
        return get_switch_at_route(sw->ports[next_port].remote->sw,
                                   route >> TB_ROUTE_SHIFT);
}

/**
 * tb_plug_events_active() - enable/disable plug events on a switch
 *
 * Also configures a sane plug_events_delay of 255ms.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
static int tb_plug_events_active(struct tb_switch *sw, bool active)
{
        u32 data;
        int res;

        if (!sw->config.enabled)
                return 0;

        sw->config.plug_events_delay = 0xff;
        res = tb_sw_write(sw, ((u32 *) &sw->config) + 4, TB_CFG_SWITCH, 4, 1);
        if (res)
                return res;

        res = tb_sw_read(sw, &data, TB_CFG_SWITCH, sw->cap_plug_events + 1, 1);
        if (res)
                return res;

        if (active) {
                data = data & 0xFFFFFF83;
                switch (sw->config.device_id) {
                case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
                case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE:
                case PCI_DEVICE_ID_INTEL_PORT_RIDGE:
                        break;
                default:
                        data |= 4;
                }
        } else {
                data = data | 0x7c;
        }
        return tb_sw_write(sw, &data, TB_CFG_SWITCH,
                           sw->cap_plug_events + 1, 1);
}

static ssize_t authorized_show(struct device *dev,
                               struct device_attribute *attr,
                               char *buf)
{
        struct tb_switch *sw = tb_to_switch(dev);

        return sprintf(buf, "%u\n", sw->authorized);
}

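/*
 * Writing 1 approves the switch (using the stored key when one is set),
 * writing 2 sends a key challenge. A switch that is already authorized
 * cannot be changed through this attribute.
 */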
static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val)
{
        int ret = -EINVAL;

        if (mutex_lock_interruptible(&switch_lock))
                return -ERESTARTSYS;

        if (sw->authorized)
                goto unlock;

        switch (val) {
        /* Approve switch */
        case 1:
                if (sw->key)
                        ret = tb_domain_approve_switch_key(sw->tb, sw);
                else
                        ret = tb_domain_approve_switch(sw->tb, sw);
                break;

        /* Challenge switch */
        case 2:
                if (sw->key)
                        ret = tb_domain_challenge_switch_key(sw->tb, sw);
                break;

        default:
                break;
        }

        if (!ret) {
                sw->authorized = val;
                /* Notify status change to the userspace */
                kobject_uevent(&sw->dev.kobj, KOBJ_CHANGE);
        }

unlock:
        mutex_unlock(&switch_lock);
        return ret;
}

static ssize_t authorized_store(struct device *dev,
                                struct device_attribute *attr,
                                const char *buf, size_t count)
{
        struct tb_switch *sw = tb_to_switch(dev);
        unsigned int val;
        ssize_t ret;

        ret = kstrtouint(buf, 0, &val);
        if (ret)
                return ret;
        if (val > 2)
                return -EINVAL;

        ret = tb_switch_set_authorized(sw, val);

        return ret ? ret : count;
}
static DEVICE_ATTR_RW(authorized);

static ssize_t device_show(struct device *dev, struct device_attribute *attr,
                           char *buf)
{
        struct tb_switch *sw = tb_to_switch(dev);

        return sprintf(buf, "%#x\n", sw->device);
}
static DEVICE_ATTR_RO(device);

static ssize_t
device_name_show(struct device *dev, struct device_attribute *attr, char *buf)
{
        struct tb_switch *sw = tb_to_switch(dev);

        return sprintf(buf, "%s\n", sw->device_name ? sw->device_name : "");
}
static DEVICE_ATTR_RO(device_name);

static ssize_t key_show(struct device *dev, struct device_attribute *attr,
                        char *buf)
{
        struct tb_switch *sw = tb_to_switch(dev);
        ssize_t ret;

        if (mutex_lock_interruptible(&switch_lock))
                return -ERESTARTSYS;

        if (sw->key)
                ret = sprintf(buf, "%*phN\n", TB_SWITCH_KEY_SIZE, sw->key);
        else
                ret = sprintf(buf, "\n");

        mutex_unlock(&switch_lock);
        return ret;
}

static ssize_t key_store(struct device *dev, struct device_attribute *attr,
                         const char *buf, size_t count)
{
        struct tb_switch *sw = tb_to_switch(dev);
        u8 key[TB_SWITCH_KEY_SIZE];
        ssize_t ret = count;

        if (count < 64)
                return -EINVAL;

        if (hex2bin(key, buf, sizeof(key)))
                return -EINVAL;

        if (mutex_lock_interruptible(&switch_lock))
                return -ERESTARTSYS;

        if (sw->authorized) {
                ret = -EBUSY;
        } else {
                kfree(sw->key);
                sw->key = kmemdup(key, sizeof(key), GFP_KERNEL);
                if (!sw->key)
                        ret = -ENOMEM;
        }

        mutex_unlock(&switch_lock);
        return ret;
}
static DEVICE_ATTR_RW(key);

static ssize_t nvm_authenticate_show(struct device *dev,
        struct device_attribute *attr, char *buf)
{
        struct tb_switch *sw = tb_to_switch(dev);
        u32 status;

        nvm_get_auth_status(sw, &status);
        return sprintf(buf, "%#x\n", status);
}

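/*
 * Writing a non-zero value validates the image userspace has written to
 * the non-active nvmem device and starts the NVM authentication (flash)
 * process. Any previously cached authentication status is cleared first.
 */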
static ssize_t nvm_authenticate_store(struct device *dev,
        struct device_attribute *attr, const char *buf, size_t count)
{
        struct tb_switch *sw = tb_to_switch(dev);
        bool val;
        int ret;

        if (mutex_lock_interruptible(&switch_lock))
                return -ERESTARTSYS;

        /* If NVMem devices are not yet added */
        if (!sw->nvm) {
                ret = -EAGAIN;
                goto exit_unlock;
        }

        ret = kstrtobool(buf, &val);
        if (ret)
                goto exit_unlock;

        /* Always clear the authentication status */
        nvm_clear_auth_status(sw);

        if (val) {
                ret = nvm_validate_and_write(sw);
                if (ret)
                        goto exit_unlock;

                sw->nvm->authenticating = true;

                if (!tb_route(sw))
                        ret = nvm_authenticate_host(sw);
                else
                        ret = nvm_authenticate_device(sw);
        }

exit_unlock:
        mutex_unlock(&switch_lock);

        if (ret)
                return ret;
        return count;
}
static DEVICE_ATTR_RW(nvm_authenticate);

static ssize_t nvm_version_show(struct device *dev,
                                struct device_attribute *attr, char *buf)
{
        struct tb_switch *sw = tb_to_switch(dev);
        int ret;

        if (mutex_lock_interruptible(&switch_lock))
                return -ERESTARTSYS;

        if (sw->safe_mode)
                ret = -ENODATA;
        else if (!sw->nvm)
                ret = -EAGAIN;
        else
                ret = sprintf(buf, "%x.%x\n", sw->nvm->major, sw->nvm->minor);

        mutex_unlock(&switch_lock);

        return ret;
}
static DEVICE_ATTR_RO(nvm_version);

static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
                           char *buf)
{
        struct tb_switch *sw = tb_to_switch(dev);

        return sprintf(buf, "%#x\n", sw->vendor);
}
static DEVICE_ATTR_RO(vendor);

static ssize_t
vendor_name_show(struct device *dev, struct device_attribute *attr, char *buf)
{
        struct tb_switch *sw = tb_to_switch(dev);

        return sprintf(buf, "%s\n", sw->vendor_name ? sw->vendor_name : "");
}
static DEVICE_ATTR_RO(vendor_name);

static ssize_t unique_id_show(struct device *dev, struct device_attribute *attr,
                              char *buf)
{
        struct tb_switch *sw = tb_to_switch(dev);

        return sprintf(buf, "%pUb\n", sw->uuid);
}
static DEVICE_ATTR_RO(unique_id);

static struct attribute *switch_attrs[] = {
        &dev_attr_authorized.attr,
        &dev_attr_device.attr,
        &dev_attr_device_name.attr,
        &dev_attr_key.attr,
        &dev_attr_nvm_authenticate.attr,
        &dev_attr_nvm_version.attr,
        &dev_attr_vendor.attr,
        &dev_attr_vendor_name.attr,
        &dev_attr_unique_id.attr,
        NULL,
};

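/*
 * Attribute visibility: the key is exposed only for non-root switches
 * in secure domains, the NVM attributes only when a DMA port is
 * available, and the remaining attributes are hidden while the switch
 * is in safe mode.
 */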
static umode_t switch_attr_is_visible(struct kobject *kobj,
                                      struct attribute *attr, int n)
{
        struct device *dev = container_of(kobj, struct device, kobj);
        struct tb_switch *sw = tb_to_switch(dev);

        if (attr == &dev_attr_key.attr) {
                if (tb_route(sw) &&
                    sw->tb->security_level == TB_SECURITY_SECURE &&
                    sw->security_level == TB_SECURITY_SECURE)
                        return attr->mode;
                return 0;
        } else if (attr == &dev_attr_nvm_authenticate.attr ||
                   attr == &dev_attr_nvm_version.attr) {
                if (sw->dma_port)
                        return attr->mode;
                return 0;
        }

        return sw->safe_mode ? 0 : attr->mode;
}

static struct attribute_group switch_group = {
        .is_visible = switch_attr_is_visible,
        .attrs = switch_attrs,
};

static const struct attribute_group *switch_groups[] = {
        &switch_group,
        NULL,
};

static void tb_switch_release(struct device *dev)
{
        struct tb_switch *sw = tb_to_switch(dev);

        dma_port_free(sw->dma_port);

        kfree(sw->uuid);
        kfree(sw->device_name);
        kfree(sw->vendor_name);
        kfree(sw->ports);
        kfree(sw->drom);
        kfree(sw->key);
        kfree(sw);
}

struct device_type tb_switch_type = {
        .name = "thunderbolt_device",
        .release = tb_switch_release,
};

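/*
 * Map the switch device ID to a Thunderbolt hardware generation (1-3).
 * The generation is later used to decide how NVM upgrade is performed.
 */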
static int tb_switch_get_generation(struct tb_switch *sw)
{
        switch (sw->config.device_id) {
        case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
        case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE:
        case PCI_DEVICE_ID_INTEL_LIGHT_PEAK:
        case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_2C:
        case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
        case PCI_DEVICE_ID_INTEL_PORT_RIDGE:
        case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_2C_BRIDGE:
        case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_4C_BRIDGE:
                return 1;

        case PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_BRIDGE:
        case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE:
        case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE:
                return 2;

        case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE:
        case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE:
        case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE:
        case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE:
        case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE:
                return 3;

        default:
                /*
                 * For unknown switches assume generation to be 1 to be
                 * on the safe side.
                 */
                tb_sw_warn(sw, "unsupported switch device id %#x\n",
                           sw->config.device_id);
                return 1;
        }
}

/**
 * tb_switch_alloc() - allocate a switch
 * @tb: Pointer to the owning domain
 * @parent: Parent device for this switch
 * @route: Route string for this switch
 *
 * Allocates and initializes a switch. Will not upload configuration to
 * the switch. For that you need to call tb_switch_configure()
 * separately. The returned switch should be released by calling
 * tb_switch_put().
 *
 * Return: Pointer to the allocated switch or %NULL in case of failure
 */
struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent,
                                  u64 route)
{
        int i;
        int cap;
        struct tb_switch *sw;
        int upstream_port = tb_cfg_get_upstream_port(tb->ctl, route);
        if (upstream_port < 0)
                return NULL;

        sw = kzalloc(sizeof(*sw), GFP_KERNEL);
        if (!sw)
                return NULL;

        sw->tb = tb;
        if (tb_cfg_read(tb->ctl, &sw->config, route, 0, TB_CFG_SWITCH, 0, 5))
                goto err_free_sw_ports;

        tb_info(tb, "current switch config:\n");
        tb_dump_switch(tb, &sw->config);

        /* configure switch */
        sw->config.upstream_port_number = upstream_port;
        sw->config.depth = tb_route_length(route);
        sw->config.route_lo = route;
        sw->config.route_hi = route >> 32;
        sw->config.enabled = 0;

        /* initialize ports */
        sw->ports = kcalloc(sw->config.max_port_number + 1, sizeof(*sw->ports),
                            GFP_KERNEL);
        if (!sw->ports)
                goto err_free_sw_ports;

        for (i = 0; i <= sw->config.max_port_number; i++) {
                /* minimum setup for tb_find_cap and tb_drom_read to work */
                sw->ports[i].sw = sw;
                sw->ports[i].port = i;
        }

        sw->generation = tb_switch_get_generation(sw);

        cap = tb_switch_find_vse_cap(sw, TB_VSE_CAP_PLUG_EVENTS);
        if (cap < 0) {
                tb_sw_warn(sw, "cannot find TB_VSE_CAP_PLUG_EVENTS aborting\n");
                goto err_free_sw_ports;
        }
        sw->cap_plug_events = cap;

        /* Root switch is always authorized */
        if (!route)
                sw->authorized = true;

        device_initialize(&sw->dev);
        sw->dev.parent = parent;
        sw->dev.bus = &tb_bus_type;
        sw->dev.type = &tb_switch_type;
        sw->dev.groups = switch_groups;
        dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));

        return sw;

err_free_sw_ports:
        kfree(sw->ports);
        kfree(sw);

        return NULL;
}

/**
 * tb_switch_alloc_safe_mode() - allocate a switch that is in safe mode
 * @tb: Pointer to the owning domain
 * @parent: Parent device for this switch
 * @route: Route string for this switch
 *
 * This creates a switch in safe mode. This means the switch pretty much
 * lacks all capabilities except the DMA configuration port until it is
 * flashed with a valid NVM firmware.
 *
 * The returned switch must be released by calling tb_switch_put().
 *
 * Return: Pointer to the allocated switch or %NULL in case of failure
 */
struct tb_switch *
tb_switch_alloc_safe_mode(struct tb *tb, struct device *parent, u64 route)
{
        struct tb_switch *sw;

        sw = kzalloc(sizeof(*sw), GFP_KERNEL);
        if (!sw)
                return NULL;

        sw->tb = tb;
        sw->config.depth = tb_route_length(route);
        sw->config.route_hi = upper_32_bits(route);
        sw->config.route_lo = lower_32_bits(route);
        sw->safe_mode = true;

        device_initialize(&sw->dev);
        sw->dev.parent = parent;
        sw->dev.bus = &tb_bus_type;
        sw->dev.type = &tb_switch_type;
        sw->dev.groups = switch_groups;
        dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));

        return sw;
}

/**
 * tb_switch_configure() - Uploads configuration to the switch
 * @sw: Switch to configure
 *
 * Call this function before the switch is added to the system. It will
 * upload configuration to the switch and make it available for the
 * connection manager to use.
 *
 * Return: %0 in case of success and negative errno in case of failure
 */
int tb_switch_configure(struct tb_switch *sw)
{
        struct tb *tb = sw->tb;
        u64 route;
        int ret;

        route = tb_route(sw);
        tb_info(tb,
                "initializing Switch at %#llx (depth: %d, up port: %d)\n",
                route, tb_route_length(route), sw->config.upstream_port_number);

        if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL)
                tb_sw_warn(sw, "unknown switch vendor id %#x\n",
                           sw->config.vendor_id);

        sw->config.enabled = 1;

        /* upload configuration */
        ret = tb_sw_write(sw, 1 + (u32 *)&sw->config, TB_CFG_SWITCH, 1, 3);
        if (ret)
                return ret;

        return tb_plug_events_active(sw, true);
}

static void tb_switch_set_uuid(struct tb_switch *sw)
{
        u32 uuid[4];
        int cap;

        if (sw->uuid)
                return;

        /*
         * The newer controllers include fused UUID as part of the link
         * controller specific registers.
         */
        cap = tb_switch_find_vse_cap(sw, TB_VSE_CAP_LINK_CONTROLLER);
        if (cap > 0) {
                tb_sw_read(sw, uuid, TB_CFG_SWITCH, cap + 3, 4);
        } else {
                /*
                 * ICM generates UUID based on UID and fills the upper
                 * two words with ones. This is not strictly following
                 * UUID format but we want to be compatible with it so
                 * we do the same here.
                 */
                uuid[0] = sw->uid & 0xffffffff;
                uuid[1] = (sw->uid >> 32) & 0xffffffff;
                uuid[2] = 0xffffffff;
                uuid[3] = 0xffffffff;
        }

        sw->uuid = kmemdup(uuid, sizeof(uuid), GFP_KERNEL);
}

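/*
 * Set up the DMA configuration port used for NVM upgrade. Generation 3
 * switches always get one, generation 2 only for the root switch, and
 * older or unknown generations only when the switch is in safe mode. If
 * a previous flash authentication left a pending result, the switch is
 * power cycled here and -ESHUTDOWN is returned so that it gets re-added
 * once it comes back up.
 */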
static int tb_switch_add_dma_port(struct tb_switch *sw)
{
        u32 status;
        int ret;

        switch (sw->generation) {
        case 3:
                break;

        case 2:
                /* Only root switch can be upgraded */
                if (tb_route(sw))
                        return 0;
                break;

        default:
                /*
                 * DMA port is the only thing available when the switch
                 * is in safe mode.
                 */
                if (!sw->safe_mode)
                        return 0;
                break;
        }

        if (sw->no_nvm_upgrade)
                return 0;

        sw->dma_port = dma_port_alloc(sw);
        if (!sw->dma_port)
                return 0;

        /*
         * Check status of the previous flash authentication. If there
         * is one we need to power cycle the switch in any case to make
         * it functional again.
         */
        ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
        if (ret <= 0)
                return ret;

        if (status) {
                tb_sw_info(sw, "switch flash authentication failed\n");
                tb_switch_set_uuid(sw);
                nvm_set_auth_status(sw, status);
        }

        tb_sw_info(sw, "power cycling the switch now\n");
        dma_port_power_cycle(sw->dma_port);

        /*
         * We return error here which causes the switch adding failure.
         * It should appear back after power cycle is complete.
         */
        return -ESHUTDOWN;
}

/**
 * tb_switch_add() - Add a switch to the domain
 * @sw: Switch to add
 *
 * This is the last step in adding switch to the domain. It will read
 * identification information from DROM and initialize ports so that
 * they can be used to connect other switches. The switch will be
 * exposed to the userspace when this function successfully returns. To
 * remove and release the switch, call tb_switch_remove().
 *
 * Return: %0 in case of success and negative errno in case of failure
 */
int tb_switch_add(struct tb_switch *sw)
{
        int i, ret;

        /*
         * Initialize DMA control port now before we read DROM. Recent
         * host controllers have more complete DROM on NVM that includes
         * vendor and model identification strings which we then expose
         * to the userspace. NVM can be accessed through DMA
         * configuration based mailbox.
         */
        ret = tb_switch_add_dma_port(sw);
        if (ret)
                return ret;

        if (!sw->safe_mode) {
                /* read drom */
                ret = tb_drom_read(sw);
                if (ret) {
                        tb_sw_warn(sw, "tb_eeprom_read_rom failed\n");
                        return ret;
                }
                tb_sw_info(sw, "uid: %#llx\n", sw->uid);

                tb_switch_set_uuid(sw);

                for (i = 0; i <= sw->config.max_port_number; i++) {
                        if (sw->ports[i].disabled) {
                                tb_port_info(&sw->ports[i], "disabled by eeprom\n");
                                continue;
                        }
                        ret = tb_init_port(&sw->ports[i]);
                        if (ret)
                                return ret;
                }
        }

        ret = device_add(&sw->dev);
        if (ret)
                return ret;

        ret = tb_switch_nvm_add(sw);
        if (ret)
                device_del(&sw->dev);

        return ret;
}

/**
 * tb_switch_remove() - Remove and release a switch
 * @sw: Switch to remove
 *
 * This will remove the switch from the domain and release it after the
 * last reference to it drops to zero. If there are switches connected
 * below this switch, they will be removed as well.
 */
void tb_switch_remove(struct tb_switch *sw)
{
        int i;

        /* port 0 is the switch itself and never has a remote */
        for (i = 1; i <= sw->config.max_port_number; i++) {
                if (tb_is_upstream_port(&sw->ports[i]))
                        continue;
                if (sw->ports[i].remote)
                        tb_switch_remove(sw->ports[i].remote->sw);
                sw->ports[i].remote = NULL;
        }

        if (!sw->is_unplugged)
                tb_plug_events_active(sw, false);

        tb_switch_nvm_remove(sw);
        device_unregister(&sw->dev);
}

/**
 * tb_sw_set_unplugged() - set is_unplugged on switch and downstream switches
 */
void tb_sw_set_unplugged(struct tb_switch *sw)
{
        int i;
        if (sw == sw->tb->root_switch) {
                tb_sw_WARN(sw, "cannot unplug root switch\n");
                return;
        }
        if (sw->is_unplugged) {
                tb_sw_WARN(sw, "is_unplugged already set\n");
                return;
        }
        sw->is_unplugged = true;
        for (i = 0; i <= sw->config.max_port_number; i++) {
                if (!tb_is_upstream_port(&sw->ports[i]) && sw->ports[i].remote)
                        tb_sw_set_unplugged(sw->ports[i].remote->sw);
        }
}

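/*
 * Re-upload the switch configuration after resume, re-enable plug
 * events and walk the downstream ports: anything that changed or
 * disappeared while suspended is marked unplugged. For non-root
 * switches the UID is verified first to catch a swapped device.
 */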
int tb_switch_resume(struct tb_switch *sw)
{
        int i, err;
        tb_sw_info(sw, "resuming switch\n");

        /*
         * Check for UID of the connected switches except for root
         * switch which we assume cannot be removed.
         */
        if (tb_route(sw)) {
                u64 uid;

                err = tb_drom_read_uid_only(sw, &uid);
                if (err) {
                        tb_sw_warn(sw, "uid read failed\n");
                        return err;
                }
                if (sw->uid != uid) {
                        tb_sw_info(sw,
                                "changed while suspended (uid %#llx -> %#llx)\n",
                                sw->uid, uid);
                        return -ENODEV;
                }
        }

        /* upload configuration */
        err = tb_sw_write(sw, 1 + (u32 *) &sw->config, TB_CFG_SWITCH, 1, 3);
        if (err)
                return err;

        err = tb_plug_events_active(sw, true);
        if (err)
                return err;

        /* check for surviving downstream switches */
        for (i = 1; i <= sw->config.max_port_number; i++) {
                struct tb_port *port = &sw->ports[i];
                if (tb_is_upstream_port(port))
                        continue;
                if (!port->remote)
                        continue;
                if (tb_wait_for_port(port, true) <= 0
                    || tb_switch_resume(port->remote->sw)) {
                        tb_port_warn(port,
                                     "lost during suspend, disconnecting\n");
                        tb_sw_set_unplugged(port->remote->sw);
                }
        }
        return 0;
}

void tb_switch_suspend(struct tb_switch *sw)
{
        int i, err;
        err = tb_plug_events_active(sw, false);
        if (err)
                return;

        for (i = 1; i <= sw->config.max_port_number; i++) {
                if (!tb_is_upstream_port(&sw->ports[i]) && sw->ports[i].remote)
                        tb_switch_suspend(sw->ports[i].remote->sw);
        }
        /*
         * TODO: invoke tb_cfg_prepare_to_sleep here? does not seem to have any
         * effect?
         */
}

struct tb_sw_lookup {
        struct tb *tb;
        u8 link;
        u8 depth;
        const uuid_be *uuid;
};

static int tb_switch_match(struct device *dev, void *data)
{
        struct tb_switch *sw = tb_to_switch(dev);
        struct tb_sw_lookup *lookup = data;

        if (!sw)
                return 0;
        if (sw->tb != lookup->tb)
                return 0;

        if (lookup->uuid)
                return !memcmp(sw->uuid, lookup->uuid, sizeof(*lookup->uuid));

        /* Root switch is matched only by depth */
        if (!lookup->depth)
                return !sw->depth;

        return sw->link == lookup->link && sw->depth == lookup->depth;
}

/**
 * tb_switch_find_by_link_depth() - Find switch by link and depth
 * @tb: Domain the switch belongs to
 * @link: Link number the switch is connected to
 * @depth: Depth of the switch in link
 *
 * Returned switch has reference count increased so the caller needs to
 * call tb_switch_put() when done with the switch.
 */
struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link, u8 depth)
{
        struct tb_sw_lookup lookup;
        struct device *dev;

        memset(&lookup, 0, sizeof(lookup));
        lookup.tb = tb;
        lookup.link = link;
        lookup.depth = depth;

        dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
        if (dev)
                return tb_to_switch(dev);

        return NULL;
}

/**
 * tb_switch_find_by_uuid() - Find switch by UUID
 * @tb: Domain the switch belongs to
 * @uuid: UUID to look for
 *
 * Returned switch has reference count increased so the caller needs to
 * call tb_switch_put() when done with the switch.
 */
struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_be *uuid)
{
        struct tb_sw_lookup lookup;
        struct device *dev;

        memset(&lookup, 0, sizeof(lookup));
        lookup.tb = tb;
        lookup.uuid = uuid;

        dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
        if (dev)
                return tb_to_switch(dev);

        return NULL;
}

void tb_switch_exit(void)
{
        ida_destroy(&nvm_ida);
}