// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt Cactus Ridge driver - switch/port utility functions
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 */

#include <linux/delay.h>
#include <linux/idr.h>
#include <linux/nvmem-provider.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include "tb.h"

/* Switch authorization from userspace is serialized by this lock */
static DEFINE_MUTEX(switch_lock);

/* Switch NVM support */

#define NVM_DEVID		0x05
#define NVM_VERSION		0x08
#define NVM_CSS			0x10
#define NVM_FLASH_SIZE		0x45

#define NVM_MIN_SIZE		SZ_32K
#define NVM_MAX_SIZE		SZ_512K

static DEFINE_IDA(nvm_ida);

struct nvm_auth_status {
	struct list_head list;
	uuid_t uuid;
	u32 status;
};

/*
 * Hold NVM authentication failure status per switch. This information
 * needs to stay around even when the switch gets power cycled so we
 * keep it separately.
 */
static LIST_HEAD(nvm_auth_status_cache);
static DEFINE_MUTEX(nvm_auth_status_lock);

static struct nvm_auth_status *__nvm_get_auth_status(const struct tb_switch *sw)
{
	struct nvm_auth_status *st;

	list_for_each_entry(st, &nvm_auth_status_cache, list) {
		if (uuid_equal(&st->uuid, sw->uuid))
			return st;
	}

	return NULL;
}

static void nvm_get_auth_status(const struct tb_switch *sw, u32 *status)
{
	struct nvm_auth_status *st;

	mutex_lock(&nvm_auth_status_lock);
	st = __nvm_get_auth_status(sw);
	mutex_unlock(&nvm_auth_status_lock);

	*status = st ? st->status : 0;
}

static void nvm_set_auth_status(const struct tb_switch *sw, u32 status)
{
	struct nvm_auth_status *st;

	if (WARN_ON(!sw->uuid))
		return;

	mutex_lock(&nvm_auth_status_lock);
	st = __nvm_get_auth_status(sw);

	if (!st) {
		st = kzalloc(sizeof(*st), GFP_KERNEL);
		if (!st)
			goto unlock;

		memcpy(&st->uuid, sw->uuid, sizeof(st->uuid));
		INIT_LIST_HEAD(&st->list);
		list_add_tail(&st->list, &nvm_auth_status_cache);
	}

	st->status = status;
unlock:
	mutex_unlock(&nvm_auth_status_lock);
}

static void nvm_clear_auth_status(const struct tb_switch *sw)
{
	struct nvm_auth_status *st;

	mutex_lock(&nvm_auth_status_lock);
	st = __nvm_get_auth_status(sw);
	if (st) {
		list_del(&st->list);
		kfree(st);
	}
	mutex_unlock(&nvm_auth_status_lock);
}

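/*
 * Validate the NVM image cached in sw->nvm->buf and, if it passes the
 * checks, write it to the switch through the DMA port. Unless the
 * switch is in safe mode, the device ID in the image is cross-checked
 * against the switch config space, and on pre-gen3 hardware the CSS
 * headers are written separately before the headers are skipped.
 */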
static int nvm_validate_and_write(struct tb_switch *sw)
{
	unsigned int image_size, hdr_size;
	const u8 *buf = sw->nvm->buf;
	u16 ds_size;
	int ret;

	if (!buf)
		return -EINVAL;

	image_size = sw->nvm->buf_data_size;
	if (image_size < NVM_MIN_SIZE || image_size > NVM_MAX_SIZE)
		return -EINVAL;

	/*
	 * FARB pointer must point inside the image and must at least
	 * contain parts of the digital section we will be reading here.
	 */
	hdr_size = (*(u32 *)buf) & 0xffffff;
	if (hdr_size + NVM_DEVID + 2 >= image_size)
		return -EINVAL;

	/* Digital section start should be aligned to 4k page */
	if (!IS_ALIGNED(hdr_size, SZ_4K))
		return -EINVAL;

	/*
	 * Read digital section size and check that it also fits inside
	 * the image.
	 */
	ds_size = *(u16 *)(buf + hdr_size);
	if (ds_size >= image_size)
		return -EINVAL;

	if (!sw->safe_mode) {
		u16 device_id;

		/*
		 * Make sure the device ID in the image matches the one
		 * we read from the switch config space.
		 */
		device_id = *(u16 *)(buf + hdr_size + NVM_DEVID);
		if (device_id != sw->config.device_id)
			return -EINVAL;

		if (sw->generation < 3) {
			/* Write CSS headers first */
			ret = dma_port_flash_write(sw->dma_port,
				DMA_PORT_CSS_ADDRESS, buf + NVM_CSS,
				DMA_PORT_CSS_MAX_SIZE);
			if (ret)
				return ret;
		}

		/* Skip headers in the image */
		buf += hdr_size;
		image_size -= hdr_size;
	}

	return dma_port_flash_write(sw->dma_port, 0, buf, image_size);
}

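/*
 * NVM authentication differs between the host (root) switch and
 * devices: the host controller typically drops off the bus while the
 * image is authenticated, so a timeout from the DMA port is treated as
 * success there, whereas a device is polled for its status and then
 * power cycled to take the new NVM into use.
 */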
static int nvm_authenticate_host(struct tb_switch *sw)
{
	int ret;

	/*
	 * Root switch NVM upgrade requires that we disconnect the
	 * existing PCIe paths first (in case it is not in safe mode
	 * already).
	 */
	if (!sw->safe_mode) {
		ret = tb_domain_disconnect_pcie_paths(sw->tb);
		if (ret)
			return ret;
		/*
		 * The host controller goes away pretty soon after this
		 * if everything goes well, so getting a timeout is
		 * expected.
		 */
		ret = dma_port_flash_update_auth(sw->dma_port);
		return ret == -ETIMEDOUT ? 0 : ret;
	}

	/*
	 * From safe mode we can get out by just power cycling the
	 * switch.
	 */
	dma_port_power_cycle(sw->dma_port);
	return 0;
}

static int nvm_authenticate_device(struct tb_switch *sw)
{
	int ret, retries = 10;

	ret = dma_port_flash_update_auth(sw->dma_port);
	if (ret && ret != -ETIMEDOUT)
		return ret;

	/*
	 * Poll here for the authentication status. It takes some time
	 * for the device to respond (we get a timeout for a while).
	 * Once we get a response the device needs to be power cycled in
	 * order for the new NVM to be taken into use.
	 */
	do {
		u32 status;

		ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
		if (ret < 0 && ret != -ETIMEDOUT)
			return ret;
		if (ret > 0) {
			if (status) {
				tb_sw_warn(sw, "failed to authenticate NVM\n");
				nvm_set_auth_status(sw, status);
			}

			tb_sw_info(sw, "power cycling the switch now\n");
			dma_port_power_cycle(sw->dma_port);
			return 0;
		}

		msleep(500);
	} while (--retries);

	return -ETIMEDOUT;
}

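/*
 * nvmem reg_read/reg_write callbacks. Reads go straight to the flash
 * through the DMA port, while writes only accumulate the image in the
 * locally cached buffer; the buffer is validated and flushed to the
 * switch once the user triggers authentication.
 */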
static int tb_switch_nvm_read(void *priv, unsigned int offset, void *val,
			      size_t bytes)
{
	struct tb_switch *sw = priv;

	return dma_port_flash_read(sw->dma_port, offset, val, bytes);
}

static int tb_switch_nvm_write(void *priv, unsigned int offset, void *val,
			       size_t bytes)
{
	struct tb_switch *sw = priv;
	int ret = 0;

	if (mutex_lock_interruptible(&switch_lock))
		return -ERESTARTSYS;

	/*
	 * Since writing the NVM image might require some special steps,
	 * for example when CSS headers are written, we cache the image
	 * locally here and handle the special cases when the user asks
	 * us to authenticate the image.
	 */
	if (!sw->nvm->buf) {
		sw->nvm->buf = vmalloc(NVM_MAX_SIZE);
		if (!sw->nvm->buf) {
			ret = -ENOMEM;
			goto unlock;
		}
	}

	sw->nvm->buf_data_size = offset + bytes;
	memcpy(sw->nvm->buf + offset, val, bytes);

unlock:
	mutex_unlock(&switch_lock);

	return ret;
}

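/*
 * Each switch gets up to two nvmem devices: a read-only "nvm_active"
 * that mirrors the currently active NVM and a root-only writable
 * "nvm_non_active" that userspace is expected to fill with a new image
 * (e.g. through the sysfs nvmem interface) before writing 1 to the
 * nvm_authenticate attribute below.
 */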
static struct nvmem_device *register_nvmem(struct tb_switch *sw, int id,
					   size_t size, bool active)
{
	struct nvmem_config config;

	memset(&config, 0, sizeof(config));

	if (active) {
		config.name = "nvm_active";
		config.reg_read = tb_switch_nvm_read;
		config.read_only = true;
	} else {
		config.name = "nvm_non_active";
		config.reg_write = tb_switch_nvm_write;
		config.root_only = true;
	}

	config.id = id;
	config.stride = 4;
	config.word_size = 4;
	config.size = size;
	config.dev = &sw->dev;
	config.owner = THIS_MODULE;
	config.priv = sw;

	return nvmem_register(&config);
}

static int tb_switch_nvm_add(struct tb_switch *sw)
{
	struct nvmem_device *nvm_dev;
	struct tb_switch_nvm *nvm;
	u32 val;
	int ret;

	if (!sw->dma_port)
		return 0;

	nvm = kzalloc(sizeof(*nvm), GFP_KERNEL);
	if (!nvm)
		return -ENOMEM;

	nvm->id = ida_simple_get(&nvm_ida, 0, 0, GFP_KERNEL);

	/*
	 * If the switch is in safe-mode the only accessible portion of
	 * the NVM is the non-active one where userspace is expected to
	 * write new functional NVM.
	 */
	if (!sw->safe_mode) {
		u32 nvm_size, hdr_size;

		ret = dma_port_flash_read(sw->dma_port, NVM_FLASH_SIZE, &val,
					  sizeof(val));
		if (ret)
			goto err_ida;

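		/*
		 * The low bits of the flash size register appear to
		 * encode the total flash size as a power-of-two number
		 * of Mbit; what remains after the header is halved
		 * since the flash holds both an active and a non-active
		 * image (an assumption based on how the result is used
		 * below, not on a datasheet).
		 */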
		hdr_size = sw->generation < 3 ? SZ_8K : SZ_16K;
		nvm_size = (SZ_1M << (val & 7)) / 8;
		nvm_size = (nvm_size - hdr_size) / 2;

		ret = dma_port_flash_read(sw->dma_port, NVM_VERSION, &val,
					  sizeof(val));
		if (ret)
			goto err_ida;

		nvm->major = val >> 16;
		nvm->minor = val >> 8;

		nvm_dev = register_nvmem(sw, nvm->id, nvm_size, true);
		if (IS_ERR(nvm_dev)) {
			ret = PTR_ERR(nvm_dev);
			goto err_ida;
		}
		nvm->active = nvm_dev;
	}

	nvm_dev = register_nvmem(sw, nvm->id, NVM_MAX_SIZE, false);
	if (IS_ERR(nvm_dev)) {
		ret = PTR_ERR(nvm_dev);
		goto err_nvm_active;
	}
	nvm->non_active = nvm_dev;

	mutex_lock(&switch_lock);
	sw->nvm = nvm;
	mutex_unlock(&switch_lock);

	return 0;

err_nvm_active:
	if (nvm->active)
		nvmem_unregister(nvm->active);
err_ida:
	ida_simple_remove(&nvm_ida, nvm->id);
	kfree(nvm);

	return ret;
}

static void tb_switch_nvm_remove(struct tb_switch *sw)
{
	struct tb_switch_nvm *nvm;

	mutex_lock(&switch_lock);
	nvm = sw->nvm;
	sw->nvm = NULL;
	mutex_unlock(&switch_lock);

	if (!nvm)
		return;

	/* Remove authentication status in case the switch is unplugged */
	if (!nvm->authenticating)
		nvm_clear_auth_status(sw);

	nvmem_unregister(nvm->non_active);
	if (nvm->active)
		nvmem_unregister(nvm->active);
	ida_simple_remove(&nvm_ida, nvm->id);
	vfree(nvm->buf);
	kfree(nvm);
}

/* port utility functions */

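/*
 * The port type register appears to pack a category in its upper bits
 * and a subtype in the lowest byte, which is what the nested switch
 * below decodes; the returned strings are only used for dmesg dumps.
 */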
static const char *tb_port_type(struct tb_regs_port_header *port)
{
	switch (port->type >> 16) {
	case 0:
		switch ((u8) port->type) {
		case 0:
			return "Inactive";
		case 1:
			return "Port";
		case 2:
			return "NHI";
		default:
			return "unknown";
		}
	case 0x2:
		return "Ethernet";
	case 0x8:
		return "SATA";
	case 0xe:
		return "DP/HDMI";
	case 0x10:
		return "PCIe";
	case 0x20:
		return "USB";
	default:
		return "unknown";
	}
}

static void tb_dump_port(struct tb *tb, struct tb_regs_port_header *port)
{
	tb_info(tb,
		" Port %d: %x:%x (Revision: %d, TB Version: %d, Type: %s (%#x))\n",
		port->port_number, port->vendor_id, port->device_id,
		port->revision, port->thunderbolt_version, tb_port_type(port),
		port->type);
	tb_info(tb, "  Max hop id (in/out): %d/%d\n",
		port->max_in_hop_id, port->max_out_hop_id);
	tb_info(tb, "  Max counters: %d\n", port->max_counters);
	tb_info(tb, "  NFC Credits: %#x\n", port->nfc_credits);
}

/**
 * tb_port_state() - get connectedness state of a port
 *
 * The port must have a TB_CAP_PHY (i.e. it should be a real port).
 *
 * Return: Returns an enum tb_port_state on success or an error code on failure.
 */
static int tb_port_state(struct tb_port *port)
{
	struct tb_cap_phy phy;
	int res;
	if (port->cap_phy == 0) {
		tb_port_WARN(port, "does not have a PHY\n");
		return -EINVAL;
	}
	res = tb_port_read(port, &phy, TB_CFG_PORT, port->cap_phy, 2);
	if (res)
		return res;
	return phy.state;
}

/**
 * tb_wait_for_port() - wait for a port to become ready
 *
 * Wait up to 1 second for a port to reach state TB_PORT_UP. If
 * wait_if_unplugged is set then we also wait if the port is in state
 * TB_PORT_UNPLUGGED (it takes a while for the device to be registered after
 * switch resume). Otherwise we only wait if a device is registered but the link
 * has not yet been established.
 *
 * Return: Returns an error code on failure. Returns 0 if the port is not
 * connected or failed to reach state TB_PORT_UP within one second. Returns 1
 * if the port is connected and in state TB_PORT_UP.
 */
int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged)
{
	int retries = 10;
	int state;
	if (!port->cap_phy) {
		tb_port_WARN(port, "does not have PHY\n");
		return -EINVAL;
	}
	if (tb_is_upstream_port(port)) {
		tb_port_WARN(port, "is the upstream port\n");
		return -EINVAL;
	}

	while (retries--) {
		state = tb_port_state(port);
		if (state < 0)
			return state;
		if (state == TB_PORT_DISABLED) {
			tb_port_info(port, "is disabled (state: 0)\n");
			return 0;
		}
		if (state == TB_PORT_UNPLUGGED) {
			if (wait_if_unplugged) {
				/* used during resume */
				tb_port_info(port,
					     "is unplugged (state: 7), retrying...\n");
				msleep(100);
				continue;
			}
			tb_port_info(port, "is unplugged (state: 7)\n");
			return 0;
		}
		if (state == TB_PORT_UP) {
			tb_port_info(port,
				     "is connected, link is up (state: 2)\n");
			return 1;
		}

		/*
		 * After plug-in the state is TB_PORT_CONNECTING. Give it some
		 * time.
		 */
		tb_port_info(port,
			     "is connected, link is not up (state: %d), retrying...\n",
			     state);
		msleep(100);
	}
	tb_port_warn(port,
		     "failed to reach state TB_PORT_UP. Ignoring port...\n");
	return 0;
}

/**
 * tb_port_add_nfc_credits() - add/remove non flow controlled credits to port
 *
 * Change the number of NFC credits allocated to @port by @credits. To remove
 * NFC credits pass a negative amount of credits.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
int tb_port_add_nfc_credits(struct tb_port *port, int credits)
{
	if (credits == 0)
		return 0;
	tb_port_info(port,
		     "adding %#x NFC credits (%#x -> %#x)",
		     credits,
		     port->config.nfc_credits,
		     port->config.nfc_credits + credits);
	port->config.nfc_credits += credits;
	return tb_port_write(port, &port->config.nfc_credits,
			     TB_CFG_PORT, 4, 1);
}

/**
 * tb_port_clear_counter() - clear a counter in TB_CFG_COUNTER
 *
 * Return: Returns 0 on success or an error code on failure.
 */
int tb_port_clear_counter(struct tb_port *port, int counter)
{
	u32 zero[3] = { 0, 0, 0 };
	tb_port_info(port, "clearing counter %d\n", counter);
	return tb_port_write(port, zero, TB_CFG_COUNTERS, 3 * counter, 3);
}

/**
 * tb_init_port() - initialize a port
 *
 * This is a helper method for tb_switch_alloc. Does not check or initialize
 * any downstream switches.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
static int tb_init_port(struct tb_port *port)
{
	int res;
	int cap;

	res = tb_port_read(port, &port->config, TB_CFG_PORT, 0, 8);
	if (res)
		return res;

	/* Port 0 is the switch itself and has no PHY. */
	if (port->config.type == TB_TYPE_PORT && port->port != 0) {
		cap = tb_port_find_cap(port, TB_PORT_CAP_PHY);

		if (cap > 0)
			port->cap_phy = cap;
		else
			tb_port_WARN(port, "non switch port without a PHY\n");
	}

	tb_dump_port(port->sw->tb, &port->config);

	/* TODO: Read dual link port, DP port and more from EEPROM. */
	return 0;
}

/* switch utility functions */

static void tb_dump_switch(struct tb *tb, struct tb_regs_switch_header *sw)
{
	tb_info(tb,
		" Switch: %x:%x (Revision: %d, TB Version: %d)\n",
		sw->vendor_id, sw->device_id, sw->revision,
		sw->thunderbolt_version);
	tb_info(tb, "  Max Port Number: %d\n", sw->max_port_number);
	tb_info(tb, "  Config:\n");
	tb_info(tb,
		"   Upstream Port Number: %d Depth: %d Route String: %#llx Enabled: %d, PlugEventsDelay: %dms\n",
		sw->upstream_port_number, sw->depth,
		(((u64) sw->route_hi) << 32) | sw->route_lo,
		sw->enabled, sw->plug_events_delay);
	tb_info(tb,
		"   unknown1: %#x unknown4: %#x\n",
		sw->__unknown1, sw->__unknown4);
}

/**
 * tb_switch_reset() - reconfigure route, enable and send TB_CFG_PKG_RESET
 *
 * Return: Returns 0 on success or an error code on failure.
 */
int tb_switch_reset(struct tb *tb, u64 route)
{
	struct tb_cfg_result res;
	struct tb_regs_switch_header header = {
		header.route_hi = route >> 32,
		header.route_lo = route,
		header.enabled = true,
	};
	tb_info(tb, "resetting switch at %llx\n", route);
	res.err = tb_cfg_write(tb->ctl, ((u32 *) &header) + 2, route,
			       0, 2, 2, 2);
	if (res.err)
		return res.err;
	res = tb_cfg_reset(tb->ctl, route, TB_CFG_DEFAULT_TIMEOUT);
	if (res.err > 0)
		return -EIO;
	return res.err;
}

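/*
 * get_switch_at_route() - follow a route string downstream from @sw
 *
 * Each hop consumes the lowest byte of the route as the port to exit
 * through, recursing until the remaining route is 0 and the target
 * switch has been reached. NULL is returned if the route leads out of
 * an invalid, upstream or unconnected port.
 */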
struct tb_switch *get_switch_at_route(struct tb_switch *sw, u64 route)
{
	u8 next_port = route; /*
			       * Routes use a stride of 8 bits,
			       * even though a port index has 6 bits at most.
			       */
	if (route == 0)
		return sw;
	if (next_port > sw->config.max_port_number)
		return NULL;
	if (tb_is_upstream_port(&sw->ports[next_port]))
		return NULL;
	if (!sw->ports[next_port].remote)
		return NULL;
	return get_switch_at_route(sw->ports[next_port].remote->sw,
				   route >> TB_ROUTE_SHIFT);
}

/**
 * tb_plug_events_active() - enable/disable plug events on a switch
 *
 * Also configures a sane plug_events_delay of 255ms.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
static int tb_plug_events_active(struct tb_switch *sw, bool active)
{
	u32 data;
	int res;

	if (!sw->config.enabled)
		return 0;

	sw->config.plug_events_delay = 0xff;
	res = tb_sw_write(sw, ((u32 *) &sw->config) + 4, TB_CFG_SWITCH, 4, 1);
	if (res)
		return res;

	res = tb_sw_read(sw, &data, TB_CFG_SWITCH, sw->cap_plug_events + 1, 1);
	if (res)
		return res;

	if (active) {
		data = data & 0xFFFFFF83;
		switch (sw->config.device_id) {
		case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
		case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE:
		case PCI_DEVICE_ID_INTEL_PORT_RIDGE:
			break;
		default:
			data |= 4;
		}
	} else {
		data = data | 0x7c;
	}
	return tb_sw_write(sw, &data, TB_CFG_SWITCH,
			   sw->cap_plug_events + 1, 1);
}

static ssize_t authorized_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%u\n", sw->authorized);
}

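/*
 * Values accepted through the authorized attribute: 1 approves the
 * switch (using the stored key when one is set), 2 runs the key
 * challenge. Anything else is rejected, and an already authorized
 * switch cannot be changed back.
 */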
static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val)
{
	int ret = -EINVAL;

	if (mutex_lock_interruptible(&switch_lock))
		return -ERESTARTSYS;

	if (sw->authorized)
		goto unlock;

	switch (val) {
	/* Approve switch */
	case 1:
		if (sw->key)
			ret = tb_domain_approve_switch_key(sw->tb, sw);
		else
			ret = tb_domain_approve_switch(sw->tb, sw);
		break;

	/* Challenge switch */
	case 2:
		if (sw->key)
			ret = tb_domain_challenge_switch_key(sw->tb, sw);
		break;

	default:
		break;
	}

	if (!ret) {
		sw->authorized = val;
		/* Notify status change to the userspace */
		kobject_uevent(&sw->dev.kobj, KOBJ_CHANGE);
	}

unlock:
	mutex_unlock(&switch_lock);
	return ret;
}

static ssize_t authorized_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct tb_switch *sw = tb_to_switch(dev);
	unsigned int val;
	ssize_t ret;

	ret = kstrtouint(buf, 0, &val);
	if (ret)
		return ret;
	if (val > 2)
		return -EINVAL;

	ret = tb_switch_set_authorized(sw, val);

	return ret ? ret : count;
}
static DEVICE_ATTR_RW(authorized);

static ssize_t device_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%#x\n", sw->device);
}
static DEVICE_ATTR_RO(device);

static ssize_t
device_name_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%s\n", sw->device_name ? sw->device_name : "");
}
static DEVICE_ATTR_RO(device_name);

static ssize_t key_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);
	ssize_t ret;

	if (mutex_lock_interruptible(&switch_lock))
		return -ERESTARTSYS;

	if (sw->key)
		ret = sprintf(buf, "%*phN\n", TB_SWITCH_KEY_SIZE, sw->key);
	else
		ret = sprintf(buf, "\n");

	mutex_unlock(&switch_lock);
	return ret;
}

static ssize_t key_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count)
{
	struct tb_switch *sw = tb_to_switch(dev);
	u8 key[TB_SWITCH_KEY_SIZE];
	ssize_t ret = count;
	bool clear = false;

	if (!strcmp(buf, "\n"))
		clear = true;
	else if (hex2bin(key, buf, sizeof(key)))
		return -EINVAL;

	if (mutex_lock_interruptible(&switch_lock))
		return -ERESTARTSYS;

	if (sw->authorized) {
		ret = -EBUSY;
	} else {
		kfree(sw->key);
		if (clear) {
			sw->key = NULL;
		} else {
			sw->key = kmemdup(key, sizeof(key), GFP_KERNEL);
			if (!sw->key)
				ret = -ENOMEM;
		}
	}

	mutex_unlock(&switch_lock);
	return ret;
}
static DEVICE_ATTR(key, 0600, key_show, key_store);

static ssize_t nvm_authenticate_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);
	u32 status;

	nvm_get_auth_status(sw, &status);
	return sprintf(buf, "%#x\n", status);
}

static ssize_t nvm_authenticate_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	struct tb_switch *sw = tb_to_switch(dev);
	bool val;
	int ret;

	if (mutex_lock_interruptible(&switch_lock))
		return -ERESTARTSYS;

	/* If NVMem devices are not yet added */
	if (!sw->nvm) {
		ret = -EAGAIN;
		goto exit_unlock;
	}

	ret = kstrtobool(buf, &val);
	if (ret)
		goto exit_unlock;

	/* Always clear the authentication status */
	nvm_clear_auth_status(sw);

	if (val) {
		ret = nvm_validate_and_write(sw);
		if (ret)
			goto exit_unlock;

		sw->nvm->authenticating = true;

		if (!tb_route(sw))
			ret = nvm_authenticate_host(sw);
		else
			ret = nvm_authenticate_device(sw);
	}

exit_unlock:
	mutex_unlock(&switch_lock);

	if (ret)
		return ret;
	return count;
}
static DEVICE_ATTR_RW(nvm_authenticate);

static ssize_t nvm_version_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);
	int ret;

	if (mutex_lock_interruptible(&switch_lock))
		return -ERESTARTSYS;

	if (sw->safe_mode)
		ret = -ENODATA;
	else if (!sw->nvm)
		ret = -EAGAIN;
	else
		ret = sprintf(buf, "%x.%x\n", sw->nvm->major, sw->nvm->minor);

	mutex_unlock(&switch_lock);

	return ret;
}
static DEVICE_ATTR_RO(nvm_version);

static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%#x\n", sw->vendor);
}
static DEVICE_ATTR_RO(vendor);

static ssize_t
vendor_name_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%s\n", sw->vendor_name ? sw->vendor_name : "");
}
static DEVICE_ATTR_RO(vendor_name);

static ssize_t unique_id_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%pUb\n", sw->uuid);
}
static DEVICE_ATTR_RO(unique_id);

static struct attribute *switch_attrs[] = {
	&dev_attr_authorized.attr,
	&dev_attr_device.attr,
	&dev_attr_device_name.attr,
	&dev_attr_key.attr,
	&dev_attr_nvm_authenticate.attr,
	&dev_attr_nvm_version.attr,
	&dev_attr_vendor.attr,
	&dev_attr_vendor_name.attr,
	&dev_attr_unique_id.attr,
	NULL,
};

static umode_t switch_attr_is_visible(struct kobject *kobj,
				      struct attribute *attr, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct tb_switch *sw = tb_to_switch(dev);

	if (attr == &dev_attr_key.attr) {
		if (tb_route(sw) &&
		    sw->tb->security_level == TB_SECURITY_SECURE &&
		    sw->security_level == TB_SECURITY_SECURE)
			return attr->mode;
		return 0;
	} else if (attr == &dev_attr_nvm_authenticate.attr ||
		   attr == &dev_attr_nvm_version.attr) {
		if (sw->dma_port)
			return attr->mode;
		return 0;
	}

	return sw->safe_mode ? 0 : attr->mode;
}

static struct attribute_group switch_group = {
	.is_visible = switch_attr_is_visible,
	.attrs = switch_attrs,
};

static const struct attribute_group *switch_groups[] = {
	&switch_group,
	NULL,
};

static void tb_switch_release(struct device *dev)
{
	struct tb_switch *sw = tb_to_switch(dev);

	dma_port_free(sw->dma_port);

	kfree(sw->uuid);
	kfree(sw->device_name);
	kfree(sw->vendor_name);
	kfree(sw->ports);
	kfree(sw->drom);
	kfree(sw->key);
	kfree(sw);
}

struct device_type tb_switch_type = {
	.name = "thunderbolt_device",
	.release = tb_switch_release,
};

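/*
 * Map the controller device ID to a Thunderbolt hardware generation:
 * Light Ridge through Redwood Ridge are generation 1, Falcon Ridge and
 * Win Ridge generation 2, Alpine Ridge generation 3. Anything
 * unrecognized is conservatively treated as generation 1.
 */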
static int tb_switch_get_generation(struct tb_switch *sw)
{
	switch (sw->config.device_id) {
	case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
	case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE:
	case PCI_DEVICE_ID_INTEL_LIGHT_PEAK:
	case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_2C:
	case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
	case PCI_DEVICE_ID_INTEL_PORT_RIDGE:
	case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_4C_BRIDGE:
		return 1;

	case PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE:
		return 2;

	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE:
		return 3;

	default:
		/*
		 * For unknown switches assume generation to be 1 to be
		 * on the safe side.
		 */
		tb_sw_warn(sw, "unsupported switch device id %#x\n",
			   sw->config.device_id);
		return 1;
	}
}

/**
 * tb_switch_alloc() - allocate a switch
 * @tb: Pointer to the owning domain
 * @parent: Parent device for this switch
 * @route: Route string for this switch
 *
 * Allocates and initializes a switch. Will not upload configuration to
 * the switch. For that you need to call tb_switch_configure()
 * separately. The returned switch should be released by calling
 * tb_switch_put().
 *
 * Return: Pointer to the allocated switch or %NULL in case of failure
 */
struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent,
				  u64 route)
{
	int i;
	int cap;
	struct tb_switch *sw;
	int upstream_port = tb_cfg_get_upstream_port(tb->ctl, route);
	if (upstream_port < 0)
		return NULL;

	sw = kzalloc(sizeof(*sw), GFP_KERNEL);
	if (!sw)
		return NULL;

	sw->tb = tb;
	if (tb_cfg_read(tb->ctl, &sw->config, route, 0, TB_CFG_SWITCH, 0, 5))
		goto err_free_sw_ports;

	tb_info(tb, "current switch config:\n");
	tb_dump_switch(tb, &sw->config);

	/* configure switch */
	sw->config.upstream_port_number = upstream_port;
	sw->config.depth = tb_route_length(route);
	sw->config.route_lo = route;
	sw->config.route_hi = route >> 32;
	sw->config.enabled = 0;

	/* initialize ports */
	sw->ports = kcalloc(sw->config.max_port_number + 1, sizeof(*sw->ports),
			    GFP_KERNEL);
	if (!sw->ports)
		goto err_free_sw_ports;

	for (i = 0; i <= sw->config.max_port_number; i++) {
		/* minimum setup for tb_find_cap and tb_drom_read to work */
		sw->ports[i].sw = sw;
		sw->ports[i].port = i;
	}

	sw->generation = tb_switch_get_generation(sw);

	cap = tb_switch_find_vse_cap(sw, TB_VSE_CAP_PLUG_EVENTS);
	if (cap < 0) {
		tb_sw_warn(sw, "cannot find TB_VSE_CAP_PLUG_EVENTS aborting\n");
		goto err_free_sw_ports;
	}
	sw->cap_plug_events = cap;

	/* Root switch is always authorized */
	if (!route)
		sw->authorized = true;

	device_initialize(&sw->dev);
	sw->dev.parent = parent;
	sw->dev.bus = &tb_bus_type;
	sw->dev.type = &tb_switch_type;
	sw->dev.groups = switch_groups;
	dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));

	return sw;

err_free_sw_ports:
	kfree(sw->ports);
	kfree(sw);

	return NULL;
}

/**
 * tb_switch_alloc_safe_mode() - allocate a switch that is in safe mode
 * @tb: Pointer to the owning domain
 * @parent: Parent device for this switch
 * @route: Route string for this switch
 *
 * This creates a switch in safe mode. Such a switch lacks pretty much
 * all capabilities except the DMA configuration port, until it is
 * flashed with a valid NVM firmware.
 *
 * The returned switch must be released by calling tb_switch_put().
 *
 * Return: Pointer to the allocated switch or %NULL in case of failure
 */
struct tb_switch *
tb_switch_alloc_safe_mode(struct tb *tb, struct device *parent, u64 route)
{
	struct tb_switch *sw;

	sw = kzalloc(sizeof(*sw), GFP_KERNEL);
	if (!sw)
		return NULL;

	sw->tb = tb;
	sw->config.depth = tb_route_length(route);
	sw->config.route_hi = upper_32_bits(route);
	sw->config.route_lo = lower_32_bits(route);
	sw->safe_mode = true;

	device_initialize(&sw->dev);
	sw->dev.parent = parent;
	sw->dev.bus = &tb_bus_type;
	sw->dev.type = &tb_switch_type;
	sw->dev.groups = switch_groups;
	dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));

	return sw;
}

/**
 * tb_switch_configure() - Uploads configuration to the switch
 * @sw: Switch to configure
 *
 * Call this function before the switch is added to the system. It will
 * upload the configuration to the switch and make it available for the
 * connection manager to use.
 *
 * Return: %0 in case of success and negative errno in case of failure
 */
int tb_switch_configure(struct tb_switch *sw)
{
	struct tb *tb = sw->tb;
	u64 route;
	int ret;

	route = tb_route(sw);
	tb_info(tb,
		"initializing Switch at %#llx (depth: %d, up port: %d)\n",
		route, tb_route_length(route), sw->config.upstream_port_number);

	if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL)
		tb_sw_warn(sw, "unknown switch vendor id %#x\n",
			   sw->config.vendor_id);

	sw->config.enabled = 1;

	/* upload configuration */
	ret = tb_sw_write(sw, 1 + (u32 *)&sw->config, TB_CFG_SWITCH, 1, 3);
	if (ret)
		return ret;

	return tb_plug_events_active(sw, true);
}

static void tb_switch_set_uuid(struct tb_switch *sw)
{
	u32 uuid[4];
	int cap;

	if (sw->uuid)
		return;

	/*
	 * The newer controllers include fused UUID as part of link
	 * controller specific registers
	 */
	cap = tb_switch_find_vse_cap(sw, TB_VSE_CAP_LINK_CONTROLLER);
	if (cap > 0) {
		tb_sw_read(sw, uuid, TB_CFG_SWITCH, cap + 3, 4);
	} else {
		/*
		 * ICM generates UUID based on UID and fills the upper
		 * two words with ones. This is not strictly following
		 * UUID format but we want to be compatible with it so
		 * we do the same here.
		 */
		uuid[0] = sw->uid & 0xffffffff;
		uuid[1] = (sw->uid >> 32) & 0xffffffff;
		uuid[2] = 0xffffffff;
		uuid[3] = 0xffffffff;
	}

	sw->uuid = kmemdup(uuid, sizeof(uuid), GFP_KERNEL);
}

static int tb_switch_add_dma_port(struct tb_switch *sw)
{
	u32 status;
	int ret;

	switch (sw->generation) {
	case 3:
		break;

	case 2:
		/* Only root switch can be upgraded */
		if (tb_route(sw))
			return 0;
		break;

	default:
		/*
		 * DMA port is the only thing available when the switch
		 * is in safe mode.
		 */
		if (!sw->safe_mode)
			return 0;
		break;
	}

	if (sw->no_nvm_upgrade)
		return 0;

	sw->dma_port = dma_port_alloc(sw);
	if (!sw->dma_port)
		return 0;

	/*
	 * Check status of the previous flash authentication. If there
	 * is one we need to power cycle the switch in any case to make
	 * it functional again.
	 */
	ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
	if (ret <= 0)
		return ret;

	if (status) {
		tb_sw_info(sw, "switch flash authentication failed\n");
		tb_switch_set_uuid(sw);
		nvm_set_auth_status(sw, status);
	}

	tb_sw_info(sw, "power cycling the switch now\n");
	dma_port_power_cycle(sw->dma_port);

	/*
	 * We return an error here, which causes the switch add to fail.
	 * The switch should appear back once the power cycle is
	 * complete.
	 */
	return -ESHUTDOWN;
}

/**
 * tb_switch_add() - Add a switch to the domain
 * @sw: Switch to add
 *
 * This is the last step in adding a switch to the domain. It reads
 * identification information from the DROM and initializes the ports
 * so that they can be used to connect other switches. The switch will
 * be exposed to the userspace when this function successfully returns.
 * To remove and release the switch, call tb_switch_remove().
 *
 * Return: %0 in case of success and negative errno in case of failure
 */
int tb_switch_add(struct tb_switch *sw)
{
	int i, ret;

	/*
	 * Initialize DMA control port now before we read DROM. Recent
	 * host controllers have more complete DROM on NVM that includes
	 * vendor and model identification strings which we then expose
	 * to the userspace. NVM can be accessed through DMA
	 * configuration based mailbox.
	 */
	ret = tb_switch_add_dma_port(sw);
	if (ret)
		return ret;

	if (!sw->safe_mode) {
		/* read drom */
		ret = tb_drom_read(sw);
		if (ret) {
			tb_sw_warn(sw, "tb_eeprom_read_rom failed\n");
			return ret;
		}
		tb_sw_info(sw, "uid: %#llx\n", sw->uid);

		tb_switch_set_uuid(sw);

		for (i = 0; i <= sw->config.max_port_number; i++) {
			if (sw->ports[i].disabled) {
				tb_port_info(&sw->ports[i], "disabled by eeprom\n");
				continue;
			}
			ret = tb_init_port(&sw->ports[i]);
			if (ret)
				return ret;
		}
	}

	ret = device_add(&sw->dev);
	if (ret)
		return ret;

	ret = tb_switch_nvm_add(sw);
	if (ret)
		device_del(&sw->dev);

	return ret;
}

/**
 * tb_switch_remove() - Remove and release a switch
 * @sw: Switch to remove
 *
 * This will remove the switch from the domain and release it after the
 * last reference count drops to zero. If there are switches connected
 * below this switch, they will be removed as well.
 */
void tb_switch_remove(struct tb_switch *sw)
{
	int i;

	/* port 0 is the switch itself and never has a remote */
	for (i = 1; i <= sw->config.max_port_number; i++) {
		if (tb_is_upstream_port(&sw->ports[i]))
			continue;
		if (sw->ports[i].remote)
			tb_switch_remove(sw->ports[i].remote->sw);
		sw->ports[i].remote = NULL;
	}

	if (!sw->is_unplugged)
		tb_plug_events_active(sw, false);

	tb_switch_nvm_remove(sw);
	device_unregister(&sw->dev);
}

/**
 * tb_sw_set_unplugged() - set is_unplugged on switch and downstream switches
 */
void tb_sw_set_unplugged(struct tb_switch *sw)
{
	int i;
	if (sw == sw->tb->root_switch) {
		tb_sw_WARN(sw, "cannot unplug root switch\n");
		return;
	}
	if (sw->is_unplugged) {
		tb_sw_WARN(sw, "is_unplugged already set\n");
		return;
	}
	sw->is_unplugged = true;
	for (i = 0; i <= sw->config.max_port_number; i++) {
		if (!tb_is_upstream_port(&sw->ports[i]) && sw->ports[i].remote)
			tb_sw_set_unplugged(sw->ports[i].remote->sw);
	}
}

int tb_switch_resume(struct tb_switch *sw)
{
	int i, err;
	tb_sw_info(sw, "resuming switch\n");

	/*
	 * Check the UID of the connected switches, except for the root
	 * switch, which we assume cannot be removed.
	 */
	if (tb_route(sw)) {
		u64 uid;

		err = tb_drom_read_uid_only(sw, &uid);
		if (err) {
			tb_sw_warn(sw, "uid read failed\n");
			return err;
		}
		if (sw->uid != uid) {
			tb_sw_info(sw,
				"changed while suspended (uid %#llx -> %#llx)\n",
				sw->uid, uid);
			return -ENODEV;
		}
	}

	/* upload configuration */
	err = tb_sw_write(sw, 1 + (u32 *) &sw->config, TB_CFG_SWITCH, 1, 3);
	if (err)
		return err;

	err = tb_plug_events_active(sw, true);
	if (err)
		return err;

	/* check for surviving downstream switches */
	for (i = 1; i <= sw->config.max_port_number; i++) {
		struct tb_port *port = &sw->ports[i];
		if (tb_is_upstream_port(port))
			continue;
		if (!port->remote)
			continue;
		if (tb_wait_for_port(port, true) <= 0
			|| tb_switch_resume(port->remote->sw)) {
			tb_port_warn(port,
				     "lost during suspend, disconnecting\n");
			tb_sw_set_unplugged(port->remote->sw);
		}
	}
	return 0;
}

void tb_switch_suspend(struct tb_switch *sw)
{
	int i, err;
	err = tb_plug_events_active(sw, false);
	if (err)
		return;

	for (i = 1; i <= sw->config.max_port_number; i++) {
		if (!tb_is_upstream_port(&sw->ports[i]) && sw->ports[i].remote)
			tb_switch_suspend(sw->ports[i].remote->sw);
	}
	/*
	 * TODO: invoke tb_cfg_prepare_to_sleep here? does not seem to have any
	 * effect?
	 */
}

struct tb_sw_lookup {
	struct tb *tb;
	u8 link;
	u8 depth;
	const uuid_t *uuid;
};

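/*
 * Match callback for bus_find_device(): a switch matches either by
 * UUID when one is given, or by link and depth, with the root switch
 * matched by depth 0 alone. The lookup helpers below hand back a
 * device reference, which is why their callers must eventually call
 * tb_switch_put().
 */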
static int tb_switch_match(struct device *dev, void *data)
{
	struct tb_switch *sw = tb_to_switch(dev);
	struct tb_sw_lookup *lookup = data;

	if (!sw)
		return 0;
	if (sw->tb != lookup->tb)
		return 0;

	if (lookup->uuid)
		return !memcmp(sw->uuid, lookup->uuid, sizeof(*lookup->uuid));

	/* Root switch is matched only by depth */
	if (!lookup->depth)
		return !sw->depth;

	return sw->link == lookup->link && sw->depth == lookup->depth;
}

/**
 * tb_switch_find_by_link_depth() - Find switch by link and depth
 * @tb: Domain the switch belongs to
 * @link: Link number the switch is connected to
 * @depth: Depth of the switch in the link
 *
 * Returned switch has reference count increased so the caller needs to
 * call tb_switch_put() when done with the switch.
 */
struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link, u8 depth)
{
	struct tb_sw_lookup lookup;
	struct device *dev;

	memset(&lookup, 0, sizeof(lookup));
	lookup.tb = tb;
	lookup.link = link;
	lookup.depth = depth;

	dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
	if (dev)
		return tb_to_switch(dev);

	return NULL;
}

/**
 * tb_switch_find_by_uuid() - Find switch by UUID
 * @tb: Domain the switch belongs to
 * @uuid: UUID to look for
 *
 * Returned switch has reference count increased so the caller needs to
 * call tb_switch_put() when done with the switch.
 */
struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid)
{
	struct tb_sw_lookup lookup;
	struct device *dev;

	memset(&lookup, 0, sizeof(lookup));
	lookup.tb = tb;
	lookup.uuid = uuid;

	dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
	if (dev)
		return tb_to_switch(dev);

	return NULL;
}

void tb_switch_exit(void)
{
	ida_destroy(&nvm_ida);
}