/*
 * Thunderbolt Cactus Ridge driver - switch/port utility functions
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 */

#include <linux/delay.h>
#include <linux/idr.h>
#include <linux/nvmem-provider.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include "tb.h"

/* Switch authorization from userspace is serialized by this lock */
static DEFINE_MUTEX(switch_lock);

/* Switch NVM support */

#define NVM_DEVID		0x05
#define NVM_VERSION		0x08
#define NVM_CSS			0x10
#define NVM_FLASH_SIZE		0x45

#define NVM_MIN_SIZE		SZ_32K
#define NVM_MAX_SIZE		SZ_512K

static DEFINE_IDA(nvm_ida);

struct nvm_auth_status {
	struct list_head list;
	uuid_t uuid;
	u32 status;
};

/*
 * Hold NVM authentication failure status per switch. This information
 * needs to stay around even when the switch gets power cycled so we
 * keep it separately.
 */
static LIST_HEAD(nvm_auth_status_cache);
static DEFINE_MUTEX(nvm_auth_status_lock);

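/*
 * Look up the cached authentication status entry for @sw by UUID.
 * Callers hold nvm_auth_status_lock.
 */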
static struct nvm_auth_status *__nvm_get_auth_status(const struct tb_switch *sw)
{
	struct nvm_auth_status *st;

	list_for_each_entry(st, &nvm_auth_status_cache, list) {
		if (uuid_equal(&st->uuid, sw->uuid))
			return st;
	}

	return NULL;
}

static void nvm_get_auth_status(const struct tb_switch *sw, u32 *status)
{
	struct nvm_auth_status *st;

	mutex_lock(&nvm_auth_status_lock);
	st = __nvm_get_auth_status(sw);
	mutex_unlock(&nvm_auth_status_lock);

	*status = st ? st->status : 0;
}

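/*
 * Record the NVM authentication failure status for @sw, allocating a
 * new cache entry keyed by the switch UUID if one does not exist yet.
 */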
static void nvm_set_auth_status(const struct tb_switch *sw, u32 status)
{
	struct nvm_auth_status *st;

	if (WARN_ON(!sw->uuid))
		return;

	mutex_lock(&nvm_auth_status_lock);
	st = __nvm_get_auth_status(sw);

	if (!st) {
		st = kzalloc(sizeof(*st), GFP_KERNEL);
		if (!st)
			goto unlock;

		memcpy(&st->uuid, sw->uuid, sizeof(st->uuid));
		INIT_LIST_HEAD(&st->list);
		list_add_tail(&st->list, &nvm_auth_status_cache);
	}

	st->status = status;
unlock:
	mutex_unlock(&nvm_auth_status_lock);
}

static void nvm_clear_auth_status(const struct tb_switch *sw)
{
	struct nvm_auth_status *st;

	mutex_lock(&nvm_auth_status_lock);
	st = __nvm_get_auth_status(sw);
	if (st) {
		list_del(&st->list);
		kfree(st);
	}
	mutex_unlock(&nvm_auth_status_lock);
}

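/*
 * Validate the NVM image cached in sw->nvm->buf (size limits, FARB
 * pointer, digital section size and device ID) and write it to the
 * switch through the DMA configuration port. On pre generation 3
 * hardware the CSS headers are written to a separate address first.
 */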
static int nvm_validate_and_write(struct tb_switch *sw)
{
	unsigned int image_size, hdr_size;
	const u8 *buf = sw->nvm->buf;
	u16 ds_size;
	int ret;

	if (!buf)
		return -EINVAL;

	image_size = sw->nvm->buf_data_size;
	if (image_size < NVM_MIN_SIZE || image_size > NVM_MAX_SIZE)
		return -EINVAL;

	/*
	 * FARB pointer must point inside the image and must at least
	 * contain parts of the digital section we will be reading here.
	 */
	hdr_size = (*(u32 *)buf) & 0xffffff;
	if (hdr_size + NVM_DEVID + 2 >= image_size)
		return -EINVAL;

	/* Digital section start should be aligned to 4k page */
	if (!IS_ALIGNED(hdr_size, SZ_4K))
		return -EINVAL;

	/*
	 * Read digital section size and check that it also fits inside
	 * the image.
	 */
	ds_size = *(u16 *)(buf + hdr_size);
	if (ds_size >= image_size)
		return -EINVAL;

	if (!sw->safe_mode) {
		u16 device_id;

		/*
		 * Make sure the device ID in the image matches the one
		 * we read from the switch config space.
		 */
		device_id = *(u16 *)(buf + hdr_size + NVM_DEVID);
		if (device_id != sw->config.device_id)
			return -EINVAL;

		if (sw->generation < 3) {
			/* Write CSS headers first */
			ret = dma_port_flash_write(sw->dma_port,
				DMA_PORT_CSS_ADDRESS, buf + NVM_CSS,
				DMA_PORT_CSS_MAX_SIZE);
			if (ret)
				return ret;
		}

		/* Skip headers in the image */
		buf += hdr_size;
		image_size -= hdr_size;
	}

	return dma_port_flash_write(sw->dma_port, 0, buf, image_size);
}

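/*
 * Start NVM authentication on the host (root) switch: tear down the
 * existing PCIe paths first, or just power cycle if the switch is
 * already in safe mode.
 */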
static int nvm_authenticate_host(struct tb_switch *sw)
{
	int ret;

	/*
	 * Root switch NVM upgrade requires that we disconnect the
	 * existing PCIe paths first (in case it is not in safe mode
	 * already).
	 */
	if (!sw->safe_mode) {
		ret = tb_domain_disconnect_pcie_paths(sw->tb);
		if (ret)
			return ret;
		/*
		 * The host controller goes away pretty soon after this if
		 * everything goes well so getting a timeout is expected.
		 */
		ret = dma_port_flash_update_auth(sw->dma_port);
		return ret == -ETIMEDOUT ? 0 : ret;
	}

	/*
	 * From safe mode we can get out by just power cycling the
	 * switch.
	 */
	dma_port_power_cycle(sw->dma_port);
	return 0;
}

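/*
 * Start NVM authentication on a device switch and poll the DMA port
 * for the result. Once a result is available the switch is power
 * cycled so the new image is taken into use.
 */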
static int nvm_authenticate_device(struct tb_switch *sw)
{
	int ret, retries = 10;

	ret = dma_port_flash_update_auth(sw->dma_port);
	if (ret && ret != -ETIMEDOUT)
		return ret;

	/*
	 * Poll here for the authentication status. It takes some time
	 * for the device to respond (we get timeout for a while). Once
	 * we get a response the device needs to be power cycled in order
	 * for the new NVM to be taken into use.
	 */
	do {
		u32 status;

		ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
		if (ret < 0 && ret != -ETIMEDOUT)
			return ret;
		if (ret > 0) {
			if (status) {
				tb_sw_warn(sw, "failed to authenticate NVM\n");
				nvm_set_auth_status(sw, status);
			}

			tb_sw_info(sw, "power cycling the switch now\n");
			dma_port_power_cycle(sw->dma_port);
			return 0;
		}

		msleep(500);
	} while (--retries);

	return -ETIMEDOUT;
}

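/*
 * nvmem read callback for the active NVM portion: reads go straight to
 * the switch flash through the DMA configuration port.
 */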
static int tb_switch_nvm_read(void *priv, unsigned int offset, void *val,
			      size_t bytes)
{
	struct tb_switch *sw = priv;

	return dma_port_flash_read(sw->dma_port, offset, val, bytes);
}

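/*
 * nvmem write callback for the non-active NVM portion: writes are only
 * buffered locally; the image is flashed when the user authenticates it.
 */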
static int tb_switch_nvm_write(void *priv, unsigned int offset, void *val,
			       size_t bytes)
{
	struct tb_switch *sw = priv;
	int ret = 0;

	if (mutex_lock_interruptible(&switch_lock))
		return -ERESTARTSYS;

	/*
	 * Since writing the NVM image might require some special steps,
	 * for example when CSS headers are written, we cache the image
	 * locally here and handle the special cases when the user asks
	 * us to authenticate the image.
	 */
	if (!sw->nvm->buf) {
		sw->nvm->buf = vmalloc(NVM_MAX_SIZE);
		if (!sw->nvm->buf) {
			ret = -ENOMEM;
			goto unlock;
		}
	}

	sw->nvm->buf_data_size = offset + bytes;
	memcpy(sw->nvm->buf + offset, val, bytes);

unlock:
	mutex_unlock(&switch_lock);

	return ret;
}

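/*
 * Register one nvmem device for the switch: read-only "nvm_active" for
 * the image currently in use, or root-only writable "nvm_non_active"
 * for uploading a new image.
 */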
static struct nvmem_device *register_nvmem(struct tb_switch *sw, int id,
					   size_t size, bool active)
{
	struct nvmem_config config;

	memset(&config, 0, sizeof(config));

	if (active) {
		config.name = "nvm_active";
		config.reg_read = tb_switch_nvm_read;
		config.read_only = true;
	} else {
		config.name = "nvm_non_active";
		config.reg_write = tb_switch_nvm_write;
		config.root_only = true;
	}

	config.id = id;
	config.stride = 4;
	config.word_size = 4;
	config.size = size;
	config.dev = &sw->dev;
	config.owner = THIS_MODULE;
	config.priv = sw;

	return nvmem_register(&config);
}

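/*
 * Register the nvmem devices for a switch that has a usable DMA port.
 * In normal mode both the active portion (with size and version read
 * from the flash) and the non-active portion are registered; in safe
 * mode only the non-active one is available.
 */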
static int tb_switch_nvm_add(struct tb_switch *sw)
{
	struct nvmem_device *nvm_dev;
	struct tb_switch_nvm *nvm;
	u32 val;
	int ret;

	if (!sw->dma_port)
		return 0;

	nvm = kzalloc(sizeof(*nvm), GFP_KERNEL);
	if (!nvm)
		return -ENOMEM;

	nvm->id = ida_simple_get(&nvm_ida, 0, 0, GFP_KERNEL);

	/*
	 * If the switch is in safe-mode the only accessible portion of
	 * the NVM is the non-active one where userspace is expected to
	 * write new functional NVM.
	 */
	if (!sw->safe_mode) {
		u32 nvm_size, hdr_size;

		ret = dma_port_flash_read(sw->dma_port, NVM_FLASH_SIZE, &val,
					  sizeof(val));
		if (ret)
			goto err_ida;

		hdr_size = sw->generation < 3 ? SZ_8K : SZ_16K;
		nvm_size = (SZ_1M << (val & 7)) / 8;
		nvm_size = (nvm_size - hdr_size) / 2;

		ret = dma_port_flash_read(sw->dma_port, NVM_VERSION, &val,
					  sizeof(val));
		if (ret)
			goto err_ida;

		nvm->major = val >> 16;
		nvm->minor = val >> 8;

		nvm_dev = register_nvmem(sw, nvm->id, nvm_size, true);
		if (IS_ERR(nvm_dev)) {
			ret = PTR_ERR(nvm_dev);
			goto err_ida;
		}
		nvm->active = nvm_dev;
	}

	nvm_dev = register_nvmem(sw, nvm->id, NVM_MAX_SIZE, false);
	if (IS_ERR(nvm_dev)) {
		ret = PTR_ERR(nvm_dev);
		goto err_nvm_active;
	}
	nvm->non_active = nvm_dev;

	mutex_lock(&switch_lock);
	sw->nvm = nvm;
	mutex_unlock(&switch_lock);

	return 0;

err_nvm_active:
	if (nvm->active)
		nvmem_unregister(nvm->active);
err_ida:
	ida_simple_remove(&nvm_ida, nvm->id);
	kfree(nvm);

	return ret;
}

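/* Unregister the nvmem devices and free the cached NVM image, if any */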
static void tb_switch_nvm_remove(struct tb_switch *sw)
{
	struct tb_switch_nvm *nvm;

	mutex_lock(&switch_lock);
	nvm = sw->nvm;
	sw->nvm = NULL;
	mutex_unlock(&switch_lock);

	if (!nvm)
		return;

	/* Remove authentication status in case the switch is unplugged */
	if (!nvm->authenticating)
		nvm_clear_auth_status(sw);

	nvmem_unregister(nvm->non_active);
	if (nvm->active)
		nvmem_unregister(nvm->active);
	ida_simple_remove(&nvm_ida, nvm->id);
	vfree(nvm->buf);
	kfree(nvm);
}

/* port utility functions */

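/* Translate the type field of a port config header into a human readable name */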
static const char *tb_port_type(struct tb_regs_port_header *port)
{
	switch (port->type >> 16) {
	case 0:
		switch ((u8) port->type) {
		case 0:
			return "Inactive";
		case 1:
			return "Port";
		case 2:
			return "NHI";
		default:
			return "unknown";
		}
	case 0x2:
		return "Ethernet";
	case 0x8:
		return "SATA";
	case 0xe:
		return "DP/HDMI";
	case 0x10:
		return "PCIe";
	case 0x20:
		return "USB";
	default:
		return "unknown";
	}
}

static void tb_dump_port(struct tb *tb, struct tb_regs_port_header *port)
{
	tb_info(tb,
		" Port %d: %x:%x (Revision: %d, TB Version: %d, Type: %s (%#x))\n",
		port->port_number, port->vendor_id, port->device_id,
		port->revision, port->thunderbolt_version, tb_port_type(port),
		port->type);
	tb_info(tb, " Max hop id (in/out): %d/%d\n",
		port->max_in_hop_id, port->max_out_hop_id);
	tb_info(tb, " Max counters: %d\n", port->max_counters);
	tb_info(tb, " NFC Credits: %#x\n", port->nfc_credits);
}

/**
 * tb_port_state() - get connectedness state of a port
 *
 * The port must have a TB_CAP_PHY (i.e. it should be a real port).
 *
 * Return: Returns an enum tb_port_state on success or an error code on failure.
 */
static int tb_port_state(struct tb_port *port)
{
	struct tb_cap_phy phy;
	int res;
	if (port->cap_phy == 0) {
		tb_port_WARN(port, "does not have a PHY\n");
		return -EINVAL;
	}
	res = tb_port_read(port, &phy, TB_CFG_PORT, port->cap_phy, 2);
	if (res)
		return res;
	return phy.state;
}

/**
 * tb_wait_for_port() - wait for a port to become ready
 *
 * Wait up to 1 second for a port to reach state TB_PORT_UP. If
 * wait_if_unplugged is set then we also wait if the port is in state
 * TB_PORT_UNPLUGGED (it takes a while for the device to be registered after
 * switch resume). Otherwise we only wait if a device is registered but the link
 * has not yet been established.
 *
 * Return: Returns an error code on failure. Returns 0 if the port is not
 * connected or failed to reach state TB_PORT_UP within one second. Returns 1
 * if the port is connected and in state TB_PORT_UP.
 */
int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged)
{
	int retries = 10;
	int state;
	if (!port->cap_phy) {
		tb_port_WARN(port, "does not have PHY\n");
		return -EINVAL;
	}
	if (tb_is_upstream_port(port)) {
		tb_port_WARN(port, "is the upstream port\n");
		return -EINVAL;
	}

	while (retries--) {
		state = tb_port_state(port);
		if (state < 0)
			return state;
		if (state == TB_PORT_DISABLED) {
			tb_port_info(port, "is disabled (state: 0)\n");
			return 0;
		}
		if (state == TB_PORT_UNPLUGGED) {
			if (wait_if_unplugged) {
				/* used during resume */
				tb_port_info(port,
					     "is unplugged (state: 7), retrying...\n");
				msleep(100);
				continue;
			}
			tb_port_info(port, "is unplugged (state: 7)\n");
			return 0;
		}
		if (state == TB_PORT_UP) {
			tb_port_info(port,
				     "is connected, link is up (state: 2)\n");
			return 1;
		}

		/*
		 * After plug-in the state is TB_PORT_CONNECTING. Give it some
		 * time.
		 */
		tb_port_info(port,
			     "is connected, link is not up (state: %d), retrying...\n",
			     state);
		msleep(100);
	}
	tb_port_warn(port,
		     "failed to reach state TB_PORT_UP. Ignoring port...\n");
	return 0;
}

/**
 * tb_port_add_nfc_credits() - add/remove non flow controlled credits to port
 *
 * Change the number of NFC credits allocated to @port by @credits. To remove
 * NFC credits pass a negative amount of credits.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
int tb_port_add_nfc_credits(struct tb_port *port, int credits)
{
	if (credits == 0)
		return 0;
	tb_port_info(port,
		     "adding %#x NFC credits (%#x -> %#x)",
		     credits,
		     port->config.nfc_credits,
		     port->config.nfc_credits + credits);
	port->config.nfc_credits += credits;
	return tb_port_write(port, &port->config.nfc_credits,
			     TB_CFG_PORT, 4, 1);
}

/**
 * tb_port_clear_counter() - clear a counter in TB_CFG_COUNTER
 *
 * Return: Returns 0 on success or an error code on failure.
 */
int tb_port_clear_counter(struct tb_port *port, int counter)
{
	u32 zero[3] = { 0, 0, 0 };
	tb_port_info(port, "clearing counter %d\n", counter);
	return tb_port_write(port, zero, TB_CFG_COUNTERS, 3 * counter, 3);
}

/**
 * tb_init_port() - initialize a port
 *
 * This is a helper method for tb_switch_alloc. Does not check or initialize
 * any downstream switches.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
static int tb_init_port(struct tb_port *port)
{
	int res;
	int cap;

	res = tb_port_read(port, &port->config, TB_CFG_PORT, 0, 8);
	if (res)
		return res;

	/* Port 0 is the switch itself and has no PHY. */
	if (port->config.type == TB_TYPE_PORT && port->port != 0) {
		cap = tb_port_find_cap(port, TB_PORT_CAP_PHY);

		if (cap > 0)
			port->cap_phy = cap;
		else
			tb_port_WARN(port, "non switch port without a PHY\n");
	}

	tb_dump_port(port->sw->tb, &port->config);

	/* TODO: Read dual link port, DP port and more from EEPROM. */
	return 0;

}

/* switch utility functions */

static void tb_dump_switch(struct tb *tb, struct tb_regs_switch_header *sw)
{
	tb_info(tb,
		" Switch: %x:%x (Revision: %d, TB Version: %d)\n",
		sw->vendor_id, sw->device_id, sw->revision,
		sw->thunderbolt_version);
	tb_info(tb, " Max Port Number: %d\n", sw->max_port_number);
	tb_info(tb, " Config:\n");
	tb_info(tb,
		"  Upstream Port Number: %d Depth: %d Route String: %#llx Enabled: %d, PlugEventsDelay: %dms\n",
		sw->upstream_port_number, sw->depth,
		(((u64) sw->route_hi) << 32) | sw->route_lo,
		sw->enabled, sw->plug_events_delay);
	tb_info(tb,
		"  unknown1: %#x unknown4: %#x\n",
		sw->__unknown1, sw->__unknown4);
}

/**
 * tb_switch_reset() - reconfigure route, enable and send TB_CFG_PKG_RESET
 *
 * Return: Returns 0 on success or an error code on failure.
 */
int tb_switch_reset(struct tb *tb, u64 route)
{
	struct tb_cfg_result res;
	struct tb_regs_switch_header header = {
		header.route_hi = route >> 32,
		header.route_lo = route,
		header.enabled = true,
	};
	tb_info(tb, "resetting switch at %llx\n", route);
	res.err = tb_cfg_write(tb->ctl, ((u32 *) &header) + 2, route,
			       0, 2, 2, 2);
	if (res.err)
		return res.err;
	res = tb_cfg_reset(tb->ctl, route, TB_CFG_DEFAULT_TIMEOUT);
	if (res.err > 0)
		return -EIO;
	return res.err;
}

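/*
 * Follow @route one hop at a time starting from @sw and return the
 * switch at the end of the route, or NULL if any hop along the way is
 * not connected.
 */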
struct tb_switch *get_switch_at_route(struct tb_switch *sw, u64 route)
{
	u8 next_port = route; /*
			       * Routes use a stride of 8 bits,
			       * even though a port index has 6 bits at most.
			       */
	if (route == 0)
		return sw;
	if (next_port > sw->config.max_port_number)
		return NULL;
	if (tb_is_upstream_port(&sw->ports[next_port]))
		return NULL;
	if (!sw->ports[next_port].remote)
		return NULL;
	return get_switch_at_route(sw->ports[next_port].remote->sw,
				   route >> TB_ROUTE_SHIFT);
}

/**
 * tb_plug_events_active() - enable/disable plug events on a switch
 *
 * Also configures a sane plug_events_delay of 255ms.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
static int tb_plug_events_active(struct tb_switch *sw, bool active)
{
	u32 data;
	int res;

	if (!sw->config.enabled)
		return 0;

	sw->config.plug_events_delay = 0xff;
	res = tb_sw_write(sw, ((u32 *) &sw->config) + 4, TB_CFG_SWITCH, 4, 1);
	if (res)
		return res;

	res = tb_sw_read(sw, &data, TB_CFG_SWITCH, sw->cap_plug_events + 1, 1);
	if (res)
		return res;

	if (active) {
		data = data & 0xFFFFFF83;
		switch (sw->config.device_id) {
		case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
		case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE:
		case PCI_DEVICE_ID_INTEL_PORT_RIDGE:
			break;
		default:
			data |= 4;
		}
	} else {
		data = data | 0x7c;
	}
	return tb_sw_write(sw, &data, TB_CFG_SWITCH,
			   sw->cap_plug_events + 1, 1);
}

static ssize_t authorized_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%u\n", sw->authorized);
}

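/*
 * Approve (val == 1) or challenge (val == 2) the switch through the
 * connection manager. On success the new authorization value is stored
 * and a KOBJ_CHANGE uevent is sent so userspace notices the change.
 */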
static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val)
{
	int ret = -EINVAL;

	if (mutex_lock_interruptible(&switch_lock))
		return -ERESTARTSYS;

	if (sw->authorized)
		goto unlock;

	switch (val) {
	/* Approve switch */
	case 1:
		if (sw->key)
			ret = tb_domain_approve_switch_key(sw->tb, sw);
		else
			ret = tb_domain_approve_switch(sw->tb, sw);
		break;

	/* Challenge switch */
	case 2:
		if (sw->key)
			ret = tb_domain_challenge_switch_key(sw->tb, sw);
		break;

	default:
		break;
	}

	if (!ret) {
		sw->authorized = val;
		/* Notify status change to the userspace */
		kobject_uevent(&sw->dev.kobj, KOBJ_CHANGE);
	}

unlock:
	mutex_unlock(&switch_lock);
	return ret;
}

static ssize_t authorized_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct tb_switch *sw = tb_to_switch(dev);
	unsigned int val;
	ssize_t ret;

	ret = kstrtouint(buf, 0, &val);
	if (ret)
		return ret;
	if (val > 2)
		return -EINVAL;

	ret = tb_switch_set_authorized(sw, val);

	return ret ? ret : count;
}
static DEVICE_ATTR_RW(authorized);

static ssize_t device_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%#x\n", sw->device);
}
static DEVICE_ATTR_RO(device);

static ssize_t
device_name_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%s\n", sw->device_name ? sw->device_name : "");
}
static DEVICE_ATTR_RO(device_name);

static ssize_t key_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);
	ssize_t ret;

	if (mutex_lock_interruptible(&switch_lock))
		return -ERESTARTSYS;

	if (sw->key)
		ret = sprintf(buf, "%*phN\n", TB_SWITCH_KEY_SIZE, sw->key);
	else
		ret = sprintf(buf, "\n");

	mutex_unlock(&switch_lock);
	return ret;
}

static ssize_t key_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count)
{
	struct tb_switch *sw = tb_to_switch(dev);
	u8 key[TB_SWITCH_KEY_SIZE];
	ssize_t ret = count;
	bool clear = false;

	if (!strcmp(buf, "\n"))
		clear = true;
	else if (hex2bin(key, buf, sizeof(key)))
		return -EINVAL;

	if (mutex_lock_interruptible(&switch_lock))
		return -ERESTARTSYS;

	if (sw->authorized) {
		ret = -EBUSY;
	} else {
		kfree(sw->key);
		if (clear) {
			sw->key = NULL;
		} else {
			sw->key = kmemdup(key, sizeof(key), GFP_KERNEL);
			if (!sw->key)
				ret = -ENOMEM;
		}
	}

	mutex_unlock(&switch_lock);
	return ret;
}
static DEVICE_ATTR(key, 0600, key_show, key_store);

static ssize_t nvm_authenticate_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);
	u32 status;

	nvm_get_auth_status(sw, &status);
	return sprintf(buf, "%#x\n", status);
}

static ssize_t nvm_authenticate_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	struct tb_switch *sw = tb_to_switch(dev);
	bool val;
	int ret;

	if (mutex_lock_interruptible(&switch_lock))
		return -ERESTARTSYS;

	/* If NVMem devices are not yet added */
	if (!sw->nvm) {
		ret = -EAGAIN;
		goto exit_unlock;
	}

	ret = kstrtobool(buf, &val);
	if (ret)
		goto exit_unlock;

	/* Always clear the authentication status */
	nvm_clear_auth_status(sw);

	if (val) {
		ret = nvm_validate_and_write(sw);
		if (ret)
			goto exit_unlock;

		sw->nvm->authenticating = true;

		if (!tb_route(sw))
			ret = nvm_authenticate_host(sw);
		else
			ret = nvm_authenticate_device(sw);
	}

exit_unlock:
	mutex_unlock(&switch_lock);

	if (ret)
		return ret;
	return count;
}
static DEVICE_ATTR_RW(nvm_authenticate);

static ssize_t nvm_version_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);
	int ret;

	if (mutex_lock_interruptible(&switch_lock))
		return -ERESTARTSYS;

	if (sw->safe_mode)
		ret = -ENODATA;
	else if (!sw->nvm)
		ret = -EAGAIN;
	else
		ret = sprintf(buf, "%x.%x\n", sw->nvm->major, sw->nvm->minor);

	mutex_unlock(&switch_lock);

	return ret;
}
static DEVICE_ATTR_RO(nvm_version);

static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%#x\n", sw->vendor);
}
static DEVICE_ATTR_RO(vendor);

static ssize_t
vendor_name_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%s\n", sw->vendor_name ? sw->vendor_name : "");
}
static DEVICE_ATTR_RO(vendor_name);

static ssize_t unique_id_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%pUb\n", sw->uuid);
}
static DEVICE_ATTR_RO(unique_id);

static struct attribute *switch_attrs[] = {
	&dev_attr_authorized.attr,
	&dev_attr_device.attr,
	&dev_attr_device_name.attr,
	&dev_attr_key.attr,
	&dev_attr_nvm_authenticate.attr,
	&dev_attr_nvm_version.attr,
	&dev_attr_vendor.attr,
	&dev_attr_vendor_name.attr,
	&dev_attr_unique_id.attr,
	NULL,
};

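/*
 * The key attribute is exposed only for device switches when the secure
 * connect security level is in use, the NVM attributes require a DMA
 * port, and all other attributes are hidden while the switch is in safe
 * mode.
 */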
static umode_t switch_attr_is_visible(struct kobject *kobj,
				      struct attribute *attr, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct tb_switch *sw = tb_to_switch(dev);

	if (attr == &dev_attr_key.attr) {
		if (tb_route(sw) &&
		    sw->tb->security_level == TB_SECURITY_SECURE &&
		    sw->security_level == TB_SECURITY_SECURE)
			return attr->mode;
		return 0;
	} else if (attr == &dev_attr_nvm_authenticate.attr ||
		   attr == &dev_attr_nvm_version.attr) {
		if (sw->dma_port)
			return attr->mode;
		return 0;
	}

	return sw->safe_mode ? 0 : attr->mode;
}

static struct attribute_group switch_group = {
	.is_visible = switch_attr_is_visible,
	.attrs = switch_attrs,
};

static const struct attribute_group *switch_groups[] = {
	&switch_group,
	NULL,
};

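/* Device release callback: frees the DMA port and everything owned by the switch */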
static void tb_switch_release(struct device *dev)
{
	struct tb_switch *sw = tb_to_switch(dev);

	dma_port_free(sw->dma_port);

	kfree(sw->uuid);
	kfree(sw->device_name);
	kfree(sw->vendor_name);
	kfree(sw->ports);
	kfree(sw->drom);
	kfree(sw->key);
	kfree(sw);
}

struct device_type tb_switch_type = {
	.name = "thunderbolt_device",
	.release = tb_switch_release,
};

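/* Map the switch device ID to a Thunderbolt hardware generation (1-3) */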
static int tb_switch_get_generation(struct tb_switch *sw)
{
	switch (sw->config.device_id) {
	case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
	case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE:
	case PCI_DEVICE_ID_INTEL_LIGHT_PEAK:
	case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_2C:
	case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
	case PCI_DEVICE_ID_INTEL_PORT_RIDGE:
	case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_4C_BRIDGE:
		return 1;

	case PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE:
		return 2;

	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE:
		return 3;

	default:
		/*
		 * For unknown switches assume generation to be 1 to be
		 * on the safe side.
		 */
		tb_sw_warn(sw, "unsupported switch device id %#x\n",
			   sw->config.device_id);
		return 1;
	}
}

/**
 * tb_switch_alloc() - allocate a switch
 * @tb: Pointer to the owning domain
 * @parent: Parent device for this switch
 * @route: Route string for this switch
 *
 * Allocates and initializes a switch. Will not upload configuration to
 * the switch. For that you need to call tb_switch_configure()
 * separately. The returned switch should be released by calling
 * tb_switch_put().
 *
 * Return: Pointer to the allocated switch or %NULL in case of failure
 */
struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent,
				  u64 route)
{
	int i;
	int cap;
	struct tb_switch *sw;
	int upstream_port = tb_cfg_get_upstream_port(tb->ctl, route);
	if (upstream_port < 0)
		return NULL;

	sw = kzalloc(sizeof(*sw), GFP_KERNEL);
	if (!sw)
		return NULL;

	sw->tb = tb;
	if (tb_cfg_read(tb->ctl, &sw->config, route, 0, TB_CFG_SWITCH, 0, 5))
		goto err_free_sw_ports;

	tb_info(tb, "current switch config:\n");
	tb_dump_switch(tb, &sw->config);

	/* configure switch */
	sw->config.upstream_port_number = upstream_port;
	sw->config.depth = tb_route_length(route);
	sw->config.route_lo = route;
	sw->config.route_hi = route >> 32;
	sw->config.enabled = 0;

	/* initialize ports */
	sw->ports = kcalloc(sw->config.max_port_number + 1, sizeof(*sw->ports),
			    GFP_KERNEL);
	if (!sw->ports)
		goto err_free_sw_ports;

	for (i = 0; i <= sw->config.max_port_number; i++) {
		/* minimum setup for tb_find_cap and tb_drom_read to work */
		sw->ports[i].sw = sw;
		sw->ports[i].port = i;
	}

	sw->generation = tb_switch_get_generation(sw);

	cap = tb_switch_find_vse_cap(sw, TB_VSE_CAP_PLUG_EVENTS);
	if (cap < 0) {
		tb_sw_warn(sw, "cannot find TB_VSE_CAP_PLUG_EVENTS aborting\n");
		goto err_free_sw_ports;
	}
	sw->cap_plug_events = cap;

	/* Root switch is always authorized */
	if (!route)
		sw->authorized = true;

	device_initialize(&sw->dev);
	sw->dev.parent = parent;
	sw->dev.bus = &tb_bus_type;
	sw->dev.type = &tb_switch_type;
	sw->dev.groups = switch_groups;
	dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));

	return sw;

err_free_sw_ports:
	kfree(sw->ports);
	kfree(sw);

	return NULL;
}

/**
 * tb_switch_alloc_safe_mode() - allocate a switch that is in safe mode
 * @tb: Pointer to the owning domain
 * @parent: Parent device for this switch
 * @route: Route string for this switch
 *
 * This creates a switch in safe mode. This means the switch pretty much
 * lacks all capabilities except DMA configuration port before it is
 * flashed with a valid NVM firmware.
 *
 * The returned switch must be released by calling tb_switch_put().
 *
 * Return: Pointer to the allocated switch or %NULL in case of failure
 */
struct tb_switch *
tb_switch_alloc_safe_mode(struct tb *tb, struct device *parent, u64 route)
{
	struct tb_switch *sw;

	sw = kzalloc(sizeof(*sw), GFP_KERNEL);
	if (!sw)
		return NULL;

	sw->tb = tb;
	sw->config.depth = tb_route_length(route);
	sw->config.route_hi = upper_32_bits(route);
	sw->config.route_lo = lower_32_bits(route);
	sw->safe_mode = true;

	device_initialize(&sw->dev);
	sw->dev.parent = parent;
	sw->dev.bus = &tb_bus_type;
	sw->dev.type = &tb_switch_type;
	sw->dev.groups = switch_groups;
	dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));

	return sw;
}

/**
 * tb_switch_configure() - Uploads configuration to the switch
 * @sw: Switch to configure
 *
 * Call this function before the switch is added to the system. It will
 * upload the configuration to the switch and make it available for the
 * connection manager to use.
 *
 * Return: %0 in case of success and negative errno in case of failure
 */
int tb_switch_configure(struct tb_switch *sw)
{
	struct tb *tb = sw->tb;
	u64 route;
	int ret;

	route = tb_route(sw);
	tb_info(tb,
		"initializing Switch at %#llx (depth: %d, up port: %d)\n",
		route, tb_route_length(route), sw->config.upstream_port_number);

	if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL)
		tb_sw_warn(sw, "unknown switch vendor id %#x\n",
			   sw->config.vendor_id);

	sw->config.enabled = 1;

	/* upload configuration */
	ret = tb_sw_write(sw, 1 + (u32 *)&sw->config, TB_CFG_SWITCH, 1, 3);
	if (ret)
		return ret;

	return tb_plug_events_active(sw, true);
}

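/*
 * Read the fused UUID from the link controller registers when available,
 * otherwise synthesize one from the UID the same way the ICM does.
 */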
static void tb_switch_set_uuid(struct tb_switch *sw)
{
	u32 uuid[4];
	int cap;

	if (sw->uuid)
		return;

	/*
	 * The newer controllers include fused UUID as part of link
	 * controller specific registers
	 */
	cap = tb_switch_find_vse_cap(sw, TB_VSE_CAP_LINK_CONTROLLER);
	if (cap > 0) {
		tb_sw_read(sw, uuid, TB_CFG_SWITCH, cap + 3, 4);
	} else {
		/*
		 * ICM generates UUID based on UID and fills the upper
		 * two words with ones. This is not strictly following
		 * UUID format but we want to be compatible with it so
		 * we do the same here.
		 */
		uuid[0] = sw->uid & 0xffffffff;
		uuid[1] = (sw->uid >> 32) & 0xffffffff;
		uuid[2] = 0xffffffff;
		uuid[3] = 0xffffffff;
	}

	sw->uuid = kmemdup(uuid, sizeof(uuid), GFP_KERNEL);
}

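/*
 * Set up the DMA configuration port used for NVM upgrade (generation 3
 * switches, root switches on generation 2, or any switch in safe mode)
 * and handle the result of a previous flash authentication. Returns
 * -ESHUTDOWN when the switch first has to be power cycled.
 */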
static int tb_switch_add_dma_port(struct tb_switch *sw)
{
	u32 status;
	int ret;

	switch (sw->generation) {
	case 3:
		break;

	case 2:
		/* Only root switch can be upgraded */
		if (tb_route(sw))
			return 0;
		break;

	default:
		/*
		 * DMA port is the only thing available when the switch
		 * is in safe mode.
		 */
		if (!sw->safe_mode)
			return 0;
		break;
	}

	if (sw->no_nvm_upgrade)
		return 0;

	sw->dma_port = dma_port_alloc(sw);
	if (!sw->dma_port)
		return 0;

	/*
	 * Check status of the previous flash authentication. If there
	 * is one we need to power cycle the switch in any case to make
	 * it functional again.
	 */
	ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
	if (ret <= 0)
		return ret;

	if (status) {
		tb_sw_info(sw, "switch flash authentication failed\n");
		tb_switch_set_uuid(sw);
		nvm_set_auth_status(sw, status);
	}

	tb_sw_info(sw, "power cycling the switch now\n");
	dma_port_power_cycle(sw->dma_port);

	/*
	 * We return error here which causes the switch adding failure.
	 * It should appear back after power cycle is complete.
	 */
	return -ESHUTDOWN;
}

/**
 * tb_switch_add() - Add a switch to the domain
 * @sw: Switch to add
 *
 * This is the last step in adding a switch to the domain. It reads
 * identification information from the DROM and initializes ports so that
 * they can be used to connect other switches. The switch will be
 * exposed to userspace when this function successfully returns. To
 * remove and release the switch, call tb_switch_remove().
 *
 * Return: %0 in case of success and negative errno in case of failure
 */
int tb_switch_add(struct tb_switch *sw)
{
	int i, ret;

	/*
	 * Initialize DMA control port now before we read DROM. Recent
	 * host controllers have more complete DROM on NVM that includes
	 * vendor and model identification strings which we then expose
	 * to the userspace. NVM can be accessed through DMA
	 * configuration based mailbox.
	 */
	ret = tb_switch_add_dma_port(sw);
	if (ret)
		return ret;

	if (!sw->safe_mode) {
		/* read drom */
		ret = tb_drom_read(sw);
		if (ret) {
			tb_sw_warn(sw, "tb_eeprom_read_rom failed\n");
			return ret;
		}
		tb_sw_info(sw, "uid: %#llx\n", sw->uid);

		tb_switch_set_uuid(sw);

		for (i = 0; i <= sw->config.max_port_number; i++) {
			if (sw->ports[i].disabled) {
				tb_port_info(&sw->ports[i], "disabled by eeprom\n");
				continue;
			}
			ret = tb_init_port(&sw->ports[i]);
			if (ret)
				return ret;
		}
	}

	ret = device_add(&sw->dev);
	if (ret)
		return ret;

	ret = tb_switch_nvm_add(sw);
	if (ret)
		device_del(&sw->dev);

	return ret;
}

/**
 * tb_switch_remove() - Remove and release a switch
 * @sw: Switch to remove
 *
 * This will remove the switch from the domain and release it after the
 * last reference to it is dropped. If there are switches connected below
 * this switch, they will be removed as well.
 */
void tb_switch_remove(struct tb_switch *sw)
{
	int i;

	/* port 0 is the switch itself and never has a remote */
	for (i = 1; i <= sw->config.max_port_number; i++) {
		if (tb_is_upstream_port(&sw->ports[i]))
			continue;
		if (sw->ports[i].remote)
			tb_switch_remove(sw->ports[i].remote->sw);
		sw->ports[i].remote = NULL;
	}

	if (!sw->is_unplugged)
		tb_plug_events_active(sw, false);

	tb_switch_nvm_remove(sw);
	device_unregister(&sw->dev);
}

/**
 * tb_sw_set_unplugged() - set is_unplugged on switch and downstream switches
 */
void tb_sw_set_unplugged(struct tb_switch *sw)
{
	int i;
	if (sw == sw->tb->root_switch) {
		tb_sw_WARN(sw, "cannot unplug root switch\n");
		return;
	}
	if (sw->is_unplugged) {
		tb_sw_WARN(sw, "is_unplugged already set\n");
		return;
	}
	sw->is_unplugged = true;
	for (i = 0; i <= sw->config.max_port_number; i++) {
		if (!tb_is_upstream_port(&sw->ports[i]) && sw->ports[i].remote)
			tb_sw_set_unplugged(sw->ports[i].remote->sw);
	}
}

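/*
 * After system resume: verify the switch is still the same one (UID),
 * re-upload its configuration, re-enable plug events and then resume or
 * disconnect the switches below it.
 */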
int tb_switch_resume(struct tb_switch *sw)
{
	int i, err;
	tb_sw_info(sw, "resuming switch\n");

	/*
	 * Check for UID of the connected switches except for root
	 * switch which we assume cannot be removed.
	 */
	if (tb_route(sw)) {
		u64 uid;

		err = tb_drom_read_uid_only(sw, &uid);
		if (err) {
			tb_sw_warn(sw, "uid read failed\n");
			return err;
		}
		if (sw->uid != uid) {
			tb_sw_info(sw,
				"changed while suspended (uid %#llx -> %#llx)\n",
				sw->uid, uid);
			return -ENODEV;
		}
	}

	/* upload configuration */
	err = tb_sw_write(sw, 1 + (u32 *) &sw->config, TB_CFG_SWITCH, 1, 3);
	if (err)
		return err;

	err = tb_plug_events_active(sw, true);
	if (err)
		return err;

	/* check for surviving downstream switches */
	for (i = 1; i <= sw->config.max_port_number; i++) {
		struct tb_port *port = &sw->ports[i];
		if (tb_is_upstream_port(port))
			continue;
		if (!port->remote)
			continue;
		if (tb_wait_for_port(port, true) <= 0
			|| tb_switch_resume(port->remote->sw)) {
			tb_port_warn(port,
				     "lost during suspend, disconnecting\n");
			tb_sw_set_unplugged(port->remote->sw);
		}
	}
	return 0;
}

void tb_switch_suspend(struct tb_switch *sw)
{
	int i, err;
	err = tb_plug_events_active(sw, false);
	if (err)
		return;

	for (i = 1; i <= sw->config.max_port_number; i++) {
		if (!tb_is_upstream_port(&sw->ports[i]) && sw->ports[i].remote)
			tb_switch_suspend(sw->ports[i].remote->sw);
	}
	/*
	 * TODO: invoke tb_cfg_prepare_to_sleep here? does not seem to have any
	 * effect?
	 */
}

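/* Lookup key used with bus_find_device() to find a switch by UUID or by link and depth */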
struct tb_sw_lookup {
	struct tb *tb;
	u8 link;
	u8 depth;
	const uuid_t *uuid;
};

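/* Match callback for bus_find_device(): match by UUID if given, otherwise by link and depth */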
static int tb_switch_match(struct device *dev, void *data)
{
	struct tb_switch *sw = tb_to_switch(dev);
	struct tb_sw_lookup *lookup = data;

	if (!sw)
		return 0;
	if (sw->tb != lookup->tb)
		return 0;

	if (lookup->uuid)
		return !memcmp(sw->uuid, lookup->uuid, sizeof(*lookup->uuid));

	/* Root switch is matched only by depth */
	if (!lookup->depth)
		return !sw->depth;

	return sw->link == lookup->link && sw->depth == lookup->depth;
}

/**
 * tb_switch_find_by_link_depth() - Find switch by link and depth
 * @tb: Domain the switch belongs to
 * @link: Link number the switch is connected to
 * @depth: Depth of the switch in the link
 *
 * Returned switch has reference count increased so the caller needs to
 * call tb_switch_put() when done with the switch.
 */
struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link, u8 depth)
{
	struct tb_sw_lookup lookup;
	struct device *dev;

	memset(&lookup, 0, sizeof(lookup));
	lookup.tb = tb;
	lookup.link = link;
	lookup.depth = depth;

	dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
	if (dev)
		return tb_to_switch(dev);

	return NULL;
}

/**
 * tb_switch_find_by_uuid() - Find switch by UUID
 * @tb: Domain the switch belongs to
 * @uuid: UUID to look for
 *
 * Returned switch has reference count increased so the caller needs to
 * call tb_switch_put() when done with the switch.
 */
struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid)
{
	struct tb_sw_lookup lookup;
	struct device *dev;

	memset(&lookup, 0, sizeof(lookup));
	lookup.tb = tb;
	lookup.uuid = uuid;

	dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
	if (dev)
		return tb_to_switch(dev);

	return NULL;
}

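/* Release the IDA used for switch NVM device ids */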
void tb_switch_exit(void)
{
	ida_destroy(&nvm_ida);
}