]> git.proxmox.com Git - mirror_ubuntu-hirsute-kernel.git/blob - drivers/thunderbolt/usb4.c
7bedebfef4f647054bd490bed99cb0055c1e632b
[mirror_ubuntu-hirsute-kernel.git] / drivers / thunderbolt / usb4.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * USB4 specific functionality
4 *
5 * Copyright (C) 2019, Intel Corporation
6 * Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
7 * Rajmohan Mani <rajmohan.mani@intel.com>
8 */
9
10 #include <linux/delay.h>
11 #include <linux/ktime.h>
12
13 #include "sb_regs.h"
14 #include "tb.h"
15
16 #define USB4_DATA_DWORDS 16
17 #define USB4_DATA_RETRIES 3
18
19 enum usb4_switch_op {
20 USB4_SWITCH_OP_QUERY_DP_RESOURCE = 0x10,
21 USB4_SWITCH_OP_ALLOC_DP_RESOURCE = 0x11,
22 USB4_SWITCH_OP_DEALLOC_DP_RESOURCE = 0x12,
23 USB4_SWITCH_OP_NVM_WRITE = 0x20,
24 USB4_SWITCH_OP_NVM_AUTH = 0x21,
25 USB4_SWITCH_OP_NVM_READ = 0x22,
26 USB4_SWITCH_OP_NVM_SET_OFFSET = 0x23,
27 USB4_SWITCH_OP_DROM_READ = 0x24,
28 USB4_SWITCH_OP_NVM_SECTOR_SIZE = 0x25,
29 };
30
31 enum usb4_sb_target {
32 USB4_SB_TARGET_ROUTER,
33 USB4_SB_TARGET_PARTNER,
34 USB4_SB_TARGET_RETIMER,
35 };
36
37 #define USB4_NVM_READ_OFFSET_MASK GENMASK(23, 2)
38 #define USB4_NVM_READ_OFFSET_SHIFT 2
39 #define USB4_NVM_READ_LENGTH_MASK GENMASK(27, 24)
40 #define USB4_NVM_READ_LENGTH_SHIFT 24
41
42 #define USB4_NVM_SET_OFFSET_MASK USB4_NVM_READ_OFFSET_MASK
43 #define USB4_NVM_SET_OFFSET_SHIFT USB4_NVM_READ_OFFSET_SHIFT
44
45 #define USB4_DROM_ADDRESS_MASK GENMASK(14, 2)
46 #define USB4_DROM_ADDRESS_SHIFT 2
47 #define USB4_DROM_SIZE_MASK GENMASK(19, 15)
48 #define USB4_DROM_SIZE_SHIFT 15
49
50 #define USB4_NVM_SECTOR_SIZE_MASK GENMASK(23, 0)
51
52 typedef int (*read_block_fn)(void *, unsigned int, void *, size_t);
53 typedef int (*write_block_fn)(void *, const void *, size_t);
54
/*
 * Polls the router config space register at @offset until the bits
 * selected by @bit read back equal to @value, or @timeout_msec elapses.
 * Returns %0 on success, %-ETIMEDOUT on timeout and negative errno if
 * the config read itself fails.
 */
static int usb4_switch_wait_for_bit(struct tb_switch *sw, u32 offset, u32 bit,
				    u32 value, int timeout_msec)
{
	ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);

	do {
		u32 val;
		int ret;

		ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset, 1);
		if (ret)
			return ret;

		if ((val & bit) == value)
			return 0;

		/* Small back-off between polls */
		usleep_range(50, 100);
	} while (ktime_before(ktime_get(), timeout));

	return -ETIMEDOUT;
}
76
/*
 * Reads @dwords of router operation result data from the ROUTER_CS_9
 * data area into @data. At most %USB4_DATA_DWORDS can be transferred.
 */
static int usb4_switch_op_read_data(struct tb_switch *sw, void *data,
				    size_t dwords)
{
	if (dwords > USB4_DATA_DWORDS)
		return -EINVAL;

	return tb_sw_read(sw, data, TB_CFG_SWITCH, ROUTER_CS_9, dwords);
}
85
/*
 * Writes @dwords of router operation input data from @data to the
 * ROUTER_CS_9 data area. At most %USB4_DATA_DWORDS can be transferred.
 */
static int usb4_switch_op_write_data(struct tb_switch *sw, const void *data,
				     size_t dwords)
{
	if (dwords > USB4_DATA_DWORDS)
		return -EINVAL;

	return tb_sw_write(sw, data, TB_CFG_SWITCH, ROUTER_CS_9, dwords);
}
94
/* Reads the router operation metadata register (ROUTER_CS_25) */
static int usb4_switch_op_read_metadata(struct tb_switch *sw, u32 *metadata)
{
	return tb_sw_read(sw, metadata, TB_CFG_SWITCH, ROUTER_CS_25, 1);
}
99
/* Writes the router operation metadata register (ROUTER_CS_25) */
static int usb4_switch_op_write_metadata(struct tb_switch *sw, u32 metadata)
{
	return tb_sw_write(sw, &metadata, TB_CFG_SWITCH, ROUTER_CS_25, 1);
}
104
105 static int usb4_do_read_data(u16 address, void *buf, size_t size,
106 read_block_fn read_block, void *read_block_data)
107 {
108 unsigned int retries = USB4_DATA_RETRIES;
109 unsigned int offset;
110
111 offset = address & 3;
112 address = address & ~3;
113
114 do {
115 size_t nbytes = min_t(size_t, size, USB4_DATA_DWORDS * 4);
116 unsigned int dwaddress, dwords;
117 u8 data[USB4_DATA_DWORDS * 4];
118 int ret;
119
120 dwaddress = address / 4;
121 dwords = ALIGN(nbytes, 4) / 4;
122
123 ret = read_block(read_block_data, dwaddress, data, dwords);
124 if (ret) {
125 if (ret != -ENODEV && retries--)
126 continue;
127 return ret;
128 }
129
130 memcpy(buf, data + offset, nbytes);
131
132 size -= nbytes;
133 address += nbytes;
134 buf += nbytes;
135 } while (size > 0);
136
137 return 0;
138 }
139
/*
 * Writes @size bytes starting at byte @address using @write_next_block,
 * which transfers whole dwords. A block write that fails with
 * %-ETIMEDOUT is retried up to %USB4_DATA_RETRIES times; once retries
 * are exhausted the timeout is reported as %-EIO.
 */
static int usb4_do_write_data(unsigned int address, const void *buf, size_t size,
			      write_block_fn write_next_block, void *write_block_data)
{
	unsigned int retries = USB4_DATA_RETRIES;
	unsigned int offset;

	/* Transfers are dword based; remember the sub-dword start offset */
	offset = address & 3;
	address = address & ~3;

	do {
		u32 nbytes = min_t(u32, size, USB4_DATA_DWORDS * 4);
		u8 data[USB4_DATA_DWORDS * 4];
		int ret;

		/*
		 * NOTE(review): assumes callers pass dword-aligned
		 * addresses; with offset != 0 this copy could write up
		 * to 3 bytes past data[] — confirm against callers.
		 */
		memcpy(data + offset, buf, nbytes);

		ret = write_next_block(write_block_data, data, nbytes / 4);
		if (ret) {
			if (ret == -ETIMEDOUT) {
				if (retries--)
					continue;
				ret = -EIO;
			}
			return ret;
		}

		size -= nbytes;
		address += nbytes;
		buf += nbytes;
	} while (size > 0);

	return 0;
}
173
/*
 * Executes USB4 router operation @opcode: writes the opcode with the
 * operation valid (OV) bit to ROUTER_CS_26, waits for the router to
 * clear OV (up to 500 ms), then checks the result. Returns
 * %-EOPNOTSUPP if the router flagged the opcode as not supported
 * (ONS). On success the completion code from the register is stored in
 * @status when it is non-NULL.
 */
static int usb4_switch_op(struct tb_switch *sw, u16 opcode, u8 *status)
{
	u32 val;
	int ret;

	val = opcode | ROUTER_CS_26_OV;
	ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_26, 1);
	if (ret)
		return ret;

	/* Router clears OV when the operation completes */
	ret = usb4_switch_wait_for_bit(sw, ROUTER_CS_26, ROUTER_CS_26_OV, 0, 500);
	if (ret)
		return ret;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_26, 1);
	if (ret)
		return ret;

	if (val & ROUTER_CS_26_ONS)
		return -EOPNOTSUPP;

	if (status)
		*status = (val & ROUTER_CS_26_STATUS_MASK) >>
			ROUTER_CS_26_STATUS_SHIFT;
	return 0;
}
200
/*
 * Checks whether this router (or any of its connected USB4 downstream
 * ports) signalled a wake, and if so reports a PM wakeup event for the
 * router device. Does nothing when the device may not wake the system.
 */
static void usb4_switch_check_wakes(struct tb_switch *sw)
{
	struct tb_port *port;
	bool wakeup = false;
	u32 val;

	if (!device_may_wakeup(&sw->dev))
		return;

	/* PCIe/USB3 wake status only exists on device routers (non-zero route) */
	if (tb_route(sw)) {
		if (tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_6, 1))
			return;

		tb_sw_dbg(sw, "PCIe wake: %s, USB3 wake: %s\n",
			  (val & ROUTER_CS_6_WOPS) ? "yes" : "no",
			  (val & ROUTER_CS_6_WOUS) ? "yes" : "no");

		wakeup = val & (ROUTER_CS_6_WOPS | ROUTER_CS_6_WOUS);
	}

	/* Check for any connected downstream ports for USB4 wake */
	tb_switch_for_each_port(sw, port) {
		if (!tb_port_has_remote(port))
			continue;

		if (tb_port_read(port, &val, TB_CFG_PORT,
				 port->cap_usb4 + PORT_CS_18, 1))
			break;

		tb_port_dbg(port, "USB4 wake: %s\n",
			    (val & PORT_CS_18_WOU4S) ? "yes" : "no");

		if (val & PORT_CS_18_WOU4S)
			wakeup = true;
	}

	if (wakeup)
		pm_wakeup_event(&sw->dev, 0);
}
240
241 static bool link_is_usb4(struct tb_port *port)
242 {
243 u32 val;
244
245 if (!port->cap_usb4)
246 return false;
247
248 if (tb_port_read(port, &val, TB_CFG_PORT,
249 port->cap_usb4 + PORT_CS_18, 1))
250 return false;
251
252 return !(val & PORT_CS_18_TCM);
253 }
254
255 /**
256 * usb4_switch_setup() - Additional setup for USB4 device
257 * @sw: USB4 router to setup
258 *
259 * USB4 routers need additional settings in order to enable all the
260 * tunneling. This function enables USB and PCIe tunneling if it can be
261 * enabled (e.g the parent switch also supports them). If USB tunneling
262 * is not available for some reason (like that there is Thunderbolt 3
263 * switch upstream) then the internal xHCI controller is enabled
264 * instead.
265 */
int usb4_switch_setup(struct tb_switch *sw)
{
	struct tb_port *downstream_port;
	struct tb_switch *parent;
	bool tbt3, xhci;
	u32 val = 0;
	int ret;

	usb4_switch_check_wakes(sw);

	/* Host routers (route 0) need no tunneling setup here */
	if (!tb_route(sw))
		return 0;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_6, 1);
	if (ret)
		return ret;

	/* Record whether the upstream link runs in USB4 or TBT3 mode */
	parent = tb_switch_parent(sw);
	downstream_port = tb_port_at(tb_route(sw), parent);
	sw->link_usb4 = link_is_usb4(downstream_port);
	tb_sw_dbg(sw, "link: %s\n", sw->link_usb4 ? "USB4" : "TBT3");

	xhci = val & ROUTER_CS_6_HCI;
	tbt3 = !(val & ROUTER_CS_6_TNS);

	tb_sw_dbg(sw, "TBT3 support: %s, xHCI: %s\n",
		  tbt3 ? "yes" : "no", xhci ? "yes" : "no");

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
	if (ret)
		return ret;

	/* Prefer USB3 tunneling over the internal xHCI when possible */
	if (sw->link_usb4 && tb_switch_find_port(parent, TB_TYPE_USB3_DOWN)) {
		val |= ROUTER_CS_5_UTO;
		xhci = false;
	}

	/* Only enable PCIe tunneling if the parent router supports it */
	if (tb_switch_find_port(parent, TB_TYPE_PCIE_DOWN)) {
		val |= ROUTER_CS_5_PTO;
		/*
		 * xHCI can be enabled if PCIe tunneling is supported
		 * and the parent does not have any USB3 dowstream
		 * adapters (so we cannot do USB 3.x tunneling).
		 */
		if (xhci)
			val |= ROUTER_CS_5_HCO;
	}

	/* TBT3 supported by the CM */
	val |= ROUTER_CS_5_C3S;
	/* Tunneling configuration is ready now */
	val |= ROUTER_CS_5_CV;

	ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
	if (ret)
		return ret;

	/* Wait for the router to acknowledge the configuration (CR bit) */
	return usb4_switch_wait_for_bit(sw, ROUTER_CS_6, ROUTER_CS_6_CR,
					ROUTER_CS_6_CR, 50);
}
327
328 /**
329 * usb4_switch_read_uid() - Read UID from USB4 router
330 * @sw: USB4 router
331 * @uid: UID is stored here
332 *
333 * Reads 64-bit UID from USB4 router config space.
334 */
int usb4_switch_read_uid(struct tb_switch *sw, u64 *uid)
{
	/* UID occupies two consecutive dwords starting at ROUTER_CS_7 */
	return tb_sw_read(sw, uid, TB_CFG_SWITCH, ROUTER_CS_7, 2);
}
339
/*
 * read_block_fn for DROM: programs address/size into the metadata
 * register, runs the DROM_READ router operation and copies the result
 * data into @buf. A non-zero operation status is reported as %-EIO.
 */
static int usb4_switch_drom_read_block(void *data,
				       unsigned int dwaddress, void *buf,
				       size_t dwords)
{
	struct tb_switch *sw = data;
	u8 status = 0;
	u32 metadata;
	int ret;

	metadata = (dwords << USB4_DROM_SIZE_SHIFT) & USB4_DROM_SIZE_MASK;
	metadata |= (dwaddress << USB4_DROM_ADDRESS_SHIFT) &
		USB4_DROM_ADDRESS_MASK;

	ret = usb4_switch_op_write_metadata(sw, metadata);
	if (ret)
		return ret;

	ret = usb4_switch_op(sw, USB4_SWITCH_OP_DROM_READ, &status);
	if (ret)
		return ret;

	if (status)
		return -EIO;

	return usb4_switch_op_read_data(sw, buf, dwords);
}
366
367 /**
368 * usb4_switch_drom_read() - Read arbitrary bytes from USB4 router DROM
369 * @sw: USB4 router
370 * @address: Byte address inside DROM to start reading
371 * @buf: Buffer where the DROM content is stored
372 * @size: Number of bytes to read from DROM
373 *
374 * Uses USB4 router operations to read router DROM. For devices this
375 * should always work but for hosts it may return %-EOPNOTSUPP in which
376 * case the host router does not have DROM.
377 */
int usb4_switch_drom_read(struct tb_switch *sw, unsigned int address, void *buf,
			  size_t size)
{
	/* Generic chunked-read helper with the DROM block reader */
	return usb4_do_read_data(address, buf, size,
				 usb4_switch_drom_read_block, sw);
}
384
385 /**
386 * usb4_switch_lane_bonding_possible() - Are conditions met for lane bonding
387 * @sw: USB4 router
388 *
389 * Checks whether conditions are met so that lane bonding can be
390 * established with the upstream router. Call only for device routers.
391 */
392 bool usb4_switch_lane_bonding_possible(struct tb_switch *sw)
393 {
394 struct tb_port *up;
395 int ret;
396 u32 val;
397
398 up = tb_upstream_port(sw);
399 ret = tb_port_read(up, &val, TB_CFG_PORT, up->cap_usb4 + PORT_CS_18, 1);
400 if (ret)
401 return false;
402
403 return !!(val & PORT_CS_18_BE);
404 }
405
406 /**
407 * usb4_switch_set_wake() - Enabled/disable wake
408 * @sw: USB4 router
409 * @flags: Wakeup flags (%0 to disable)
410 *
411 * Enables/disables router to wake up from sleep.
412 */
int usb4_switch_set_wake(struct tb_switch *sw, unsigned int flags)
{
	struct tb_port *port;
	u64 route = tb_route(sw);
	u32 val;
	int ret;

	/*
	 * Enable wakes coming from all USB4 downstream ports (from
	 * child routers). For device routers do this also for the
	 * upstream USB4 port.
	 */
	tb_switch_for_each_port(sw, port) {
		/* Only lane adapters carry the wake bits */
		if (!tb_port_is_null(port))
			continue;
		/* On the host router skip the upstream port */
		if (!route && tb_is_upstream_port(port))
			continue;
		if (!port->cap_usb4)
			continue;

		ret = tb_port_read(port, &val, TB_CFG_PORT,
				   port->cap_usb4 + PORT_CS_19, 1);
		if (ret)
			return ret;

		/* Clear all wake-enable bits, then set requested ones */
		val &= ~(PORT_CS_19_WOC | PORT_CS_19_WOD | PORT_CS_19_WOU4);

		if (flags & TB_WAKE_ON_CONNECT)
			val |= PORT_CS_19_WOC;
		if (flags & TB_WAKE_ON_DISCONNECT)
			val |= PORT_CS_19_WOD;
		if (flags & TB_WAKE_ON_USB4)
			val |= PORT_CS_19_WOU4;

		ret = tb_port_write(port, &val, TB_CFG_PORT,
				    port->cap_usb4 + PORT_CS_19, 1);
		if (ret)
			return ret;
	}

	/*
	 * Enable wakes from PCIe and USB 3.x on this router. Only
	 * needed for device routers.
	 */
	if (route) {
		ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
		if (ret)
			return ret;

		val &= ~(ROUTER_CS_5_WOP | ROUTER_CS_5_WOU);
		if (flags & TB_WAKE_ON_USB3)
			val |= ROUTER_CS_5_WOU;
		if (flags & TB_WAKE_ON_PCIE)
			val |= ROUTER_CS_5_WOP;

		ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
		if (ret)
			return ret;
	}

	return 0;
}
475
476 /**
477 * usb4_switch_set_sleep() - Prepare the router to enter sleep
478 * @sw: USB4 router
479 *
480 * Sets sleep bit for the router. Returns when the router sleep ready
481 * bit has been asserted.
482 */
int usb4_switch_set_sleep(struct tb_switch *sw)
{
	int ret;
	u32 val;

	/* Set sleep bit and wait for sleep ready to be asserted */
	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
	if (ret)
		return ret;

	val |= ROUTER_CS_5_SLP;

	ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
	if (ret)
		return ret;

	/* Router asserts SLPR once it is ready to enter sleep */
	return usb4_switch_wait_for_bit(sw, ROUTER_CS_6, ROUTER_CS_6_SLPR,
					ROUTER_CS_6_SLPR, 500);
}
502
503 /**
504 * usb4_switch_nvm_sector_size() - Return router NVM sector size
505 * @sw: USB4 router
506 *
507 * If the router supports NVM operations this function returns the NVM
508 * sector size in bytes. If NVM operations are not supported returns
509 * %-EOPNOTSUPP.
510 */
int usb4_switch_nvm_sector_size(struct tb_switch *sw)
{
	u32 metadata;
	u8 status;
	int ret;

	ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_SECTOR_SIZE, &status);
	if (ret)
		return ret;

	/* Status 0x2 means the operation is not supported by this router */
	if (status)
		return status == 0x2 ? -EOPNOTSUPP : -EIO;

	ret = usb4_switch_op_read_metadata(sw, &metadata);
	if (ret)
		return ret;

	/* Sector size in bytes is returned in the metadata register */
	return metadata & USB4_NVM_SECTOR_SIZE_MASK;
}
530
/*
 * read_block_fn for NVM: programs dword address/length into the
 * metadata register, runs the NVM_READ router operation and copies the
 * result data into @buf. A non-zero operation status is %-EIO.
 */
static int usb4_switch_nvm_read_block(void *data,
	unsigned int dwaddress, void *buf, size_t dwords)
{
	struct tb_switch *sw = data;
	u8 status = 0;
	u32 metadata;
	int ret;

	metadata = (dwords << USB4_NVM_READ_LENGTH_SHIFT) &
		   USB4_NVM_READ_LENGTH_MASK;
	metadata |= (dwaddress << USB4_NVM_READ_OFFSET_SHIFT) &
		   USB4_NVM_READ_OFFSET_MASK;

	ret = usb4_switch_op_write_metadata(sw, metadata);
	if (ret)
		return ret;

	ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_READ, &status);
	if (ret)
		return ret;

	if (status)
		return -EIO;

	return usb4_switch_op_read_data(sw, buf, dwords);
}
557
558 /**
559 * usb4_switch_nvm_read() - Read arbitrary bytes from router NVM
560 * @sw: USB4 router
561 * @address: Starting address in bytes
562 * @buf: Read data is placed here
563 * @size: How many bytes to read
564 *
565 * Reads NVM contents of the router. If NVM is not supported returns
566 * %-EOPNOTSUPP.
567 */
int usb4_switch_nvm_read(struct tb_switch *sw, unsigned int address, void *buf,
			 size_t size)
{
	/* Generic chunked-read helper with the NVM block reader */
	return usb4_do_read_data(address, buf, size,
				 usb4_switch_nvm_read_block, sw);
}
574
/*
 * Programs the NVM write destination offset (in dwords) via the
 * NVM_SET_OFFSET router operation. Must be called before streaming
 * data with NVM_WRITE. Non-zero status is reported as %-EIO.
 */
static int usb4_switch_nvm_set_offset(struct tb_switch *sw,
				      unsigned int address)
{
	u32 metadata, dwaddress;
	u8 status = 0;
	int ret;

	dwaddress = address / 4;
	metadata = (dwaddress << USB4_NVM_SET_OFFSET_SHIFT) &
		   USB4_NVM_SET_OFFSET_MASK;

	ret = usb4_switch_op_write_metadata(sw, metadata);
	if (ret)
		return ret;

	ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_SET_OFFSET, &status);
	if (ret)
		return ret;

	return status ? -EIO : 0;
}
596
/*
 * write_block_fn for NVM: stages @dwords of data into the data
 * registers and runs the NVM_WRITE router operation. The write offset
 * advances automatically after each block (set initially via
 * usb4_switch_nvm_set_offset()).
 */
static int usb4_switch_nvm_write_next_block(void *data, const void *buf,
					    size_t dwords)
{
	struct tb_switch *sw = data;
	u8 status;
	int ret;

	ret = usb4_switch_op_write_data(sw, buf, dwords);
	if (ret)
		return ret;

	ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_WRITE, &status);
	if (ret)
		return ret;

	return status ? -EIO : 0;
}
614
615 /**
616 * usb4_switch_nvm_write() - Write to the router NVM
617 * @sw: USB4 router
618 * @address: Start address where to write in bytes
619 * @buf: Pointer to the data to write
620 * @size: Size of @buf in bytes
621 *
622 * Writes @buf to the router NVM using USB4 router operations. If NVM
623 * write is not supported returns %-EOPNOTSUPP.
624 */
int usb4_switch_nvm_write(struct tb_switch *sw, unsigned int address,
			  const void *buf, size_t size)
{
	int ret;

	/* Program the destination offset first, then stream the data */
	ret = usb4_switch_nvm_set_offset(sw, address);
	if (ret)
		return ret;

	return usb4_do_write_data(address, buf, size,
				  usb4_switch_nvm_write_next_block, sw);
}
637
638 /**
639 * usb4_switch_nvm_authenticate() - Authenticate new NVM
640 * @sw: USB4 router
641 *
642 * After the new NVM has been written via usb4_switch_nvm_write(), this
643 * function triggers NVM authentication process. The router gets power
644 * cycled and if the authentication is successful the new NVM starts
645 * running. In case of failure returns negative errno.
646 *
647 * The caller should call usb4_switch_nvm_authenticate_status() to read
648 * the status of the authentication after power cycle. It should be the
649 * first router operation to avoid the status being lost.
650 */
651 int usb4_switch_nvm_authenticate(struct tb_switch *sw)
652 {
653 int ret;
654
655 ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_AUTH, NULL);
656 switch (ret) {
657 /*
658 * The router is power cycled once NVM_AUTH is started so it is
659 * expected to get any of the following errors back.
660 */
661 case -EACCES:
662 case -ENOTCONN:
663 case -ETIMEDOUT:
664 return 0;
665
666 default:
667 return ret;
668 }
669 }
670
671 /**
672 * usb4_switch_nvm_authenticate_status() - Read status of last NVM authenticate
673 * @sw: USB4 router
674 * @status: Status code of the operation
675 *
676 * The function checks if there is status available from the last NVM
677 * authenticate router operation. If there is status then %0 is returned
678 * and the status code is placed in @status. Returns negative errno in case
679 * of failure.
680 *
681 * Must be called before any other router operation.
682 */
int usb4_switch_nvm_authenticate_status(struct tb_switch *sw, u32 *status)
{
	u16 opcode;
	u32 val;
	int ret;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_26, 1);
	if (ret)
		return ret;

	/* Check that the opcode is correct */
	opcode = val & ROUTER_CS_26_OPCODE_MASK;
	if (opcode == USB4_SWITCH_OP_NVM_AUTH) {
		/* OV still set means the operation is still in flight */
		if (val & ROUTER_CS_26_OV)
			return -EBUSY;
		if (val & ROUTER_CS_26_ONS)
			return -EOPNOTSUPP;

		*status = (val & ROUTER_CS_26_STATUS_MASK) >>
			ROUTER_CS_26_STATUS_SHIFT;
	} else {
		/* Last operation was not NVM_AUTH; no status to report */
		*status = 0;
	}

	return 0;
}
709
710 /**
711 * usb4_switch_query_dp_resource() - Query availability of DP IN resource
712 * @sw: USB4 router
713 * @in: DP IN adapter
714 *
715 * For DP tunneling this function can be used to query availability of
716 * DP IN resource. Returns true if the resource is available for DP
717 * tunneling, false otherwise.
718 */
bool usb4_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in)
{
	u8 status;
	int ret;

	/* Metadata carries the DP IN adapter number to query */
	ret = usb4_switch_op_write_metadata(sw, in->port);
	if (ret)
		return false;

	ret = usb4_switch_op(sw, USB4_SWITCH_OP_QUERY_DP_RESOURCE, &status);
	/*
	 * If DP resource allocation is not supported assume it is
	 * always available.
	 */
	if (ret == -EOPNOTSUPP)
		return true;
	else if (ret)
		return false;

	/* Zero status means the resource is available */
	return !status;
}
740
741 /**
742 * usb4_switch_alloc_dp_resource() - Allocate DP IN resource
743 * @sw: USB4 router
744 * @in: DP IN adapter
745 *
746 * Allocates DP IN resource for DP tunneling using USB4 router
747 * operations. If the resource was allocated returns %0. Otherwise
748 * returns negative errno, in particular %-EBUSY if the resource is
749 * already allocated.
750 */
int usb4_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
{
	u8 status;
	int ret;

	/* Metadata carries the DP IN adapter number to allocate */
	ret = usb4_switch_op_write_metadata(sw, in->port);
	if (ret)
		return ret;

	ret = usb4_switch_op(sw, USB4_SWITCH_OP_ALLOC_DP_RESOURCE, &status);
	/* If the operation is not supported, treat allocation as granted */
	if (ret == -EOPNOTSUPP)
		return 0;
	else if (ret)
		return ret;

	return status ? -EBUSY : 0;
}
768
769 /**
770 * usb4_switch_dealloc_dp_resource() - Releases allocated DP IN resource
771 * @sw: USB4 router
772 * @in: DP IN adapter
773 *
774 * Releases the previously allocated DP IN resource.
775 */
int usb4_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
{
	u8 status;
	int ret;

	/* Metadata carries the DP IN adapter number to release */
	ret = usb4_switch_op_write_metadata(sw, in->port);
	if (ret)
		return ret;

	ret = usb4_switch_op(sw, USB4_SWITCH_OP_DEALLOC_DP_RESOURCE, &status);
	/* If the operation is not supported there is nothing to release */
	if (ret == -EOPNOTSUPP)
		return 0;
	else if (ret)
		return ret;

	return status ? -EIO : 0;
}
793
794 static int usb4_port_idx(const struct tb_switch *sw, const struct tb_port *port)
795 {
796 struct tb_port *p;
797 int usb4_idx = 0;
798
799 /* Assume port is primary */
800 tb_switch_for_each_port(sw, p) {
801 if (!tb_port_is_null(p))
802 continue;
803 if (tb_is_upstream_port(p))
804 continue;
805 if (!p->link_nr) {
806 if (p == port)
807 break;
808 usb4_idx++;
809 }
810 }
811
812 return usb4_idx;
813 }
814
815 /**
816 * usb4_switch_map_pcie_down() - Map USB4 port to a PCIe downstream adapter
817 * @sw: USB4 router
818 * @port: USB4 port
819 *
820 * USB4 routers have direct mapping between USB4 ports and PCIe
821 * downstream adapters where the PCIe topology is extended. This
822 * function returns the corresponding downstream PCIe adapter or %NULL
823 * if no such mapping was possible.
824 */
825 struct tb_port *usb4_switch_map_pcie_down(struct tb_switch *sw,
826 const struct tb_port *port)
827 {
828 int usb4_idx = usb4_port_idx(sw, port);
829 struct tb_port *p;
830 int pcie_idx = 0;
831
832 /* Find PCIe down port matching usb4_port */
833 tb_switch_for_each_port(sw, p) {
834 if (!tb_port_is_pcie_down(p))
835 continue;
836
837 if (pcie_idx == usb4_idx)
838 return p;
839
840 pcie_idx++;
841 }
842
843 return NULL;
844 }
845
846 /**
847 * usb4_switch_map_usb3_down() - Map USB4 port to a USB3 downstream adapter
848 * @sw: USB4 router
849 * @port: USB4 port
850 *
851 * USB4 routers have direct mapping between USB4 ports and USB 3.x
852 * downstream adapters where the USB 3.x topology is extended. This
853 * function returns the corresponding downstream USB 3.x adapter or
854 * %NULL if no such mapping was possible.
855 */
856 struct tb_port *usb4_switch_map_usb3_down(struct tb_switch *sw,
857 const struct tb_port *port)
858 {
859 int usb4_idx = usb4_port_idx(sw, port);
860 struct tb_port *p;
861 int usb_idx = 0;
862
863 /* Find USB3 down port matching usb4_port */
864 tb_switch_for_each_port(sw, p) {
865 if (!tb_port_is_usb3_down(p))
866 continue;
867
868 if (usb_idx == usb4_idx)
869 return p;
870
871 usb_idx++;
872 }
873
874 return NULL;
875 }
876
877 /**
878 * usb4_port_unlock() - Unlock USB4 downstream port
879 * @port: USB4 port to unlock
880 *
881 * Unlocks USB4 downstream port so that the connection manager can
882 * access the router below this port.
883 */
int usb4_port_unlock(struct tb_port *port)
{
	int ret;
	u32 val;

	ret = tb_port_read(port, &val, TB_CFG_PORT, ADP_CS_4, 1);
	if (ret)
		return ret;

	/* Clearing the lock bit lets the CM access the downstream router */
	val &= ~ADP_CS_4_LCK;
	return tb_port_write(port, &val, TB_CFG_PORT, ADP_CS_4, 1);
}
896
897 static int usb4_port_set_configured(struct tb_port *port, bool configured)
898 {
899 int ret;
900 u32 val;
901
902 if (!port->cap_usb4)
903 return -EINVAL;
904
905 ret = tb_port_read(port, &val, TB_CFG_PORT,
906 port->cap_usb4 + PORT_CS_19, 1);
907 if (ret)
908 return ret;
909
910 if (configured)
911 val |= PORT_CS_19_PC;
912 else
913 val &= ~PORT_CS_19_PC;
914
915 return tb_port_write(port, &val, TB_CFG_PORT,
916 port->cap_usb4 + PORT_CS_19, 1);
917 }
918
919 /**
920 * usb4_port_configure() - Set USB4 port configured
921 * @port: USB4 router
922 *
923 * Sets the USB4 link to be configured for power management purposes.
924 */
int usb4_port_configure(struct tb_port *port)
{
	/* Thin wrapper: sets the Port Configured bit */
	return usb4_port_set_configured(port, true);
}
929
930 /**
931 * usb4_port_unconfigure() - Set USB4 port unconfigured
932 * @port: USB4 router
933 *
934 * Sets the USB4 link to be unconfigured for power management purposes.
935 */
void usb4_port_unconfigure(struct tb_port *port)
{
	/* Thin wrapper: clears the Port Configured bit; errors ignored */
	usb4_port_set_configured(port, false);
}
940
941 static int usb4_set_xdomain_configured(struct tb_port *port, bool configured)
942 {
943 int ret;
944 u32 val;
945
946 if (!port->cap_usb4)
947 return -EINVAL;
948
949 ret = tb_port_read(port, &val, TB_CFG_PORT,
950 port->cap_usb4 + PORT_CS_19, 1);
951 if (ret)
952 return ret;
953
954 if (configured)
955 val |= PORT_CS_19_PID;
956 else
957 val &= ~PORT_CS_19_PID;
958
959 return tb_port_write(port, &val, TB_CFG_PORT,
960 port->cap_usb4 + PORT_CS_19, 1);
961 }
962
963 /**
964 * usb4_port_configure_xdomain() - Configure port for XDomain
965 * @port: USB4 port connected to another host
966 *
967 * Marks the USB4 port as being connected to another host. Returns %0 in
968 * success and negative errno in failure.
969 */
int usb4_port_configure_xdomain(struct tb_port *port)
{
	/* Thin wrapper: marks the port as connected to another domain */
	return usb4_set_xdomain_configured(port, true);
}
974
975 /**
976 * usb4_port_unconfigure_xdomain() - Unconfigure port for XDomain
977 * @port: USB4 port that was connected to another host
978 *
979 * Clears USB4 port from being marked as XDomain.
980 */
void usb4_port_unconfigure_xdomain(struct tb_port *port)
{
	/* Thin wrapper: clears the XDomain marking; errors ignored */
	usb4_set_xdomain_configured(port, false);
}
985
/*
 * Polls the port config space register at @offset until the bits
 * selected by @bit read back equal to @value, or @timeout_msec
 * elapses. Port-space counterpart of usb4_switch_wait_for_bit().
 */
static int usb4_port_wait_for_bit(struct tb_port *port, u32 offset, u32 bit,
				  u32 value, int timeout_msec)
{
	ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);

	do {
		u32 val;
		int ret;

		ret = tb_port_read(port, &val, TB_CFG_PORT, offset, 1);
		if (ret)
			return ret;

		if ((val & bit) == value)
			return 0;

		/* Small back-off between polls */
		usleep_range(50, 100);
	} while (ktime_before(ktime_get(), timeout));

	return -ETIMEDOUT;
}
1007
/*
 * Reads @dwords of sideband result data from the port data registers
 * (PORT_CS_2 onwards). At most %USB4_DATA_DWORDS can be transferred.
 */
static int usb4_port_read_data(struct tb_port *port, void *data, size_t dwords)
{
	if (dwords > USB4_DATA_DWORDS)
		return -EINVAL;

	return tb_port_read(port, data, TB_CFG_PORT, port->cap_usb4 + PORT_CS_2,
			    dwords);
}
1016
/*
 * Writes @dwords of sideband input data to the port data registers
 * (PORT_CS_2 onwards). At most %USB4_DATA_DWORDS can be transferred.
 */
static int usb4_port_write_data(struct tb_port *port, const void *data,
				size_t dwords)
{
	if (dwords > USB4_DATA_DWORDS)
		return -EINVAL;

	return tb_port_write(port, data, TB_CFG_PORT, port->cap_usb4 + PORT_CS_2,
			     dwords);
}
1026
/*
 * Reads @size bytes from sideband register @reg of @target (router,
 * link partner or retimer at @index). Builds the access descriptor in
 * PORT_CS_1, waits for the pending (PND) bit to clear and checks the
 * result flags: NR (no response) maps to %-ENODEV, RC (result/error)
 * to %-EIO. When @buf is non-NULL the result data is read into it.
 */
static int usb4_port_sb_read(struct tb_port *port, enum usb4_sb_target target,
			     u8 index, u8 reg, void *buf, u8 size)
{
	size_t dwords = DIV_ROUND_UP(size, 4);
	int ret;
	u32 val;

	if (!port->cap_usb4)
		return -EINVAL;

	val = reg;
	val |= size << PORT_CS_1_LENGTH_SHIFT;
	val |= (target << PORT_CS_1_TARGET_SHIFT) & PORT_CS_1_TARGET_MASK;
	if (target == USB4_SB_TARGET_RETIMER)
		val |= (index << PORT_CS_1_RETIMER_INDEX_SHIFT);
	val |= PORT_CS_1_PND;

	ret = tb_port_write(port, &val, TB_CFG_PORT,
			    port->cap_usb4 + PORT_CS_1, 1);
	if (ret)
		return ret;

	/* PND clears when the sideband transaction completes */
	ret = usb4_port_wait_for_bit(port, port->cap_usb4 + PORT_CS_1,
				     PORT_CS_1_PND, 0, 500);
	if (ret)
		return ret;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_usb4 + PORT_CS_1, 1);
	if (ret)
		return ret;

	if (val & PORT_CS_1_NR)
		return -ENODEV;
	if (val & PORT_CS_1_RC)
		return -EIO;

	return buf ? usb4_port_read_data(port, buf, dwords) : 0;
}
1066
/*
 * Writes @size bytes from @buf to sideband register @reg of @target
 * (router, link partner or retimer at @index). Stages the data first
 * (when @buf is non-NULL), issues the write descriptor via PORT_CS_1,
 * waits for the pending (PND) bit to clear and checks the result
 * flags: NR maps to %-ENODEV, RC to %-EIO.
 */
static int usb4_port_sb_write(struct tb_port *port, enum usb4_sb_target target,
			      u8 index, u8 reg, const void *buf, u8 size)
{
	size_t dwords = DIV_ROUND_UP(size, 4);
	int ret;
	u32 val;

	if (!port->cap_usb4)
		return -EINVAL;

	if (buf) {
		ret = usb4_port_write_data(port, buf, dwords);
		if (ret)
			return ret;
	}

	val = reg;
	val |= size << PORT_CS_1_LENGTH_SHIFT;
	val |= PORT_CS_1_WNR_WRITE;
	val |= (target << PORT_CS_1_TARGET_SHIFT) & PORT_CS_1_TARGET_MASK;
	if (target == USB4_SB_TARGET_RETIMER)
		val |= (index << PORT_CS_1_RETIMER_INDEX_SHIFT);
	val |= PORT_CS_1_PND;

	ret = tb_port_write(port, &val, TB_CFG_PORT,
			    port->cap_usb4 + PORT_CS_1, 1);
	if (ret)
		return ret;

	/* PND clears when the sideband transaction completes */
	ret = usb4_port_wait_for_bit(port, port->cap_usb4 + PORT_CS_1,
				     PORT_CS_1_PND, 0, 500);
	if (ret)
		return ret;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_usb4 + PORT_CS_1, 1);
	if (ret)
		return ret;

	if (val & PORT_CS_1_NR)
		return -ENODEV;
	if (val & PORT_CS_1_RC)
		return -EIO;

	return 0;
}
1113
/*
 * Runs sideband operation @opcode against @target and polls the opcode
 * register until completion or @timeout_msec. The target overwrites
 * the opcode register with the result: 0 means success,
 * USB4_SB_OPCODE_ERR means the target asks for a retry (%-EAGAIN),
 * USB4_SB_OPCODE_ONS means not supported (%-EOPNOTSUPP); the original
 * opcode still present means the operation is ongoing, and any other
 * value is treated as a protocol error (%-EIO).
 */
static int usb4_port_sb_op(struct tb_port *port, enum usb4_sb_target target,
			   u8 index, enum usb4_sb_opcode opcode, int timeout_msec)
{
	ktime_t timeout;
	u32 val;
	int ret;

	val = opcode;
	ret = usb4_port_sb_write(port, target, index, USB4_SB_OPCODE, &val,
				 sizeof(val));
	if (ret)
		return ret;

	timeout = ktime_add_ms(ktime_get(), timeout_msec);

	do {
		/* Check results */
		ret = usb4_port_sb_read(port, target, index, USB4_SB_OPCODE,
					&val, sizeof(val));
		if (ret)
			return ret;

		switch (val) {
		case 0:
			return 0;

		case USB4_SB_OPCODE_ERR:
			return -EAGAIN;

		case USB4_SB_OPCODE_ONS:
			return -EOPNOTSUPP;

		default:
			if (val != opcode)
				return -EIO;
			break;
		}
	} while (ktime_before(ktime_get(), timeout));

	return -ETIMEDOUT;
}
1155
1156 /**
1157 * usb4_port_enumerate_retimers() - Send RT broadcast transaction
1158 * @port: USB4 port
1159 *
1160 * This forces the USB4 port to send broadcast RT transaction which
1161 * makes the retimers on the link to assign index to themselves. Returns
1162 * %0 in case of success and negative errno if there was an error.
1163 */
int usb4_port_enumerate_retimers(struct tb_port *port)
{
	u32 val;

	/* Broadcast write; no completion polling needed for this opcode */
	val = USB4_SB_OPCODE_ENUMERATE_RETIMERS;
	return usb4_port_sb_write(port, USB4_SB_TARGET_ROUTER, 0,
				  USB4_SB_OPCODE, &val, sizeof(val));
}
1172
/* Convenience wrapper: runs a sideband op against the retimer at @index */
static inline int usb4_port_retimer_op(struct tb_port *port, u8 index,
				       enum usb4_sb_opcode opcode,
				       int timeout_msec)
{
	return usb4_port_sb_op(port, USB4_SB_TARGET_RETIMER, index, opcode,
			       timeout_msec);
}
1180
1181 /**
1182 * usb4_port_retimer_read() - Read from retimer sideband registers
1183 * @port: USB4 port
1184 * @index: Retimer index
1185 * @reg: Sideband register to read
1186 * @buf: Data from @reg is stored here
1187 * @size: Number of bytes to read
1188 *
1189 * Function reads retimer sideband registers starting from @reg. The
1190 * retimer is connected to @port at @index. Returns %0 in case of
1191 * success, and read data is copied to @buf. If there is no retimer
1192 * present at given @index returns %-ENODEV. In any other failure
1193 * returns negative errno.
1194 */
int usb4_port_retimer_read(struct tb_port *port, u8 index, u8 reg, void *buf,
			   u8 size)
{
	/* Thin wrapper: sideband read targeting the retimer at @index */
	return usb4_port_sb_read(port, USB4_SB_TARGET_RETIMER, index, reg, buf,
				 size);
}
1201
1202 /**
1203 * usb4_port_retimer_write() - Write to retimer sideband registers
1204 * @port: USB4 port
1205 * @index: Retimer index
1206 * @reg: Sideband register to write
1207 * @buf: Data that is written starting from @reg
1208 * @size: Number of bytes to write
1209 *
1210 * Writes retimer sideband registers starting from @reg. The retimer is
1211 * connected to @port at @index. Returns %0 in case of success. If there
1212 * is no retimer present at given @index returns %-ENODEV. In any other
1213 * failure returns negative errno.
1214 */
1215 int usb4_port_retimer_write(struct tb_port *port, u8 index, u8 reg,
1216 const void *buf, u8 size)
1217 {
1218 return usb4_port_sb_write(port, USB4_SB_TARGET_RETIMER, index, reg, buf,
1219 size);
1220 }
1221
1222 /**
1223 * usb4_port_retimer_is_last() - Is the retimer last on-board retimer
1224 * @port: USB4 port
1225 * @index: Retimer index
1226 *
1227 * If the retimer at @index is last one (connected directly to the
1228 * Type-C port) this function returns %1. If it is not returns %0. If
1229 * the retimer is not present returns %-ENODEV. Otherwise returns
1230 * negative errno.
1231 */
1232 int usb4_port_retimer_is_last(struct tb_port *port, u8 index)
1233 {
1234 u32 metadata;
1235 int ret;
1236
1237 ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_QUERY_LAST_RETIMER,
1238 500);
1239 if (ret)
1240 return ret;
1241
1242 ret = usb4_port_retimer_read(port, index, USB4_SB_METADATA, &metadata,
1243 sizeof(metadata));
1244 return ret ? ret : metadata & 1;
1245 }
1246
1247 /**
1248 * usb4_port_retimer_nvm_sector_size() - Read retimer NVM sector size
1249 * @port: USB4 port
1250 * @index: Retimer index
1251 *
1252 * Reads NVM sector size (in bytes) of a retimer at @index. This
1253 * operation can be used to determine whether the retimer supports NVM
1254 * upgrade for example. Returns sector size in bytes or negative errno
1255 * in case of error. Specifically returns %-ENODEV if there is no
1256 * retimer at @index.
1257 */
1258 int usb4_port_retimer_nvm_sector_size(struct tb_port *port, u8 index)
1259 {
1260 u32 metadata;
1261 int ret;
1262
1263 ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_GET_NVM_SECTOR_SIZE,
1264 500);
1265 if (ret)
1266 return ret;
1267
1268 ret = usb4_port_retimer_read(port, index, USB4_SB_METADATA, &metadata,
1269 sizeof(metadata));
1270 return ret ? ret : metadata & USB4_NVM_SECTOR_SIZE_MASK;
1271 }
1272
1273 static int usb4_port_retimer_nvm_set_offset(struct tb_port *port, u8 index,
1274 unsigned int address)
1275 {
1276 u32 metadata, dwaddress;
1277 int ret;
1278
1279 dwaddress = address / 4;
1280 metadata = (dwaddress << USB4_NVM_SET_OFFSET_SHIFT) &
1281 USB4_NVM_SET_OFFSET_MASK;
1282
1283 ret = usb4_port_retimer_write(port, index, USB4_SB_METADATA, &metadata,
1284 sizeof(metadata));
1285 if (ret)
1286 return ret;
1287
1288 return usb4_port_retimer_op(port, index, USB4_SB_OPCODE_NVM_SET_OFFSET,
1289 500);
1290 }
1291
/* Context passed to the retimer NVM block read/write callbacks */
struct retimer_info {
	struct tb_port *port;	/* USB4 port the retimer is reachable through */
	u8 index;		/* Index of the retimer on the link */
};
1296
1297 static int usb4_port_retimer_nvm_write_next_block(void *data, const void *buf,
1298 size_t dwords)
1299
1300 {
1301 const struct retimer_info *info = data;
1302 struct tb_port *port = info->port;
1303 u8 index = info->index;
1304 int ret;
1305
1306 ret = usb4_port_retimer_write(port, index, USB4_SB_DATA,
1307 buf, dwords * 4);
1308 if (ret)
1309 return ret;
1310
1311 return usb4_port_retimer_op(port, index,
1312 USB4_SB_OPCODE_NVM_BLOCK_WRITE, 1000);
1313 }
1314
1315 /**
1316 * usb4_port_retimer_nvm_write() - Write to retimer NVM
1317 * @port: USB4 port
1318 * @index: Retimer index
1319 * @address: Byte address where to start the write
1320 * @buf: Data to write
1321 * @size: Size in bytes how much to write
1322 *
1323 * Writes @size bytes from @buf to the retimer NVM. Used for NVM
1324 * upgrade. Returns %0 if the data was written successfully and negative
1325 * errno in case of failure. Specifically returns %-ENODEV if there is
1326 * no retimer at @index.
1327 */
1328 int usb4_port_retimer_nvm_write(struct tb_port *port, u8 index, unsigned int address,
1329 const void *buf, size_t size)
1330 {
1331 struct retimer_info info = { .port = port, .index = index };
1332 int ret;
1333
1334 ret = usb4_port_retimer_nvm_set_offset(port, index, address);
1335 if (ret)
1336 return ret;
1337
1338 return usb4_do_write_data(address, buf, size,
1339 usb4_port_retimer_nvm_write_next_block, &info);
1340 }
1341
1342 /**
1343 * usb4_port_retimer_nvm_authenticate() - Start retimer NVM upgrade
1344 * @port: USB4 port
1345 * @index: Retimer index
1346 *
1347 * After the new NVM image has been written via usb4_port_retimer_nvm_write()
1348 * this function can be used to trigger the NVM upgrade process. If
1349 * successful the retimer restarts with the new NVM and may not have the
1350 * index set so one needs to call usb4_port_enumerate_retimers() to
1351 * force index to be assigned.
1352 */
1353 int usb4_port_retimer_nvm_authenticate(struct tb_port *port, u8 index)
1354 {
1355 u32 val;
1356
1357 /*
1358 * We need to use the raw operation here because once the
1359 * authentication completes the retimer index is not set anymore
1360 * so we do not get back the status now.
1361 */
1362 val = USB4_SB_OPCODE_NVM_AUTH_WRITE;
1363 return usb4_port_sb_write(port, USB4_SB_TARGET_RETIMER, index,
1364 USB4_SB_OPCODE, &val, sizeof(val));
1365 }
1366
1367 /**
1368 * usb4_port_retimer_nvm_authenticate_status() - Read status of NVM upgrade
1369 * @port: USB4 port
1370 * @index: Retimer index
1371 * @status: Raw status code read from metadata
1372 *
1373 * This can be called after usb4_port_retimer_nvm_authenticate() and
1374 * usb4_port_enumerate_retimers() to fetch status of the NVM upgrade.
1375 *
1376 * Returns %0 if the authentication status was successfully read. The
1377 * completion metadata (the result) is then stored into @status. If
1378 * reading the status fails, returns negative errno.
1379 */
1380 int usb4_port_retimer_nvm_authenticate_status(struct tb_port *port, u8 index,
1381 u32 *status)
1382 {
1383 u32 metadata, val;
1384 int ret;
1385
1386 ret = usb4_port_retimer_read(port, index, USB4_SB_OPCODE, &val,
1387 sizeof(val));
1388 if (ret)
1389 return ret;
1390
1391 switch (val) {
1392 case 0:
1393 *status = 0;
1394 return 0;
1395
1396 case USB4_SB_OPCODE_ERR:
1397 ret = usb4_port_retimer_read(port, index, USB4_SB_METADATA,
1398 &metadata, sizeof(metadata));
1399 if (ret)
1400 return ret;
1401
1402 *status = metadata & USB4_SB_METADATA_NVM_AUTH_WRITE_MASK;
1403 return 0;
1404
1405 case USB4_SB_OPCODE_ONS:
1406 return -EOPNOTSUPP;
1407
1408 default:
1409 return -EIO;
1410 }
1411 }
1412
1413 static int usb4_port_retimer_nvm_read_block(void *data, unsigned int dwaddress,
1414 void *buf, size_t dwords)
1415 {
1416 const struct retimer_info *info = data;
1417 struct tb_port *port = info->port;
1418 u8 index = info->index;
1419 u32 metadata;
1420 int ret;
1421
1422 metadata = dwaddress << USB4_NVM_READ_OFFSET_SHIFT;
1423 if (dwords < USB4_DATA_DWORDS)
1424 metadata |= dwords << USB4_NVM_READ_LENGTH_SHIFT;
1425
1426 ret = usb4_port_retimer_write(port, index, USB4_SB_METADATA, &metadata,
1427 sizeof(metadata));
1428 if (ret)
1429 return ret;
1430
1431 ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_NVM_READ, 500);
1432 if (ret)
1433 return ret;
1434
1435 return usb4_port_retimer_read(port, index, USB4_SB_DATA, buf,
1436 dwords * 4);
1437 }
1438
1439 /**
1440 * usb4_port_retimer_nvm_read() - Read contents of retimer NVM
1441 * @port: USB4 port
1442 * @index: Retimer index
1443 * @address: NVM address (in bytes) to start reading
1444 * @buf: Data read from NVM is stored here
1445 * @size: Number of bytes to read
1446 *
1447 * Reads retimer NVM and copies the contents to @buf. Returns %0 if the
1448 * read was successful and negative errno in case of failure.
1449 * Specifically returns %-ENODEV if there is no retimer at @index.
1450 */
1451 int usb4_port_retimer_nvm_read(struct tb_port *port, u8 index,
1452 unsigned int address, void *buf, size_t size)
1453 {
1454 struct retimer_info info = { .port = port, .index = index };
1455
1456 return usb4_do_read_data(address, buf, size,
1457 usb4_port_retimer_nvm_read_block, &info);
1458 }
1459
1460 /**
1461 * usb4_usb3_port_max_link_rate() - Maximum support USB3 link rate
1462 * @port: USB3 adapter port
1463 *
1464 * Return maximum supported link rate of a USB3 adapter in Mb/s.
1465 * Negative errno in case of error.
1466 */
1467 int usb4_usb3_port_max_link_rate(struct tb_port *port)
1468 {
1469 int ret, lr;
1470 u32 val;
1471
1472 if (!tb_port_is_usb3_down(port) && !tb_port_is_usb3_up(port))
1473 return -EINVAL;
1474
1475 ret = tb_port_read(port, &val, TB_CFG_PORT,
1476 port->cap_adap + ADP_USB3_CS_4, 1);
1477 if (ret)
1478 return ret;
1479
1480 lr = (val & ADP_USB3_CS_4_MSLR_MASK) >> ADP_USB3_CS_4_MSLR_SHIFT;
1481 return lr == ADP_USB3_CS_4_MSLR_20G ? 20000 : 10000;
1482 }
1483
1484 /**
1485 * usb4_usb3_port_actual_link_rate() - Established USB3 link rate
1486 * @port: USB3 adapter port
1487 *
1488 * Return actual established link rate of a USB3 adapter in Mb/s. If the
1489 * link is not up returns %0 and negative errno in case of failure.
1490 */
1491 int usb4_usb3_port_actual_link_rate(struct tb_port *port)
1492 {
1493 int ret, lr;
1494 u32 val;
1495
1496 if (!tb_port_is_usb3_down(port) && !tb_port_is_usb3_up(port))
1497 return -EINVAL;
1498
1499 ret = tb_port_read(port, &val, TB_CFG_PORT,
1500 port->cap_adap + ADP_USB3_CS_4, 1);
1501 if (ret)
1502 return ret;
1503
1504 if (!(val & ADP_USB3_CS_4_ULV))
1505 return 0;
1506
1507 lr = val & ADP_USB3_CS_4_ALR_MASK;
1508 return lr == ADP_USB3_CS_4_ALR_20G ? 20000 : 10000;
1509 }
1510
1511 static int usb4_usb3_port_cm_request(struct tb_port *port, bool request)
1512 {
1513 int ret;
1514 u32 val;
1515
1516 if (!tb_port_is_usb3_down(port))
1517 return -EINVAL;
1518 if (tb_route(port->sw))
1519 return -EINVAL;
1520
1521 ret = tb_port_read(port, &val, TB_CFG_PORT,
1522 port->cap_adap + ADP_USB3_CS_2, 1);
1523 if (ret)
1524 return ret;
1525
1526 if (request)
1527 val |= ADP_USB3_CS_2_CMR;
1528 else
1529 val &= ~ADP_USB3_CS_2_CMR;
1530
1531 ret = tb_port_write(port, &val, TB_CFG_PORT,
1532 port->cap_adap + ADP_USB3_CS_2, 1);
1533 if (ret)
1534 return ret;
1535
1536 /*
1537 * We can use val here directly as the CMR bit is in the same place
1538 * as HCA. Just mask out others.
1539 */
1540 val &= ADP_USB3_CS_2_CMR;
1541 return usb4_port_wait_for_bit(port, port->cap_adap + ADP_USB3_CS_1,
1542 ADP_USB3_CS_1_HCA, val, 1500);
1543 }
1544
/* Asserts the CM request bit for @port */
static inline int usb4_usb3_port_set_cm_request(struct tb_port *port)
{
	return usb4_usb3_port_cm_request(port, true);
}
1549
/* Clears the CM request bit for @port */
static inline int usb4_usb3_port_clear_cm_request(struct tb_port *port)
{
	return usb4_usb3_port_cm_request(port, false);
}
1554
1555 static unsigned int usb3_bw_to_mbps(u32 bw, u8 scale)
1556 {
1557 unsigned long uframes;
1558
1559 uframes = bw * 512UL << scale;
1560 return DIV_ROUND_CLOSEST(uframes * 8000, 1000 * 1000);
1561 }
1562
1563 static u32 mbps_to_usb3_bw(unsigned int mbps, u8 scale)
1564 {
1565 unsigned long uframes;
1566
1567 /* 1 uframe is 1/8 ms (125 us) -> 1 / 8000 s */
1568 uframes = ((unsigned long)mbps * 1000 * 1000) / 8000;
1569 return DIV_ROUND_UP(uframes, 512UL << scale);
1570 }
1571
1572 static int usb4_usb3_port_read_allocated_bandwidth(struct tb_port *port,
1573 int *upstream_bw,
1574 int *downstream_bw)
1575 {
1576 u32 val, bw, scale;
1577 int ret;
1578
1579 ret = tb_port_read(port, &val, TB_CFG_PORT,
1580 port->cap_adap + ADP_USB3_CS_2, 1);
1581 if (ret)
1582 return ret;
1583
1584 ret = tb_port_read(port, &scale, TB_CFG_PORT,
1585 port->cap_adap + ADP_USB3_CS_3, 1);
1586 if (ret)
1587 return ret;
1588
1589 scale &= ADP_USB3_CS_3_SCALE_MASK;
1590
1591 bw = val & ADP_USB3_CS_2_AUBW_MASK;
1592 *upstream_bw = usb3_bw_to_mbps(bw, scale);
1593
1594 bw = (val & ADP_USB3_CS_2_ADBW_MASK) >> ADP_USB3_CS_2_ADBW_SHIFT;
1595 *downstream_bw = usb3_bw_to_mbps(bw, scale);
1596
1597 return 0;
1598 }
1599
/**
 * usb4_usb3_port_allocated_bandwidth() - Bandwidth allocated for USB3
 * @port: USB3 adapter port
 * @upstream_bw: Allocated upstream bandwidth is stored here
 * @downstream_bw: Allocated downstream bandwidth is stored here
 *
 * Stores the currently allocated USB3 bandwidth into @upstream_bw and
 * @downstream_bw in Mb/s. Returns %0 in case of success and negative
 * errno in failure.
 */
int usb4_usb3_port_allocated_bandwidth(struct tb_port *port, int *upstream_bw,
				       int *downstream_bw)
{
	int ret;

	/* Hold the CM request for the duration of the register read */
	ret = usb4_usb3_port_set_cm_request(port);
	if (ret)
		return ret;

	ret = usb4_usb3_port_read_allocated_bandwidth(port, upstream_bw,
						      downstream_bw);
	usb4_usb3_port_clear_cm_request(port);

	return ret;
}
1625
1626 static int usb4_usb3_port_read_consumed_bandwidth(struct tb_port *port,
1627 int *upstream_bw,
1628 int *downstream_bw)
1629 {
1630 u32 val, bw, scale;
1631 int ret;
1632
1633 ret = tb_port_read(port, &val, TB_CFG_PORT,
1634 port->cap_adap + ADP_USB3_CS_1, 1);
1635 if (ret)
1636 return ret;
1637
1638 ret = tb_port_read(port, &scale, TB_CFG_PORT,
1639 port->cap_adap + ADP_USB3_CS_3, 1);
1640 if (ret)
1641 return ret;
1642
1643 scale &= ADP_USB3_CS_3_SCALE_MASK;
1644
1645 bw = val & ADP_USB3_CS_1_CUBW_MASK;
1646 *upstream_bw = usb3_bw_to_mbps(bw, scale);
1647
1648 bw = (val & ADP_USB3_CS_1_CDBW_MASK) >> ADP_USB3_CS_1_CDBW_SHIFT;
1649 *downstream_bw = usb3_bw_to_mbps(bw, scale);
1650
1651 return 0;
1652 }
1653
1654 static int usb4_usb3_port_write_allocated_bandwidth(struct tb_port *port,
1655 int upstream_bw,
1656 int downstream_bw)
1657 {
1658 u32 val, ubw, dbw, scale;
1659 int ret;
1660
1661 /* Read the used scale, hardware default is 0 */
1662 ret = tb_port_read(port, &scale, TB_CFG_PORT,
1663 port->cap_adap + ADP_USB3_CS_3, 1);
1664 if (ret)
1665 return ret;
1666
1667 scale &= ADP_USB3_CS_3_SCALE_MASK;
1668 ubw = mbps_to_usb3_bw(upstream_bw, scale);
1669 dbw = mbps_to_usb3_bw(downstream_bw, scale);
1670
1671 ret = tb_port_read(port, &val, TB_CFG_PORT,
1672 port->cap_adap + ADP_USB3_CS_2, 1);
1673 if (ret)
1674 return ret;
1675
1676 val &= ~(ADP_USB3_CS_2_AUBW_MASK | ADP_USB3_CS_2_ADBW_MASK);
1677 val |= dbw << ADP_USB3_CS_2_ADBW_SHIFT;
1678 val |= ubw;
1679
1680 return tb_port_write(port, &val, TB_CFG_PORT,
1681 port->cap_adap + ADP_USB3_CS_2, 1);
1682 }
1683
/**
 * usb4_usb3_port_allocate_bandwidth() - Allocate bandwidth for USB3
 * @port: USB3 adapter port
 * @upstream_bw: New upstream bandwidth
 * @downstream_bw: New downstream bandwidth
 *
 * This can be used to set how much bandwidth is allocated for the USB3
 * tunneled isochronous traffic. @upstream_bw and @downstream_bw are the
 * new values programmed to the USB3 adapter allocation registers. If
 * the values are lower than what is currently consumed the allocation
 * is set to what is currently consumed instead (consumed bandwidth
 * cannot be taken away by CM). The actual new values are returned in
 * @upstream_bw and @downstream_bw.
 *
 * Returns %0 in case of success and negative errno if there was a
 * failure.
 */
int usb4_usb3_port_allocate_bandwidth(struct tb_port *port, int *upstream_bw,
				      int *downstream_bw)
{
	int ret, consumed_up, consumed_down, allocate_up, allocate_down;

	ret = usb4_usb3_port_set_cm_request(port);
	if (ret)
		return ret;

	ret = usb4_usb3_port_read_consumed_bandwidth(port, &consumed_up,
						     &consumed_down);
	if (ret)
		goto err_request;

	/* Never allocate less than what is currently consumed */
	allocate_up = max(*upstream_bw, consumed_up);
	allocate_down = max(*downstream_bw, consumed_down);

	ret = usb4_usb3_port_write_allocated_bandwidth(port, allocate_up,
						       allocate_down);
	if (ret)
		goto err_request;

	*upstream_bw = allocate_up;
	*downstream_bw = allocate_down;

err_request:
	usb4_usb3_port_clear_cm_request(port);
	return ret;
}
1731
/**
 * usb4_usb3_port_release_bandwidth() - Release allocated USB3 bandwidth
 * @port: USB3 adapter port
 * @upstream_bw: New allocated upstream bandwidth
 * @downstream_bw: New allocated downstream bandwidth
 *
 * Releases USB3 allocated bandwidth down to what is actually consumed.
 * The new bandwidth is returned in @upstream_bw and @downstream_bw.
 *
 * Returns %0 in success and negative errno in case of failure.
 */
int usb4_usb3_port_release_bandwidth(struct tb_port *port, int *upstream_bw,
				     int *downstream_bw)
{
	int ret, consumed_up, consumed_down;

	ret = usb4_usb3_port_set_cm_request(port);
	if (ret)
		return ret;

	ret = usb4_usb3_port_read_consumed_bandwidth(port, &consumed_up,
						     &consumed_down);
	if (ret)
		goto err_request;

	/*
	 * Always keep 1000 Mb/s to make sure xHCI has at least some
	 * bandwidth available for isochronous traffic.
	 */
	consumed_up = max(consumed_up, 1000);
	consumed_down = max(consumed_down, 1000);

	ret = usb4_usb3_port_write_allocated_bandwidth(port, consumed_up,
						       consumed_down);
	if (ret)
		goto err_request;

	*upstream_bw = consumed_up;
	*downstream_bw = consumed_down;

err_request:
	usb4_usb3_port_clear_cm_request(port);
	return ret;
}