// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt link controller support
 *
 * Copyright (C) 2019, Intel Corporation
 * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
 */

#include "tb.h"

/**
 * tb_lc_read_uuid() - Read switch UUID from link controller common register
 * @sw: Switch whose UUID is read
 * @uuid: UUID is placed here
 */
int tb_lc_read_uuid(struct tb_switch *sw, u32 *uuid)
{
	if (!sw->cap_lc)
		return -EINVAL;
	return tb_sw_read(sw, uuid, TB_CFG_SWITCH, sw->cap_lc + TB_LC_FUSE, 4);
}

static int read_lc_desc(struct tb_switch *sw, u32 *desc)
{
	if (!sw->cap_lc)
		return -EINVAL;
	return tb_sw_read(sw, desc, TB_CFG_SWITCH, sw->cap_lc + TB_LC_DESC, 1);
}

static int find_port_lc_cap(struct tb_port *port)
{
	struct tb_switch *sw = port->sw;
	int start, phys, ret, size;
	u32 desc;

	ret = read_lc_desc(sw, &desc);
	if (ret)
		return ret;

	/* Start of port LC registers */
	start = (desc & TB_LC_DESC_SIZE_MASK) >> TB_LC_DESC_SIZE_SHIFT;
	size = (desc & TB_LC_DESC_PORT_SIZE_MASK) >> TB_LC_DESC_PORT_SIZE_SHIFT;
	phys = tb_phy_port_from_link(port->port);

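	/*
	 * Per-port LC registers live after the common LC registers:
	 * one block of 'size' dwords per physical port, starting
	 * 'start' dwords from the LC capability.
	 */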
	return sw->cap_lc + start + phys * size;
}

static int tb_lc_set_port_configured(struct tb_port *port, bool configured)
{
	bool upstream = tb_is_upstream_port(port);
	struct tb_switch *sw = port->sw;
	u32 ctrl, lane;
	int cap, ret;

	if (sw->generation < 2)
		return 0;

	cap = find_port_lc_cap(port);
	if (cap < 0)
		return cap;

	ret = tb_sw_read(sw, &ctrl, TB_CFG_SWITCH, cap + TB_LC_SX_CTRL, 1);
	if (ret)
		return ret;

	/* Resolve correct lane */
	if (port->port % 2)
		lane = TB_LC_SX_CTRL_L1C;
	else
		lane = TB_LC_SX_CTRL_L2C;

	if (configured) {
		ctrl |= lane;
		if (upstream)
			ctrl |= TB_LC_SX_CTRL_UPSTREAM;
	} else {
		ctrl &= ~lane;
		if (upstream)
			ctrl &= ~TB_LC_SX_CTRL_UPSTREAM;
	}

	return tb_sw_write(sw, &ctrl, TB_CFG_SWITCH, cap + TB_LC_SX_CTRL, 1);
}

/**
 * tb_lc_configure_port() - Let LC know about configured port
 * @port: Port that is set as configured
 *
 * Sets the port configured for power management purposes.
 */
int tb_lc_configure_port(struct tb_port *port)
{
	return tb_lc_set_port_configured(port, true);
}

/**
 * tb_lc_unconfigure_port() - Let LC know about unconfigured port
 * @port: Port that is set as unconfigured
 *
 * Sets the port unconfigured for power management purposes.
 */
void tb_lc_unconfigure_port(struct tb_port *port)
{
	tb_lc_set_port_configured(port, false);
}

static int tb_lc_set_xdomain_configured(struct tb_port *port, bool configure)
{
	struct tb_switch *sw = port->sw;
	u32 ctrl, lane;
	int cap, ret;

	if (sw->generation < 2)
		return 0;

	cap = find_port_lc_cap(port);
	if (cap < 0)
		return cap;

	ret = tb_sw_read(sw, &ctrl, TB_CFG_SWITCH, cap + TB_LC_SX_CTRL, 1);
	if (ret)
		return ret;

	/* Resolve correct lane */
	if (port->port % 2)
		lane = TB_LC_SX_CTRL_L1D;
	else
		lane = TB_LC_SX_CTRL_L2D;

	if (configure)
		ctrl |= lane;
	else
		ctrl &= ~lane;

	return tb_sw_write(sw, &ctrl, TB_CFG_SWITCH, cap + TB_LC_SX_CTRL, 1);
}

/**
 * tb_lc_configure_xdomain() - Inform LC that the link is XDomain
 * @port: Switch downstream port connected to another host
 *
 * Sets the lane configured for XDomain accordingly so that the LC knows
 * about this. Returns %0 on success and negative errno on failure.
 */
int tb_lc_configure_xdomain(struct tb_port *port)
{
	return tb_lc_set_xdomain_configured(port, true);
}

/**
 * tb_lc_unconfigure_xdomain() - Unconfigure XDomain from port
 * @port: Switch downstream port that was connected to another host
 *
 * Unsets the lane XDomain configuration.
 */
void tb_lc_unconfigure_xdomain(struct tb_port *port)
{
	tb_lc_set_xdomain_configured(port, false);
}

/**
 * tb_lc_start_lane_initialization() - Start lane initialization
 * @port: Device router lane 0 adapter
 *
 * Starts lane initialization for @port after the router resumed from
 * sleep. Should be called for those downstream lane adapters that were
 * not connected (tb_lc_configure_port() was not called) before sleep.
 *
 * Returns %0 on success and negative errno in case of failure.
 */
int tb_lc_start_lane_initialization(struct tb_port *port)
{
	struct tb_switch *sw = port->sw;
	int ret, cap;
	u32 ctrl;

	if (!tb_route(sw))
		return 0;

	if (sw->generation < 2)
		return 0;

	cap = find_port_lc_cap(port);
	if (cap < 0)
		return cap;

	ret = tb_sw_read(sw, &ctrl, TB_CFG_SWITCH, cap + TB_LC_SX_CTRL, 1);
	if (ret)
		return ret;

	ctrl |= TB_LC_SX_CTRL_SLI;

	return tb_sw_write(sw, &ctrl, TB_CFG_SWITCH, cap + TB_LC_SX_CTRL, 1);
}

static int tb_lc_set_wake_one(struct tb_switch *sw, unsigned int offset,
			      unsigned int flags)
{
	u32 ctrl;
	int ret;

	/*
	 * Enable wake on PCIe and USB4 (wake coming from another
	 * router).
	 */
	ret = tb_sw_read(sw, &ctrl, TB_CFG_SWITCH,
			 offset + TB_LC_SX_CTRL, 1);
	if (ret)
		return ret;

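	/* Clear all wake bits first, then set only the ones requested in flags */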
	ctrl &= ~(TB_LC_SX_CTRL_WOC | TB_LC_SX_CTRL_WOD | TB_LC_SX_CTRL_WOP |
		  TB_LC_SX_CTRL_WOU4);

	if (flags & TB_WAKE_ON_CONNECT)
		ctrl |= TB_LC_SX_CTRL_WOC | TB_LC_SX_CTRL_WOD;
	if (flags & TB_WAKE_ON_USB4)
		ctrl |= TB_LC_SX_CTRL_WOU4;
	if (flags & TB_WAKE_ON_PCIE)
		ctrl |= TB_LC_SX_CTRL_WOP;

	return tb_sw_write(sw, &ctrl, TB_CFG_SWITCH, offset + TB_LC_SX_CTRL, 1);
}

/**
 * tb_lc_set_wake() - Enable/disable wake
 * @sw: Switch whose wakes to configure
 * @flags: Wakeup flags (%0 to disable)
 *
 * Sets the wake bits accordingly in each link controller.
 */
int tb_lc_set_wake(struct tb_switch *sw, unsigned int flags)
{
	int start, size, nlc, ret, i;
	u32 desc;

	if (sw->generation < 2)
		return 0;

	if (!tb_route(sw))
		return 0;

	ret = read_lc_desc(sw, &desc);
	if (ret)
		return ret;

	/* Figure out number of link controllers */
	nlc = desc & TB_LC_DESC_NLC_MASK;
	start = (desc & TB_LC_DESC_SIZE_MASK) >> TB_LC_DESC_SIZE_SHIFT;
	size = (desc & TB_LC_DESC_PORT_SIZE_MASK) >> TB_LC_DESC_PORT_SIZE_SHIFT;

	/* For each link controller set the wake bits */
	for (i = 0; i < nlc; i++) {
		unsigned int offset = sw->cap_lc + start + i * size;

		ret = tb_lc_set_wake_one(sw, offset, flags);
		if (ret)
			return ret;
	}

	return 0;
}

/**
 * tb_lc_set_sleep() - Inform LC that the switch is going to sleep
 * @sw: Switch to set sleep
 *
 * Let the switch link controllers know that the switch is going to
 * sleep.
 */
int tb_lc_set_sleep(struct tb_switch *sw)
{
	int start, size, nlc, ret, i;
	u32 desc;

	if (sw->generation < 2)
		return 0;

	ret = read_lc_desc(sw, &desc);
	if (ret)
		return ret;

	/* Figure out number of link controllers */
	nlc = desc & TB_LC_DESC_NLC_MASK;
	start = (desc & TB_LC_DESC_SIZE_MASK) >> TB_LC_DESC_SIZE_SHIFT;
	size = (desc & TB_LC_DESC_PORT_SIZE_MASK) >> TB_LC_DESC_PORT_SIZE_SHIFT;

	/* For each link controller set sleep bit */
	for (i = 0; i < nlc; i++) {
		unsigned int offset = sw->cap_lc + start + i * size;
		u32 ctrl;

		ret = tb_sw_read(sw, &ctrl, TB_CFG_SWITCH,
				 offset + TB_LC_SX_CTRL, 1);
		if (ret)
			return ret;

		ctrl |= TB_LC_SX_CTRL_SLP;
		ret = tb_sw_write(sw, &ctrl, TB_CFG_SWITCH,
				  offset + TB_LC_SX_CTRL, 1);
		if (ret)
			return ret;
	}

	return 0;
}

/**
 * tb_lc_lane_bonding_possible() - Is lane bonding possible towards switch
 * @sw: Switch to check
 *
 * Checks whether the conditions for lane bonding from the parent to @sw
 * are met.
 */
bool tb_lc_lane_bonding_possible(struct tb_switch *sw)
{
	struct tb_port *up;
	int cap, ret;
	u32 val;

	if (sw->generation < 2)
		return false;

	up = tb_upstream_port(sw);
	cap = find_port_lc_cap(up);
	if (cap < 0)
		return false;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, cap + TB_LC_PORT_ATTR, 1);
	if (ret)
		return false;

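	/* Bonding is possible only when the BE bit is set in the port attributes */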
	return !!(val & TB_LC_PORT_ATTR_BE);
}

static int tb_lc_dp_sink_from_port(const struct tb_switch *sw,
				   struct tb_port *in)
{
	struct tb_port *port;

	/* The first DP IN port is sink 0 and second is sink 1 */
	tb_switch_for_each_port(sw, port) {
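		/* @in is the first DP IN adapter found -> sink 0, otherwise sink 1 */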
		if (tb_port_is_dpin(port))
			return in != port;
	}

	return -EINVAL;
}

static int tb_lc_dp_sink_available(struct tb_switch *sw, int sink)
{
	u32 val, alloc;
	int ret;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
			 sw->cap_lc + TB_LC_SNK_ALLOCATION, 1);
	if (ret)
		return ret;

	/*
	 * Sink is available for CM/SW to use if the allocation value is
	 * either 0 or 1.
	 */
	if (!sink) {
		alloc = val & TB_LC_SNK_ALLOCATION_SNK0_MASK;
		if (!alloc || alloc == TB_LC_SNK_ALLOCATION_SNK0_CM)
			return 0;
	} else {
		alloc = (val & TB_LC_SNK_ALLOCATION_SNK1_MASK) >>
			TB_LC_SNK_ALLOCATION_SNK1_SHIFT;
		if (!alloc || alloc == TB_LC_SNK_ALLOCATION_SNK1_CM)
			return 0;
	}

	return -EBUSY;
}

/**
 * tb_lc_dp_sink_query() - Is DP sink available for DP IN port
 * @sw: Switch whose DP sink is queried
 * @in: DP IN port to check
 *
 * Queries through LC SNK_ALLOCATION registers whether DP sink is available
 * for the given DP IN port or not.
 */
bool tb_lc_dp_sink_query(struct tb_switch *sw, struct tb_port *in)
{
	int sink;

	/*
	 * For older generations sink is always available as there is no
	 * allocation mechanism.
	 */
	if (sw->generation < 3)
		return true;

	sink = tb_lc_dp_sink_from_port(sw, in);
	if (sink < 0)
		return false;

	return !tb_lc_dp_sink_available(sw, sink);
}

/**
 * tb_lc_dp_sink_alloc() - Allocate DP sink
 * @sw: Switch whose DP sink is allocated
 * @in: DP IN port the DP sink is allocated for
 *
 * Allocate DP sink for @in via LC SNK_ALLOCATION registers. If the
 * resource is available and allocation is successful returns %0. In all
 * other cases returns negative errno. In particular %-EBUSY is returned if
 * the resource was not available.
 */
int tb_lc_dp_sink_alloc(struct tb_switch *sw, struct tb_port *in)
{
	int ret, sink;
	u32 val;

	if (sw->generation < 3)
		return 0;

	sink = tb_lc_dp_sink_from_port(sw, in);
	if (sink < 0)
		return sink;

	ret = tb_lc_dp_sink_available(sw, sink);
	if (ret)
		return ret;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
			 sw->cap_lc + TB_LC_SNK_ALLOCATION, 1);
	if (ret)
		return ret;

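	/* Mark the sink as allocated to the connection manager (CM) */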
	if (!sink) {
		val &= ~TB_LC_SNK_ALLOCATION_SNK0_MASK;
		val |= TB_LC_SNK_ALLOCATION_SNK0_CM;
	} else {
		val &= ~TB_LC_SNK_ALLOCATION_SNK1_MASK;
		val |= TB_LC_SNK_ALLOCATION_SNK1_CM <<
			TB_LC_SNK_ALLOCATION_SNK1_SHIFT;
	}

	ret = tb_sw_write(sw, &val, TB_CFG_SWITCH,
			  sw->cap_lc + TB_LC_SNK_ALLOCATION, 1);
	if (ret)
		return ret;

	tb_port_dbg(in, "sink %d allocated\n", sink);
	return 0;
}

/**
 * tb_lc_dp_sink_dealloc() - De-allocate DP sink
 * @sw: Switch whose DP sink is de-allocated
 * @in: DP IN port whose DP sink is de-allocated
 *
 * De-allocate DP sink from @in using LC SNK_ALLOCATION registers.
 */
int tb_lc_dp_sink_dealloc(struct tb_switch *sw, struct tb_port *in)
{
	int ret, sink;
	u32 val;

	if (sw->generation < 3)
		return 0;

	sink = tb_lc_dp_sink_from_port(sw, in);
	if (sink < 0)
		return sink;

	/* Needs to be owned by CM/SW */
	ret = tb_lc_dp_sink_available(sw, sink);
	if (ret)
		return ret;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
			 sw->cap_lc + TB_LC_SNK_ALLOCATION, 1);
	if (ret)
		return ret;

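	/* Clear the allocation field so the sink becomes available again */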
	if (!sink)
		val &= ~TB_LC_SNK_ALLOCATION_SNK0_MASK;
	else
		val &= ~TB_LC_SNK_ALLOCATION_SNK1_MASK;

	ret = tb_sw_write(sw, &val, TB_CFG_SWITCH,
			  sw->cap_lc + TB_LC_SNK_ALLOCATION, 1);
	if (ret)
		return ret;

	tb_port_dbg(in, "sink %d de-allocated\n", sink);
	return 0;
}

/**
 * tb_lc_force_power() - Forces LC to be powered on
 * @sw: Thunderbolt switch
 *
 * This is useful to let the authentication cycle pass even without
 * a Thunderbolt link present.
 */
int tb_lc_force_power(struct tb_switch *sw)
{
	u32 in = 0xffff;

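	/* Write all ones to the LC power register to force the LC powered on */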
	return tb_sw_write(sw, &in, TB_CFG_SWITCH, TB_LC_POWER, 1);
}