1 | /* |
2 | * Copyright © 2014 Red Hat. | |
3 | * | |
4 | * Permission to use, copy, modify, distribute, and sell this software and its | |
5 | * documentation for any purpose is hereby granted without fee, provided that | |
6 | * the above copyright notice appear in all copies and that both that copyright | |
7 | * notice and this permission notice appear in supporting documentation, and | |
8 | * that the name of the copyright holders not be used in advertising or | |
9 | * publicity pertaining to distribution of the software without specific, | |
10 | * written prior permission. The copyright holders make no representations | |
11 | * about the suitability of this software for any purpose. It is provided "as | |
12 | * is" without express or implied warranty. | |
13 | * | |
14 | * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, | |
15 | * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO | |
16 | * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR | |
17 | * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, | |
18 | * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER | |
19 | * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE | |
20 | * OF THIS SOFTWARE. | |
21 | */ | |
22 | #ifndef _DRM_DP_MST_HELPER_H_ | |
23 | #define _DRM_DP_MST_HELPER_H_ | |
24 | ||
25 | #include <linux/types.h> | |
26 | #include <drm/drm_dp_helper.h> | |
3f3353b7 | 27 | #include <drm/drm_atomic.h> |
ad7f8a1f | 28 | |
12a280c7 LP |
29 | #if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS) |
30 | #include <linux/stackdepot.h> | |
31 | #include <linux/timekeeping.h> | |
32 | ||
33 | enum drm_dp_mst_topology_ref_type { | |
34 | DRM_DP_MST_TOPOLOGY_REF_GET, | |
35 | DRM_DP_MST_TOPOLOGY_REF_PUT, | |
36 | }; | |
37 | ||
38 | struct drm_dp_mst_topology_ref_history { | |
39 | struct drm_dp_mst_topology_ref_entry { | |
40 | enum drm_dp_mst_topology_ref_type type; | |
41 | int count; | |
42 | ktime_t ts_nsec; | |
43 | depot_stack_handle_t backtrace; | |
44 | } *entries; | |
45 | int len; | |
46 | }; | |
47 | #endif /* IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS) */ | |
48 | ||
struct drm_dp_mst_branch;

/**
 * struct drm_dp_vcpi - Virtual Channel Payload Identifier
 * @vcpi: Virtual channel ID.
 * @pbn: Payload Bandwidth Number for this channel
 * @aligned_pbn: PBN aligned with slot size
 * @num_slots: number of slots for this PBN
 */
struct drm_dp_vcpi {
	int vcpi;
	int pbn;
	int aligned_pbn;
	int num_slots;
};
64 | ||
65 | /** | |
66 | * struct drm_dp_mst_port - MST port | |
ad7f8a1f | 67 | * @port_num: port number |
3f9b3f02 LP |
68 | * @input: if this port is an input port. Protected by |
69 | * &drm_dp_mst_topology_mgr.base.lock. | |
70 | * @mcs: message capability status - DP 1.2 spec. Protected by | |
71 | * &drm_dp_mst_topology_mgr.base.lock. | |
72 | * @ddps: DisplayPort Device Plug Status - DP 1.2. Protected by | |
73 | * &drm_dp_mst_topology_mgr.base.lock. | |
74 | * @pdt: Peer Device Type. Protected by | |
75 | * &drm_dp_mst_topology_mgr.base.lock. | |
76 | * @ldps: Legacy Device Plug Status. Protected by | |
77 | * &drm_dp_mst_topology_mgr.base.lock. | |
78 | * @dpcd_rev: DPCD revision of device on this port. Protected by | |
79 | * &drm_dp_mst_topology_mgr.base.lock. | |
80 | * @num_sdp_streams: Number of simultaneous streams. Protected by | |
81 | * &drm_dp_mst_topology_mgr.base.lock. | |
82 | * @num_sdp_stream_sinks: Number of stream sinks. Protected by | |
83 | * &drm_dp_mst_topology_mgr.base.lock. | |
fcf46380 | 84 | * @full_pbn: Max possible bandwidth for this port. Protected by |
3f9b3f02 | 85 | * &drm_dp_mst_topology_mgr.base.lock. |
ad7f8a1f | 86 | * @next: link to next port on this branch device |
c485e2c9 | 87 | * @aux: i2c aux transport to talk to device connected to this port, protected |
3f9b3f02 | 88 | * by &drm_dp_mst_topology_mgr.base.lock. |
ad7f8a1f DA |
89 | * @parent: branch device parent of this port |
90 | * @vcpi: Virtual Channel Payload info for this port. | |
3f9b3f02 LP |
91 | * @connector: DRM connector this port is connected to. Protected by |
92 | * &drm_dp_mst_topology_mgr.base.lock. | |
ad7f8a1f DA |
93 | * @mgr: topology manager this port lives under. |
94 | * | |
95 | * This structure represents an MST port endpoint on a device somewhere | |
96 | * in the MST topology. | |
97 | */ | |
98 | struct drm_dp_mst_port { | |
ebcc0e6b LP |
99 | /** |
100 | * @topology_kref: refcount for this port's lifetime in the topology, | |
101 | * only the DP MST helpers should need to touch this | |
102 | */ | |
103 | struct kref topology_kref; | |
104 | ||
105 | /** | |
106 | * @malloc_kref: refcount for the memory allocation containing this | |
107 | * structure. See drm_dp_mst_get_port_malloc() and | |
108 | * drm_dp_mst_put_port_malloc(). | |
109 | */ | |
110 | struct kref malloc_kref; | |
ad7f8a1f | 111 | |
12a280c7 LP |
112 | #if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS) |
113 | /** | |
114 | * @topology_ref_history: A history of each topology | |
115 | * reference/dereference. See CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS. | |
116 | */ | |
117 | struct drm_dp_mst_topology_ref_history topology_ref_history; | |
118 | #endif | |
119 | ||
ad7f8a1f DA |
120 | u8 port_num; |
121 | bool input; | |
122 | bool mcs; | |
123 | bool ddps; | |
124 | u8 pdt; | |
125 | bool ldps; | |
126 | u8 dpcd_rev; | |
127 | u8 num_sdp_streams; | |
128 | u8 num_sdp_stream_sinks; | |
fcf46380 | 129 | uint16_t full_pbn; |
ad7f8a1f | 130 | struct list_head next; |
14692a36 LP |
131 | /** |
132 | * @mstb: the branch device connected to this port, if there is one. | |
133 | * This should be considered protected for reading by | |
134 | * &drm_dp_mst_topology_mgr.lock. There are two exceptions to this: | |
135 | * &drm_dp_mst_topology_mgr.up_req_work and | |
136 | * &drm_dp_mst_topology_mgr.work, which do not grab | |
137 | * &drm_dp_mst_topology_mgr.lock during reads but are the only | |
138 | * updaters of this list and are protected from writing concurrently | |
139 | * by &drm_dp_mst_topology_mgr.probe_lock. | |
140 | */ | |
141 | struct drm_dp_mst_branch *mstb; | |
ad7f8a1f DA |
142 | struct drm_dp_aux aux; /* i2c bus for this port? */ |
143 | struct drm_dp_mst_branch *parent; | |
144 | ||
145 | struct drm_dp_vcpi vcpi; | |
146 | struct drm_connector *connector; | |
147 | struct drm_dp_mst_topology_mgr *mgr; | |
c6a0aed4 | 148 | |
132d49d7 DV |
149 | /** |
150 | * @cached_edid: for DP logical ports - make tiling work by ensuring | |
151 | * that the EDID for all connectors is read immediately. | |
152 | */ | |
153 | struct edid *cached_edid; | |
154 | /** | |
155 | * @has_audio: Tracks whether the sink connector to this port is | |
156 | * audio-capable. | |
157 | */ | |
ef8f9bea | 158 | bool has_audio; |
a3c2b0ff | 159 | |
6c0ac4d5 SR |
160 | /** |
161 | * @fec_capable: bool indicating if FEC can be supported up to that | |
162 | * point in the MST topology. | |
163 | */ | |
a3c2b0ff | 164 | bool fec_capable; |
ad7f8a1f DA |
165 | }; |
166 | ||
fbc821c4 SP |
167 | /* sideband msg header - not bit struct */ |
168 | struct drm_dp_sideband_msg_hdr { | |
169 | u8 lct; | |
170 | u8 lcr; | |
171 | u8 rad[8]; | |
172 | bool broadcast; | |
173 | bool path_msg; | |
174 | u8 msg_len; | |
175 | bool somt; | |
176 | bool eomt; | |
177 | bool seqno; | |
178 | }; | |
179 | ||
180 | struct drm_dp_sideband_msg_rx { | |
181 | u8 chunk[48]; | |
182 | u8 msg[256]; | |
183 | u8 curchunk_len; | |
184 | u8 curchunk_idx; /* chunk we are parsing now */ | |
185 | u8 curchunk_hdrlen; | |
186 | u8 curlen; /* total length of the msg */ | |
187 | bool have_somt; | |
188 | bool have_eomt; | |
189 | struct drm_dp_sideband_msg_hdr initial_hdr; | |
190 | }; | |
191 | ||
ad7f8a1f DA |
192 | /** |
193 | * struct drm_dp_mst_branch - MST branch device. | |
ad7f8a1f DA |
194 | * @rad: Relative Address to talk to this branch device. |
195 | * @lct: Link count total to talk to this branch device. | |
196 | * @num_ports: number of ports on the branch. | |
ad7f8a1f DA |
197 | * @port_parent: pointer to the port parent, NULL if toplevel. |
198 | * @mgr: topology manager for this branch device. | |
ad7f8a1f | 199 | * @link_address_sent: if a link address message has been sent to this device yet. |
5e93b820 HW |
200 | * @guid: guid for DP 1.2 branch device. port under this branch can be |
201 | * identified by port #. | |
ad7f8a1f DA |
202 | * |
203 | * This structure represents an MST branch device, there is one | |
5e93b820 HW |
204 | * primary branch device at the root, along with any other branches connected |
205 | * to downstream port of parent branches. | |
ad7f8a1f DA |
206 | */ |
207 | struct drm_dp_mst_branch { | |
ebcc0e6b LP |
208 | /** |
209 | * @topology_kref: refcount for this branch device's lifetime in the | |
210 | * topology, only the DP MST helpers should need to touch this | |
211 | */ | |
212 | struct kref topology_kref; | |
213 | ||
214 | /** | |
215 | * @malloc_kref: refcount for the memory allocation containing this | |
216 | * structure. See drm_dp_mst_get_mstb_malloc() and | |
217 | * drm_dp_mst_put_mstb_malloc(). | |
218 | */ | |
219 | struct kref malloc_kref; | |
220 | ||
12a280c7 LP |
221 | #if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS) |
222 | /** | |
223 | * @topology_ref_history: A history of each topology | |
224 | * reference/dereference. See CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS. | |
225 | */ | |
226 | struct drm_dp_mst_topology_ref_history topology_ref_history; | |
227 | #endif | |
228 | ||
7cb12d48 LP |
229 | /** |
230 | * @destroy_next: linked-list entry used by | |
231 | * drm_dp_delayed_destroy_work() | |
232 | */ | |
233 | struct list_head destroy_next; | |
234 | ||
ad7f8a1f DA |
235 | u8 rad[8]; |
236 | u8 lct; | |
237 | int num_ports; | |
238 | ||
14692a36 LP |
239 | /** |
240 | * @ports: the list of ports on this branch device. This should be | |
241 | * considered protected for reading by &drm_dp_mst_topology_mgr.lock. | |
242 | * There are two exceptions to this: | |
243 | * &drm_dp_mst_topology_mgr.up_req_work and | |
244 | * &drm_dp_mst_topology_mgr.work, which do not grab | |
245 | * &drm_dp_mst_topology_mgr.lock during reads but are the only | |
246 | * updaters of this list and are protected from updating the list | |
247 | * concurrently by @drm_dp_mst_topology_mgr.probe_lock | |
248 | */ | |
ad7f8a1f DA |
249 | struct list_head ports; |
250 | ||
ad7f8a1f DA |
251 | struct drm_dp_mst_port *port_parent; |
252 | struct drm_dp_mst_topology_mgr *mgr; | |
253 | ||
ad7f8a1f | 254 | bool link_address_sent; |
5e93b820 HW |
255 | |
256 | /* global unique identifier to identify branch devices */ | |
257 | u8 guid[16]; | |
ad7f8a1f DA |
258 | }; |
259 | ||
260 | ||
ad7f8a1f DA |
261 | struct drm_dp_nak_reply { |
262 | u8 guid[16]; | |
263 | u8 reason; | |
264 | u8 nak_data; | |
265 | }; | |
266 | ||
267 | struct drm_dp_link_address_ack_reply { | |
268 | u8 guid[16]; | |
269 | u8 nports; | |
270 | struct drm_dp_link_addr_reply_port { | |
271 | bool input_port; | |
272 | u8 peer_device_type; | |
273 | u8 port_number; | |
274 | bool mcs; | |
275 | bool ddps; | |
276 | bool legacy_device_plug_status; | |
277 | u8 dpcd_revision; | |
278 | u8 peer_guid[16]; | |
279 | u8 num_sdp_streams; | |
280 | u8 num_sdp_stream_sinks; | |
281 | } ports[16]; | |
282 | }; | |
283 | ||
284 | struct drm_dp_remote_dpcd_read_ack_reply { | |
285 | u8 port_number; | |
286 | u8 num_bytes; | |
287 | u8 bytes[255]; | |
288 | }; | |
289 | ||
290 | struct drm_dp_remote_dpcd_write_ack_reply { | |
291 | u8 port_number; | |
292 | }; | |
293 | ||
294 | struct drm_dp_remote_dpcd_write_nak_reply { | |
295 | u8 port_number; | |
296 | u8 reason; | |
297 | u8 bytes_written_before_failure; | |
298 | }; | |
299 | ||
300 | struct drm_dp_remote_i2c_read_ack_reply { | |
301 | u8 port_number; | |
302 | u8 num_bytes; | |
303 | u8 bytes[255]; | |
304 | }; | |
305 | ||
306 | struct drm_dp_remote_i2c_read_nak_reply { | |
307 | u8 port_number; | |
308 | u8 nak_reason; | |
309 | u8 i2c_nak_transaction; | |
310 | }; | |
311 | ||
312 | struct drm_dp_remote_i2c_write_ack_reply { | |
313 | u8 port_number; | |
314 | }; | |
315 | ||
316 | ||
ef8f9bea | 317 | #define DRM_DP_MAX_SDP_STREAMS 16 |
ad7f8a1f DA |
318 | struct drm_dp_allocate_payload { |
319 | u8 port_number; | |
320 | u8 number_sdp_streams; | |
321 | u8 vcpi; | |
322 | u16 pbn; | |
ef8f9bea | 323 | u8 sdp_stream_sink[DRM_DP_MAX_SDP_STREAMS]; |
ad7f8a1f DA |
324 | }; |
325 | ||
326 | struct drm_dp_allocate_payload_ack_reply { | |
327 | u8 port_number; | |
328 | u8 vcpi; | |
329 | u16 allocated_pbn; | |
330 | }; | |
331 | ||
332 | struct drm_dp_connection_status_notify { | |
333 | u8 guid[16]; | |
334 | u8 port_number; | |
335 | bool legacy_device_plug_status; | |
336 | bool displayport_device_plug_status; | |
337 | bool message_capability_status; | |
338 | bool input_port; | |
339 | u8 peer_device_type; | |
340 | }; | |
341 | ||
342 | struct drm_dp_remote_dpcd_read { | |
343 | u8 port_number; | |
344 | u32 dpcd_address; | |
345 | u8 num_bytes; | |
346 | }; | |
347 | ||
348 | struct drm_dp_remote_dpcd_write { | |
349 | u8 port_number; | |
350 | u32 dpcd_address; | |
351 | u8 num_bytes; | |
352 | u8 *bytes; | |
353 | }; | |
354 | ||
ae491542 | 355 | #define DP_REMOTE_I2C_READ_MAX_TRANSACTIONS 4 |
ad7f8a1f DA |
356 | struct drm_dp_remote_i2c_read { |
357 | u8 num_transactions; | |
358 | u8 port_number; | |
2f015ec6 | 359 | struct drm_dp_remote_i2c_read_tx { |
ad7f8a1f DA |
360 | u8 i2c_dev_id; |
361 | u8 num_bytes; | |
362 | u8 *bytes; | |
363 | u8 no_stop_bit; | |
364 | u8 i2c_transaction_delay; | |
ae491542 | 365 | } transactions[DP_REMOTE_I2C_READ_MAX_TRANSACTIONS]; |
ad7f8a1f DA |
366 | u8 read_i2c_device_id; |
367 | u8 num_bytes_read; | |
368 | }; | |
369 | ||
370 | struct drm_dp_remote_i2c_write { | |
371 | u8 port_number; | |
372 | u8 write_i2c_device_id; | |
373 | u8 num_bytes; | |
374 | u8 *bytes; | |
375 | }; | |
376 | ||
377 | /* this covers ENUM_RESOURCES, POWER_DOWN_PHY, POWER_UP_PHY */ | |
378 | struct drm_dp_port_number_req { | |
379 | u8 port_number; | |
380 | }; | |
381 | ||
382 | struct drm_dp_enum_path_resources_ack_reply { | |
383 | u8 port_number; | |
a3c2b0ff | 384 | bool fec_capable; |
ad7f8a1f DA |
385 | u16 full_payload_bw_number; |
386 | u16 avail_payload_bw_number; | |
387 | }; | |
388 | ||
389 | /* covers POWER_DOWN_PHY, POWER_UP_PHY */ | |
390 | struct drm_dp_port_number_rep { | |
391 | u8 port_number; | |
392 | }; | |
393 | ||
394 | struct drm_dp_query_payload { | |
395 | u8 port_number; | |
396 | u8 vcpi; | |
397 | }; | |
398 | ||
399 | struct drm_dp_resource_status_notify { | |
400 | u8 port_number; | |
401 | u8 guid[16]; | |
402 | u16 available_pbn; | |
403 | }; | |
404 | ||
405 | struct drm_dp_query_payload_ack_reply { | |
406 | u8 port_number; | |
268de653 | 407 | u16 allocated_pbn; |
ad7f8a1f DA |
408 | }; |
409 | ||
410 | struct drm_dp_sideband_msg_req_body { | |
411 | u8 req_type; | |
412 | union ack_req { | |
413 | struct drm_dp_connection_status_notify conn_stat; | |
414 | struct drm_dp_port_number_req port_num; | |
415 | struct drm_dp_resource_status_notify resource_stat; | |
416 | ||
417 | struct drm_dp_query_payload query_payload; | |
418 | struct drm_dp_allocate_payload allocate_payload; | |
419 | ||
420 | struct drm_dp_remote_dpcd_read dpcd_read; | |
421 | struct drm_dp_remote_dpcd_write dpcd_write; | |
422 | ||
423 | struct drm_dp_remote_i2c_read i2c_read; | |
424 | struct drm_dp_remote_i2c_write i2c_write; | |
425 | } u; | |
426 | }; | |
427 | ||
428 | struct drm_dp_sideband_msg_reply_body { | |
429 | u8 reply_type; | |
430 | u8 req_type; | |
431 | union ack_replies { | |
432 | struct drm_dp_nak_reply nak; | |
433 | struct drm_dp_link_address_ack_reply link_addr; | |
434 | struct drm_dp_port_number_rep port_number; | |
435 | ||
436 | struct drm_dp_enum_path_resources_ack_reply path_resources; | |
437 | struct drm_dp_allocate_payload_ack_reply allocate_payload; | |
438 | struct drm_dp_query_payload_ack_reply query_payload; | |
439 | ||
440 | struct drm_dp_remote_dpcd_read_ack_reply remote_dpcd_read_ack; | |
441 | struct drm_dp_remote_dpcd_write_ack_reply remote_dpcd_write_ack; | |
442 | struct drm_dp_remote_dpcd_write_nak_reply remote_dpcd_write_nack; | |
443 | ||
444 | struct drm_dp_remote_i2c_read_ack_reply remote_i2c_read_ack; | |
445 | struct drm_dp_remote_i2c_read_nak_reply remote_i2c_read_nack; | |
446 | struct drm_dp_remote_i2c_write_ack_reply remote_i2c_write_ack; | |
447 | } u; | |
448 | }; | |
449 | ||
450 | /* msg is queued to be put into a slot */ | |
451 | #define DRM_DP_SIDEBAND_TX_QUEUED 0 | |
452 | /* msg has started transmitting on a slot - still on msgq */ | |
453 | #define DRM_DP_SIDEBAND_TX_START_SEND 1 | |
454 | /* msg has finished transmitting on a slot - removed from msgq only in slot */ | |
455 | #define DRM_DP_SIDEBAND_TX_SENT 2 | |
456 | /* msg has received a response - removed from slot */ | |
457 | #define DRM_DP_SIDEBAND_TX_RX 3 | |
458 | #define DRM_DP_SIDEBAND_TX_TIMEOUT 4 | |
459 | ||
460 | struct drm_dp_sideband_msg_tx { | |
461 | u8 msg[256]; | |
462 | u8 chunk[48]; | |
463 | u8 cur_offset; | |
464 | u8 cur_len; | |
465 | struct drm_dp_mst_branch *dst; | |
466 | struct list_head next; | |
467 | int seqno; | |
468 | int state; | |
469 | bool path_msg; | |
470 | struct drm_dp_sideband_msg_reply_body reply; | |
471 | }; | |
472 | ||
/* sideband msg handler */
struct drm_dp_mst_topology_mgr;
struct drm_dp_mst_topology_cbs {
	/* create a connector for a port */
	struct drm_connector *(*add_connector)(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, const char *path);
	/*
	 * Checks for any pending MST interrupts, passing them to MST core for
	 * processing, the same way an HPD IRQ pulse handler would do this.
	 * If provided MST core calls this callback from a poll-waiting loop
	 * when waiting for MST down message replies. The driver is expected
	 * to guard against a race between this callback and the driver's HPD
	 * IRQ pulse handler.
	 */
	void (*poll_hpd_irq)(struct drm_dp_mst_topology_mgr *mgr);
};
488 | ||
#define DP_MAX_PAYLOAD (sizeof(unsigned long) * 8)

#define DP_PAYLOAD_LOCAL 1
#define DP_PAYLOAD_REMOTE 2
#define DP_PAYLOAD_DELETE_LOCAL 3

/* one VC payload table entry; @payload_state is a DP_PAYLOAD_* value */
struct drm_dp_payload {
	int payload_state;
	int start_slot;
	int num_slots;
	int vcpi;
};
501 | ||
a4370c77 VS |
502 | #define to_dp_mst_topology_state(x) container_of(x, struct drm_dp_mst_topology_state, base) |
503 | ||
eceae147 LP |
504 | struct drm_dp_vcpi_allocation { |
505 | struct drm_dp_mst_port *port; | |
506 | int vcpi; | |
cd82d82c | 507 | int pbn; |
8afb7e6a | 508 | bool dsc_enabled; |
eceae147 LP |
509 | struct list_head next; |
510 | }; | |
511 | ||
3f3353b7 | 512 | struct drm_dp_mst_topology_state { |
a4370c77 | 513 | struct drm_private_state base; |
eceae147 | 514 | struct list_head vcpis; |
3f3353b7 PD |
515 | struct drm_dp_mst_topology_mgr *mgr; |
516 | }; | |
517 | ||
a4370c77 VS |
518 | #define to_dp_mst_topology_mgr(x) container_of(x, struct drm_dp_mst_topology_mgr, base) |
519 | ||
ad7f8a1f DA |
520 | /** |
521 | * struct drm_dp_mst_topology_mgr - DisplayPort MST manager | |
ad7f8a1f DA |
522 | * |
523 | * This struct represents the toplevel displayport MST topology manager. | |
524 | * There should be one instance of this for every MST capable DP connector | |
525 | * on the GPU. | |
526 | */ | |
527 | struct drm_dp_mst_topology_mgr { | |
a4370c77 VS |
528 | /** |
529 | * @base: Base private object for atomic | |
530 | */ | |
531 | struct drm_private_obj base; | |
532 | ||
132d49d7 DV |
533 | /** |
534 | * @dev: device pointer for adding i2c devices etc. | |
535 | */ | |
7b0a89a6 | 536 | struct drm_device *dev; |
132d49d7 DV |
537 | /** |
538 | * @cbs: callbacks for connector addition and destruction. | |
539 | */ | |
69a0f89c | 540 | const struct drm_dp_mst_topology_cbs *cbs; |
132d49d7 DV |
541 | /** |
542 | * @max_dpcd_transaction_bytes: maximum number of bytes to read/write | |
543 | * in one go. | |
544 | */ | |
ad7f8a1f | 545 | int max_dpcd_transaction_bytes; |
132d49d7 DV |
546 | /** |
547 | * @aux: AUX channel for the DP MST connector this topolgy mgr is | |
548 | * controlling. | |
549 | */ | |
550 | struct drm_dp_aux *aux; | |
551 | /** | |
552 | * @max_payloads: maximum number of payloads the GPU can generate. | |
553 | */ | |
ad7f8a1f | 554 | int max_payloads; |
132d49d7 DV |
555 | /** |
556 | * @conn_base_id: DRM connector ID this mgr is connected to. Only used | |
557 | * to build the MST connector path value. | |
558 | */ | |
ad7f8a1f DA |
559 | int conn_base_id; |
560 | ||
132d49d7 | 561 | /** |
85783369 | 562 | * @up_req_recv: Message receiver state for up requests. |
132d49d7 | 563 | */ |
ad7f8a1f DA |
564 | struct drm_dp_sideband_msg_rx up_req_recv; |
565 | ||
d308a881 LP |
566 | /** |
567 | * @down_rep_recv: Message receiver state for replies to down | |
568 | * requests. | |
569 | */ | |
570 | struct drm_dp_sideband_msg_rx down_rep_recv; | |
571 | ||
132d49d7 | 572 | /** |
f7948907 SP |
573 | * @lock: protects @mst_state, @mst_primary, @dpcd, and |
574 | * @payload_id_table_cleared. | |
132d49d7 DV |
575 | */ |
576 | struct mutex lock; | |
ad7f8a1f | 577 | |
14692a36 LP |
578 | /** |
579 | * @probe_lock: Prevents @work and @up_req_work, the only writers of | |
580 | * &drm_dp_mst_port.mstb and &drm_dp_mst_branch.ports, from racing | |
581 | * while they update the topology. | |
582 | */ | |
583 | struct mutex probe_lock; | |
584 | ||
132d49d7 DV |
585 | /** |
586 | * @mst_state: If this manager is enabled for an MST capable port. False | |
587 | * if no MST sink/branch devices is connected. | |
588 | */ | |
f7948907 SP |
589 | bool mst_state : 1; |
590 | ||
591 | /** | |
592 | * @payload_id_table_cleared: Whether or not we've cleared the payload | |
593 | * ID table for @mst_primary. Protected by @lock. | |
594 | */ | |
595 | bool payload_id_table_cleared : 1; | |
596 | ||
132d49d7 DV |
597 | /** |
598 | * @mst_primary: Pointer to the primary/first branch device. | |
599 | */ | |
ad7f8a1f | 600 | struct drm_dp_mst_branch *mst_primary; |
5e93b820 | 601 | |
132d49d7 DV |
602 | /** |
603 | * @dpcd: Cache of DPCD for primary port. | |
604 | */ | |
ad7f8a1f | 605 | u8 dpcd[DP_RECEIVER_CAP_SIZE]; |
132d49d7 DV |
606 | /** |
607 | * @sink_count: Sink count from DEVICE_SERVICE_IRQ_VECTOR_ESI0. | |
608 | */ | |
ad7f8a1f | 609 | u8 sink_count; |
132d49d7 DV |
610 | /** |
611 | * @pbn_div: PBN to slots divisor. | |
612 | */ | |
ad7f8a1f | 613 | int pbn_div; |
a538d613 | 614 | |
3f3353b7 PD |
615 | /** |
616 | * @funcs: Atomic helper callbacks | |
617 | */ | |
618 | const struct drm_private_state_funcs *funcs; | |
619 | ||
132d49d7 | 620 | /** |
d308a881 | 621 | * @qlock: protects @tx_msg_downq and &drm_dp_sideband_msg_tx.state |
132d49d7 | 622 | */ |
ad7f8a1f | 623 | struct mutex qlock; |
5a64967a | 624 | |
132d49d7 | 625 | /** |
d308a881 | 626 | * @tx_msg_downq: List of pending down requests |
132d49d7 | 627 | */ |
ad7f8a1f | 628 | struct list_head tx_msg_downq; |
ad7f8a1f | 629 | |
132d49d7 DV |
630 | /** |
631 | * @payload_lock: Protect payload information. | |
632 | */ | |
ad7f8a1f | 633 | struct mutex payload_lock; |
132d49d7 DV |
634 | /** |
635 | * @proposed_vcpis: Array of pointers for the new VCPI allocation. The | |
a727fe8f LP |
636 | * VCPI structure itself is &drm_dp_mst_port.vcpi, and the size of |
637 | * this array is determined by @max_payloads. | |
132d49d7 | 638 | */ |
ad7f8a1f | 639 | struct drm_dp_vcpi **proposed_vcpis; |
132d49d7 | 640 | /** |
a727fe8f LP |
641 | * @payloads: Array of payloads. The size of this array is determined |
642 | * by @max_payloads. | |
132d49d7 | 643 | */ |
ad7f8a1f | 644 | struct drm_dp_payload *payloads; |
132d49d7 DV |
645 | /** |
646 | * @payload_mask: Elements of @payloads actually in use. Since | |
647 | * reallocation of active outputs isn't possible gaps can be created by | |
648 | * disabling outputs out of order compared to how they've been enabled. | |
649 | */ | |
ad7f8a1f | 650 | unsigned long payload_mask; |
132d49d7 DV |
651 | /** |
652 | * @vcpi_mask: Similar to @payload_mask, but for @proposed_vcpis. | |
653 | */ | |
dfda0df3 | 654 | unsigned long vcpi_mask; |
ad7f8a1f | 655 | |
132d49d7 DV |
656 | /** |
657 | * @tx_waitq: Wait to queue stall for the tx worker. | |
658 | */ | |
ad7f8a1f | 659 | wait_queue_head_t tx_waitq; |
132d49d7 DV |
660 | /** |
661 | * @work: Probe work. | |
662 | */ | |
ad7f8a1f | 663 | struct work_struct work; |
132d49d7 DV |
664 | /** |
665 | * @tx_work: Sideband transmit worker. This can nest within the main | |
666 | * @work worker for each transaction @work launches. | |
667 | */ | |
ad7f8a1f | 668 | struct work_struct tx_work; |
6b8eeca6 | 669 | |
132d49d7 | 670 | /** |
7cb12d48 LP |
671 | * @destroy_port_list: List of to be destroyed connectors. |
672 | */ | |
673 | struct list_head destroy_port_list; | |
674 | /** | |
675 | * @destroy_branch_device_list: List of to be destroyed branch | |
676 | * devices. | |
132d49d7 | 677 | */ |
7cb12d48 | 678 | struct list_head destroy_branch_device_list; |
132d49d7 | 679 | /** |
7cb12d48 LP |
680 | * @delayed_destroy_lock: Protects @destroy_port_list and |
681 | * @destroy_branch_device_list. | |
132d49d7 | 682 | */ |
7cb12d48 | 683 | struct mutex delayed_destroy_lock; |
72822c3b ID |
684 | |
685 | /** | |
686 | * @delayed_destroy_wq: Workqueue used for delayed_destroy_work items. | |
687 | * A dedicated WQ makes it possible to drain any requeued work items | |
688 | * on it. | |
689 | */ | |
690 | struct workqueue_struct *delayed_destroy_wq; | |
691 | ||
132d49d7 | 692 | /** |
7cb12d48 LP |
693 | * @delayed_destroy_work: Work item to destroy MST port and branch |
694 | * devices, needed to avoid locking inversion. | |
132d49d7 | 695 | */ |
7cb12d48 | 696 | struct work_struct delayed_destroy_work; |
9408cc94 LP |
697 | |
698 | /** | |
699 | * @up_req_list: List of pending up requests from the topology that | |
700 | * need to be processed, in chronological order. | |
701 | */ | |
702 | struct list_head up_req_list; | |
703 | /** | |
704 | * @up_req_lock: Protects @up_req_list | |
705 | */ | |
706 | struct mutex up_req_lock; | |
707 | /** | |
708 | * @up_req_work: Work item to process up requests received from the | |
709 | * topology. Needed to avoid blocking hotplug handling and sideband | |
710 | * transmissions. | |
711 | */ | |
712 | struct work_struct up_req_work; | |
12a280c7 LP |
713 | |
714 | #if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS) | |
715 | /** | |
716 | * @topology_ref_history_lock: protects | |
717 | * &drm_dp_mst_port.topology_ref_history and | |
718 | * &drm_dp_mst_branch.topology_ref_history. | |
719 | */ | |
720 | struct mutex topology_ref_history_lock; | |
721 | #endif | |
ad7f8a1f DA |
722 | }; |
723 | ||
7b0a89a6 DP |
724 | int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr, |
725 | struct drm_device *dev, struct drm_dp_aux *aux, | |
726 | int max_dpcd_transaction_bytes, | |
727 | int max_payloads, int conn_base_id); | |
ad7f8a1f DA |
728 | |
729 | void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr); | |
730 | ||
731 | ||
732 | int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state); | |
733 | ||
734 | ||
735 | int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled); | |
736 | ||
737 | ||
3f9b3f02 LP |
738 | int |
739 | drm_dp_mst_detect_port(struct drm_connector *connector, | |
740 | struct drm_modeset_acquire_ctx *ctx, | |
741 | struct drm_dp_mst_topology_mgr *mgr, | |
742 | struct drm_dp_mst_port *port); | |
ad7f8a1f DA |
743 | |
744 | struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port); | |
745 | ||
746 | ||
dc48529f | 747 | int drm_dp_calc_pbn_mode(int clock, int bpp, bool dsc); |
ad7f8a1f | 748 | |
1e797f55 PD |
749 | bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, |
750 | struct drm_dp_mst_port *port, int pbn, int slots); | |
ad7f8a1f | 751 | |
87f5942d DA |
752 | int drm_dp_mst_get_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port); |
753 | ||
ad7f8a1f DA |
754 | |
755 | void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port); | |
756 | ||
757 | ||
758 | void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, | |
759 | struct drm_dp_mst_port *port); | |
760 | ||
761 | ||
762 | int drm_dp_find_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, | |
763 | int pbn); | |
764 | ||
765 | ||
766 | int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr); | |
767 | ||
768 | ||
769 | int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr); | |
770 | ||
771 | int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr); | |
772 | ||
773 | void drm_dp_mst_dump_topology(struct seq_file *m, | |
774 | struct drm_dp_mst_topology_mgr *mgr); | |
775 | ||
776 | void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr); | |
c235316d | 777 | int __must_check |
6f85f738 LP |
778 | drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr, |
779 | bool sync); | |
562836a2 VS |
780 | |
781 | ssize_t drm_dp_mst_dpcd_read(struct drm_dp_aux *aux, | |
782 | unsigned int offset, void *buffer, size_t size); | |
783 | ssize_t drm_dp_mst_dpcd_write(struct drm_dp_aux *aux, | |
784 | unsigned int offset, void *buffer, size_t size); | |
785 | ||
786 | int drm_dp_mst_connector_late_register(struct drm_connector *connector, | |
787 | struct drm_dp_mst_port *port); | |
788 | void drm_dp_mst_connector_early_unregister(struct drm_connector *connector, | |
789 | struct drm_dp_mst_port *port); | |
790 | ||
3f3353b7 PD |
791 | struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_atomic_state *state, |
792 | struct drm_dp_mst_topology_mgr *mgr); | |
eceae147 LP |
793 | int __must_check |
794 | drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state, | |
795 | struct drm_dp_mst_topology_mgr *mgr, | |
1c6c1cb5 ML |
796 | struct drm_dp_mst_port *port, int pbn, |
797 | int pbn_div); | |
8afb7e6a ML |
798 | int drm_dp_mst_atomic_enable_dsc(struct drm_atomic_state *state, |
799 | struct drm_dp_mst_port *port, | |
800 | int pbn, int pbn_div, | |
801 | bool enable); | |
eceae147 | 802 | int __must_check |
8ec04671 ML |
803 | drm_dp_mst_add_affected_dsc_crtcs(struct drm_atomic_state *state, |
804 | struct drm_dp_mst_topology_mgr *mgr); | |
eceae147 LP |
805 | int __must_check |
806 | drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state, | |
807 | struct drm_dp_mst_topology_mgr *mgr, | |
808 | struct drm_dp_mst_port *port); | |
0bb9c2b2 DP |
809 | int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr, |
810 | struct drm_dp_mst_port *port, bool power_up); | |
eceae147 | 811 | int __must_check drm_dp_mst_atomic_check(struct drm_atomic_state *state); |
3f3353b7 | 812 | |
ebcc0e6b LP |
813 | void drm_dp_mst_get_port_malloc(struct drm_dp_mst_port *port); |
814 | void drm_dp_mst_put_port_malloc(struct drm_dp_mst_port *port); | |
815 | ||
c2bc1b6e DF |
816 | struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port); |
817 | ||
bea5c38f LP |
818 | extern const struct drm_private_state_funcs drm_dp_mst_topology_state_funcs; |
819 | ||
820 | /** | |
821 | * __drm_dp_mst_state_iter_get - private atomic state iterator function for | |
822 | * macro-internal use | |
823 | * @state: &struct drm_atomic_state pointer | |
824 | * @mgr: pointer to the &struct drm_dp_mst_topology_mgr iteration cursor | |
825 | * @old_state: optional pointer to the old &struct drm_dp_mst_topology_state | |
826 | * iteration cursor | |
827 | * @new_state: optional pointer to the new &struct drm_dp_mst_topology_state | |
828 | * iteration cursor | |
829 | * @i: int iteration cursor, for macro-internal use | |
830 | * | |
831 | * Used by for_each_oldnew_mst_mgr_in_state(), | |
832 | * for_each_old_mst_mgr_in_state(), and for_each_new_mst_mgr_in_state(). Don't | |
833 | * call this directly. | |
834 | * | |
835 | * Returns: | |
836 | * True if the current &struct drm_private_obj is a &struct | |
837 | * drm_dp_mst_topology_mgr, false otherwise. | |
838 | */ | |
839 | static inline bool | |
840 | __drm_dp_mst_state_iter_get(struct drm_atomic_state *state, | |
841 | struct drm_dp_mst_topology_mgr **mgr, | |
842 | struct drm_dp_mst_topology_state **old_state, | |
843 | struct drm_dp_mst_topology_state **new_state, | |
844 | int i) | |
845 | { | |
846 | struct __drm_private_objs_state *objs_state = &state->private_objs[i]; | |
847 | ||
848 | if (objs_state->ptr->funcs != &drm_dp_mst_topology_state_funcs) | |
849 | return false; | |
850 | ||
851 | *mgr = to_dp_mst_topology_mgr(objs_state->ptr); | |
852 | if (old_state) | |
853 | *old_state = to_dp_mst_topology_state(objs_state->old_state); | |
854 | if (new_state) | |
855 | *new_state = to_dp_mst_topology_state(objs_state->new_state); | |
856 | ||
857 | return true; | |
858 | } | |
859 | ||
/**
 * for_each_oldnew_mst_mgr_in_state - iterate over all DP MST topology
 * managers in an atomic update
 * @__state: &struct drm_atomic_state pointer
 * @mgr: &struct drm_dp_mst_topology_mgr iteration cursor
 * @old_state: &struct drm_dp_mst_topology_state iteration cursor for the old
 * state
 * @new_state: &struct drm_dp_mst_topology_state iteration cursor for the new
 * state
 * @__i: int iteration cursor, for macro-internal use
 *
 * This iterates over all DRM DP MST topology managers in an atomic update,
 * tracking both old and new state. This is useful in places where the state
 * delta needs to be considered, for example in atomic check functions.
 * Private objects that are not MST topology managers are skipped.
 */
#define for_each_oldnew_mst_mgr_in_state(__state, mgr, old_state, new_state, __i) \
	for ((__i) = 0; (__i) < (__state)->num_private_objs; (__i)++) \
		for_each_if(__drm_dp_mst_state_iter_get((__state), &(mgr), &(old_state), &(new_state), (__i)))
878 | ||
/**
 * for_each_old_mst_mgr_in_state - iterate over all DP MST topology managers
 * in an atomic update
 * @__state: &struct drm_atomic_state pointer
 * @mgr: &struct drm_dp_mst_topology_mgr iteration cursor
 * @old_state: &struct drm_dp_mst_topology_state iteration cursor for the old
 * state
 * @__i: int iteration cursor, for macro-internal use
 *
 * This iterates over all DRM DP MST topology managers in an atomic update,
 * tracking only the old state. This is useful in disable functions, where we
 * need the old state the hardware is still in. Private objects that are not
 * MST topology managers are skipped.
 */
#define for_each_old_mst_mgr_in_state(__state, mgr, old_state, __i) \
	for ((__i) = 0; (__i) < (__state)->num_private_objs; (__i)++) \
		for_each_if(__drm_dp_mst_state_iter_get((__state), &(mgr), &(old_state), NULL, (__i)))
895 | ||
/**
 * for_each_new_mst_mgr_in_state - iterate over all DP MST topology managers
 * in an atomic update
 * @__state: &struct drm_atomic_state pointer
 * @mgr: &struct drm_dp_mst_topology_mgr iteration cursor
 * @new_state: &struct drm_dp_mst_topology_state iteration cursor for the new
 * state
 * @__i: int iteration cursor, for macro-internal use
 *
 * This iterates over all DRM DP MST topology managers in an atomic update,
 * tracking only the new state. This is useful in enable functions, where we
 * need the new state the hardware should be in when the atomic commit
 * operation has completed. Private objects that are not MST topology
 * managers are skipped.
 */
#define for_each_new_mst_mgr_in_state(__state, mgr, new_state, __i) \
	for ((__i) = 0; (__i) < (__state)->num_private_objs; (__i)++) \
		for_each_if(__drm_dp_mst_state_iter_get((__state), &(mgr), NULL, &(new_state), (__i)))
913 | ||
ad7f8a1f | 914 | #endif |