/*
 * Copyright © 2014 Red Hat.
 *
 * Permission to use, copy, modify, distribute, and sell this software and its
 * documentation for any purpose is hereby granted without fee, provided that
 * the above copyright notice appear in all copies and that both that copyright
 * notice and this permission notice appear in supporting documentation, and
 * that the name of the copyright holders not be used in advertising or
 * publicity pertaining to distribution of the software without specific,
 * written prior permission. The copyright holders make no representations
 * about the suitability of this software for any purpose. It is provided "as
 * is" without express or implied warranty.
 *
 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
 * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
 * OF THIS SOFTWARE.
 */
#ifndef _DRM_DP_MST_HELPER_H_
#define _DRM_DP_MST_HELPER_H_

#include <linux/types.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_atomic.h>

struct drm_dp_mst_branch;

/**
 * struct drm_dp_vcpi - Virtual Channel Payload Identifier
 * @vcpi: Virtual channel ID.
 * @pbn: Payload Bandwidth Number for this channel
 * @aligned_pbn: PBN aligned with slot size
 * @num_slots: number of slots for this PBN
 */
struct drm_dp_vcpi {
	int vcpi;
	int pbn;
	int aligned_pbn;
	int num_slots;
};

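/*
 * Illustrative sketch (not part of the original header) of how a driver
 * typically ends up with a populated &drm_dp_vcpi: convert the mode's pixel
 * clock and bpp into a PBN value, turn that into a slot count, and ask the
 * topology manager for a VCPI.  "mgr", "port" and "adjusted_mode" are assumed
 * driver-side variables.
 *
 *	int pbn, slots;
 *
 *	pbn = drm_dp_calc_pbn_mode(adjusted_mode->clock, 24);
 *	slots = drm_dp_find_vcpi_slots(mgr, pbn);
 *	if (slots < 0)
 *		return slots;
 *	if (!drm_dp_mst_allocate_vcpi(mgr, port, pbn, slots))
 *		return -EINVAL;
 *
 * On success, port->vcpi holds the channel ID, PBN and slot count.
 */
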
/**
 * struct drm_dp_mst_port - MST port
 * @kref: reference count for this port.
 * @port_num: port number
 * @input: if this port is an input port.
 * @mcs: message capability status - DP 1.2 spec.
 * @ddps: DisplayPort Device Plug Status - DP 1.2
 * @pdt: Peer Device Type
 * @ldps: Legacy Device Plug Status
 * @dpcd_rev: DPCD revision of device on this port
 * @num_sdp_streams: Number of simultaneous streams
 * @num_sdp_stream_sinks: Number of stream sinks
 * @available_pbn: Available bandwidth for this port.
 * @next: link to next port on this branch device
 * @mstb: branch device attached below this port
 * @aux: i2c aux transport to talk to the device connected to this port.
 * @parent: branch device parent of this port
 * @vcpi: Virtual Channel Payload info for this port.
 * @connector: DRM connector this port is connected to.
 * @mgr: topology manager this port lives under.
 *
 * This structure represents an MST port endpoint on a device somewhere
 * in the MST topology.
 */
struct drm_dp_mst_port {
	struct kref kref;

	u8 port_num;
	bool input;
	bool mcs;
	bool ddps;
	u8 pdt;
	bool ldps;
	u8 dpcd_rev;
	u8 num_sdp_streams;
	u8 num_sdp_stream_sinks;
	uint16_t available_pbn;
	struct list_head next;
	struct drm_dp_mst_branch *mstb; /* pointer to an mstb if this port has one */
	struct drm_dp_aux aux; /* i2c bus for this port */
	struct drm_dp_mst_branch *parent;

	struct drm_dp_vcpi vcpi;
	struct drm_connector *connector;
	struct drm_dp_mst_topology_mgr *mgr;

	/**
	 * @cached_edid: for DP logical ports - make tiling work by ensuring
	 * that the EDID for all connectors is read immediately.
	 */
	struct edid *cached_edid;
	/**
	 * @has_audio: Tracks whether the sink connected to this port is
	 * audio-capable.
	 */
	bool has_audio;
};

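/*
 * Illustrative sketch (not part of the original header): a driver's connector
 * hooks usually query an MST port through the helpers declared later in this
 * file.  "connector", "mgr" and "port" are assumed driver-side variables.
 *
 *	enum drm_connector_status status;
 *	struct edid *edid = NULL;
 *
 *	status = drm_dp_mst_detect_port(connector, mgr, port);
 *	if (status == connector_status_connected)
 *		edid = drm_dp_mst_get_edid(connector, mgr, port);
 *
 * drm_dp_mst_port_has_audio(mgr, port) can then be used to decide whether to
 * expose audio for the sink behind this port.
 */
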
/**
 * struct drm_dp_mst_branch - MST branch device.
 * @kref: reference count for this branch device.
 * @rad: Relative Address to talk to this branch device.
 * @lct: Link count total to talk to this branch device.
 * @num_ports: number of ports on the branch.
 * @msg_slots: one bit per transmitted msg slot.
 * @ports: linked list of ports on this branch.
 * @port_parent: pointer to the parent port, NULL for the top-level branch device.
 * @mgr: topology manager for this branch device.
 * @tx_slots: transmission slots for this device.
 * @last_seqno: last sequence number used to talk to this device.
 * @link_address_sent: if a link address message has been sent to this device yet.
 * @guid: GUID for DP 1.2 branch device. Ports under this branch can be
 * identified by port number.
 *
 * This structure represents an MST branch device. There is one primary
 * branch device at the root, along with any other branches connected
 * to downstream ports of parent branches.
 */
struct drm_dp_mst_branch {
	struct kref kref;
	u8 rad[8];
	u8 lct;
	int num_ports;

	int msg_slots;
	struct list_head ports;

	/* list of tx ops queue for this port */
	struct drm_dp_mst_port *port_parent;
	struct drm_dp_mst_topology_mgr *mgr;

	/* slots are protected by mstb->mgr->qlock */
	struct drm_dp_sideband_msg_tx *tx_slots[2];
	int last_seqno;
	bool link_address_sent;

	/* global unique identifier to identify branch devices */
	u8 guid[16];
};

/* sideband msg header - not bit struct */
struct drm_dp_sideband_msg_hdr {
	u8 lct;
	u8 lcr;
	u8 rad[8];
	bool broadcast;
	bool path_msg;
	u8 msg_len;
	bool somt;
	bool eomt;
	bool seqno;
};

struct drm_dp_nak_reply {
	u8 guid[16];
	u8 reason;
	u8 nak_data;
};

struct drm_dp_link_address_ack_reply {
	u8 guid[16];
	u8 nports;
	struct drm_dp_link_addr_reply_port {
		bool input_port;
		u8 peer_device_type;
		u8 port_number;
		bool mcs;
		bool ddps;
		bool legacy_device_plug_status;
		u8 dpcd_revision;
		u8 peer_guid[16];
		u8 num_sdp_streams;
		u8 num_sdp_stream_sinks;
	} ports[16];
};

struct drm_dp_remote_dpcd_read_ack_reply {
	u8 port_number;
	u8 num_bytes;
	u8 bytes[255];
};

struct drm_dp_remote_dpcd_write_ack_reply {
	u8 port_number;
};

struct drm_dp_remote_dpcd_write_nak_reply {
	u8 port_number;
	u8 reason;
	u8 bytes_written_before_failure;
};

struct drm_dp_remote_i2c_read_ack_reply {
	u8 port_number;
	u8 num_bytes;
	u8 bytes[255];
};

struct drm_dp_remote_i2c_read_nak_reply {
	u8 port_number;
	u8 nak_reason;
	u8 i2c_nak_transaction;
};

struct drm_dp_remote_i2c_write_ack_reply {
	u8 port_number;
};

struct drm_dp_sideband_msg_rx {
	u8 chunk[48];
	u8 msg[256];
	u8 curchunk_len;
	u8 curchunk_idx; /* chunk we are parsing now */
	u8 curchunk_hdrlen;
	u8 curlen; /* total length of the msg */
	bool have_somt;
	bool have_eomt;
	struct drm_dp_sideband_msg_hdr initial_hdr;
};

#define DRM_DP_MAX_SDP_STREAMS 16
struct drm_dp_allocate_payload {
	u8 port_number;
	u8 number_sdp_streams;
	u8 vcpi;
	u16 pbn;
	u8 sdp_stream_sink[DRM_DP_MAX_SDP_STREAMS];
};

struct drm_dp_allocate_payload_ack_reply {
	u8 port_number;
	u8 vcpi;
	u16 allocated_pbn;
};

struct drm_dp_connection_status_notify {
	u8 guid[16];
	u8 port_number;
	bool legacy_device_plug_status;
	bool displayport_device_plug_status;
	bool message_capability_status;
	bool input_port;
	u8 peer_device_type;
};

struct drm_dp_remote_dpcd_read {
	u8 port_number;
	u32 dpcd_address;
	u8 num_bytes;
};

struct drm_dp_remote_dpcd_write {
	u8 port_number;
	u32 dpcd_address;
	u8 num_bytes;
	u8 *bytes;
};

#define DP_REMOTE_I2C_READ_MAX_TRANSACTIONS 4
struct drm_dp_remote_i2c_read {
	u8 num_transactions;
	u8 port_number;
	struct {
		u8 i2c_dev_id;
		u8 num_bytes;
		u8 *bytes;
		u8 no_stop_bit;
		u8 i2c_transaction_delay;
	} transactions[DP_REMOTE_I2C_READ_MAX_TRANSACTIONS];
	u8 read_i2c_device_id;
	u8 num_bytes_read;
};

struct drm_dp_remote_i2c_write {
	u8 port_number;
	u8 write_i2c_device_id;
	u8 num_bytes;
	u8 *bytes;
};

/* this covers ENUM_RESOURCES, POWER_DOWN_PHY, POWER_UP_PHY */
struct drm_dp_port_number_req {
	u8 port_number;
};

struct drm_dp_enum_path_resources_ack_reply {
	u8 port_number;
	u16 full_payload_bw_number;
	u16 avail_payload_bw_number;
};

/* covers POWER_DOWN_PHY, POWER_UP_PHY */
struct drm_dp_port_number_rep {
	u8 port_number;
};

struct drm_dp_query_payload {
	u8 port_number;
	u8 vcpi;
};

struct drm_dp_resource_status_notify {
	u8 port_number;
	u8 guid[16];
	u16 available_pbn;
};

struct drm_dp_query_payload_ack_reply {
	u8 port_number;
	u16 allocated_pbn;
};

struct drm_dp_sideband_msg_req_body {
	u8 req_type;
	union ack_req {
		struct drm_dp_connection_status_notify conn_stat;
		struct drm_dp_port_number_req port_num;
		struct drm_dp_resource_status_notify resource_stat;

		struct drm_dp_query_payload query_payload;
		struct drm_dp_allocate_payload allocate_payload;

		struct drm_dp_remote_dpcd_read dpcd_read;
		struct drm_dp_remote_dpcd_write dpcd_write;

		struct drm_dp_remote_i2c_read i2c_read;
		struct drm_dp_remote_i2c_write i2c_write;
	} u;
};

struct drm_dp_sideband_msg_reply_body {
	u8 reply_type;
	u8 req_type;
	union ack_replies {
		struct drm_dp_nak_reply nak;
		struct drm_dp_link_address_ack_reply link_addr;
		struct drm_dp_port_number_rep port_number;

		struct drm_dp_enum_path_resources_ack_reply path_resources;
		struct drm_dp_allocate_payload_ack_reply allocate_payload;
		struct drm_dp_query_payload_ack_reply query_payload;

		struct drm_dp_remote_dpcd_read_ack_reply remote_dpcd_read_ack;
		struct drm_dp_remote_dpcd_write_ack_reply remote_dpcd_write_ack;
		struct drm_dp_remote_dpcd_write_nak_reply remote_dpcd_write_nack;

		struct drm_dp_remote_i2c_read_ack_reply remote_i2c_read_ack;
		struct drm_dp_remote_i2c_read_nak_reply remote_i2c_read_nack;
		struct drm_dp_remote_i2c_write_ack_reply remote_i2c_write_ack;
	} u;
};

/* msg is queued to be put into a slot */
#define DRM_DP_SIDEBAND_TX_QUEUED 0
/* msg has started transmitting on a slot - still on msgq */
#define DRM_DP_SIDEBAND_TX_START_SEND 1
/* msg has finished transmitting on a slot - removed from msgq only in slot */
#define DRM_DP_SIDEBAND_TX_SENT 2
/* msg has received a response - removed from slot */
#define DRM_DP_SIDEBAND_TX_RX 3
#define DRM_DP_SIDEBAND_TX_TIMEOUT 4

struct drm_dp_sideband_msg_tx {
	u8 msg[256];
	u8 chunk[48];
	u8 cur_offset;
	u8 cur_len;
	struct drm_dp_mst_branch *dst;
	struct list_head next;
	int seqno;
	int state;
	bool path_msg;
	struct drm_dp_sideband_msg_reply_body reply;
};

/* sideband msg handler */
struct drm_dp_mst_topology_mgr;
struct drm_dp_mst_topology_cbs {
	/* create a connector for a port */
	struct drm_connector *(*add_connector)(struct drm_dp_mst_topology_mgr *mgr,
					       struct drm_dp_mst_port *port,
					       const char *path);
	void (*register_connector)(struct drm_connector *connector);
	void (*destroy_connector)(struct drm_dp_mst_topology_mgr *mgr,
				  struct drm_connector *connector);
	void (*hotplug)(struct drm_dp_mst_topology_mgr *mgr);
};

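/*
 * Minimal sketch (not part of the original header) of how a driver wires up
 * these callbacks.  The foo_mst_*() functions are hypothetical driver
 * implementations of the hooks declared above.
 *
 *	static const struct drm_dp_mst_topology_cbs foo_mst_cbs = {
 *		.add_connector = foo_mst_add_connector,
 *		.register_connector = foo_mst_register_connector,
 *		.destroy_connector = foo_mst_destroy_connector,
 *		.hotplug = foo_mst_hotplug,
 *	};
 *
 * The table is then assigned to &drm_dp_mst_topology_mgr.cbs before calling
 * drm_dp_mst_topology_mgr_init().
 */
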
#define DP_MAX_PAYLOAD (sizeof(unsigned long) * 8)

#define DP_PAYLOAD_LOCAL 1
#define DP_PAYLOAD_REMOTE 2
#define DP_PAYLOAD_DELETE_LOCAL 3

struct drm_dp_payload {
	int payload_state;
	int start_slot;
	int num_slots;
	int vcpi;
};

#define to_dp_mst_topology_state(x) container_of(x, struct drm_dp_mst_topology_state, base)

struct drm_dp_mst_topology_state {
	struct drm_private_state base;
	int avail_slots;
	struct drm_atomic_state *state;
	struct drm_dp_mst_topology_mgr *mgr;
};

#define to_dp_mst_topology_mgr(x) container_of(x, struct drm_dp_mst_topology_mgr, base)

/**
 * struct drm_dp_mst_topology_mgr - DisplayPort MST manager
 *
 * This struct represents the top-level DisplayPort MST topology manager.
 * There should be one instance of this for every MST-capable DP connector
 * on the GPU.
 */
struct drm_dp_mst_topology_mgr {
	/**
	 * @base: Base private object for atomic
	 */
	struct drm_private_obj base;

	/**
	 * @dev: device pointer for adding i2c devices etc.
	 */
	struct drm_device *dev;
	/**
	 * @cbs: callbacks for connector addition and destruction.
	 */
	const struct drm_dp_mst_topology_cbs *cbs;
	/**
	 * @max_dpcd_transaction_bytes: maximum number of bytes to read/write
	 * in one go.
	 */
	int max_dpcd_transaction_bytes;
	/**
	 * @aux: AUX channel for the DP MST connector this topology mgr is
	 * controlling.
	 */
	struct drm_dp_aux *aux;
	/**
	 * @max_payloads: maximum number of payloads the GPU can generate.
	 */
	int max_payloads;
	/**
	 * @conn_base_id: DRM connector ID this mgr is connected to. Only used
	 * to build the MST connector path value.
	 */
	int conn_base_id;

	/**
	 * @down_rep_recv: Message receiver state for down replies. This and
	 * @up_req_recv are only ever accessed from the work item, which is
	 * serialised.
	 */
	struct drm_dp_sideband_msg_rx down_rep_recv;
	/**
	 * @up_req_recv: Message receiver state for up requests. This and
	 * @down_rep_recv are only ever accessed from the work item, which is
	 * serialised.
	 */
	struct drm_dp_sideband_msg_rx up_req_recv;

	/**
	 * @lock: protects mst state, primary, dpcd.
	 */
	struct mutex lock;

	/**
	 * @mst_state: If this manager is enabled for an MST-capable port.
	 * False if no MST sink/branch device is connected.
	 */
	bool mst_state;
	/**
	 * @mst_primary: Pointer to the primary/first branch device.
	 */
	struct drm_dp_mst_branch *mst_primary;

	/**
	 * @dpcd: Cache of DPCD for primary port.
	 */
	u8 dpcd[DP_RECEIVER_CAP_SIZE];
	/**
	 * @sink_count: Sink count from DEVICE_SERVICE_IRQ_VECTOR_ESI0.
	 */
	u8 sink_count;
	/**
	 * @pbn_div: PBN to slots divisor.
	 */
	int pbn_div;

	/**
	 * @state: State information for topology manager
	 */
	struct drm_dp_mst_topology_state *state;

	/**
	 * @funcs: Atomic helper callbacks
	 */
	const struct drm_private_state_funcs *funcs;

	/**
	 * @qlock: protects @tx_msg_downq, the &drm_dp_mst_branch.tx_slots and
	 * &drm_dp_sideband_msg_tx.state once they are queued
	 */
	struct mutex qlock;
	/**
	 * @tx_msg_downq: List of pending down requests.
	 */
	struct list_head tx_msg_downq;

	/**
	 * @payload_lock: Protect payload information.
	 */
	struct mutex payload_lock;
	/**
	 * @proposed_vcpis: Array of pointers for the new VCPI allocation. The
	 * VCPI structure itself is &drm_dp_mst_port.vcpi.
	 */
	struct drm_dp_vcpi **proposed_vcpis;
	/**
	 * @payloads: Array of payloads.
	 */
	struct drm_dp_payload *payloads;
	/**
	 * @payload_mask: Elements of @payloads actually in use. Since
	 * reallocation of active outputs isn't possible, gaps can be created
	 * by disabling outputs out of order compared to how they've been
	 * enabled.
	 */
	unsigned long payload_mask;
	/**
	 * @vcpi_mask: Similar to @payload_mask, but for @proposed_vcpis.
	 */
	unsigned long vcpi_mask;

	/**
	 * @tx_waitq: Wait queue used to stall until the tx worker has
	 * processed a queued sideband message.
	 */
	wait_queue_head_t tx_waitq;
	/**
	 * @work: Probe work.
	 */
	struct work_struct work;
	/**
	 * @tx_work: Sideband transmit worker. This can nest within the main
	 * @work worker for each transaction @work launches.
	 */
	struct work_struct tx_work;

	/**
	 * @destroy_connector_list: List of connectors to be destroyed.
	 */
	struct list_head destroy_connector_list;
	/**
	 * @destroy_connector_lock: Protects @destroy_connector_list.
	 */
	struct mutex destroy_connector_lock;
	/**
	 * @destroy_connector_work: Work item to destroy connectors. Needed to
	 * avoid locking inversion.
	 */
	struct work_struct destroy_connector_work;
};

int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_device *dev, struct drm_dp_aux *aux,
				 int max_dpcd_transaction_bytes,
				 int max_payloads, int conn_base_id);

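/*
 * Minimal usage sketch (not part of the original header), loosely following
 * what MST-capable drivers do at connector init time.  Error handling and the
 * surrounding driver structures are omitted; "mgr", "dev", "aux" and
 * "connector" are assumed driver-side variables, "foo_mst_cbs" is the
 * hypothetical callback table from the earlier sketch, and 16/6 are example
 * limits for max_dpcd_transaction_bytes/max_payloads.
 *
 *	mgr->cbs = &foo_mst_cbs;
 *	ret = drm_dp_mst_topology_mgr_init(mgr, dev, aux, 16, 6,
 *					   connector->base.id);
 *	if (ret)
 *		return ret;
 *
 * Once the sink reports MST capability, the driver enables the manager with
 * drm_dp_mst_topology_mgr_set_mst(mgr, true); on unplug or unload it calls
 * drm_dp_mst_topology_mgr_set_mst(mgr, false) and finally
 * drm_dp_mst_topology_mgr_destroy(mgr).
 */
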
void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr);

int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state);

int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled);

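/*
 * Illustrative sketch (not part of the original header) of the short-pulse
 * HPD path most drivers use: read the ESI registers, hand them to
 * drm_dp_mst_hpd_irq(), and acknowledge what was handled.  The exact DPCD
 * read/ack sequence varies per driver; this assumes the DP_SINK_COUNT_ESI
 * layout from drm_dp_helper.h.
 *
 *	u8 esi[16] = {};
 *	bool handled = false;
 *
 *	drm_dp_dpcd_read(mgr->aux, DP_SINK_COUNT_ESI, esi, 14);
 *	drm_dp_mst_hpd_irq(mgr, esi, &handled);
 *	if (handled)
 *		drm_dp_dpcd_write(mgr->aux, DP_SINK_COUNT_ESI + 1, &esi[1], 3);
 */
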
enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector,
						 struct drm_dp_mst_topology_mgr *mgr,
						 struct drm_dp_mst_port *port);

bool drm_dp_mst_port_has_audio(struct drm_dp_mst_topology_mgr *mgr,
			       struct drm_dp_mst_port *port);
struct edid *drm_dp_mst_get_edid(struct drm_connector *connector,
				 struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_dp_mst_port *port);

int drm_dp_calc_pbn_mode(int clock, int bpp);

bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
			      struct drm_dp_mst_port *port, int pbn, int slots);

int drm_dp_mst_get_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);

void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);

void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
				struct drm_dp_mst_port *port);

int drm_dp_find_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr,
			   int pbn);

int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr);

int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr);

int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr);

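/*
 * Illustrative sketch (not part of the original header) of the payload
 * programming sequence a driver runs after allocating a VCPI with the helpers
 * above: part 1 updates the local payload table, the stream is enabled on the
 * hardware side, ACT is polled, then part 2 sends the remote payload
 * allocation messages.  Ordering details differ between drivers.
 *
 *	drm_dp_update_payload_part1(mgr);
 *	... enable the stream on the hardware side ...
 *	drm_dp_check_act_status(mgr);
 *	drm_dp_update_payload_part2(mgr);
 *
 * On teardown, drm_dp_mst_reset_vcpi_slots(), another
 * drm_dp_update_payload_part1() pass and finally drm_dp_mst_deallocate_vcpi()
 * undo the allocation.
 */
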
void drm_dp_mst_dump_topology(struct seq_file *m,
			      struct drm_dp_mst_topology_mgr *mgr);

void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr);
int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr);
struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_atomic_state *state,
								    struct drm_dp_mst_topology_mgr *mgr);
int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
				  struct drm_dp_mst_topology_mgr *mgr,
				  struct drm_dp_mst_port *port, int pbn);
int drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state,
				     struct drm_dp_mst_topology_mgr *mgr,
				     int slots);
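/*
 * Illustrative sketch (not part of the original header) of the atomic slot
 * accounting these two helpers provide, as used from a driver's
 * ->atomic_check() hook.  "state", "mgr", "port" and "pbn" are assumed
 * driver-side variables.
 *
 *	slots = drm_dp_atomic_find_vcpi_slots(state, mgr, port, pbn);
 *	if (slots < 0)
 *		return slots;
 *
 * and, when a stream is being removed in the same atomic state:
 *
 *	ret = drm_dp_atomic_release_vcpi_slots(state, mgr, slots);
 *	if (ret)
 *		return ret;
 */
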
int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_dp_mst_port *port, bool power_up);

#endif