/*
 * Copyright © 2014 Red Hat
 *
 * Permission to use, copy, modify, distribute, and sell this software and its
 * documentation for any purpose is hereby granted without fee, provided that
 * the above copyright notice appear in all copies and that both that copyright
 * notice and this permission notice appear in supporting documentation, and
 * that the name of the copyright holders not be used in advertising or
 * publicity pertaining to distribution of the software without specific,
 * written prior permission. The copyright holders make no representations
 * about the suitability of this software for any purpose. It is provided "as
 * is" without express or implied warranty.
 *
 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
 * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
 * OF THIS SOFTWARE.
 */
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/i2c.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drmP.h>

#include <drm/drm_fixed.h>
/**
 * DOC: dp mst helper
 *
 * These functions contain parts of the DisplayPort 1.2a MultiStream Transport
 * protocol. The helpers contain a topology manager and bandwidth manager.
 * The helpers encapsulate the sending and receiving of sideband msgs.
 */
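/*
 * A rough driver-side flow, as an illustrative sketch only (details of
 * initialisation arguments and hotplug plumbing are driver specific): the
 * driver embeds a topology manager, enables MST once it has probed an
 * MST-capable sink, and forwards short HPD pulses to the manager:
 *
 *	drm_dp_mst_topology_mgr_init(mgr, ...);
 *	drm_dp_mst_topology_mgr_set_mst(mgr, true);
 *	...
 *	drm_dp_mst_hpd_irq(mgr, esi, &handled);  // from the short-IRQ handler
 */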
static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
				  char *buf);
static int test_calc_pbn_mode(void);

static void drm_dp_put_port(struct drm_dp_mst_port *port);

static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
				     int id,
				     struct drm_dp_payload *payload);

static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
				  struct drm_dp_mst_port *port,
				  int offset, int size, u8 *bytes);

static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
				     struct drm_dp_mst_branch *mstb);
static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
					   struct drm_dp_mst_branch *mstb,
					   struct drm_dp_mst_port *port);
static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
				 u8 *guid);

static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux);
static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux);
static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr);
/* sideband msg handling */
static u8 drm_dp_msg_header_crc4(const uint8_t *data, size_t num_nibbles)
{
	u8 bitmask = 0x80;
	u8 bitshift = 7;
	u8 array_index = 0;
	int number_of_bits = num_nibbles * 4;
	u8 remainder = 0;

	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		remainder |= (data[array_index] & bitmask) >> bitshift;
		bitmask >>= 1;
		bitshift--;
		if (bitmask == 0) {
			bitmask = 0x80;
			bitshift = 7;
			array_index++;
		}
		if ((remainder & 0x10) == 0x10)
			remainder ^= 0x13;
	}

	number_of_bits = 4;
	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		if ((remainder & 0x10) != 0)
			remainder ^= 0x13;
	}

	return remainder;
}

static u8 drm_dp_msg_data_crc4(const uint8_t *data, u8 number_of_bytes)
{
	u8 bitmask = 0x80;
	u8 bitshift = 7;
	u8 array_index = 0;
	int number_of_bits = number_of_bytes * 8;
	u16 remainder = 0;

	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		remainder |= (data[array_index] & bitmask) >> bitshift;
		bitmask >>= 1;
		bitshift--;
		if (bitmask == 0) {
			bitmask = 0x80;
			bitshift = 7;
			array_index++;
		}
		if ((remainder & 0x100) == 0x100)
			remainder ^= 0xd5;
	}

	number_of_bits = 8;
	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		if ((remainder & 0x100) != 0)
			remainder ^= 0xd5;
	}

	return remainder & 0xff;
}
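/*
 * Illustrative use of the nibble CRC above (nothing in this file calls it
 * exactly like this): a 3-byte sideband header covers 5 data nibbles, and
 * the computed CRC4 is folded into the low nibble of the final header byte:
 *
 *	u8 hdr[3] = { 0x10, 0x02, 0x80 };	// hypothetical header bytes
 *	hdr[2] |= drm_dp_msg_header_crc4(hdr, (3 * 2) - 1) & 0xf;
 */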
static inline u8 drm_dp_calc_sb_hdr_size(struct drm_dp_sideband_msg_hdr *hdr)
{
	u8 size = 3;
	size += (hdr->lct / 2);
	return size;
}

static void drm_dp_encode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
					   u8 *buf, int *len)
{
	int idx = 0;
	int i;
	u8 crc4;
	buf[idx++] = ((hdr->lct & 0xf) << 4) | (hdr->lcr & 0xf);
	for (i = 0; i < (hdr->lct / 2); i++)
		buf[idx++] = hdr->rad[i];
	buf[idx++] = (hdr->broadcast << 7) | (hdr->path_msg << 6) |
		(hdr->msg_len & 0x3f);
	buf[idx++] = (hdr->somt << 7) | (hdr->eomt << 6) | (hdr->seqno << 4);

	crc4 = drm_dp_msg_header_crc4(buf, (idx * 2) - 1);
	buf[idx - 1] |= (crc4 & 0xf);

	*len = idx;
}

static bool drm_dp_decode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
					   u8 *buf, int buflen, u8 *hdrlen)
{
	u8 crc4;
	u8 len;
	int i;
	u8 idx;

	if (buf[0] == 0)
		return false;
	len = 3;
	len += ((buf[0] & 0xf0) >> 4) / 2;
	if (len > buflen)
		return false;
	crc4 = drm_dp_msg_header_crc4(buf, (len * 2) - 1);

	if ((crc4 & 0xf) != (buf[len - 1] & 0xf)) {
		DRM_DEBUG_KMS("crc4 mismatch 0x%x 0x%x\n", crc4, buf[len - 1]);
		return false;
	}

	hdr->lct = (buf[0] & 0xf0) >> 4;
	hdr->lcr = (buf[0] & 0xf);
	idx = 1;
	for (i = 0; i < (hdr->lct / 2); i++)
		hdr->rad[i] = buf[idx++];
	hdr->broadcast = (buf[idx] >> 7) & 0x1;
	hdr->path_msg = (buf[idx] >> 6) & 0x1;
	hdr->msg_len = buf[idx] & 0x3f;
	idx++;
	hdr->somt = (buf[idx] >> 7) & 0x1;
	hdr->eomt = (buf[idx] >> 6) & 0x1;
	hdr->seqno = (buf[idx] >> 4) & 0x1;
	idx++;
	*hdrlen = idx;
	return true;
}
static void drm_dp_encode_sideband_req(struct drm_dp_sideband_msg_req_body *req,
				       struct drm_dp_sideband_msg_tx *raw)
{
	int idx = 0;
	int i;
	u8 *buf = raw->msg;
	buf[idx++] = req->req_type & 0x7f;

	switch (req->req_type) {
	case DP_ENUM_PATH_RESOURCES:
		buf[idx] = (req->u.port_num.port_number & 0xf) << 4;
		idx++;
		break;
	case DP_ALLOCATE_PAYLOAD:
		buf[idx] = (req->u.allocate_payload.port_number & 0xf) << 4 |
			(req->u.allocate_payload.number_sdp_streams & 0xf);
		idx++;
		buf[idx] = (req->u.allocate_payload.vcpi & 0x7f);
		idx++;
		buf[idx] = (req->u.allocate_payload.pbn >> 8);
		idx++;
		buf[idx] = (req->u.allocate_payload.pbn & 0xff);
		idx++;
		for (i = 0; i < req->u.allocate_payload.number_sdp_streams / 2; i++) {
			buf[idx] = ((req->u.allocate_payload.sdp_stream_sink[i * 2] & 0xf) << 4) |
				(req->u.allocate_payload.sdp_stream_sink[i * 2 + 1] & 0xf);
			idx++;
		}
		if (req->u.allocate_payload.number_sdp_streams & 1) {
			i = req->u.allocate_payload.number_sdp_streams - 1;
			buf[idx] = (req->u.allocate_payload.sdp_stream_sink[i] & 0xf) << 4;
			idx++;
		}
		break;
	case DP_QUERY_PAYLOAD:
		buf[idx] = (req->u.query_payload.port_number & 0xf) << 4;
		idx++;
		buf[idx] = (req->u.query_payload.vcpi & 0x7f);
		idx++;
		break;
	case DP_REMOTE_DPCD_READ:
		buf[idx] = (req->u.dpcd_read.port_number & 0xf) << 4;
		buf[idx] |= ((req->u.dpcd_read.dpcd_address & 0xf0000) >> 16) & 0xf;
		idx++;
		buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff00) >> 8;
		idx++;
		buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff);
		idx++;
		buf[idx] = (req->u.dpcd_read.num_bytes);
		idx++;
		break;

	case DP_REMOTE_DPCD_WRITE:
		buf[idx] = (req->u.dpcd_write.port_number & 0xf) << 4;
		buf[idx] |= ((req->u.dpcd_write.dpcd_address & 0xf0000) >> 16) & 0xf;
		idx++;
		buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff00) >> 8;
		idx++;
		buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff);
		idx++;
		buf[idx] = (req->u.dpcd_write.num_bytes);
		idx++;
		memcpy(&buf[idx], req->u.dpcd_write.bytes, req->u.dpcd_write.num_bytes);
		idx += req->u.dpcd_write.num_bytes;
		break;
	case DP_REMOTE_I2C_READ:
		buf[idx] = (req->u.i2c_read.port_number & 0xf) << 4;
		buf[idx] |= (req->u.i2c_read.num_transactions & 0x3);
		idx++;
		for (i = 0; i < (req->u.i2c_read.num_transactions & 0x3); i++) {
			buf[idx] = req->u.i2c_read.transactions[i].i2c_dev_id & 0x7f;
			idx++;
			buf[idx] = req->u.i2c_read.transactions[i].num_bytes;
			idx++;
			memcpy(&buf[idx], req->u.i2c_read.transactions[i].bytes, req->u.i2c_read.transactions[i].num_bytes);
			idx += req->u.i2c_read.transactions[i].num_bytes;

			buf[idx] = (req->u.i2c_read.transactions[i].no_stop_bit & 0x1) << 5;
			buf[idx] |= (req->u.i2c_read.transactions[i].i2c_transaction_delay & 0xf);
			idx++;
		}
		buf[idx] = (req->u.i2c_read.read_i2c_device_id) & 0x7f;
		idx++;
		buf[idx] = (req->u.i2c_read.num_bytes_read);
		idx++;
		break;

	case DP_REMOTE_I2C_WRITE:
		buf[idx] = (req->u.i2c_write.port_number & 0xf) << 4;
		idx++;
		buf[idx] = (req->u.i2c_write.write_i2c_device_id) & 0x7f;
		idx++;
		buf[idx] = (req->u.i2c_write.num_bytes);
		idx++;
		memcpy(&buf[idx], req->u.i2c_write.bytes, req->u.i2c_write.num_bytes);
		idx += req->u.i2c_write.num_bytes;
		break;
	}
	raw->cur_len = idx;
}
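/*
 * Example encoding (illustrative sketch): an ENUM_PATH_RESOURCES request for
 * port 1 serialises to two bytes - the request type, then the port number in
 * the upper nibble of the following byte:
 *
 *	struct drm_dp_sideband_msg_req_body req = {
 *		.req_type = DP_ENUM_PATH_RESOURCES,
 *		.u.port_num.port_number = 1,
 *	};
 *	drm_dp_encode_sideband_req(&req, &txmsg);
 *	// txmsg.msg[0] == 0x10, txmsg.msg[1] == 0x10, txmsg.cur_len == 2
 */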
static void drm_dp_crc_sideband_chunk_req(u8 *msg, u8 len)
{
	u8 crc4;
	crc4 = drm_dp_msg_data_crc4(msg, len);
	msg[len] = crc4;
}

static void drm_dp_encode_sideband_reply(struct drm_dp_sideband_msg_reply_body *rep,
					 struct drm_dp_sideband_msg_tx *raw)
{
	int idx = 0;
	u8 *buf = raw->msg;

	buf[idx++] = (rep->reply_type & 0x1) << 7 | (rep->req_type & 0x7f);

	raw->cur_len = idx;
}
/* this adds a chunk of msg to the builder to get the final msg */
static bool drm_dp_sideband_msg_build(struct drm_dp_sideband_msg_rx *msg,
				      u8 *replybuf, u8 replybuflen, bool hdr)
{
	int ret;
	u8 crc4;

	if (hdr) {
		u8 hdrlen;
		struct drm_dp_sideband_msg_hdr recv_hdr;
		ret = drm_dp_decode_sideband_msg_hdr(&recv_hdr, replybuf, replybuflen, &hdrlen);
		if (ret == false) {
			print_hex_dump(KERN_DEBUG, "failed hdr", DUMP_PREFIX_NONE, 16, 1, replybuf, replybuflen, false);
			return false;
		}

		/*
		 * ignore out-of-order messages or messages that are part of a
		 * failed transaction
		 */
		if (!recv_hdr.somt && !msg->have_somt)
			return false;

		/* get length contained in this portion */
		msg->curchunk_len = recv_hdr.msg_len;
		msg->curchunk_hdrlen = hdrlen;

		/* we have already gotten an somt - don't bother parsing */
		if (recv_hdr.somt && msg->have_somt)
			return false;

		if (recv_hdr.somt) {
			memcpy(&msg->initial_hdr, &recv_hdr, sizeof(struct drm_dp_sideband_msg_hdr));
			msg->have_somt = true;
		}
		if (recv_hdr.eomt)
			msg->have_eomt = true;

		/* copy the bytes for the remainder of this header chunk */
		msg->curchunk_idx = min(msg->curchunk_len, (u8)(replybuflen - hdrlen));
		memcpy(&msg->chunk[0], replybuf + hdrlen, msg->curchunk_idx);
	} else {
		memcpy(&msg->chunk[msg->curchunk_idx], replybuf, replybuflen);
		msg->curchunk_idx += replybuflen;
	}

	if (msg->curchunk_idx >= msg->curchunk_len) {
		/* do CRC */
		crc4 = drm_dp_msg_data_crc4(msg->chunk, msg->curchunk_len - 1);
		/* copy chunk into bigger msg */
		memcpy(&msg->msg[msg->curlen], msg->chunk, msg->curchunk_len - 1);
		msg->curlen += msg->curchunk_len - 1;
	}
	return true;
}
static bool drm_dp_sideband_parse_link_address(struct drm_dp_sideband_msg_rx *raw,
					       struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;
	int i;
	memcpy(repmsg->u.link_addr.guid, &raw->msg[idx], 16);
	idx += 16;
	repmsg->u.link_addr.nports = raw->msg[idx] & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	for (i = 0; i < repmsg->u.link_addr.nports; i++) {
		if (raw->msg[idx] & 0x80)
			repmsg->u.link_addr.ports[i].input_port = 1;

		repmsg->u.link_addr.ports[i].peer_device_type = (raw->msg[idx] >> 4) & 0x7;
		repmsg->u.link_addr.ports[i].port_number = (raw->msg[idx] & 0xf);

		idx++;
		if (idx > raw->curlen)
			goto fail_len;
		repmsg->u.link_addr.ports[i].mcs = (raw->msg[idx] >> 7) & 0x1;
		repmsg->u.link_addr.ports[i].ddps = (raw->msg[idx] >> 6) & 0x1;
		if (repmsg->u.link_addr.ports[i].input_port == 0)
			repmsg->u.link_addr.ports[i].legacy_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
		idx++;
		if (idx > raw->curlen)
			goto fail_len;
		if (repmsg->u.link_addr.ports[i].input_port == 0) {
			repmsg->u.link_addr.ports[i].dpcd_revision = (raw->msg[idx]);
			idx++;
			if (idx > raw->curlen)
				goto fail_len;
			memcpy(repmsg->u.link_addr.ports[i].peer_guid, &raw->msg[idx], 16);
			idx += 16;
			if (idx > raw->curlen)
				goto fail_len;
			repmsg->u.link_addr.ports[i].num_sdp_streams = (raw->msg[idx] >> 4) & 0xf;
			repmsg->u.link_addr.ports[i].num_sdp_stream_sinks = (raw->msg[idx] & 0xf);
			idx++;
		}
		if (idx > raw->curlen)
			goto fail_len;
	}

	return true;
fail_len:
	DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_remote_dpcd_read(struct drm_dp_sideband_msg_rx *raw,
						   struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;
	repmsg->u.remote_dpcd_read_ack.port_number = raw->msg[idx] & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.remote_dpcd_read_ack.num_bytes = raw->msg[idx];
	if (idx > raw->curlen)
		goto fail_len;

	memcpy(repmsg->u.remote_dpcd_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_dpcd_read_ack.num_bytes);
	return true;
fail_len:
	DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_remote_dpcd_write(struct drm_dp_sideband_msg_rx *raw,
						    struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;
	repmsg->u.remote_dpcd_write_ack.port_number = raw->msg[idx] & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_remote_i2c_read_ack(struct drm_dp_sideband_msg_rx *raw,
						      struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.remote_i2c_read_ack.port_number = (raw->msg[idx] & 0xf);
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.remote_i2c_read_ack.num_bytes = raw->msg[idx];
	idx++;

	memcpy(repmsg->u.remote_i2c_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_i2c_read_ack.num_bytes);
	return true;
fail_len:
	DRM_DEBUG_KMS("remote i2c reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_enum_path_resources_ack(struct drm_dp_sideband_msg_rx *raw,
							  struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;
	repmsg->u.path_resources.port_number = (raw->msg[idx] >> 4) & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.path_resources.full_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.path_resources.avail_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("enum resource parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_allocate_payload_ack(struct drm_dp_sideband_msg_rx *raw,
						       struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;
	repmsg->u.allocate_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.allocate_payload.vcpi = raw->msg[idx];
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.allocate_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("allocate payload parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_query_payload_ack(struct drm_dp_sideband_msg_rx *raw,
						    struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;
	repmsg->u.query_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.query_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("query payload parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_reply(struct drm_dp_sideband_msg_rx *raw,
					struct drm_dp_sideband_msg_reply_body *msg)
{
	memset(msg, 0, sizeof(*msg));
	msg->reply_type = (raw->msg[0] & 0x80) >> 7;
	msg->req_type = (raw->msg[0] & 0x7f);

	if (msg->reply_type) {
		memcpy(msg->u.nak.guid, &raw->msg[1], 16);
		msg->u.nak.reason = raw->msg[17];
		msg->u.nak.nak_data = raw->msg[18];
		return false;
	}

	switch (msg->req_type) {
	case DP_LINK_ADDRESS:
		return drm_dp_sideband_parse_link_address(raw, msg);
	case DP_QUERY_PAYLOAD:
		return drm_dp_sideband_parse_query_payload_ack(raw, msg);
	case DP_REMOTE_DPCD_READ:
		return drm_dp_sideband_parse_remote_dpcd_read(raw, msg);
	case DP_REMOTE_DPCD_WRITE:
		return drm_dp_sideband_parse_remote_dpcd_write(raw, msg);
	case DP_REMOTE_I2C_READ:
		return drm_dp_sideband_parse_remote_i2c_read_ack(raw, msg);
	case DP_ENUM_PATH_RESOURCES:
		return drm_dp_sideband_parse_enum_path_resources_ack(raw, msg);
	case DP_ALLOCATE_PAYLOAD:
		return drm_dp_sideband_parse_allocate_payload_ack(raw, msg);
	default:
		DRM_ERROR("Got unknown reply 0x%02x\n", msg->req_type);
		return false;
	}
}
static bool drm_dp_sideband_parse_connection_status_notify(struct drm_dp_sideband_msg_rx *raw,
							   struct drm_dp_sideband_msg_req_body *msg)
{
	int idx = 1;

	msg->u.conn_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;

	memcpy(msg->u.conn_stat.guid, &raw->msg[idx], 16);
	idx += 16;
	if (idx > raw->curlen)
		goto fail_len;

	msg->u.conn_stat.legacy_device_plug_status = (raw->msg[idx] >> 6) & 0x1;
	msg->u.conn_stat.displayport_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
	msg->u.conn_stat.message_capability_status = (raw->msg[idx] >> 4) & 0x1;
	msg->u.conn_stat.input_port = (raw->msg[idx] >> 3) & 0x1;
	msg->u.conn_stat.peer_device_type = (raw->msg[idx] & 0x7);
	idx++;
	return true;
fail_len:
	DRM_DEBUG_KMS("connection status reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_resource_status_notify(struct drm_dp_sideband_msg_rx *raw,
							 struct drm_dp_sideband_msg_req_body *msg)
{
	int idx = 1;

	msg->u.resource_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;

	memcpy(msg->u.resource_stat.guid, &raw->msg[idx], 16);
	idx += 16;
	if (idx > raw->curlen)
		goto fail_len;

	msg->u.resource_stat.available_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
	idx++;
	return true;
fail_len:
	DRM_DEBUG_KMS("resource status reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_req(struct drm_dp_sideband_msg_rx *raw,
				      struct drm_dp_sideband_msg_req_body *msg)
{
	memset(msg, 0, sizeof(*msg));
	msg->req_type = (raw->msg[0] & 0x7f);

	switch (msg->req_type) {
	case DP_CONNECTION_STATUS_NOTIFY:
		return drm_dp_sideband_parse_connection_status_notify(raw, msg);
	case DP_RESOURCE_STATUS_NOTIFY:
		return drm_dp_sideband_parse_resource_status_notify(raw, msg);
	default:
		DRM_ERROR("Got unknown request 0x%02x\n", msg->req_type);
		return false;
	}
}
static int build_dpcd_write(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes, u8 *bytes)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_REMOTE_DPCD_WRITE;
	req.u.dpcd_write.port_number = port_num;
	req.u.dpcd_write.dpcd_address = offset;
	req.u.dpcd_write.num_bytes = num_bytes;
	req.u.dpcd_write.bytes = bytes;
	drm_dp_encode_sideband_req(&req, msg);

	return 0;
}

static int build_link_address(struct drm_dp_sideband_msg_tx *msg)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_LINK_ADDRESS;
	drm_dp_encode_sideband_req(&req, msg);
	return 0;
}

static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg, int port_num)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_ENUM_PATH_RESOURCES;
	req.u.port_num.port_number = port_num;
	drm_dp_encode_sideband_req(&req, msg);
	msg->path_msg = true;
	return 0;
}

static int build_allocate_payload(struct drm_dp_sideband_msg_tx *msg, int port_num,
				  u8 vcpi, uint16_t pbn,
				  u8 number_sdp_streams,
				  u8 *sdp_stream_sink)
{
	struct drm_dp_sideband_msg_req_body req;
	memset(&req, 0, sizeof(req));
	req.req_type = DP_ALLOCATE_PAYLOAD;
	req.u.allocate_payload.port_number = port_num;
	req.u.allocate_payload.vcpi = vcpi;
	req.u.allocate_payload.pbn = pbn;
	req.u.allocate_payload.number_sdp_streams = number_sdp_streams;
	memcpy(req.u.allocate_payload.sdp_stream_sink, sdp_stream_sink,
	       number_sdp_streams);
	drm_dp_encode_sideband_req(&req, msg);
	msg->path_msg = true;
	return 0;
}
static int drm_dp_mst_assign_payload_id(struct drm_dp_mst_topology_mgr *mgr,
					struct drm_dp_vcpi *vcpi)
{
	int ret, vcpi_ret;

	mutex_lock(&mgr->payload_lock);
	ret = find_first_zero_bit(&mgr->payload_mask, mgr->max_payloads + 1);
	if (ret > mgr->max_payloads) {
		ret = -EINVAL;
		DRM_DEBUG_KMS("out of payload ids %d\n", ret);
		goto out_unlock;
	}

	vcpi_ret = find_first_zero_bit(&mgr->vcpi_mask, mgr->max_payloads + 1);
	if (vcpi_ret > mgr->max_payloads) {
		ret = -EINVAL;
		DRM_DEBUG_KMS("out of vcpi ids %d\n", ret);
		goto out_unlock;
	}

	set_bit(ret, &mgr->payload_mask);
	set_bit(vcpi_ret, &mgr->vcpi_mask);
	vcpi->vcpi = vcpi_ret + 1;
	mgr->proposed_vcpis[ret - 1] = vcpi;
out_unlock:
	mutex_unlock(&mgr->payload_lock);
	return ret;
}

static void drm_dp_mst_put_payload_id(struct drm_dp_mst_topology_mgr *mgr,
				      int vcpi)
{
	int i;
	if (vcpi == 0)
		return;

	mutex_lock(&mgr->payload_lock);
	DRM_DEBUG_KMS("putting payload %d\n", vcpi);
	clear_bit(vcpi - 1, &mgr->vcpi_mask);

	for (i = 0; i < mgr->max_payloads; i++) {
		if (mgr->proposed_vcpis[i])
			if (mgr->proposed_vcpis[i]->vcpi == vcpi) {
				mgr->proposed_vcpis[i] = NULL;
				clear_bit(i + 1, &mgr->payload_mask);
			}
	}
	mutex_unlock(&mgr->payload_lock);
}
static bool check_txmsg_state(struct drm_dp_mst_topology_mgr *mgr,
			      struct drm_dp_sideband_msg_tx *txmsg)
{
	unsigned int state;

	/*
	 * All updates to txmsg->state are protected by mgr->qlock, and the two
	 * cases we check here are terminal states. For those the barriers
	 * provided by the wake_up/wait_event pair are enough.
	 */
	state = READ_ONCE(txmsg->state);
	return (state == DRM_DP_SIDEBAND_TX_RX ||
		state == DRM_DP_SIDEBAND_TX_TIMEOUT);
}

static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb,
				    struct drm_dp_sideband_msg_tx *txmsg)
{
	struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
	int ret;

	ret = wait_event_timeout(mgr->tx_waitq,
				 check_txmsg_state(mgr, txmsg),
				 (4 * HZ));
	mutex_lock(&mstb->mgr->qlock);
	if (ret > 0) {
		if (txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT) {
			ret = -EIO;
			goto out;
		}
	} else {
		DRM_DEBUG_KMS("timedout msg send %p %d %d\n", txmsg, txmsg->state, txmsg->seqno);

		/* dump some state */
		ret = -EIO;

		/* remove from q */
		if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED ||
		    txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND) {
			list_del(&txmsg->next);
		}

		if (txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND ||
		    txmsg->state == DRM_DP_SIDEBAND_TX_SENT) {
			mstb->tx_slots[txmsg->seqno] = NULL;
		}
	}
out:
	mutex_unlock(&mgr->qlock);

	return ret;
}
static struct drm_dp_mst_branch *drm_dp_add_mst_branch_device(u8 lct, u8 *rad)
{
	struct drm_dp_mst_branch *mstb;

	mstb = kzalloc(sizeof(*mstb), GFP_KERNEL);
	if (!mstb)
		return NULL;

	mstb->lct = lct;
	if (lct > 1)
		memcpy(mstb->rad, rad, lct / 2);
	INIT_LIST_HEAD(&mstb->ports);
	kref_init(&mstb->kref);
	return mstb;
}

static void drm_dp_free_mst_port(struct kref *kref);

static void drm_dp_free_mst_branch_device(struct kref *kref)
{
	struct drm_dp_mst_branch *mstb = container_of(kref, struct drm_dp_mst_branch, kref);
	if (mstb->port_parent) {
		if (list_empty(&mstb->port_parent->next))
			kref_put(&mstb->port_parent->kref, drm_dp_free_mst_port);
	}
	kfree(mstb);
}

static void drm_dp_destroy_mst_branch_device(struct kref *kref)
{
	struct drm_dp_mst_branch *mstb = container_of(kref, struct drm_dp_mst_branch, kref);
	struct drm_dp_mst_port *port, *tmp;
	bool wake_tx = false;

	/*
	 * init kref again to be used by ports to remove mst branch when it is
	 * not needed anymore
	 */
	kref_init(kref);

	if (mstb->port_parent && list_empty(&mstb->port_parent->next))
		kref_get(&mstb->port_parent->kref);

	/*
	 * destroy all ports - don't need lock
	 * as there are no more references to the mst branch
	 * device at this point.
	 */
	list_for_each_entry_safe(port, tmp, &mstb->ports, next) {
		list_del(&port->next);
		drm_dp_put_port(port);
	}

	/* drop any tx slots msg */
	mutex_lock(&mstb->mgr->qlock);
	if (mstb->tx_slots[0]) {
		mstb->tx_slots[0]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
		mstb->tx_slots[0] = NULL;
		wake_tx = true;
	}
	if (mstb->tx_slots[1]) {
		mstb->tx_slots[1]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
		mstb->tx_slots[1] = NULL;
		wake_tx = true;
	}
	mutex_unlock(&mstb->mgr->qlock);

	if (wake_tx)
		wake_up_all(&mstb->mgr->tx_waitq);

	kref_put(kref, drm_dp_free_mst_branch_device);
}

static void drm_dp_put_mst_branch_device(struct drm_dp_mst_branch *mstb)
{
	kref_put(&mstb->kref, drm_dp_destroy_mst_branch_device);
}
static void drm_dp_port_teardown_pdt(struct drm_dp_mst_port *port, int old_pdt)
{
	struct drm_dp_mst_branch *mstb;

	switch (old_pdt) {
	case DP_PEER_DEVICE_DP_LEGACY_CONV:
	case DP_PEER_DEVICE_SST_SINK:
		/* remove i2c over sideband */
		drm_dp_mst_unregister_i2c_bus(&port->aux);
		break;
	case DP_PEER_DEVICE_MST_BRANCHING:
		mstb = port->mstb;
		port->mstb = NULL;
		drm_dp_put_mst_branch_device(mstb);
		break;
	}
}

static void drm_dp_destroy_port(struct kref *kref)
{
	struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref);
	struct drm_dp_mst_topology_mgr *mgr = port->mgr;

	if (!port->input) {
		port->vcpi.num_slots = 0;

		kfree(port->cached_edid);

		/*
		 * The only time we don't have a connector
		 * on an output port is if the connector init
		 * fails.
		 */
		if (port->connector) {
			/* we can't destroy the connector here, as
			 * we might be holding the mode_config.mutex
			 * from an EDID retrieval */

			mutex_lock(&mgr->destroy_connector_lock);
			kref_get(&port->parent->kref);
			list_add(&port->next, &mgr->destroy_connector_list);
			mutex_unlock(&mgr->destroy_connector_lock);
			schedule_work(&mgr->destroy_connector_work);
			return;
		}
		/* no need to clean up vcpi
		 * as if we have no connector we never setup a vcpi */
		drm_dp_port_teardown_pdt(port, port->pdt);
		port->pdt = DP_PEER_DEVICE_NONE;
	}
	kfree(port);
}

static void drm_dp_put_port(struct drm_dp_mst_port *port)
{
	kref_put(&port->kref, drm_dp_destroy_port);
}
static struct drm_dp_mst_branch *drm_dp_mst_get_validated_mstb_ref_locked(struct drm_dp_mst_branch *mstb, struct drm_dp_mst_branch *to_find)
{
	struct drm_dp_mst_port *port;
	struct drm_dp_mst_branch *rmstb;
	if (to_find == mstb) {
		kref_get(&mstb->kref);
		return mstb;
	}
	list_for_each_entry(port, &mstb->ports, next) {
		if (port->mstb) {
			rmstb = drm_dp_mst_get_validated_mstb_ref_locked(port->mstb, to_find);
			if (rmstb)
				return rmstb;
		}
	}
	return NULL;
}

static struct drm_dp_mst_branch *drm_dp_get_validated_mstb_ref(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_branch *mstb)
{
	struct drm_dp_mst_branch *rmstb = NULL;
	mutex_lock(&mgr->lock);
	if (mgr->mst_primary)
		rmstb = drm_dp_mst_get_validated_mstb_ref_locked(mgr->mst_primary, mstb);
	mutex_unlock(&mgr->lock);
	return rmstb;
}

static struct drm_dp_mst_port *drm_dp_mst_get_port_ref_locked(struct drm_dp_mst_branch *mstb, struct drm_dp_mst_port *to_find)
{
	struct drm_dp_mst_port *port, *mport;

	list_for_each_entry(port, &mstb->ports, next) {
		if (port == to_find) {
			kref_get(&port->kref);
			return port;
		}
		if (port->mstb) {
			mport = drm_dp_mst_get_port_ref_locked(port->mstb, to_find);
			if (mport)
				return mport;
		}
	}
	return NULL;
}

static struct drm_dp_mst_port *drm_dp_get_validated_port_ref(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
{
	struct drm_dp_mst_port *rport = NULL;
	mutex_lock(&mgr->lock);
	if (mgr->mst_primary)
		rport = drm_dp_mst_get_port_ref_locked(mgr->mst_primary, port);
	mutex_unlock(&mgr->lock);
	return rport;
}

static struct drm_dp_mst_port *drm_dp_get_port(struct drm_dp_mst_branch *mstb, u8 port_num)
{
	struct drm_dp_mst_port *port;

	list_for_each_entry(port, &mstb->ports, next) {
		if (port->port_num == port_num) {
			kref_get(&port->kref);
			return port;
		}
	}

	return NULL;
}
/*
 * calculate a new RAD for this MST branch device
 * if parent has an LCT of 2 then it has 1 nibble of RAD,
 * if parent has an LCT of 3 then it has 2 nibbles of RAD,
 */
static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port,
			       u8 *rad)
{
	int parent_lct = port->parent->lct;
	int shift = 4;
	int idx = (parent_lct - 1) / 2;
	if (parent_lct > 1) {
		memcpy(rad, port->parent->rad, idx + 1);
		shift = (parent_lct % 2) ? 4 : 0;
	} else
		rad[0] = 0;

	rad[idx] |= port->port_num << shift;
	return parent_lct + 1;
}
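/*
 * Worked example (illustrative): a parent branch at LCT 2 with rad[0] = 0x10
 * was reached through port 1 of the primary branch. Linking a device on
 * port 8 of that parent gives the child LCT 3, and its port number lands in
 * the low nibble: rad[0] = 0x18, i.e. "go out port 1, then port 8".
 */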
/*
 * return sends link address for new mstb
 */
static bool drm_dp_port_setup_pdt(struct drm_dp_mst_port *port)
{
	int ret;
	u8 rad[6], lct;
	bool send_link = false;
	switch (port->pdt) {
	case DP_PEER_DEVICE_DP_LEGACY_CONV:
	case DP_PEER_DEVICE_SST_SINK:
		/* add i2c over sideband */
		ret = drm_dp_mst_register_i2c_bus(&port->aux);
		break;
	case DP_PEER_DEVICE_MST_BRANCHING:
		lct = drm_dp_calculate_rad(port, rad);

		port->mstb = drm_dp_add_mst_branch_device(lct, rad);
		port->mstb->mgr = port->mgr;
		port->mstb->port_parent = port;

		send_link = true;
		break;
	}
	return send_link;
}

static void drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, u8 *guid)
{
	int ret;

	memcpy(mstb->guid, guid, 16);

	if (!drm_dp_validate_guid(mstb->mgr, mstb->guid)) {
		if (mstb->port_parent) {
			ret = drm_dp_send_dpcd_write(
					mstb->mgr,
					mstb->port_parent,
					DP_GUID,
					16,
					mstb->guid);
		} else {
			ret = drm_dp_dpcd_write(
					mstb->mgr->aux,
					DP_GUID,
					mstb->guid,
					16);
		}
	}
}
static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb,
				int pnum,
				char *proppath,
				size_t proppath_size)
{
	int i;
	char temp[8];
	snprintf(proppath, proppath_size, "mst:%d", mstb->mgr->conn_base_id);
	for (i = 0; i < (mstb->lct - 1); i++) {
		int shift = (i % 2) ? 0 : 4;
		int port_num = (mstb->rad[i / 2] >> shift) & 0xf;
		snprintf(temp, sizeof(temp), "-%d", port_num);
		strlcat(proppath, temp, proppath_size);
	}
	snprintf(temp, sizeof(temp), "-%d", pnum);
	strlcat(proppath, temp, proppath_size);
}
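/*
 * Example (illustrative): with conn_base_id 30, a connector on port 2 of a
 * branch that itself hangs off port 1 of the primary branch gets the path
 * property "mst:30-1-2".
 */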
static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
			    struct drm_device *dev,
			    struct drm_dp_link_addr_reply_port *port_msg)
{
	struct drm_dp_mst_port *port;
	bool ret;
	bool created = false;
	int old_pdt = 0;
	int old_ddps = 0;
	port = drm_dp_get_port(mstb, port_msg->port_number);
	if (!port) {
		port = kzalloc(sizeof(*port), GFP_KERNEL);
		if (!port)
			return;
		kref_init(&port->kref);
		port->parent = mstb;
		port->port_num = port_msg->port_number;
		port->mgr = mstb->mgr;
		port->aux.name = "DPMST";
		port->aux.dev = dev->dev;
		created = true;
	} else {
		old_pdt = port->pdt;
		old_ddps = port->ddps;
	}

	port->pdt = port_msg->peer_device_type;
	port->input = port_msg->input_port;
	port->mcs = port_msg->mcs;
	port->ddps = port_msg->ddps;
	port->ldps = port_msg->legacy_device_plug_status;
	port->dpcd_rev = port_msg->dpcd_revision;
	port->num_sdp_streams = port_msg->num_sdp_streams;
	port->num_sdp_stream_sinks = port_msg->num_sdp_stream_sinks;

	/* manage mstb port lists with mgr lock - take a reference
	   for this list */
	if (created) {
		mutex_lock(&mstb->mgr->lock);
		kref_get(&port->kref);
		list_add(&port->next, &mstb->ports);
		mutex_unlock(&mstb->mgr->lock);
	}

	if (old_ddps != port->ddps) {
		if (port->ddps) {
			if (!port->input)
				drm_dp_send_enum_path_resources(mstb->mgr, mstb, port);
		} else {
			port->available_pbn = 0;
		}
	}

	if (old_pdt != port->pdt && !port->input) {
		drm_dp_port_teardown_pdt(port, old_pdt);

		ret = drm_dp_port_setup_pdt(port);
		if (ret == true)
			drm_dp_send_link_address(mstb->mgr, port->mstb);
	}

	if (created && !port->input) {
		char proppath[255];

		build_mst_prop_path(mstb, port->port_num, proppath, sizeof(proppath));
		port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr, port, proppath);
		if (!port->connector) {
			/* remove it from the port list */
			mutex_lock(&mstb->mgr->lock);
			list_del(&port->next);
			mutex_unlock(&mstb->mgr->lock);
			/* drop port list reference */
			drm_dp_put_port(port);
			goto out;
		}
		if ((port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV ||
		     port->pdt == DP_PEER_DEVICE_SST_SINK) &&
		    port->port_num >= DP_MST_LOGICAL_PORT_0) {
			port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc);
			drm_mode_connector_set_tile_property(port->connector);
		}
		(*mstb->mgr->cbs->register_connector)(port->connector);
	}

out:
	/* put reference to this port */
	drm_dp_put_port(port);
}
static void drm_dp_update_port(struct drm_dp_mst_branch *mstb,
			       struct drm_dp_connection_status_notify *conn_stat)
{
	struct drm_dp_mst_port *port;
	int old_pdt;
	int old_ddps;
	bool dowork = false;
	port = drm_dp_get_port(mstb, conn_stat->port_number);
	if (!port)
		return;

	old_ddps = port->ddps;
	old_pdt = port->pdt;
	port->pdt = conn_stat->peer_device_type;
	port->mcs = conn_stat->message_capability_status;
	port->ldps = conn_stat->legacy_device_plug_status;
	port->ddps = conn_stat->displayport_device_plug_status;

	if (old_ddps != port->ddps) {
		if (port->ddps) {
			dowork = true;
		} else {
			port->available_pbn = 0;
		}
	}
	if (old_pdt != port->pdt && !port->input) {
		drm_dp_port_teardown_pdt(port, old_pdt);

		if (drm_dp_port_setup_pdt(port))
			dowork = true;
	}

	drm_dp_put_port(port);
	if (dowork)
		queue_work(system_long_wq, &mstb->mgr->work);
}
static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_topology_mgr *mgr,
							      u8 lct, u8 *rad)
{
	struct drm_dp_mst_branch *mstb;
	struct drm_dp_mst_port *port;
	int i;
	/* find the port by iterating down */

	mutex_lock(&mgr->lock);
	mstb = mgr->mst_primary;

	for (i = 0; i < lct - 1; i++) {
		int shift = (i % 2) ? 0 : 4;
		int port_num = (rad[i / 2] >> shift) & 0xf;

		list_for_each_entry(port, &mstb->ports, next) {
			if (port->port_num == port_num) {
				mstb = port->mstb;
				if (!mstb) {
					DRM_ERROR("failed to lookup MSTB with lct %d, rad %02x\n", lct, rad[0]);
					goto out;
				}

				break;
			}
		}
	}
	kref_get(&mstb->kref);
out:
	mutex_unlock(&mgr->lock);
	return mstb;
}

static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper(
	struct drm_dp_mst_branch *mstb,
	uint8_t *guid)
{
	struct drm_dp_mst_branch *found_mstb;
	struct drm_dp_mst_port *port;

	if (memcmp(mstb->guid, guid, 16) == 0)
		return mstb;

	list_for_each_entry(port, &mstb->ports, next) {
		if (!port->mstb)
			continue;

		found_mstb = get_mst_branch_device_by_guid_helper(port->mstb, guid);

		if (found_mstb)
			return found_mstb;
	}

	return NULL;
}

static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device_by_guid(
	struct drm_dp_mst_topology_mgr *mgr,
	uint8_t *guid)
{
	struct drm_dp_mst_branch *mstb;

	/* find the port by iterating down */
	mutex_lock(&mgr->lock);

	mstb = get_mst_branch_device_by_guid_helper(mgr->mst_primary, guid);

	if (mstb)
		kref_get(&mstb->kref);

	mutex_unlock(&mgr->lock);
	return mstb;
}

static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
					       struct drm_dp_mst_branch *mstb)
{
	struct drm_dp_mst_port *port;
	struct drm_dp_mst_branch *mstb_child;
	if (!mstb->link_address_sent)
		drm_dp_send_link_address(mgr, mstb);

	list_for_each_entry(port, &mstb->ports, next) {
		if (port->input)
			continue;

		if (!port->ddps)
			continue;

		if (!port->available_pbn)
			drm_dp_send_enum_path_resources(mgr, mstb, port);

		if (port->mstb) {
			mstb_child = drm_dp_get_validated_mstb_ref(mgr, port->mstb);
			if (mstb_child) {
				drm_dp_check_and_send_link_address(mgr, mstb_child);
				drm_dp_put_mst_branch_device(mstb_child);
			}
		}
	}
}
static void drm_dp_mst_link_probe_work(struct work_struct *work)
{
	struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, work);
	struct drm_dp_mst_branch *mstb;

	mutex_lock(&mgr->lock);
	mstb = mgr->mst_primary;
	if (mstb) {
		kref_get(&mstb->kref);
	}
	mutex_unlock(&mgr->lock);
	if (mstb) {
		drm_dp_check_and_send_link_address(mgr, mstb);
		drm_dp_put_mst_branch_device(mstb);
	}
}

static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
				 u8 *guid)
{
	static u8 zero_guid[16];

	if (!memcmp(guid, zero_guid, 16)) {
		u64 salt = get_jiffies_64();
		memcpy(&guid[0], &salt, sizeof(u64));
		memcpy(&guid[8], &salt, sizeof(u64));
		return false;
	}
	return true;
}

static int build_dpcd_read(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_REMOTE_DPCD_READ;
	req.u.dpcd_read.port_number = port_num;
	req.u.dpcd_read.dpcd_address = offset;
	req.u.dpcd_read.num_bytes = num_bytes;
	drm_dp_encode_sideband_req(&req, msg);

	return 0;
}
static int drm_dp_send_sideband_msg(struct drm_dp_mst_topology_mgr *mgr,
				    bool up, u8 *msg, int len)
{
	int ret;
	int regbase = up ? DP_SIDEBAND_MSG_UP_REP_BASE : DP_SIDEBAND_MSG_DOWN_REQ_BASE;
	int tosend, total, offset;
	int retries = 0;

retry:
	total = len;
	offset = 0;
	do {
		tosend = min3(mgr->max_dpcd_transaction_bytes, 16, total);

		ret = drm_dp_dpcd_write(mgr->aux, regbase + offset,
					&msg[offset],
					tosend);
		if (ret != tosend) {
			if (ret == -EIO && retries < 5) {
				retries++;
				goto retry;
			}
			DRM_DEBUG_KMS("failed to dpcd write %d %d\n", tosend, ret);

			return -EIO;
		}
		offset += tosend;
		total -= tosend;
	} while (total > 0);
	return 0;
}

static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
				  struct drm_dp_sideband_msg_tx *txmsg)
{
	struct drm_dp_mst_branch *mstb = txmsg->dst;
	u8 req_type;

	/* both msg slots are full */
	if (txmsg->seqno == -1) {
		if (mstb->tx_slots[0] && mstb->tx_slots[1]) {
			DRM_DEBUG_KMS("%s: failed to find slot\n", __func__);
			return -EAGAIN;
		}
		if (mstb->tx_slots[0] == NULL && mstb->tx_slots[1] == NULL) {
			txmsg->seqno = mstb->last_seqno;
			mstb->last_seqno ^= 1;
		} else if (mstb->tx_slots[0] == NULL)
			txmsg->seqno = 0;
		else
			txmsg->seqno = 1;
		mstb->tx_slots[txmsg->seqno] = txmsg;
	}

	req_type = txmsg->msg[0] & 0x7f;
	if (req_type == DP_CONNECTION_STATUS_NOTIFY ||
	    req_type == DP_RESOURCE_STATUS_NOTIFY)
		hdr->broadcast = 1;
	else
		hdr->broadcast = 0;
	hdr->path_msg = txmsg->path_msg;
	hdr->lct = mstb->lct;
	hdr->lcr = mstb->lct - 1;
	if (mstb->lct > 1)
		memcpy(hdr->rad, mstb->rad, mstb->lct / 2);
	hdr->seqno = txmsg->seqno;
	return 0;
}
/*
 * process a single block of the next message in the sideband queue
 */
static int process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
				   struct drm_dp_sideband_msg_tx *txmsg,
				   bool up)
{
	u8 chunk[48];
	struct drm_dp_sideband_msg_hdr hdr;
	int len, space, idx, tosend;
	int ret;

	memset(&hdr, 0, sizeof(struct drm_dp_sideband_msg_hdr));

	if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED) {
		txmsg->seqno = -1;
		txmsg->state = DRM_DP_SIDEBAND_TX_START_SEND;
	}

	/* make hdr from dst mst - for replies use seqno
	   otherwise assign one */
	ret = set_hdr_from_dst_qlock(&hdr, txmsg);
	if (ret < 0)
		return ret;

	/* amount left to send in this message */
	len = txmsg->cur_len - txmsg->cur_offset;

	/* 48 - sideband msg size - 1 byte for data CRC, x header bytes */
	space = 48 - 1 - drm_dp_calc_sb_hdr_size(&hdr);

	tosend = min(len, space);
	if (len == txmsg->cur_len)
		hdr.somt = 1;
	if (space >= len)
		hdr.eomt = 1;

	hdr.msg_len = tosend + 1;
	drm_dp_encode_sideband_msg_hdr(&hdr, chunk, &idx);
	memcpy(&chunk[idx], &txmsg->msg[txmsg->cur_offset], tosend);
	/* add crc at end */
	drm_dp_crc_sideband_chunk_req(&chunk[idx], tosend);
	idx += tosend + 1;

	ret = drm_dp_send_sideband_msg(mgr, up, chunk, idx);
	if (ret) {
		DRM_DEBUG_KMS("sideband msg failed to send\n");
		return ret;
	}

	txmsg->cur_offset += tosend;
	if (txmsg->cur_offset == txmsg->cur_len) {
		txmsg->state = DRM_DP_SIDEBAND_TX_SENT;
		return 1;
	}
	return 0;
}

static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
{
	struct drm_dp_sideband_msg_tx *txmsg;
	int ret;

	WARN_ON(!mutex_is_locked(&mgr->qlock));

	/* construct a chunk from the first msg in the tx_msg queue */
	if (list_empty(&mgr->tx_msg_downq))
		return;

	txmsg = list_first_entry(&mgr->tx_msg_downq, struct drm_dp_sideband_msg_tx, next);
	ret = process_single_tx_qlock(mgr, txmsg, false);
	if (ret == 1) {
		/* txmsg is sent it should be in the slots now */
		list_del(&txmsg->next);
	} else if (ret) {
		DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
		list_del(&txmsg->next);
		if (txmsg->seqno != -1)
			txmsg->dst->tx_slots[txmsg->seqno] = NULL;
		txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
		wake_up_all(&mgr->tx_waitq);
	}
}

/* called holding qlock */
static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
				       struct drm_dp_sideband_msg_tx *txmsg)
{
	int ret;

	/* construct a chunk from the first msg in the tx_msg queue */
	ret = process_single_tx_qlock(mgr, txmsg, true);

	if (ret != 1)
		DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);

	txmsg->dst->tx_slots[txmsg->seqno] = NULL;
}

static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_dp_sideband_msg_tx *txmsg)
{
	mutex_lock(&mgr->qlock);
	list_add_tail(&txmsg->next, &mgr->tx_msg_downq);
	if (list_is_singular(&mgr->tx_msg_downq))
		process_single_down_tx_qlock(mgr);
	mutex_unlock(&mgr->qlock);
}
static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
				     struct drm_dp_mst_branch *mstb)
{
	int len;
	struct drm_dp_sideband_msg_tx *txmsg;
	int ret;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg)
		return;

	txmsg->dst = mstb;
	len = build_link_address(txmsg);

	mstb->link_address_sent = true;
	drm_dp_queue_down_tx(mgr, txmsg);

	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
	if (ret > 0) {
		int i;

		if (txmsg->reply.reply_type == 1)
			DRM_DEBUG_KMS("link address nak received\n");
		else {
			DRM_DEBUG_KMS("link address reply: %d\n", txmsg->reply.u.link_addr.nports);
			for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) {
				DRM_DEBUG_KMS("port %d: input %d, pdt: %d, pn: %d, dpcd_rev: %02x, mcs: %d, ddps: %d, ldps %d, sdp %d/%d\n", i,
				       txmsg->reply.u.link_addr.ports[i].input_port,
				       txmsg->reply.u.link_addr.ports[i].peer_device_type,
				       txmsg->reply.u.link_addr.ports[i].port_number,
				       txmsg->reply.u.link_addr.ports[i].dpcd_revision,
				       txmsg->reply.u.link_addr.ports[i].mcs,
				       txmsg->reply.u.link_addr.ports[i].ddps,
				       txmsg->reply.u.link_addr.ports[i].legacy_device_plug_status,
				       txmsg->reply.u.link_addr.ports[i].num_sdp_streams,
				       txmsg->reply.u.link_addr.ports[i].num_sdp_stream_sinks);
			}

			drm_dp_check_mstb_guid(mstb, txmsg->reply.u.link_addr.guid);

			for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) {
				drm_dp_add_port(mstb, mgr->dev, &txmsg->reply.u.link_addr.ports[i]);
			}
			(*mgr->cbs->hotplug)(mgr);
		}
	} else {
		mstb->link_address_sent = false;
		DRM_DEBUG_KMS("link address failed %d\n", ret);
	}

	kfree(txmsg);
}
static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
					   struct drm_dp_mst_branch *mstb,
					   struct drm_dp_mst_port *port)
{
	int len;
	struct drm_dp_sideband_msg_tx *txmsg;
	int ret;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg)
		return -ENOMEM;

	txmsg->dst = mstb;
	len = build_enum_path_resources(txmsg, port->port_num);

	drm_dp_queue_down_tx(mgr, txmsg);

	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
	if (ret > 0) {
		if (txmsg->reply.reply_type == 1)
			DRM_DEBUG_KMS("enum path resources nak received\n");
		else {
			if (port->port_num != txmsg->reply.u.path_resources.port_number)
				DRM_ERROR("got incorrect port in response\n");
			DRM_DEBUG_KMS("enum path resources %d: %d %d\n", txmsg->reply.u.path_resources.port_number, txmsg->reply.u.path_resources.full_payload_bw_number,
			       txmsg->reply.u.path_resources.avail_payload_bw_number);
			port->available_pbn = txmsg->reply.u.path_resources.avail_payload_bw_number;
		}
	}

	kfree(txmsg);
	return 0;
}

static struct drm_dp_mst_port *drm_dp_get_last_connected_port_to_mstb(struct drm_dp_mst_branch *mstb)
{
	if (!mstb->port_parent)
		return NULL;

	if (mstb->port_parent->mstb != mstb)
		return mstb->port_parent;

	return drm_dp_get_last_connected_port_to_mstb(mstb->port_parent->parent);
}

static struct drm_dp_mst_branch *drm_dp_get_last_connected_port_and_mstb(struct drm_dp_mst_topology_mgr *mgr,
									 struct drm_dp_mst_branch *mstb,
									 int *port_num)
{
	struct drm_dp_mst_branch *rmstb = NULL;
	struct drm_dp_mst_port *found_port;
	mutex_lock(&mgr->lock);
	if (mgr->mst_primary) {
		found_port = drm_dp_get_last_connected_port_to_mstb(mstb);

		if (found_port) {
			rmstb = found_port->parent;
			kref_get(&rmstb->kref);
			*port_num = found_port->port_num;
		}
	}
	mutex_unlock(&mgr->lock);
	return rmstb;
}
static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
				   struct drm_dp_mst_port *port,
				   int id,
				   int pbn)
{
	struct drm_dp_sideband_msg_tx *txmsg;
	struct drm_dp_mst_branch *mstb;
	int len, ret, port_num;
	u8 sinks[DRM_DP_MAX_SDP_STREAMS];
	int i;

	port = drm_dp_get_validated_port_ref(mgr, port);
	if (!port)
		return -EINVAL;

	port_num = port->port_num;
	mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
	if (!mstb) {
		mstb = drm_dp_get_last_connected_port_and_mstb(mgr, port->parent, &port_num);

		if (!mstb) {
			drm_dp_put_port(port);
			return -EINVAL;
		}
	}

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg) {
		ret = -ENOMEM;
		goto fail_put;
	}

	for (i = 0; i < port->num_sdp_streams; i++)
		sinks[i] = i;

	txmsg->dst = mstb;
	len = build_allocate_payload(txmsg, port_num,
				     id,
				     pbn, port->num_sdp_streams, sinks);

	drm_dp_queue_down_tx(mgr, txmsg);

	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
	if (ret > 0) {
		if (txmsg->reply.reply_type == 1) {
			ret = -EINVAL;
		} else
			ret = 0;
	}
	kfree(txmsg);
fail_put:
	drm_dp_put_mst_branch_device(mstb);
	drm_dp_put_port(port);
	return ret;
}
static int drm_dp_create_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
				       int id,
				       struct drm_dp_payload *payload)
{
	int ret;

	ret = drm_dp_dpcd_write_payload(mgr, id, payload);
	if (ret < 0) {
		payload->payload_state = 0;
		return ret;
	}
	payload->payload_state = DP_PAYLOAD_LOCAL;
	return 0;
}

static int drm_dp_create_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
				       struct drm_dp_mst_port *port,
				       int id,
				       struct drm_dp_payload *payload)
{
	int ret;
	ret = drm_dp_payload_send_msg(mgr, port, id, port->vcpi.pbn);
	if (ret < 0)
		return ret;
	payload->payload_state = DP_PAYLOAD_REMOTE;
	return ret;
}

static int drm_dp_destroy_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
					struct drm_dp_mst_port *port,
					int id,
					struct drm_dp_payload *payload)
{
	DRM_DEBUG_KMS("\n");
	/* it's okay for these to fail */
	if (port) {
		drm_dp_payload_send_msg(mgr, port, id, 0);
	}

	drm_dp_dpcd_write_payload(mgr, id, payload);
	payload->payload_state = DP_PAYLOAD_DELETE_LOCAL;
	return 0;
}

static int drm_dp_destroy_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
					int id,
					struct drm_dp_payload *payload)
{
	payload->payload_state = 0;
	return 0;
}
/**
 * drm_dp_update_payload_part1() - Execute payload update part 1
 * @mgr: manager to use.
 *
 * This iterates over all proposed virtual channels, and tries to
 * allocate space in the link for them. For 0->slots transitions,
 * this step just writes the VCPI to the MST device. For slots->0
 * transitions, this writes the updated VCPIs and removes the
 * remote VC payloads.
 *
 * After calling this the driver should generate ACT and payload
 * packets.
 */
int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
{
	int i, j;
	int cur_slots = 1;
	struct drm_dp_payload req_payload;
	struct drm_dp_mst_port *port;

	mutex_lock(&mgr->payload_lock);
	for (i = 0; i < mgr->max_payloads; i++) {
		/* solve the current payloads - compare to the hw ones
		   - update the hw view */
		req_payload.start_slot = cur_slots;
		if (mgr->proposed_vcpis[i]) {
			port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
			port = drm_dp_get_validated_port_ref(mgr, port);
			if (!port) {
				mutex_unlock(&mgr->payload_lock);
				return -EINVAL;
			}
			req_payload.num_slots = mgr->proposed_vcpis[i]->num_slots;
			req_payload.vcpi = mgr->proposed_vcpis[i]->vcpi;
		} else {
			port = NULL;
			req_payload.num_slots = 0;
		}

		if (mgr->payloads[i].start_slot != req_payload.start_slot) {
			mgr->payloads[i].start_slot = req_payload.start_slot;
		}
		/* work out what is required to happen with this payload */
		if (mgr->payloads[i].num_slots != req_payload.num_slots) {

			/* need to push an update for this payload */
			if (req_payload.num_slots) {
				drm_dp_create_payload_step1(mgr, mgr->proposed_vcpis[i]->vcpi, &req_payload);
				mgr->payloads[i].num_slots = req_payload.num_slots;
				mgr->payloads[i].vcpi = req_payload.vcpi;
			} else if (mgr->payloads[i].num_slots) {
				mgr->payloads[i].num_slots = 0;
				drm_dp_destroy_payload_step1(mgr, port, mgr->payloads[i].vcpi, &mgr->payloads[i]);
				req_payload.payload_state = mgr->payloads[i].payload_state;
				mgr->payloads[i].start_slot = 0;
			}
			mgr->payloads[i].payload_state = req_payload.payload_state;
		}
		cur_slots += req_payload.num_slots;

		if (port)
			drm_dp_put_port(port);
	}

	for (i = 0; i < mgr->max_payloads; i++) {
		if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) {
			DRM_DEBUG_KMS("removing payload %d\n", i);
			for (j = i; j < mgr->max_payloads - 1; j++) {
				memcpy(&mgr->payloads[j], &mgr->payloads[j + 1], sizeof(struct drm_dp_payload));
				mgr->proposed_vcpis[j] = mgr->proposed_vcpis[j + 1];
				if (mgr->proposed_vcpis[j] && mgr->proposed_vcpis[j]->num_slots) {
					set_bit(j + 1, &mgr->payload_mask);
				} else {
					clear_bit(j + 1, &mgr->payload_mask);
				}
			}
			memset(&mgr->payloads[mgr->max_payloads - 1], 0, sizeof(struct drm_dp_payload));
			mgr->proposed_vcpis[mgr->max_payloads - 1] = NULL;
			clear_bit(mgr->max_payloads, &mgr->payload_mask);
		}
	}
	mutex_unlock(&mgr->payload_lock);

	return 0;
}
EXPORT_SYMBOL(drm_dp_update_payload_part1);
/**
 * drm_dp_update_payload_part2() - Execute payload update part 2
 * @mgr: manager to use.
 *
 * This iterates over all proposed virtual channels, and tries to
 * allocate space in the link for them. For 0->slots transitions,
 * this step writes the remote VC payload commands. For slots->0
 * this just resets some internal state.
 */
int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr)
{
	struct drm_dp_mst_port *port;
	int i;
	int ret = 0;
	mutex_lock(&mgr->payload_lock);
	for (i = 0; i < mgr->max_payloads; i++) {

		if (!mgr->proposed_vcpis[i])
			continue;

		port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);

		DRM_DEBUG_KMS("payload %d %d\n", i, mgr->payloads[i].payload_state);
		if (mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL) {
			ret = drm_dp_create_payload_step2(mgr, port, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]);
		} else if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) {
			ret = drm_dp_destroy_payload_step2(mgr, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]);
		}
		if (ret) {
			mutex_unlock(&mgr->payload_lock);
			return ret;
		}
	}
	mutex_unlock(&mgr->payload_lock);
	return 0;
}
EXPORT_SYMBOL(drm_dp_update_payload_part2);
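/*
 * Sketch of the two-step payload update a driver performs around an MST
 * modeset (illustrative only; how ACT is triggered and awaited is driver
 * specific):
 *
 *	drm_dp_update_payload_part1(mgr);	// write VCPI table to the branch
 *	...					// trigger ACT and wait for completion
 *	drm_dp_update_payload_part2(mgr);	// send remote ALLOCATE_PAYLOAD msgs
 */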
#if 0 /* unused as of yet */
static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_dp_mst_port *port,
				 int offset, int size)
{
	int len;
	struct drm_dp_sideband_msg_tx *txmsg;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg)
		return -ENOMEM;

	len = build_dpcd_read(txmsg, port->port_num, 0, 8);
	txmsg->dst = port->parent;

	drm_dp_queue_down_tx(mgr, txmsg);

	return 0;
}
#endif

static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
				  struct drm_dp_mst_port *port,
				  int offset, int size, u8 *bytes)
{
	int len;
	int ret;
	struct drm_dp_sideband_msg_tx *txmsg;
	struct drm_dp_mst_branch *mstb;

	mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
	if (!mstb)
		return -EINVAL;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg) {
		ret = -ENOMEM;
		goto fail_put;
	}

	len = build_dpcd_write(txmsg, port->port_num, offset, size, bytes);
	txmsg->dst = mstb;

	drm_dp_queue_down_tx(mgr, txmsg);

	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
	if (ret > 0) {
		if (txmsg->reply.reply_type == 1) {
			ret = -EINVAL;
		} else
			ret = 0;
	}
	kfree(txmsg);
fail_put:
	drm_dp_put_mst_branch_device(mstb);
	return ret;
}

static int drm_dp_encode_up_ack_reply(struct drm_dp_sideband_msg_tx *msg, u8 req_type)
{
	struct drm_dp_sideband_msg_reply_body reply;

	reply.reply_type = 0;
	reply.req_type = req_type;
	drm_dp_encode_sideband_reply(&reply, msg);
	return 0;
}

static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
				    struct drm_dp_mst_branch *mstb,
				    int req_type, int seqno, bool broadcast)
{
	struct drm_dp_sideband_msg_tx *txmsg;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg)
		return -ENOMEM;

	txmsg->dst = mstb;
	txmsg->seqno = seqno;
	drm_dp_encode_up_ack_reply(txmsg, req_type);

	mutex_lock(&mgr->qlock);

	process_single_up_tx_qlock(mgr, txmsg);

	mutex_unlock(&mgr->qlock);

	kfree(txmsg);
	return 0;
}
static bool drm_dp_get_vc_payload_bw(int dp_link_bw,
				     int dp_link_count,
				     int *out)
{
	switch (dp_link_bw) {
	default:
		DRM_DEBUG_KMS("invalid link bandwidth in DPCD: %x (link count: %d)\n",
			      dp_link_bw, dp_link_count);
		return false;

	case DP_LINK_BW_1_62:
		*out = 3 * dp_link_count;
		break;
	case DP_LINK_BW_2_7:
		*out = 5 * dp_link_count;
		break;
	case DP_LINK_BW_5_4:
		*out = 10 * dp_link_count;
		break;
	}
	return true;
}
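/*
 * Worked example: a 2.7 Gbps (HBR) link trained at 4 lanes yields
 * *out = 5 * 4 = 20, i.e. 20 PBN of bandwidth per MST time slot, which
 * drm_dp_mst_topology_mgr_set_mst() below stores as mgr->pbn_div.
 */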
/**
 * drm_dp_mst_topology_mgr_set_mst() - Set the MST state for a topology manager
 * @mgr: manager to set state for
 * @mst_state: true to enable MST on this connector - false to disable.
 *
 * This is called by the driver when it detects an MST capable device plugged
 * into a DP MST capable port, or when a DP MST capable device is unplugged.
 */
int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state)
{
	int ret = 0;
	struct drm_dp_mst_branch *mstb = NULL;

	mutex_lock(&mgr->lock);
	if (mst_state == mgr->mst_state)
		goto out_unlock;

	mgr->mst_state = mst_state;
	/* set the device into MST mode */
	if (mst_state) {
		WARN_ON(mgr->mst_primary);

		/* get dpcd info */
		ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
		if (ret != DP_RECEIVER_CAP_SIZE) {
			DRM_DEBUG_KMS("failed to read DPCD\n");
			goto out_unlock;
		}

		if (!drm_dp_get_vc_payload_bw(mgr->dpcd[1],
					      mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK,
					      &mgr->pbn_div)) {
			ret = -EINVAL;
			goto out_unlock;
		}

		/* add initial branch device at LCT 1 */
		mstb = drm_dp_add_mst_branch_device(1, NULL);
		if (mstb == NULL) {
			ret = -ENOMEM;
			goto out_unlock;
		}
		mstb->mgr = mgr;

		/* give this the main reference */
		mgr->mst_primary = mstb;
		kref_get(&mgr->mst_primary->kref);

		ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
					 DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
		if (ret < 0)
			goto out_unlock;

		{
			struct drm_dp_payload reset_pay;
			reset_pay.start_slot = 0;
			reset_pay.num_slots = 0x3f;
			drm_dp_dpcd_write_payload(mgr, 0, &reset_pay);
		}

		queue_work(system_long_wq, &mgr->work);

		ret = 0;
	} else {
		/* disable MST on the device */
		mstb = mgr->mst_primary;
		mgr->mst_primary = NULL;
		/* this can fail if the device is gone */
		drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0);
		ret = 0;
		memset(mgr->payloads, 0, mgr->max_payloads * sizeof(struct drm_dp_payload));
		mgr->payload_mask = 0;
		set_bit(0, &mgr->payload_mask);
		mgr->vcpi_mask = 0;
	}

out_unlock:
	mutex_unlock(&mgr->lock);
	if (mstb)
		drm_dp_put_mst_branch_device(mstb);
	return ret;
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_set_mst);
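/*
 * Illustrative call site: a driver typically flips MST on or off from its
 * long-pulse hotplug path once it has probed the sink's MST capability
 * (sink_is_mst_capable below is a placeholder for the driver's own DPCD
 * check, not part of this API):
 *
 *	if (sink_is_mst_capable && !mgr->mst_state)
 *		drm_dp_mst_topology_mgr_set_mst(mgr, true);
 *	else if (!sink_is_mst_capable && mgr->mst_state)
 *		drm_dp_mst_topology_mgr_set_mst(mgr, false);
 */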
/**
 * drm_dp_mst_topology_mgr_suspend() - suspend the MST manager
 * @mgr: manager to suspend
 *
 * This function tells the MST device that we can't handle UP messages
 * anymore. This should stop it from sending any since we are suspended.
 */
void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr)
{
	mutex_lock(&mgr->lock);
	drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
			   DP_MST_EN | DP_UPSTREAM_IS_SRC);
	mutex_unlock(&mgr->lock);
	flush_work(&mgr->work);
	flush_work(&mgr->destroy_connector_work);
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend);

/**
 * drm_dp_mst_topology_mgr_resume() - resume the MST manager
 * @mgr: manager to resume
 *
 * This will fetch the DPCD and check whether the device is still there;
 * if it is, it will rewrite the MSTM control bits and return 0.
 *
 * If the device is gone this returns -1, and the driver should do
 * a full MST reprobe, in case we were undocked.
 */
int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr)
{
	int ret = 0;

	mutex_lock(&mgr->lock);

	if (mgr->mst_primary) {
		int sret;
		u8 guid[16];

		sret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
		if (sret != DP_RECEIVER_CAP_SIZE) {
			DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
			ret = -1;
			goto out_unlock;
		}

		ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
					 DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
		if (ret < 0) {
			DRM_DEBUG_KMS("mst write failed - undocked during suspend?\n");
			ret = -1;
			goto out_unlock;
		}

		/* Some hubs forget their guids after they resume */
		sret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
		if (sret != 16) {
			DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
			ret = -1;
			goto out_unlock;
		}
		drm_dp_check_mstb_guid(mgr->mst_primary, guid);

		ret = 0;
	} else
		ret = -1;

out_unlock:
	mutex_unlock(&mgr->lock);
	return ret;
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume);
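/*
 * Illustrative resume handling: on -1 the topology is assumed gone (for
 * instance the user undocked while suspended) and the driver should tear
 * down MST state and do a full reprobe:
 *
 *	if (drm_dp_mst_topology_mgr_resume(mgr) < 0) {
 *		drm_dp_mst_topology_mgr_set_mst(mgr, false);
 *		// ...full reprobe / hotplug processing...
 *	}
 */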
static bool drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
{
	int len;
	u8 replyblock[32];
	int replylen, origlen, curreply;
	int ret;
	struct drm_dp_sideband_msg_rx *msg;
	int basereg = up ? DP_SIDEBAND_MSG_UP_REQ_BASE : DP_SIDEBAND_MSG_DOWN_REP_BASE;
	msg = up ? &mgr->up_req_recv : &mgr->down_rep_recv;

	len = min(mgr->max_dpcd_transaction_bytes, 16);
	ret = drm_dp_dpcd_read(mgr->aux, basereg,
			       replyblock, len);
	if (ret != len) {
		DRM_DEBUG_KMS("failed to read DPCD down rep %d %d\n", len, ret);
		return false;
	}
	ret = drm_dp_sideband_msg_build(msg, replyblock, len, true);
	if (!ret) {
		DRM_DEBUG_KMS("sideband msg build failed %d\n", replyblock[0]);
		return false;
	}
	replylen = msg->curchunk_len + msg->curchunk_hdrlen;

	origlen = replylen;
	replylen -= len;
	curreply = len;
	while (replylen > 0) {
		len = min3(replylen, mgr->max_dpcd_transaction_bytes, 16);
		ret = drm_dp_dpcd_read(mgr->aux, basereg + curreply,
				       replyblock, len);
		if (ret != len) {
			DRM_DEBUG_KMS("failed to read a chunk (len %d, ret %d)\n",
				      len, ret);
			return false;
		}

		ret = drm_dp_sideband_msg_build(msg, replyblock, len, false);
		if (!ret) {
			DRM_DEBUG_KMS("failed to build sideband msg\n");
			return false;
		}

		curreply += len;
		replylen -= len;
	}
	return true;
}
static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
{
	int ret = 0;

	if (!drm_dp_get_one_sb_msg(mgr, false)) {
		memset(&mgr->down_rep_recv, 0,
		       sizeof(struct drm_dp_sideband_msg_rx));
		return 0;
	}

	if (mgr->down_rep_recv.have_eomt) {
		struct drm_dp_sideband_msg_tx *txmsg;
		struct drm_dp_mst_branch *mstb;
		int slot = -1;

		mstb = drm_dp_get_mst_branch_device(mgr,
						    mgr->down_rep_recv.initial_hdr.lct,
						    mgr->down_rep_recv.initial_hdr.rad);

		if (!mstb) {
			DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->down_rep_recv.initial_hdr.lct);
			memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
			return 0;
		}

		/* find the message */
		slot = mgr->down_rep_recv.initial_hdr.seqno;
		mutex_lock(&mgr->qlock);
		txmsg = mstb->tx_slots[slot];
		/* remove from slots */
		mutex_unlock(&mgr->qlock);

		if (!txmsg) {
			DRM_DEBUG_KMS("Got MST reply with no msg %p %d %d %02x %02x\n",
				      mstb,
				      mgr->down_rep_recv.initial_hdr.seqno,
				      mgr->down_rep_recv.initial_hdr.lct,
				      mgr->down_rep_recv.initial_hdr.rad[0],
				      mgr->down_rep_recv.msg[0]);
			drm_dp_put_mst_branch_device(mstb);
			memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
			return 0;
		}

		drm_dp_sideband_parse_reply(&mgr->down_rep_recv, &txmsg->reply);
		if (txmsg->reply.reply_type == 1) {
			DRM_DEBUG_KMS("Got NAK reply: req 0x%02x, reason 0x%02x, nak data 0x%02x\n", txmsg->reply.req_type, txmsg->reply.u.nak.reason, txmsg->reply.u.nak.nak_data);
		}

		memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
		drm_dp_put_mst_branch_device(mstb);

		mutex_lock(&mgr->qlock);
		txmsg->state = DRM_DP_SIDEBAND_TX_RX;
		mstb->tx_slots[slot] = NULL;
		mutex_unlock(&mgr->qlock);

		wake_up_all(&mgr->tx_waitq);
	}
	return ret;
}
static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
{
	int ret = 0;

	if (!drm_dp_get_one_sb_msg(mgr, true)) {
		memset(&mgr->up_req_recv, 0,
		       sizeof(struct drm_dp_sideband_msg_rx));
		return 0;
	}

	if (mgr->up_req_recv.have_eomt) {
		struct drm_dp_sideband_msg_req_body msg;
		struct drm_dp_mst_branch *mstb = NULL;
		bool seqno;

		if (!mgr->up_req_recv.initial_hdr.broadcast) {
			mstb = drm_dp_get_mst_branch_device(mgr,
							    mgr->up_req_recv.initial_hdr.lct,
							    mgr->up_req_recv.initial_hdr.rad);
			if (!mstb) {
				DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
				memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
				return 0;
			}
		}

		seqno = mgr->up_req_recv.initial_hdr.seqno;
		drm_dp_sideband_parse_req(&mgr->up_req_recv, &msg);

		if (msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
			drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false);

			if (!mstb)
				mstb = drm_dp_get_mst_branch_device_by_guid(mgr, msg.u.conn_stat.guid);

			if (!mstb) {
				DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
				memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
				return 0;
			}

			drm_dp_update_port(mstb, &msg.u.conn_stat);

			DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", msg.u.conn_stat.port_number, msg.u.conn_stat.legacy_device_plug_status, msg.u.conn_stat.displayport_device_plug_status, msg.u.conn_stat.message_capability_status, msg.u.conn_stat.input_port, msg.u.conn_stat.peer_device_type);
			(*mgr->cbs->hotplug)(mgr);

		} else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
			drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false);
			if (!mstb)
				mstb = drm_dp_get_mst_branch_device_by_guid(mgr, msg.u.resource_stat.guid);

			if (!mstb) {
				DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
				memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
				return 0;
			}

			DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n", msg.u.resource_stat.port_number, msg.u.resource_stat.available_pbn);
		}

		if (mstb)
			drm_dp_put_mst_branch_device(mstb);

		memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
	}
	return ret;
}
/**
 * drm_dp_mst_hpd_irq() - MST hotplug IRQ notify
 * @mgr: manager to notify irq for.
 * @esi: 4 bytes from SINK_COUNT_ESI
 * @handled: whether the hpd interrupt was consumed or not
 *
 * This should be called from the driver when it detects a short IRQ,
 * along with the value of the DEVICE_SERVICE_IRQ_VECTOR_ESI0. The
 * topology manager will process the sideband messages received as a result
 * of this.
 */
int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled)
{
	int ret = 0;
	int sc;
	*handled = false;
	sc = esi[0] & 0x3f;

	if (sc != mgr->sink_count) {
		mgr->sink_count = sc;
		*handled = true;
	}

	if (esi[1] & DP_DOWN_REP_MSG_RDY) {
		ret = drm_dp_mst_handle_down_rep(mgr);
		*handled = true;
	}

	if (esi[1] & DP_UP_REQ_MSG_RDY) {
		ret |= drm_dp_mst_handle_up_req(mgr);
		*handled = true;
	}

	drm_dp_mst_kick_tx(mgr);
	return ret;
}
EXPORT_SYMBOL(drm_dp_mst_hpd_irq);
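
/*
 * Illustrative sketch, not part of the original file: a short-pulse (IRQ_HPD)
 * handler reads the ESI bytes, hands them to drm_dp_mst_hpd_irq() and then
 * acks whatever the sink latched. The ack write shown is one common pattern;
 * "dp" is hypothetical driver state.
 *
 *	u8 esi[4];
 *	bool handled = false;
 *
 *	if (drm_dp_dpcd_read(&dp->aux, DP_SINK_COUNT_ESI, esi, 4) == 4) {
 *		drm_dp_mst_hpd_irq(&dp->mst_mgr, esi, &handled);
 *		if (handled)
 *			drm_dp_dpcd_write(&dp->aux, DP_SINK_COUNT_ESI + 1,
 *					  &esi[1], 2);
 *	}
 */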
/**
 * drm_dp_mst_detect_port() - get connection status for an MST port
 * @connector: DRM connector for this port
 * @mgr: manager for this port
 * @port: unverified pointer to a port
 *
 * This returns the current connection state for a port. It validates the
 * port pointer still exists so the caller doesn't require a reference.
 */
enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector,
						 struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
{
	enum drm_connector_status status = connector_status_disconnected;

	/* we need to search for the port in the mgr in case it's gone */
	port = drm_dp_get_validated_port_ref(mgr, port);
	if (!port)
		return connector_status_disconnected;

	if (!port->ddps)
		goto out;

	switch (port->pdt) {
	case DP_PEER_DEVICE_NONE:
	case DP_PEER_DEVICE_MST_BRANCHING:
		break;

	case DP_PEER_DEVICE_SST_SINK:
		status = connector_status_connected;
		/* for logical ports - cache the EDID */
		if (port->port_num >= 8 && !port->cached_edid) {
			port->cached_edid = drm_get_edid(connector, &port->aux.ddc);
		}
		break;
	case DP_PEER_DEVICE_DP_LEGACY_CONV:
		if (port->ldps)
			status = connector_status_connected;
		break;
	}
out:
	drm_dp_put_port(port);
	return status;
}
EXPORT_SYMBOL(drm_dp_mst_detect_port);
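
/*
 * Illustrative sketch, not part of the original file: the ->detect() hook of
 * an MST connector usually reduces to this single call, since the helper
 * revalidates the port internally. "example_connector" is hypothetical.
 *
 *	static enum drm_connector_status
 *	example_mst_detect(struct drm_connector *connector, bool force)
 *	{
 *		struct example_connector *c = to_example_connector(connector);
 *
 *		return drm_dp_mst_detect_port(connector, &c->dp->mst_mgr,
 *					      c->port);
 *	}
 */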
/**
 * drm_dp_mst_port_has_audio() - Check whether port has audio capability or not
 * @mgr: manager for this port
 * @port: unverified pointer to a port.
 *
 * This returns whether the port supports audio or not.
 */
bool drm_dp_mst_port_has_audio(struct drm_dp_mst_topology_mgr *mgr,
			       struct drm_dp_mst_port *port)
{
	bool ret = false;

	port = drm_dp_get_validated_port_ref(mgr, port);
	if (!port)
		return ret;
	ret = port->has_audio;
	drm_dp_put_port(port);
	return ret;
}
EXPORT_SYMBOL(drm_dp_mst_port_has_audio);
/**
 * drm_dp_mst_get_edid() - get EDID for an MST port
 * @connector: toplevel connector to get EDID for
 * @mgr: manager for this port
 * @port: unverified pointer to a port.
 *
 * This returns an EDID for the port connected to a connector.
 * It validates the pointer still exists so the caller doesn't require a
 * reference.
 */
struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
{
	struct edid *edid = NULL;

	/* we need to search for the port in the mgr in case it's gone */
	port = drm_dp_get_validated_port_ref(mgr, port);
	if (!port)
		return NULL;

	if (port->cached_edid)
		edid = drm_edid_duplicate(port->cached_edid);
	else {
		edid = drm_get_edid(connector, &port->aux.ddc);
		drm_mode_connector_set_tile_property(connector);
	}
	port->has_audio = drm_detect_monitor_audio(edid);
	drm_dp_put_port(port);
	return edid;
}
EXPORT_SYMBOL(drm_dp_mst_get_edid);
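
/*
 * Illustrative sketch, not part of the original file: a ->get_modes() hook
 * fetches the EDID over the port's sideband I2C path; the caller owns the
 * returned EDID and must kfree() it. "c" and "dp" are hypothetical.
 *
 *	struct edid *edid;
 *	int count = 0;
 *
 *	edid = drm_dp_mst_get_edid(connector, &c->dp->mst_mgr, c->port);
 *	if (edid) {
 *		drm_mode_connector_update_edid_property(connector, edid);
 *		count = drm_add_edid_modes(connector, edid);
 *		kfree(edid);
 *	}
 */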
/**
 * drm_dp_find_vcpi_slots() - find slots for this PBN value
 * @mgr: manager to use
 * @pbn: payload bandwidth to convert into slots.
 */
int drm_dp_find_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr,
			   int pbn)
{
	int num_slots;

	num_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);

	/* max. time slots - one slot for MTP header */
	if (num_slots > 63)
		return -ENOSPC;
	return num_slots;
}
EXPORT_SYMBOL(drm_dp_find_vcpi_slots);
static int drm_dp_init_vcpi(struct drm_dp_mst_topology_mgr *mgr,
			    struct drm_dp_vcpi *vcpi, int pbn, int slots)
{
	int ret;

	/* max. time slots - one slot for MTP header */
	if (slots > 63)
		return -ENOSPC;

	vcpi->pbn = pbn;
	vcpi->aligned_pbn = slots * mgr->pbn_div;
	vcpi->num_slots = slots;

	ret = drm_dp_mst_assign_payload_id(mgr, vcpi);
	if (ret < 0)
		return ret;
	return 0;
}
/**
 * drm_dp_atomic_find_vcpi_slots() - Find and add vcpi slots to the state
 * @state: global atomic state
 * @mgr: MST topology manager for the port
 * @port: port to find vcpi slots for
 * @pbn: bandwidth required for the mode in PBN
 *
 * RETURNS:
 * Total slots in the atomic state assigned for this port or error
 */
int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
				  struct drm_dp_mst_topology_mgr *mgr,
				  struct drm_dp_mst_port *port, int pbn)
{
	struct drm_dp_mst_topology_state *topology_state;
	int req_slots;

	topology_state = drm_atomic_get_mst_topology_state(state, mgr);
	if (topology_state == NULL)
		return -ENOMEM;

	port = drm_dp_get_validated_port_ref(mgr, port);
	if (port == NULL)
		return -EINVAL;
	req_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);
	DRM_DEBUG_KMS("vcpi slots req=%d, avail=%d\n",
		      req_slots, topology_state->avail_slots);

	if (req_slots > topology_state->avail_slots) {
		drm_dp_put_port(port);
		return -ENOSPC;
	}

	topology_state->avail_slots -= req_slots;
	DRM_DEBUG_KMS("vcpi slots avail=%d", topology_state->avail_slots);

	drm_dp_put_port(port);
	return req_slots;
}
EXPORT_SYMBOL(drm_dp_atomic_find_vcpi_slots);
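
/*
 * Illustrative sketch, not part of the original file: in an encoder's
 * ->atomic_check(), the PBN for the requested mode is converted into slots
 * and reserved from the shared topology state before anything is committed.
 * "dp", "c" and "bpp" are hypothetical driver state.
 *
 *	int pbn, slots;
 *
 *	pbn = drm_dp_calc_pbn_mode(adjusted_mode->crtc_clock, bpp);
 *	slots = drm_dp_atomic_find_vcpi_slots(state, &dp->mst_mgr,
 *					      c->port, pbn);
 *	if (slots < 0)
 *		return slots;	// -ENOSPC: mode does not fit on this link
 */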
/**
 * drm_dp_atomic_release_vcpi_slots() - Release allocated vcpi slots
 * @state: global atomic state
 * @mgr: MST topology manager for the port
 * @slots: number of vcpi slots to release
 *
 * RETURNS:
 * 0 if @slots were added back to &drm_dp_mst_topology_state->avail_slots or
 * negative error code
 */
int drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state,
				     struct drm_dp_mst_topology_mgr *mgr,
				     int slots)
{
	struct drm_dp_mst_topology_state *topology_state;

	topology_state = drm_atomic_get_mst_topology_state(state, mgr);
	if (topology_state == NULL)
		return -ENOMEM;

	/* We cannot rely on port->vcpi.num_slots to update
	 * topology_state->avail_slots as the port may not exist if the parent
	 * branch device was unplugged. This should be fixed by tracking
	 * per-port slot allocation in drm_dp_mst_topology_state instead of
	 * depending on the caller to tell us how many slots to release.
	 */
	topology_state->avail_slots += slots;
	DRM_DEBUG_KMS("vcpi slots released=%d, avail=%d\n",
		      slots, topology_state->avail_slots);

	return 0;
}
EXPORT_SYMBOL(drm_dp_atomic_release_vcpi_slots);
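
/*
 * Illustrative sketch, not part of the original file: the disable side of
 * ->atomic_check() hands the slots back. As the comment in the function
 * notes, the caller must remember how many slots it originally reserved.
 *
 *	ret = drm_dp_atomic_release_vcpi_slots(state, &dp->mst_mgr,
 *					       old_slot_count);
 *	if (ret)
 *		return ret;
 */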
/**
 * drm_dp_mst_allocate_vcpi() - Allocate a virtual channel
 * @mgr: manager for this port
 * @port: port to allocate a virtual channel for.
 * @pbn: payload bandwidth number to request
 * @slots: returned number of slots for this PBN.
 */
bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
			      struct drm_dp_mst_port *port, int pbn, int slots)
{
	int ret;

	port = drm_dp_get_validated_port_ref(mgr, port);
	if (!port)
		return false;

	if (slots < 0) {
		drm_dp_put_port(port);
		return false;
	}

	if (port->vcpi.vcpi > 0) {
		DRM_DEBUG_KMS("payload: vcpi %d already allocated for pbn %d - requested pbn %d\n", port->vcpi.vcpi, port->vcpi.pbn, pbn);
		if (pbn == port->vcpi.pbn) {
			drm_dp_put_port(port);
			return true;
		}
	}

	ret = drm_dp_init_vcpi(mgr, &port->vcpi, pbn, slots);
	if (ret) {
		DRM_DEBUG_KMS("failed to init vcpi slots=%d max=63 ret=%d\n",
			      DIV_ROUND_UP(pbn, mgr->pbn_div), ret);
		goto out;
	}
	DRM_DEBUG_KMS("initing vcpi for pbn=%d slots=%d\n",
		      pbn, port->vcpi.num_slots);

	drm_dp_put_port(port);
	return true;
out:
	drm_dp_put_port(port);
	return false;
}
EXPORT_SYMBOL(drm_dp_mst_allocate_vcpi);
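
/*
 * Illustrative sketch, not part of the original file: at enable time the
 * reserved bandwidth is turned into an actual VCPI and the payload table
 * update is kicked off. "dp" and "c" are hypothetical driver state.
 *
 *	slots = drm_dp_find_vcpi_slots(&dp->mst_mgr, pbn);
 *	if (!drm_dp_mst_allocate_vcpi(&dp->mst_mgr, c->port, pbn, slots))
 *		return -EINVAL;
 *	drm_dp_update_payload_part1(&dp->mst_mgr);
 */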
int drm_dp_mst_get_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
{
	int slots = 0;
	port = drm_dp_get_validated_port_ref(mgr, port);
	if (!port)
		return slots;

	slots = port->vcpi.num_slots;
	drm_dp_put_port(port);
	return slots;
}
EXPORT_SYMBOL(drm_dp_mst_get_vcpi_slots);
/**
 * drm_dp_mst_reset_vcpi_slots() - Reset number of slots to 0 for VCPI
 * @mgr: manager for this port
 * @port: unverified pointer to a port.
 *
 * This just resets the number of slots for the port's VCPI for later programming.
 */
void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
{
	port = drm_dp_get_validated_port_ref(mgr, port);
	if (!port)
		return;
	port->vcpi.num_slots = 0;
	drm_dp_put_port(port);
}
EXPORT_SYMBOL(drm_dp_mst_reset_vcpi_slots);
/**
 * drm_dp_mst_deallocate_vcpi() - deallocate a VCPI
 * @mgr: manager for this port
 * @port: unverified port to deallocate vcpi for
 */
void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
{
	port = drm_dp_get_validated_port_ref(mgr, port);
	if (!port)
		return;

	drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
	port->vcpi.num_slots = 0;
	port->vcpi.pbn = 0;
	port->vcpi.aligned_pbn = 0;
	port->vcpi.vcpi = 0;
	drm_dp_put_port(port);
}
EXPORT_SYMBOL(drm_dp_mst_deallocate_vcpi);
static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
				     int id, struct drm_dp_payload *payload)
{
	u8 payload_alloc[3], status;
	int ret;
	int retries = 0;

	drm_dp_dpcd_writeb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS,
			   DP_PAYLOAD_TABLE_UPDATED);

	payload_alloc[0] = id;
	payload_alloc[1] = payload->start_slot;
	payload_alloc[2] = payload->num_slots;

	ret = drm_dp_dpcd_write(mgr->aux, DP_PAYLOAD_ALLOCATE_SET, payload_alloc, 3);
	if (ret != 3) {
		DRM_DEBUG_KMS("failed to write payload allocation %d\n", ret);
		goto fail;
	}

retry:
	ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
	if (ret < 0) {
		DRM_DEBUG_KMS("failed to read payload table status %d\n", ret);
		goto fail;
	}

	if (!(status & DP_PAYLOAD_TABLE_UPDATED)) {
		retries++;
		if (retries < 20) {
			usleep_range(10000, 20000);
			goto retry;
		}
		DRM_DEBUG_KMS("status not set after read payload table status %d\n", status);
		ret = -EINVAL;
		goto fail;
	}
	ret = 0;
fail:
	return ret;
}
/**
 * drm_dp_check_act_status() - Check ACT handled status.
 * @mgr: manager to use
 *
 * Check the payload status bits in the DPCD for ACT handled completion.
 */
int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr)
{
	u8 status;
	int ret;
	int count = 0;

	do {
		ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);

		if (ret < 0) {
			DRM_DEBUG_KMS("failed to read payload table status %d\n", ret);
			goto fail;
		}

		if (status & DP_PAYLOAD_ACT_HANDLED)
			break;
		count++;
		udelay(100);

	} while (count < 30);

	if (!(status & DP_PAYLOAD_ACT_HANDLED)) {
		DRM_DEBUG_KMS("failed to get ACT bit %d after %d retries\n", status, count);
		ret = -EINVAL;
		goto fail;
	}
	return 0;
fail:
	return ret;
}
EXPORT_SYMBOL(drm_dp_check_act_status);
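
/*
 * Illustrative sketch, not part of the original file: the two-step payload
 * update a driver performs around enabling the stream - program the table,
 * let the hardware generate the ACT sequence, wait for the sink to ack it,
 * then send the second half. "dp" is hypothetical driver state.
 *
 *	drm_dp_update_payload_part1(&dp->mst_mgr);
 *	// ...enable the transcoder so the ACT is sent on the link...
 *	drm_dp_check_act_status(&dp->mst_mgr);
 *	drm_dp_update_payload_part2(&dp->mst_mgr);
 */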
/**
 * drm_dp_calc_pbn_mode() - Calculate the PBN for a mode.
 * @clock: dot clock for the mode
 * @bpp: bpp for the mode.
 *
 * This uses the formula in the spec to calculate the PBN value for a mode.
 */
int drm_dp_calc_pbn_mode(int clock, int bpp)
{
	u64 kbps;
	s64 peak_kbps;
	u32 numerator;
	u32 denominator;

	kbps = clock * bpp;

	/*
	 * margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006
	 * The unit of 54/64Mbytes/sec is an arbitrary unit chosen based on
	 * common multiplier to render an integer PBN for all link rate/lane
	 * counts combinations
	 * calculate
	 * peak_kbps *= (1006/1000)
	 * peak_kbps *= (64/54)
	 * peak_kbps *= 8    convert to bytes
	 */

	numerator = 64 * 1006;
	denominator = 54 * 8 * 1000 * 1000;

	kbps *= numerator;
	peak_kbps = drm_fixp_from_fraction(kbps, denominator);

	return drm_fixp2int_ceil(peak_kbps);
}
EXPORT_SYMBOL(drm_dp_calc_pbn_mode);
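
/*
 * Worked example of the fixed-point math above, matching the first
 * self-test case below (clock 154000 kHz at 30 bpp):
 *
 *	kbps        = 154000 * 30           = 4620000
 *	numerator   = 64 * 1006             = 64384
 *	denominator = 54 * 8 * 1000 * 1000  = 432000000
 *	PBN         = ceil(4620000 * 64384 / 432000000)
 *	            = ceil(688.55...)       = 689
 */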
static int test_calc_pbn_mode(void)
{
	int ret;
	ret = drm_dp_calc_pbn_mode(154000, 30);
	if (ret != 689) {
		DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
			  154000, 30, 689, ret);
		return -EINVAL;
	}
	ret = drm_dp_calc_pbn_mode(234000, 30);
	if (ret != 1047) {
		DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
			  234000, 30, 1047, ret);
		return -EINVAL;
	}
	ret = drm_dp_calc_pbn_mode(297000, 24);
	if (ret != 1063) {
		DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
			  297000, 24, 1063, ret);
		return -EINVAL;
	}
	return 0;
}
/* we want to kick the TX after we've acked the up/down IRQs. */
static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr)
{
	queue_work(system_long_wq, &mgr->tx_work);
}
static void drm_dp_mst_dump_mstb(struct seq_file *m,
				 struct drm_dp_mst_branch *mstb)
{
	struct drm_dp_mst_port *port;
	int tabs = mstb->lct;
	char prefix[10];
	int i;

	for (i = 0; i < tabs; i++)
		prefix[i] = '\t';
	prefix[i] = '\0';

	seq_printf(m, "%smst: %p, %d\n", prefix, mstb, mstb->num_ports);
	list_for_each_entry(port, &mstb->ports, next) {
		seq_printf(m, "%sport: %d: input: %d: pdt: %d, ddps: %d ldps: %d, sdp: %d/%d, %p, conn: %p\n", prefix, port->port_num, port->input, port->pdt, port->ddps, port->ldps, port->num_sdp_streams, port->num_sdp_stream_sinks, port, port->connector);
		if (port->mstb)
			drm_dp_mst_dump_mstb(m, port->mstb);
	}
}
static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
				  char *buf)
{
	int i;

	for (i = 0; i < 64; i += 16) {
		if (drm_dp_dpcd_read(mgr->aux,
				     DP_PAYLOAD_TABLE_UPDATE_STATUS + i,
				     &buf[i], 16) != 16)
			return false;
	}
	return true;
}
static void fetch_monitor_name(struct drm_dp_mst_topology_mgr *mgr,
			       struct drm_dp_mst_port *port, char *name,
			       int namelen)
{
	struct edid *mst_edid;

	mst_edid = drm_dp_mst_get_edid(port->connector, mgr, port);
	drm_edid_get_monitor_name(mst_edid, name, namelen);
	kfree(mst_edid);
}
/**
 * drm_dp_mst_dump_topology(): dump topology to seq file.
 * @m: seq_file to dump output to
 * @mgr: manager to dump current topology for.
 *
 * Helper to dump MST topology to a seq file for debugfs.
 */
void drm_dp_mst_dump_topology(struct seq_file *m,
			      struct drm_dp_mst_topology_mgr *mgr)
{
	int i;
	struct drm_dp_mst_port *port;

	mutex_lock(&mgr->lock);
	if (mgr->mst_primary)
		drm_dp_mst_dump_mstb(m, mgr->mst_primary);

	/* dump VCPIs */
	mutex_unlock(&mgr->lock);

	mutex_lock(&mgr->payload_lock);
	seq_printf(m, "vcpi: %lx %lx %d\n", mgr->payload_mask, mgr->vcpi_mask,
		   mgr->max_payloads);

	for (i = 0; i < mgr->max_payloads; i++) {
		if (mgr->proposed_vcpis[i]) {
			char name[14];

			port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
			fetch_monitor_name(mgr, port, name, sizeof(name));
			seq_printf(m, "vcpi %d: %d %d %d sink name: %s\n", i,
				   port->port_num, port->vcpi.vcpi,
				   port->vcpi.num_slots,
				   (*name != 0) ? name : "Unknown");
		} else
			seq_printf(m, "vcpi %d:unused\n", i);
	}
	for (i = 0; i < mgr->max_payloads; i++) {
		seq_printf(m, "payload %d: %d, %d, %d\n",
			   i,
			   mgr->payloads[i].payload_state,
			   mgr->payloads[i].start_slot,
			   mgr->payloads[i].num_slots);
	}
	mutex_unlock(&mgr->payload_lock);

	mutex_lock(&mgr->lock);
	if (mgr->mst_primary) {
		char buf[64];
		int ret;

		ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, buf, DP_RECEIVER_CAP_SIZE);
		seq_printf(m, "dpcd: %*ph\n", DP_RECEIVER_CAP_SIZE, buf);
		ret = drm_dp_dpcd_read(mgr->aux, DP_FAUX_CAP, buf, 2);
		seq_printf(m, "faux/mst: %*ph\n", 2, buf);
		ret = drm_dp_dpcd_read(mgr->aux, DP_MSTM_CTRL, buf, 1);
		seq_printf(m, "mst ctrl: %*ph\n", 1, buf);

		/* dump the standard OUI branch header */
		ret = drm_dp_dpcd_read(mgr->aux, DP_BRANCH_OUI, buf, DP_BRANCH_OUI_HEADER_SIZE);
		seq_printf(m, "branch oui: %*phN devid: ", 3, buf);
		for (i = 0x3; i < 0x8 && buf[i]; i++)
			seq_printf(m, "%c", buf[i]);
		seq_printf(m, " revision: hw: %x.%x sw: %x.%x\n",
			   buf[0x9] >> 4, buf[0x9] & 0xf, buf[0xa], buf[0xb]);
		if (dump_dp_payload_table(mgr, buf))
			seq_printf(m, "payload table: %*ph\n", 63, buf);
	}

	mutex_unlock(&mgr->lock);
}
EXPORT_SYMBOL(drm_dp_mst_dump_topology);
static void drm_dp_tx_work(struct work_struct *work)
{
	struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, tx_work);

	mutex_lock(&mgr->qlock);
	if (!list_empty(&mgr->tx_msg_downq))
		process_single_down_tx_qlock(mgr);
	mutex_unlock(&mgr->qlock);
}
static void drm_dp_free_mst_port(struct kref *kref)
{
	struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref);
	kref_put(&port->parent->kref, drm_dp_free_mst_branch_device);
	kfree(port);
}
static void drm_dp_destroy_connector_work(struct work_struct *work)
{
	struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work);
	struct drm_dp_mst_port *port;
	bool send_hotplug = false;
	/*
	 * Not a regular list traverse as we have to drop the destroy
	 * connector lock before destroying the connector, to avoid AB->BA
	 * ordering between this lock and the config mutex.
	 */
	for (;;) {
		mutex_lock(&mgr->destroy_connector_lock);
		port = list_first_entry_or_null(&mgr->destroy_connector_list, struct drm_dp_mst_port, next);
		if (!port) {
			mutex_unlock(&mgr->destroy_connector_lock);
			break;
		}
		list_del(&port->next);
		mutex_unlock(&mgr->destroy_connector_lock);

		kref_init(&port->kref);
		INIT_LIST_HEAD(&port->next);

		mgr->cbs->destroy_connector(mgr, port->connector);

		drm_dp_port_teardown_pdt(port, port->pdt);
		port->pdt = DP_PEER_DEVICE_NONE;

		if (!port->input && port->vcpi.vcpi > 0) {
			drm_dp_mst_reset_vcpi_slots(mgr, port);
			drm_dp_update_payload_part1(mgr);
			drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
		}

		kref_put(&port->kref, drm_dp_free_mst_port);
		send_hotplug = true;
	}
	if (send_hotplug)
		(*mgr->cbs->hotplug)(mgr);
}
void *drm_dp_mst_duplicate_state(struct drm_atomic_state *state, void *obj)
{
	struct drm_dp_mst_topology_mgr *mgr = obj;
	struct drm_dp_mst_topology_state *new_mst_state;

	if (WARN_ON(!mgr->state))
		return NULL;

	new_mst_state = kmemdup(mgr->state, sizeof(*new_mst_state), GFP_KERNEL);
	if (new_mst_state)
		new_mst_state->state = state;
	return new_mst_state;
}
void drm_dp_mst_swap_state(void *obj, void **obj_state_ptr)
{
	struct drm_dp_mst_topology_mgr *mgr = obj;
	struct drm_dp_mst_topology_state **topology_state_ptr;

	topology_state_ptr = (struct drm_dp_mst_topology_state **)obj_state_ptr;

	mgr->state->state = (*topology_state_ptr)->state;
	swap(*topology_state_ptr, mgr->state);
	mgr->state->state = NULL;
}
void drm_dp_mst_destroy_state(void *obj_state)
{
	kfree(obj_state);
}

static const struct drm_private_state_funcs mst_state_funcs = {
	.duplicate_state = drm_dp_mst_duplicate_state,
	.swap_state = drm_dp_mst_swap_state,
	.destroy_state = drm_dp_mst_destroy_state,
};
/**
 * drm_atomic_get_mst_topology_state: get MST topology state
 *
 * @state: global atomic state
 * @mgr: MST topology manager, also the private object in this case
 *
 * This function wraps drm_atomic_get_private_obj_state() passing in the MST
 * atomic state vtable so that the private object state returned is that of a
 * MST topology object. Also, drm_atomic_get_private_obj_state() expects the
 * caller to take care of the locking, so warn if we don't hold the
 * connection_mutex.
 *
 * RETURNS:
 *
 * The MST topology state or error pointer.
 */
struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_atomic_state *state,
								    struct drm_dp_mst_topology_mgr *mgr)
{
	struct drm_device *dev = mgr->dev;

	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
	return drm_atomic_get_private_obj_state(state, mgr,
						&mst_state_funcs);
}
EXPORT_SYMBOL(drm_atomic_get_mst_topology_state);
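
/*
 * Illustrative sketch, not part of the original file: callers are normally
 * in ->atomic_check(), where the atomic core already holds connection_mutex,
 * so the state can be fetched directly; the return value is documented above
 * as state or error pointer, hence the IS_ERR() check.
 *
 *	struct drm_dp_mst_topology_state *mst_state;
 *
 *	mst_state = drm_atomic_get_mst_topology_state(state, &dp->mst_mgr);
 *	if (IS_ERR(mst_state))
 *		return PTR_ERR(mst_state);
 */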
/**
 * drm_dp_mst_topology_mgr_init - initialise a topology manager
 * @mgr: manager struct to initialise
 * @dev: device providing this structure - for i2c addition.
 * @aux: DP helper aux channel to talk to this device
 * @max_dpcd_transaction_bytes: hw specific DPCD transaction limit
 * @max_payloads: maximum number of payloads this GPU can source
 * @conn_base_id: the connector object ID the MST device is connected to.
 *
 * Return 0 for success, or negative error code on failure
 */
int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_device *dev, struct drm_dp_aux *aux,
				 int max_dpcd_transaction_bytes,
				 int max_payloads, int conn_base_id)
{
	mutex_init(&mgr->lock);
	mutex_init(&mgr->qlock);
	mutex_init(&mgr->payload_lock);
	mutex_init(&mgr->destroy_connector_lock);
	INIT_LIST_HEAD(&mgr->tx_msg_downq);
	INIT_LIST_HEAD(&mgr->destroy_connector_list);
	INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work);
	INIT_WORK(&mgr->tx_work, drm_dp_tx_work);
	INIT_WORK(&mgr->destroy_connector_work, drm_dp_destroy_connector_work);
	init_waitqueue_head(&mgr->tx_waitq);
	mgr->dev = dev;
	mgr->aux = aux;
	mgr->max_dpcd_transaction_bytes = max_dpcd_transaction_bytes;
	mgr->max_payloads = max_payloads;
	mgr->conn_base_id = conn_base_id;
	if (max_payloads + 1 > sizeof(mgr->payload_mask) * 8 ||
	    max_payloads + 1 > sizeof(mgr->vcpi_mask) * 8)
		return -EINVAL;
	mgr->payloads = kcalloc(max_payloads, sizeof(struct drm_dp_payload), GFP_KERNEL);
	if (!mgr->payloads)
		return -ENOMEM;
	mgr->proposed_vcpis = kcalloc(max_payloads, sizeof(struct drm_dp_vcpi *), GFP_KERNEL);
	if (!mgr->proposed_vcpis)
		return -ENOMEM;
	set_bit(0, &mgr->payload_mask);
	if (test_calc_pbn_mode() < 0)
		DRM_ERROR("MST PBN self-test failed\n");

	mgr->state = kzalloc(sizeof(*mgr->state), GFP_KERNEL);
	if (mgr->state == NULL)
		return -ENOMEM;
	mgr->state->mgr = mgr;

	/* max. time slots - one slot for MTP header */
	mgr->state->avail_slots = 63;
	mgr->funcs = &mst_state_funcs;

	return 0;
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init);
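
/*
 * Illustrative sketch, not part of the original file: one-time setup from a
 * driver's output-probing path. The transaction size and payload count are
 * typical hardware-specific values, not requirements.
 *
 *	ret = drm_dp_mst_topology_mgr_init(&dp->mst_mgr, dev, &dp->aux,
 *					   16,	// max DPCD transaction bytes
 *					   4,	// payloads the HW can source
 *					   connector->base.id);
 *	if (ret)
 *		return ret;
 */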
/**
 * drm_dp_mst_topology_mgr_destroy() - destroy topology manager.
 * @mgr: manager to destroy
 */
void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
{
	flush_work(&mgr->work);
	flush_work(&mgr->destroy_connector_work);
	mutex_lock(&mgr->payload_lock);
	kfree(mgr->payloads);
	mgr->payloads = NULL;
	kfree(mgr->proposed_vcpis);
	mgr->proposed_vcpis = NULL;
	mutex_unlock(&mgr->payload_lock);
	mgr->dev = NULL;
	mgr->aux = NULL;
	kfree(mgr->state);
	mgr->state = NULL;
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_destroy);
static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
			       int num)
{
	struct drm_dp_aux *aux = adapter->algo_data;
	struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port, aux);
	struct drm_dp_mst_branch *mstb;
	struct drm_dp_mst_topology_mgr *mgr = port->mgr;
	unsigned int i;
	bool reading = false;
	struct drm_dp_sideband_msg_req_body msg;
	struct drm_dp_sideband_msg_tx *txmsg = NULL;
	int ret;

	mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
	if (!mstb)
		return -EREMOTEIO;

	/* construct i2c msg */
	/* see if last msg is a read */
	if (msgs[num - 1].flags & I2C_M_RD)
		reading = true;

	if (!reading || (num - 1 > DP_REMOTE_I2C_READ_MAX_TRANSACTIONS)) {
		DRM_DEBUG_KMS("Unsupported I2C transaction for MST device\n");
		ret = -EIO;
		goto out;
	}

	memset(&msg, 0, sizeof(msg));
	msg.req_type = DP_REMOTE_I2C_READ;
	msg.u.i2c_read.num_transactions = num - 1;
	msg.u.i2c_read.port_number = port->port_num;
	for (i = 0; i < num - 1; i++) {
		msg.u.i2c_read.transactions[i].i2c_dev_id = msgs[i].addr;
		msg.u.i2c_read.transactions[i].num_bytes = msgs[i].len;
		msg.u.i2c_read.transactions[i].bytes = msgs[i].buf;
	}
	msg.u.i2c_read.read_i2c_device_id = msgs[num - 1].addr;
	msg.u.i2c_read.num_bytes_read = msgs[num - 1].len;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg) {
		ret = -ENOMEM;
		goto out;
	}

	txmsg->dst = mstb;
	drm_dp_encode_sideband_req(&msg, txmsg);

	drm_dp_queue_down_tx(mgr, txmsg);

	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
	if (ret > 0) {

		if (txmsg->reply.reply_type == 1) { /* got a NAK back */
			ret = -EREMOTEIO;
			goto out;
		}
		if (txmsg->reply.u.remote_i2c_read_ack.num_bytes != msgs[num - 1].len) {
			ret = -EIO;
			goto out;
		}
		memcpy(msgs[num - 1].buf, txmsg->reply.u.remote_i2c_read_ack.bytes, msgs[num - 1].len);
		ret = num;
	}
out:
	kfree(txmsg);
	drm_dp_put_mst_branch_device(mstb);
	return ret;
}
static u32 drm_dp_mst_i2c_functionality(struct i2c_adapter *adapter)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
	       I2C_FUNC_SMBUS_READ_BLOCK_DATA |
	       I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
	       I2C_FUNC_10BIT_ADDR;
}

static const struct i2c_algorithm drm_dp_mst_i2c_algo = {
	.functionality = drm_dp_mst_i2c_functionality,
	.master_xfer = drm_dp_mst_i2c_xfer,
};
/**
 * drm_dp_mst_register_i2c_bus() - register an I2C adapter for I2C-over-AUX
 * @aux: DisplayPort AUX channel
 *
 * Returns 0 on success or a negative error code on failure.
 */
static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux)
{
	aux->ddc.algo = &drm_dp_mst_i2c_algo;
	aux->ddc.algo_data = aux;
	aux->ddc.retries = 3;

	aux->ddc.class = I2C_CLASS_DDC;
	aux->ddc.owner = THIS_MODULE;
	aux->ddc.dev.parent = aux->dev;
	aux->ddc.dev.of_node = aux->dev->of_node;

	strlcpy(aux->ddc.name, aux->name ? aux->name : dev_name(aux->dev),
		sizeof(aux->ddc.name));

	return i2c_add_adapter(&aux->ddc);
}
/**
 * drm_dp_mst_unregister_i2c_bus() - unregister an I2C-over-AUX adapter
 * @aux: DisplayPort AUX channel
 */
static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux)
{
	i2c_del_adapter(&aux->ddc);
}