/*
 * Copyright © 2014 Red Hat
 *
 * Permission to use, copy, modify, distribute, and sell this software and its
 * documentation for any purpose is hereby granted without fee, provided that
 * the above copyright notice appear in all copies and that both that copyright
 * notice and this permission notice appear in supporting documentation, and
 * that the name of the copyright holders not be used in advertising or
 * publicity pertaining to distribution of the software without specific,
 * written prior permission.  The copyright holders make no representations
 * about the suitability of this software for any purpose.  It is provided "as
 * is" without express or implied warranty.
 *
 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
 * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
 * OF THIS SOFTWARE.
 */
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/i2c.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drmP.h>

#include <drm/drm_fixed.h>
/*
 * These functions contain parts of the DisplayPort 1.2a MultiStream Transport
 * protocol. The helpers contain a topology manager and bandwidth manager.
 * The helpers encapsulate the sending and receiving of sideband msgs.
 */
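/*
 * A rough sketch of how a driver is expected to drive these helpers (the
 * "my_mgr" pointer and the exact call sites are illustrative, not taken
 * from any particular driver):
 *
 *	// long HPD pulse: the sink reports MST capability
 *	drm_dp_mst_topology_mgr_set_mst(my_mgr, true);
 *
 *	// short HPD pulse: feed the ESI bytes to the manager
 *	bool handled;
 *	drm_dp_mst_hpd_irq(my_mgr, esi, &handled);
 *
 *	// around a modeset, after VCPI slots have been (de)allocated
 *	drm_dp_update_payload_part1(my_mgr);
 *	// ... trigger ACT on the source and wait for completion ...
 *	drm_dp_update_payload_part2(my_mgr);
 */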
static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
				  char *buf);
static int test_calc_pbn_mode(void);

static void drm_dp_put_port(struct drm_dp_mst_port *port);

static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
				     int id,
				     struct drm_dp_payload *payload);

static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
				  struct drm_dp_mst_port *port,
				  int offset, int size, u8 *bytes);

static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
				     struct drm_dp_mst_branch *mstb);
static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
					   struct drm_dp_mst_branch *mstb,
					   struct drm_dp_mst_port *port);
static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
				 u8 *guid);

static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux);
static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux);
static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr);
/* sideband msg handling */
static u8 drm_dp_msg_header_crc4(const uint8_t *data, size_t num_nibbles)
	int number_of_bits = num_nibbles * 4;

	while (number_of_bits != 0) {
		remainder |= (data[array_index] & bitmask) >> bitshift;
		if ((remainder & 0x10) == 0x10)

	while (number_of_bits != 0) {
		if ((remainder & 0x10) != 0)

static u8 drm_dp_msg_data_crc4(const uint8_t *data, u8 number_of_bytes)
	int number_of_bits = number_of_bytes * 8;

	while (number_of_bits != 0) {
		remainder |= (data[array_index] & bitmask) >> bitshift;
		if ((remainder & 0x100) == 0x100)

	while (number_of_bits != 0) {
		if ((remainder & 0x100) != 0)

	return remainder & 0xff;
static inline u8 drm_dp_calc_sb_hdr_size(struct drm_dp_sideband_msg_hdr *hdr)
	size += (hdr->lct / 2);

static void drm_dp_encode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
					   u8 *buf, int *len)
	buf[idx++] = ((hdr->lct & 0xf) << 4) | (hdr->lcr & 0xf);
	for (i = 0; i < (hdr->lct / 2); i++)
		buf[idx++] = hdr->rad[i];
	buf[idx++] = (hdr->broadcast << 7) | (hdr->path_msg << 6) |
		     (hdr->msg_len & 0x3f);
	buf[idx++] = (hdr->somt << 7) | (hdr->eomt << 6) | (hdr->seqno << 4);

	crc4 = drm_dp_msg_header_crc4(buf, (idx * 2) - 1);
	buf[idx - 1] |= (crc4 & 0xf);
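/*
 * Worked example of the header layout produced above (derived purely from
 * the encoder code, values chosen for illustration): lct = 2, lcr = 1,
 * rad[0] = 0x10, non-broadcast, non-path message, msg_len = 6, somt = 1,
 * eomt = 1, seqno = 0 gives:
 *
 *	buf[0] = 0x21		// (lct & 0xf) << 4 | (lcr & 0xf)
 *	buf[1] = 0x10		// lct / 2 == 1 RAD byte
 *	buf[2] = 0x06		// broadcast << 7 | path_msg << 6 | msg_len
 *	buf[3] = 0xc0 | crc4	// somt << 7 | eomt << 6 | seqno << 4, with the
 *				// CRC over the first (idx * 2) - 1 nibbles in
 *				// the low nibble
 */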
static bool drm_dp_decode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
					   u8 *buf, int buflen, u8 *hdrlen)
	len += ((buf[0] & 0xf0) >> 4) / 2;

	crc4 = drm_dp_msg_header_crc4(buf, (len * 2) - 1);

	if ((crc4 & 0xf) != (buf[len - 1] & 0xf)) {
		DRM_DEBUG_KMS("crc4 mismatch 0x%x 0x%x\n", crc4, buf[len - 1]);

	hdr->lct = (buf[0] & 0xf0) >> 4;
	hdr->lcr = (buf[0] & 0xf);
	for (i = 0; i < (hdr->lct / 2); i++)
		hdr->rad[i] = buf[idx++];
	hdr->broadcast = (buf[idx] >> 7) & 0x1;
	hdr->path_msg = (buf[idx] >> 6) & 0x1;
	hdr->msg_len = buf[idx] & 0x3f;
	hdr->somt = (buf[idx] >> 7) & 0x1;
	hdr->eomt = (buf[idx] >> 6) & 0x1;
	hdr->seqno = (buf[idx] >> 4) & 0x1;
static void drm_dp_encode_sideband_req(struct drm_dp_sideband_msg_req_body *req,
				       struct drm_dp_sideband_msg_tx *raw)
	buf[idx++] = req->req_type & 0x7f;

	switch (req->req_type) {
	case DP_ENUM_PATH_RESOURCES:
		buf[idx] = (req->u.port_num.port_number & 0xf) << 4;
	case DP_ALLOCATE_PAYLOAD:
		buf[idx] = (req->u.allocate_payload.port_number & 0xf) << 4 |
			   (req->u.allocate_payload.number_sdp_streams & 0xf);
		buf[idx] = (req->u.allocate_payload.vcpi & 0x7f);
		buf[idx] = (req->u.allocate_payload.pbn >> 8);
		buf[idx] = (req->u.allocate_payload.pbn & 0xff);
		for (i = 0; i < req->u.allocate_payload.number_sdp_streams / 2; i++) {
			buf[idx] = ((req->u.allocate_payload.sdp_stream_sink[i * 2] & 0xf) << 4) |
				   (req->u.allocate_payload.sdp_stream_sink[i * 2 + 1] & 0xf);
		if (req->u.allocate_payload.number_sdp_streams & 1) {
			i = req->u.allocate_payload.number_sdp_streams - 1;
			buf[idx] = (req->u.allocate_payload.sdp_stream_sink[i] & 0xf) << 4;
	case DP_QUERY_PAYLOAD:
		buf[idx] = (req->u.query_payload.port_number & 0xf) << 4;
		buf[idx] = (req->u.query_payload.vcpi & 0x7f);
	case DP_REMOTE_DPCD_READ:
		buf[idx] = (req->u.dpcd_read.port_number & 0xf) << 4;
		buf[idx] |= ((req->u.dpcd_read.dpcd_address & 0xf0000) >> 16) & 0xf;
		buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff00) >> 8;
		buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff);
		buf[idx] = (req->u.dpcd_read.num_bytes);
	case DP_REMOTE_DPCD_WRITE:
		buf[idx] = (req->u.dpcd_write.port_number & 0xf) << 4;
		buf[idx] |= ((req->u.dpcd_write.dpcd_address & 0xf0000) >> 16) & 0xf;
		buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff00) >> 8;
		buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff);
		buf[idx] = (req->u.dpcd_write.num_bytes);
		memcpy(&buf[idx], req->u.dpcd_write.bytes, req->u.dpcd_write.num_bytes);
		idx += req->u.dpcd_write.num_bytes;
	case DP_REMOTE_I2C_READ:
		buf[idx] = (req->u.i2c_read.port_number & 0xf) << 4;
		buf[idx] |= (req->u.i2c_read.num_transactions & 0x3);
		for (i = 0; i < (req->u.i2c_read.num_transactions & 0x3); i++) {
			buf[idx] = req->u.i2c_read.transactions[i].i2c_dev_id & 0x7f;
			buf[idx] = req->u.i2c_read.transactions[i].num_bytes;
			memcpy(&buf[idx], req->u.i2c_read.transactions[i].bytes, req->u.i2c_read.transactions[i].num_bytes);
			idx += req->u.i2c_read.transactions[i].num_bytes;
			buf[idx] = (req->u.i2c_read.transactions[i].no_stop_bit & 0x1) << 5;
			buf[idx] |= (req->u.i2c_read.transactions[i].i2c_transaction_delay & 0xf);
		buf[idx] = (req->u.i2c_read.read_i2c_device_id) & 0x7f;
		buf[idx] = (req->u.i2c_read.num_bytes_read);
	case DP_REMOTE_I2C_WRITE:
		buf[idx] = (req->u.i2c_write.port_number & 0xf) << 4;
		buf[idx] = (req->u.i2c_write.write_i2c_device_id) & 0x7f;
		buf[idx] = (req->u.i2c_write.num_bytes);
		memcpy(&buf[idx], req->u.i2c_write.bytes, req->u.i2c_write.num_bytes);
		idx += req->u.i2c_write.num_bytes;
static void drm_dp_crc_sideband_chunk_req(u8 *msg, u8 len)
	crc4 = drm_dp_msg_data_crc4(msg, len);

static void drm_dp_encode_sideband_reply(struct drm_dp_sideband_msg_reply_body *rep,
					 struct drm_dp_sideband_msg_tx *raw)
	buf[idx++] = (rep->reply_type & 0x1) << 7 | (rep->req_type & 0x7f);
/* this adds a chunk of msg to the builder to get the final msg */
static bool drm_dp_sideband_msg_build(struct drm_dp_sideband_msg_rx *msg,
				      u8 *replybuf, u8 replybuflen, bool hdr)
	struct drm_dp_sideband_msg_hdr recv_hdr;
	ret = drm_dp_decode_sideband_msg_hdr(&recv_hdr, replybuf, replybuflen, &hdrlen);
		print_hex_dump(KERN_DEBUG, "failed hdr", DUMP_PREFIX_NONE, 16, 1, replybuf, replybuflen, false);

	/* get length contained in this portion */
	msg->curchunk_len = recv_hdr.msg_len;
	msg->curchunk_hdrlen = hdrlen;

	/* we have already gotten an somt - don't bother parsing */
	if (recv_hdr.somt && msg->have_somt)

	memcpy(&msg->initial_hdr, &recv_hdr, sizeof(struct drm_dp_sideband_msg_hdr));
	msg->have_somt = true;
	msg->have_eomt = true;

	/* copy the bytes for the remainder of this header chunk */
	msg->curchunk_idx = min(msg->curchunk_len, (u8)(replybuflen - hdrlen));
	memcpy(&msg->chunk[0], replybuf + hdrlen, msg->curchunk_idx);
	memcpy(&msg->chunk[msg->curchunk_idx], replybuf, replybuflen);
	msg->curchunk_idx += replybuflen;

	if (msg->curchunk_idx >= msg->curchunk_len) {
		crc4 = drm_dp_msg_data_crc4(msg->chunk, msg->curchunk_len - 1);
		/* copy chunk into bigger msg */
		memcpy(&msg->msg[msg->curlen], msg->chunk, msg->curchunk_len - 1);
		msg->curlen += msg->curchunk_len - 1;
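/*
 * Illustration of how the builder above is fed (sizes are made up): a reply
 * spanning two 16 byte DPCD reads arrives as
 *
 *	drm_dp_sideband_msg_build(msg, replyblock, 16, true);	// SOMT chunk, header parsed
 *	drm_dp_sideband_msg_build(msg, replyblock, 16, false);	// continuation bytes
 *
 * msg->have_eomt is set when a parsed chunk header carries the EOMT bit,
 * signalling that the complete message has been folded into msg->msg[].
 */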
static bool drm_dp_sideband_parse_link_address(struct drm_dp_sideband_msg_rx *raw,
					       struct drm_dp_sideband_msg_reply_body *repmsg)
	memcpy(repmsg->u.link_addr.guid, &raw->msg[idx], 16);
	repmsg->u.link_addr.nports = raw->msg[idx] & 0xf;
	if (idx > raw->curlen)
	for (i = 0; i < repmsg->u.link_addr.nports; i++) {
		if (raw->msg[idx] & 0x80)
			repmsg->u.link_addr.ports[i].input_port = 1;

		repmsg->u.link_addr.ports[i].peer_device_type = (raw->msg[idx] >> 4) & 0x7;
		repmsg->u.link_addr.ports[i].port_number = (raw->msg[idx] & 0xf);
		if (idx > raw->curlen)
		repmsg->u.link_addr.ports[i].mcs = (raw->msg[idx] >> 7) & 0x1;
		repmsg->u.link_addr.ports[i].ddps = (raw->msg[idx] >> 6) & 0x1;
		if (repmsg->u.link_addr.ports[i].input_port == 0)
			repmsg->u.link_addr.ports[i].legacy_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
		if (idx > raw->curlen)
		if (repmsg->u.link_addr.ports[i].input_port == 0) {
			repmsg->u.link_addr.ports[i].dpcd_revision = (raw->msg[idx]);
			if (idx > raw->curlen)
			memcpy(repmsg->u.link_addr.ports[i].peer_guid, &raw->msg[idx], 16);
			if (idx > raw->curlen)
			repmsg->u.link_addr.ports[i].num_sdp_streams = (raw->msg[idx] >> 4) & 0xf;
			repmsg->u.link_addr.ports[i].num_sdp_stream_sinks = (raw->msg[idx] & 0xf);
		if (idx > raw->curlen)

	DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen);

static bool drm_dp_sideband_parse_remote_dpcd_read(struct drm_dp_sideband_msg_rx *raw,
						   struct drm_dp_sideband_msg_reply_body *repmsg)
	repmsg->u.remote_dpcd_read_ack.port_number = raw->msg[idx] & 0xf;
	if (idx > raw->curlen)
	repmsg->u.remote_dpcd_read_ack.num_bytes = raw->msg[idx];
	if (idx > raw->curlen)
	memcpy(repmsg->u.remote_dpcd_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_dpcd_read_ack.num_bytes);

	DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen);

static bool drm_dp_sideband_parse_remote_dpcd_write(struct drm_dp_sideband_msg_rx *raw,
						    struct drm_dp_sideband_msg_reply_body *repmsg)
	repmsg->u.remote_dpcd_write_ack.port_number = raw->msg[idx] & 0xf;
	if (idx > raw->curlen)

	DRM_DEBUG_KMS("parse length fail %d %d\n", idx, raw->curlen);

static bool drm_dp_sideband_parse_remote_i2c_read_ack(struct drm_dp_sideband_msg_rx *raw,
						      struct drm_dp_sideband_msg_reply_body *repmsg)
	repmsg->u.remote_i2c_read_ack.port_number = (raw->msg[idx] & 0xf);
	if (idx > raw->curlen)
	repmsg->u.remote_i2c_read_ack.num_bytes = raw->msg[idx];
	memcpy(repmsg->u.remote_i2c_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_i2c_read_ack.num_bytes);

	DRM_DEBUG_KMS("remote i2c reply parse length fail %d %d\n", idx, raw->curlen);

static bool drm_dp_sideband_parse_enum_path_resources_ack(struct drm_dp_sideband_msg_rx *raw,
							  struct drm_dp_sideband_msg_reply_body *repmsg)
	repmsg->u.path_resources.port_number = (raw->msg[idx] >> 4) & 0xf;
	if (idx > raw->curlen)
	repmsg->u.path_resources.full_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
	if (idx > raw->curlen)
	repmsg->u.path_resources.avail_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
	if (idx > raw->curlen)

	DRM_DEBUG_KMS("enum resource parse length fail %d %d\n", idx, raw->curlen);

static bool drm_dp_sideband_parse_allocate_payload_ack(struct drm_dp_sideband_msg_rx *raw,
						       struct drm_dp_sideband_msg_reply_body *repmsg)
	repmsg->u.allocate_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
	if (idx > raw->curlen)
	repmsg->u.allocate_payload.vcpi = raw->msg[idx];
	if (idx > raw->curlen)
	repmsg->u.allocate_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
	if (idx > raw->curlen)

	DRM_DEBUG_KMS("allocate payload parse length fail %d %d\n", idx, raw->curlen);

static bool drm_dp_sideband_parse_query_payload_ack(struct drm_dp_sideband_msg_rx *raw,
						    struct drm_dp_sideband_msg_reply_body *repmsg)
	repmsg->u.query_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
	if (idx > raw->curlen)
	repmsg->u.query_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
	if (idx > raw->curlen)

	DRM_DEBUG_KMS("query payload parse length fail %d %d\n", idx, raw->curlen);

static bool drm_dp_sideband_parse_reply(struct drm_dp_sideband_msg_rx *raw,
					struct drm_dp_sideband_msg_reply_body *msg)
	memset(msg, 0, sizeof(*msg));
	msg->reply_type = (raw->msg[0] & 0x80) >> 7;
	msg->req_type = (raw->msg[0] & 0x7f);

	if (msg->reply_type) {
		memcpy(msg->u.nak.guid, &raw->msg[1], 16);
		msg->u.nak.reason = raw->msg[17];
		msg->u.nak.nak_data = raw->msg[18];

	switch (msg->req_type) {
	case DP_LINK_ADDRESS:
		return drm_dp_sideband_parse_link_address(raw, msg);
	case DP_QUERY_PAYLOAD:
		return drm_dp_sideband_parse_query_payload_ack(raw, msg);
	case DP_REMOTE_DPCD_READ:
		return drm_dp_sideband_parse_remote_dpcd_read(raw, msg);
	case DP_REMOTE_DPCD_WRITE:
		return drm_dp_sideband_parse_remote_dpcd_write(raw, msg);
	case DP_REMOTE_I2C_READ:
		return drm_dp_sideband_parse_remote_i2c_read_ack(raw, msg);
	case DP_ENUM_PATH_RESOURCES:
		return drm_dp_sideband_parse_enum_path_resources_ack(raw, msg);
	case DP_ALLOCATE_PAYLOAD:
		return drm_dp_sideband_parse_allocate_payload_ack(raw, msg);
		DRM_ERROR("Got unknown reply 0x%02x\n", msg->req_type);

static bool drm_dp_sideband_parse_connection_status_notify(struct drm_dp_sideband_msg_rx *raw,
							   struct drm_dp_sideband_msg_req_body *msg)
	msg->u.conn_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
	if (idx > raw->curlen)
	memcpy(msg->u.conn_stat.guid, &raw->msg[idx], 16);
	if (idx > raw->curlen)
	msg->u.conn_stat.legacy_device_plug_status = (raw->msg[idx] >> 6) & 0x1;
	msg->u.conn_stat.displayport_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
	msg->u.conn_stat.message_capability_status = (raw->msg[idx] >> 4) & 0x1;
	msg->u.conn_stat.input_port = (raw->msg[idx] >> 3) & 0x1;
	msg->u.conn_stat.peer_device_type = (raw->msg[idx] & 0x7);

	DRM_DEBUG_KMS("connection status reply parse length fail %d %d\n", idx, raw->curlen);

static bool drm_dp_sideband_parse_resource_status_notify(struct drm_dp_sideband_msg_rx *raw,
							 struct drm_dp_sideband_msg_req_body *msg)
	msg->u.resource_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
	if (idx > raw->curlen)
	memcpy(msg->u.resource_stat.guid, &raw->msg[idx], 16);
	if (idx > raw->curlen)
	msg->u.resource_stat.available_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);

	DRM_DEBUG_KMS("resource status reply parse length fail %d %d\n", idx, raw->curlen);

static bool drm_dp_sideband_parse_req(struct drm_dp_sideband_msg_rx *raw,
				      struct drm_dp_sideband_msg_req_body *msg)
	memset(msg, 0, sizeof(*msg));
	msg->req_type = (raw->msg[0] & 0x7f);

	switch (msg->req_type) {
	case DP_CONNECTION_STATUS_NOTIFY:
		return drm_dp_sideband_parse_connection_status_notify(raw, msg);
	case DP_RESOURCE_STATUS_NOTIFY:
		return drm_dp_sideband_parse_resource_status_notify(raw, msg);
		DRM_ERROR("Got unknown request 0x%02x\n", msg->req_type);
static int build_dpcd_write(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes, u8 *bytes)
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_REMOTE_DPCD_WRITE;
	req.u.dpcd_write.port_number = port_num;
	req.u.dpcd_write.dpcd_address = offset;
	req.u.dpcd_write.num_bytes = num_bytes;
	req.u.dpcd_write.bytes = bytes;
	drm_dp_encode_sideband_req(&req, msg);

static int build_link_address(struct drm_dp_sideband_msg_tx *msg)
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_LINK_ADDRESS;
	drm_dp_encode_sideband_req(&req, msg);

static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg, int port_num)
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_ENUM_PATH_RESOURCES;
	req.u.port_num.port_number = port_num;
	drm_dp_encode_sideband_req(&req, msg);
	msg->path_msg = true;

static int build_allocate_payload(struct drm_dp_sideband_msg_tx *msg, int port_num,
				  u8 vcpi, uint16_t pbn,
				  u8 number_sdp_streams,
				  u8 *sdp_stream_sink)
	struct drm_dp_sideband_msg_req_body req;
	memset(&req, 0, sizeof(req));
	req.req_type = DP_ALLOCATE_PAYLOAD;
	req.u.allocate_payload.port_number = port_num;
	req.u.allocate_payload.vcpi = vcpi;
	req.u.allocate_payload.pbn = pbn;
	req.u.allocate_payload.number_sdp_streams = number_sdp_streams;
	memcpy(req.u.allocate_payload.sdp_stream_sink, sdp_stream_sink,
	       number_sdp_streams);
	drm_dp_encode_sideband_req(&req, msg);
	msg->path_msg = true;
static int drm_dp_mst_assign_payload_id(struct drm_dp_mst_topology_mgr *mgr,
					struct drm_dp_vcpi *vcpi)
	mutex_lock(&mgr->payload_lock);
	ret = find_first_zero_bit(&mgr->payload_mask, mgr->max_payloads + 1);
	if (ret > mgr->max_payloads) {
		DRM_DEBUG_KMS("out of payload ids %d\n", ret);

	vcpi_ret = find_first_zero_bit(&mgr->vcpi_mask, mgr->max_payloads + 1);
	if (vcpi_ret > mgr->max_payloads) {
		DRM_DEBUG_KMS("out of vcpi ids %d\n", ret);

	set_bit(ret, &mgr->payload_mask);
	set_bit(vcpi_ret, &mgr->vcpi_mask);
	vcpi->vcpi = vcpi_ret + 1;
	mgr->proposed_vcpis[ret - 1] = vcpi;
	mutex_unlock(&mgr->payload_lock);

static void drm_dp_mst_put_payload_id(struct drm_dp_mst_topology_mgr *mgr,
				      int vcpi)
	mutex_lock(&mgr->payload_lock);
	DRM_DEBUG_KMS("putting payload %d\n", vcpi);
	clear_bit(vcpi - 1, &mgr->vcpi_mask);

	for (i = 0; i < mgr->max_payloads; i++) {
		if (mgr->proposed_vcpis[i])
			if (mgr->proposed_vcpis[i]->vcpi == vcpi) {
				mgr->proposed_vcpis[i] = NULL;
				clear_bit(i + 1, &mgr->payload_mask);
	mutex_unlock(&mgr->payload_lock);

static bool check_txmsg_state(struct drm_dp_mst_topology_mgr *mgr,
			      struct drm_dp_sideband_msg_tx *txmsg)
	/*
	 * All updates to txmsg->state are protected by mgr->qlock, and the two
	 * cases we check here are terminal states. For those the barriers
	 * provided by the wake_up/wait_event pair are enough.
	 */
	ret = (txmsg->state == DRM_DP_SIDEBAND_TX_RX ||
	       txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT);

static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb,
				    struct drm_dp_sideband_msg_tx *txmsg)
	struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;

	ret = wait_event_timeout(mgr->tx_waitq,
				 check_txmsg_state(mgr, txmsg),
	mutex_lock(&mstb->mgr->qlock);
	if (txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT) {
		DRM_DEBUG_KMS("timedout msg send %p %d %d\n", txmsg, txmsg->state, txmsg->seqno);

		/* dump some state */
		if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED ||
		    txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND) {
			list_del(&txmsg->next);
		if (txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND ||
		    txmsg->state == DRM_DP_SIDEBAND_TX_SENT) {
			mstb->tx_slots[txmsg->seqno] = NULL;
	mutex_unlock(&mgr->qlock);
static struct drm_dp_mst_branch *drm_dp_add_mst_branch_device(u8 lct, u8 *rad)
	struct drm_dp_mst_branch *mstb;

	mstb = kzalloc(sizeof(*mstb), GFP_KERNEL);
	memcpy(mstb->rad, rad, lct / 2);
	INIT_LIST_HEAD(&mstb->ports);
	kref_init(&mstb->kref);

static void drm_dp_free_mst_port(struct kref *kref);

static void drm_dp_free_mst_branch_device(struct kref *kref)
	struct drm_dp_mst_branch *mstb = container_of(kref, struct drm_dp_mst_branch, kref);
	if (mstb->port_parent) {
		if (list_empty(&mstb->port_parent->next))
			kref_put(&mstb->port_parent->kref, drm_dp_free_mst_port);

static void drm_dp_destroy_mst_branch_device(struct kref *kref)
	struct drm_dp_mst_branch *mstb = container_of(kref, struct drm_dp_mst_branch, kref);
	struct drm_dp_mst_port *port, *tmp;
	bool wake_tx = false;

	/*
	 * init kref again to be used by ports to remove mst branch when it is
	 */
	if (mstb->port_parent && list_empty(&mstb->port_parent->next))
		kref_get(&mstb->port_parent->kref);

	/*
	 * destroy all ports - don't need lock
	 * as there are no more references to the mst branch
	 * device at this point.
	 */
	list_for_each_entry_safe(port, tmp, &mstb->ports, next) {
		list_del(&port->next);
		drm_dp_put_port(port);

	/* drop any tx slots msg */
	mutex_lock(&mstb->mgr->qlock);
	if (mstb->tx_slots[0]) {
		mstb->tx_slots[0]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
		mstb->tx_slots[0] = NULL;
	if (mstb->tx_slots[1]) {
		mstb->tx_slots[1]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
		mstb->tx_slots[1] = NULL;
	mutex_unlock(&mstb->mgr->qlock);

	wake_up(&mstb->mgr->tx_waitq);

	kref_put(kref, drm_dp_free_mst_branch_device);

static void drm_dp_put_mst_branch_device(struct drm_dp_mst_branch *mstb)
	kref_put(&mstb->kref, drm_dp_destroy_mst_branch_device);

static void drm_dp_port_teardown_pdt(struct drm_dp_mst_port *port, int old_pdt)
	struct drm_dp_mst_branch *mstb;

	case DP_PEER_DEVICE_DP_LEGACY_CONV:
	case DP_PEER_DEVICE_SST_SINK:
		/* remove i2c over sideband */
		drm_dp_mst_unregister_i2c_bus(&port->aux);
	case DP_PEER_DEVICE_MST_BRANCHING:
		drm_dp_put_mst_branch_device(mstb);

static void drm_dp_destroy_port(struct kref *kref)
	struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref);
	struct drm_dp_mst_topology_mgr *mgr = port->mgr;

	port->vcpi.num_slots = 0;
	kfree(port->cached_edid);

	/*
	 * The only time we don't have a connector
	 * on an output port is if the connector init
	 * fails.
	 */
	if (port->connector) {
		/* we can't destroy the connector here, as
		 * we might be holding the mode_config.mutex
		 * from an EDID retrieval */
		mutex_lock(&mgr->destroy_connector_lock);
		kref_get(&port->parent->kref);
		list_add(&port->next, &mgr->destroy_connector_list);
		mutex_unlock(&mgr->destroy_connector_lock);
		schedule_work(&mgr->destroy_connector_work);

	/* no need to clean up vcpi
	 * as if we have no connector we never setup a vcpi */
	drm_dp_port_teardown_pdt(port, port->pdt);

static void drm_dp_put_port(struct drm_dp_mst_port *port)
	kref_put(&port->kref, drm_dp_destroy_port);
static struct drm_dp_mst_branch *drm_dp_mst_get_validated_mstb_ref_locked(struct drm_dp_mst_branch *mstb, struct drm_dp_mst_branch *to_find)
	struct drm_dp_mst_port *port;
	struct drm_dp_mst_branch *rmstb;
	if (to_find == mstb) {
		kref_get(&mstb->kref);
	list_for_each_entry(port, &mstb->ports, next) {
		rmstb = drm_dp_mst_get_validated_mstb_ref_locked(port->mstb, to_find);

static struct drm_dp_mst_branch *drm_dp_get_validated_mstb_ref(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_branch *mstb)
	struct drm_dp_mst_branch *rmstb = NULL;
	mutex_lock(&mgr->lock);
	if (mgr->mst_primary)
		rmstb = drm_dp_mst_get_validated_mstb_ref_locked(mgr->mst_primary, mstb);
	mutex_unlock(&mgr->lock);

static struct drm_dp_mst_port *drm_dp_mst_get_port_ref_locked(struct drm_dp_mst_branch *mstb, struct drm_dp_mst_port *to_find)
	struct drm_dp_mst_port *port, *mport;

	list_for_each_entry(port, &mstb->ports, next) {
		if (port == to_find) {
			kref_get(&port->kref);
		mport = drm_dp_mst_get_port_ref_locked(port->mstb, to_find);

static struct drm_dp_mst_port *drm_dp_get_validated_port_ref(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
	struct drm_dp_mst_port *rport = NULL;
	mutex_lock(&mgr->lock);
	if (mgr->mst_primary)
		rport = drm_dp_mst_get_port_ref_locked(mgr->mst_primary, port);
	mutex_unlock(&mgr->lock);

static struct drm_dp_mst_port *drm_dp_get_port(struct drm_dp_mst_branch *mstb, u8 port_num)
	struct drm_dp_mst_port *port;

	list_for_each_entry(port, &mstb->ports, next) {
		if (port->port_num == port_num) {
			kref_get(&port->kref);
/*
 * calculate a new RAD for this MST branch device
 * if parent has an LCT of 2 then it has 1 nibble of RAD,
 * if parent has an LCT of 3 then it has 2 nibbles of RAD,
 */
static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port,
			       u8 *rad)
	int parent_lct = port->parent->lct;
	int idx = (parent_lct - 1) / 2;
	if (parent_lct > 1) {
		memcpy(rad, port->parent->rad, idx + 1);
		shift = (parent_lct % 2) ? 4 : 0;
	rad[idx] |= port->port_num << shift;
	return parent_lct + 1;
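/*
 * Worked example of the RAD update above (numbers chosen for illustration):
 * a parent branch with lct = 2 and rad[0] = 0x10 exposing the new branch on
 * port 3 gives idx = 0 and shift = 0 (even lct), so rad[0] becomes
 * 0x10 | 3 = 0x13 and the new device uses lct = 3.
 */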
/*
 * return sends link address for new mstb
 */
static bool drm_dp_port_setup_pdt(struct drm_dp_mst_port *port)
	bool send_link = false;
	switch (port->pdt) {
	case DP_PEER_DEVICE_DP_LEGACY_CONV:
	case DP_PEER_DEVICE_SST_SINK:
		/* add i2c over sideband */
		ret = drm_dp_mst_register_i2c_bus(&port->aux);
	case DP_PEER_DEVICE_MST_BRANCHING:
		lct = drm_dp_calculate_rad(port, rad);

		port->mstb = drm_dp_add_mst_branch_device(lct, rad);
		port->mstb->mgr = port->mgr;
		port->mstb->port_parent = port;

static void drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, u8 *guid)
	memcpy(mstb->guid, guid, 16);

	if (!drm_dp_validate_guid(mstb->mgr, mstb->guid)) {
		if (mstb->port_parent) {
			ret = drm_dp_send_dpcd_write(
			ret = drm_dp_dpcd_write(
static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb,
				int pnum,
				char *proppath,
				size_t proppath_size)
	snprintf(proppath, proppath_size, "mst:%d", mstb->mgr->conn_base_id);
	for (i = 0; i < (mstb->lct - 1); i++) {
		int shift = (i % 2) ? 0 : 4;
		int port_num = (mstb->rad[i / 2] >> shift) & 0xf;
		snprintf(temp, sizeof(temp), "-%d", port_num);
		strlcat(proppath, temp, proppath_size);
	snprintf(temp, sizeof(temp), "-%d", pnum);
	strlcat(proppath, temp, proppath_size);
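/*
 * Example of the property path built above (purely illustrative values):
 * with conn_base_id 20, an mstb at lct 2 reached through port 1, and the
 * connector on port 8, the resulting path is "mst:20-1-8".
 */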
static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
			    struct device *dev,
			    struct drm_dp_link_addr_reply_port *port_msg)
	struct drm_dp_mst_port *port;
	bool created = false;

	port = drm_dp_get_port(mstb, port_msg->port_number);
		port = kzalloc(sizeof(*port), GFP_KERNEL);
		kref_init(&port->kref);
		port->parent = mstb;
		port->port_num = port_msg->port_number;
		port->mgr = mstb->mgr;
		port->aux.name = "DPMST";
		port->aux.dev = dev;
		old_pdt = port->pdt;
		old_ddps = port->ddps;

	port->pdt = port_msg->peer_device_type;
	port->input = port_msg->input_port;
	port->mcs = port_msg->mcs;
	port->ddps = port_msg->ddps;
	port->ldps = port_msg->legacy_device_plug_status;
	port->dpcd_rev = port_msg->dpcd_revision;
	port->num_sdp_streams = port_msg->num_sdp_streams;
	port->num_sdp_stream_sinks = port_msg->num_sdp_stream_sinks;

	/* manage mstb port lists with mgr lock - take a reference
	   for this list */
	mutex_lock(&mstb->mgr->lock);
	kref_get(&port->kref);
	list_add(&port->next, &mstb->ports);
	mutex_unlock(&mstb->mgr->lock);

	if (old_ddps != port->ddps) {
			drm_dp_send_enum_path_resources(mstb->mgr, mstb, port);
			port->available_pbn = 0;

	if (old_pdt != port->pdt && !port->input) {
		drm_dp_port_teardown_pdt(port, old_pdt);

		ret = drm_dp_port_setup_pdt(port);
			drm_dp_send_link_address(mstb->mgr, port->mstb);

	if (created && !port->input) {
		build_mst_prop_path(mstb, port->port_num, proppath, sizeof(proppath));
		port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr, port, proppath);
		if (!port->connector) {
			/* remove it from the port list */
			mutex_lock(&mstb->mgr->lock);
			list_del(&port->next);
			mutex_unlock(&mstb->mgr->lock);
			/* drop port list reference */
			drm_dp_put_port(port);
		drm_mode_connector_set_tile_property(port->connector);
		(*mstb->mgr->cbs->register_connector)(port->connector);

	/* put reference to this port */
	drm_dp_put_port(port);

static void drm_dp_update_port(struct drm_dp_mst_branch *mstb,
			       struct drm_dp_connection_status_notify *conn_stat)
	struct drm_dp_mst_port *port;
	bool dowork = false;
	port = drm_dp_get_port(mstb, conn_stat->port_number);

	old_ddps = port->ddps;
	old_pdt = port->pdt;
	port->pdt = conn_stat->peer_device_type;
	port->mcs = conn_stat->message_capability_status;
	port->ldps = conn_stat->legacy_device_plug_status;
	port->ddps = conn_stat->displayport_device_plug_status;

	if (old_ddps != port->ddps) {
			port->available_pbn = 0;

	if (old_pdt != port->pdt && !port->input) {
		drm_dp_port_teardown_pdt(port, old_pdt);
		if (drm_dp_port_setup_pdt(port))

	drm_dp_put_port(port);
		queue_work(system_long_wq, &mstb->mgr->work);
static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_topology_mgr *mgr,
							      u8 lct, u8 *rad)
	struct drm_dp_mst_branch *mstb;
	struct drm_dp_mst_port *port;

	/* find the port by iterating down */
	mutex_lock(&mgr->lock);
	mstb = mgr->mst_primary;

	for (i = 0; i < lct - 1; i++) {
		int shift = (i % 2) ? 0 : 4;
		int port_num = (rad[i / 2] >> shift) & 0xf;

		list_for_each_entry(port, &mstb->ports, next) {
			if (port->port_num == port_num) {
			DRM_ERROR("failed to lookup MSTB with lct %d, rad %02x\n", lct, rad[0]);
	kref_get(&mstb->kref);
	mutex_unlock(&mgr->lock);

static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper(
	struct drm_dp_mst_branch *mstb,
	uint8_t *guid)
	struct drm_dp_mst_branch *found_mstb;
	struct drm_dp_mst_port *port;

	if (memcmp(mstb->guid, guid, 16) == 0)

	list_for_each_entry(port, &mstb->ports, next) {
		found_mstb = get_mst_branch_device_by_guid_helper(port->mstb, guid);

static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device_by_guid(
	struct drm_dp_mst_topology_mgr *mgr,
	uint8_t *guid)
	struct drm_dp_mst_branch *mstb;

	/* find the port by iterating down */
	mutex_lock(&mgr->lock);
	mstb = get_mst_branch_device_by_guid_helper(mgr->mst_primary, guid);
		kref_get(&mstb->kref);
	mutex_unlock(&mgr->lock);

static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
					       struct drm_dp_mst_branch *mstb)
	struct drm_dp_mst_port *port;
	struct drm_dp_mst_branch *mstb_child;
	if (!mstb->link_address_sent)
		drm_dp_send_link_address(mgr, mstb);

	list_for_each_entry(port, &mstb->ports, next) {
			if (port->cached_edid) {
				kfree(port->cached_edid);
				port->cached_edid = NULL;

		if (!port->available_pbn)
			drm_dp_send_enum_path_resources(mgr, mstb, port);

			mstb_child = drm_dp_get_validated_mstb_ref(mgr, port->mstb);
				drm_dp_check_and_send_link_address(mgr, mstb_child);
				drm_dp_put_mst_branch_device(mstb_child);
		} else if (port->pdt == DP_PEER_DEVICE_SST_SINK ||
			   port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV) {
			if (!port->cached_edid) {
					drm_get_edid(port->connector, &port->aux.ddc);
static void drm_dp_mst_link_probe_work(struct work_struct *work)
	struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, work);
	struct drm_dp_mst_branch *mstb;

	mutex_lock(&mgr->lock);
	mstb = mgr->mst_primary;
		kref_get(&mstb->kref);
	mutex_unlock(&mgr->lock);
		drm_dp_check_and_send_link_address(mgr, mstb);
		drm_dp_put_mst_branch_device(mstb);

	(*mgr->cbs->hotplug)(mgr);

static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
				 u8 *guid)
	static u8 zero_guid[16];

	if (!memcmp(guid, zero_guid, 16)) {
		u64 salt = get_jiffies_64();
		memcpy(&guid[0], &salt, sizeof(u64));
		memcpy(&guid[8], &salt, sizeof(u64));

static int build_dpcd_read(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes)
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_REMOTE_DPCD_READ;
	req.u.dpcd_read.port_number = port_num;
	req.u.dpcd_read.dpcd_address = offset;
	req.u.dpcd_read.num_bytes = num_bytes;
	drm_dp_encode_sideband_req(&req, msg);
static int drm_dp_send_sideband_msg(struct drm_dp_mst_topology_mgr *mgr,
				    bool up, u8 *msg, int len)
	int regbase = up ? DP_SIDEBAND_MSG_UP_REP_BASE : DP_SIDEBAND_MSG_DOWN_REQ_BASE;
	int tosend, total, offset;

		tosend = min3(mgr->max_dpcd_transaction_bytes, 16, total);

		ret = drm_dp_dpcd_write(mgr->aux, regbase + offset,
		if (ret != tosend) {
			if (ret == -EIO && retries < 5) {
			DRM_DEBUG_KMS("failed to dpcd write %d %d\n", tosend, ret);
	} while (total > 0);

static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
				  struct drm_dp_sideband_msg_tx *txmsg)
	struct drm_dp_mst_branch *mstb = txmsg->dst;

	/* both msg slots are full */
	if (txmsg->seqno == -1) {
		if (mstb->tx_slots[0] && mstb->tx_slots[1]) {
			DRM_DEBUG_KMS("%s: failed to find slot\n", __func__);
		if (mstb->tx_slots[0] == NULL && mstb->tx_slots[1] == NULL) {
			txmsg->seqno = mstb->last_seqno;
			mstb->last_seqno ^= 1;
		} else if (mstb->tx_slots[0] == NULL)
		mstb->tx_slots[txmsg->seqno] = txmsg;

	req_type = txmsg->msg[0] & 0x7f;
	if (req_type == DP_CONNECTION_STATUS_NOTIFY ||
	    req_type == DP_RESOURCE_STATUS_NOTIFY)
	hdr->path_msg = txmsg->path_msg;
	hdr->lct = mstb->lct;
	hdr->lcr = mstb->lct - 1;
	memcpy(hdr->rad, mstb->rad, mstb->lct / 2);
	hdr->seqno = txmsg->seqno;
/*
 * process a single block of the next message in the sideband queue
 */
static int process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
				   struct drm_dp_sideband_msg_tx *txmsg,
				   bool up)
	struct drm_dp_sideband_msg_hdr hdr;
	int len, space, idx, tosend;

	memset(&hdr, 0, sizeof(struct drm_dp_sideband_msg_hdr));

	if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED) {
		txmsg->state = DRM_DP_SIDEBAND_TX_START_SEND;

	/* make hdr from dst mst - for replies use seqno
	   otherwise assign one */
	ret = set_hdr_from_dst_qlock(&hdr, txmsg);

	/* amount left to send in this message */
	len = txmsg->cur_len - txmsg->cur_offset;

	/* 48 - sideband msg size - 1 byte for data CRC, x header bytes */
	space = 48 - 1 - drm_dp_calc_sb_hdr_size(&hdr);

	tosend = min(len, space);
	if (len == txmsg->cur_len)

	hdr.msg_len = tosend + 1;
	drm_dp_encode_sideband_msg_hdr(&hdr, chunk, &idx);
	memcpy(&chunk[idx], &txmsg->msg[txmsg->cur_offset], tosend);
	/* add crc at end */
	drm_dp_crc_sideband_chunk_req(&chunk[idx], tosend);

	ret = drm_dp_send_sideband_msg(mgr, up, chunk, idx);
		DRM_DEBUG_KMS("sideband msg failed to send\n");

	txmsg->cur_offset += tosend;
	if (txmsg->cur_offset == txmsg->cur_len) {
		txmsg->state = DRM_DP_SIDEBAND_TX_SENT;

static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
	struct drm_dp_sideband_msg_tx *txmsg;

	WARN_ON(!mutex_is_locked(&mgr->qlock));

	/* construct a chunk from the first msg in the tx_msg queue */
	if (list_empty(&mgr->tx_msg_downq)) {
		mgr->tx_down_in_progress = false;
	mgr->tx_down_in_progress = true;

	txmsg = list_first_entry(&mgr->tx_msg_downq, struct drm_dp_sideband_msg_tx, next);
	ret = process_single_tx_qlock(mgr, txmsg, false);
		/* txmsg is sent it should be in the slots now */
		list_del(&txmsg->next);
		DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
		list_del(&txmsg->next);
		if (txmsg->seqno != -1)
			txmsg->dst->tx_slots[txmsg->seqno] = NULL;
		txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
		wake_up(&mgr->tx_waitq);
	if (list_empty(&mgr->tx_msg_downq)) {
		mgr->tx_down_in_progress = false;

/* called holding qlock */
static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
				       struct drm_dp_sideband_msg_tx *txmsg)
	/* construct a chunk from the first msg in the tx_msg queue */
	ret = process_single_tx_qlock(mgr, txmsg, true);
		DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
	txmsg->dst->tx_slots[txmsg->seqno] = NULL;

static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_dp_sideband_msg_tx *txmsg)
	mutex_lock(&mgr->qlock);
	list_add_tail(&txmsg->next, &mgr->tx_msg_downq);
	if (!mgr->tx_down_in_progress)
		process_single_down_tx_qlock(mgr);
	mutex_unlock(&mgr->qlock);
static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
				     struct drm_dp_mst_branch *mstb)
	struct drm_dp_sideband_msg_tx *txmsg;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);

	len = build_link_address(txmsg);

	mstb->link_address_sent = true;
	drm_dp_queue_down_tx(mgr, txmsg);

	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
		if (txmsg->reply.reply_type == 1)
			DRM_DEBUG_KMS("link address nak received\n");
			DRM_DEBUG_KMS("link address reply: %d\n", txmsg->reply.u.link_addr.nports);
			for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) {
				DRM_DEBUG_KMS("port %d: input %d, pdt: %d, pn: %d, dpcd_rev: %02x, mcs: %d, ddps: %d, ldps %d, sdp %d/%d\n", i,
					      txmsg->reply.u.link_addr.ports[i].input_port,
					      txmsg->reply.u.link_addr.ports[i].peer_device_type,
					      txmsg->reply.u.link_addr.ports[i].port_number,
					      txmsg->reply.u.link_addr.ports[i].dpcd_revision,
					      txmsg->reply.u.link_addr.ports[i].mcs,
					      txmsg->reply.u.link_addr.ports[i].ddps,
					      txmsg->reply.u.link_addr.ports[i].legacy_device_plug_status,
					      txmsg->reply.u.link_addr.ports[i].num_sdp_streams,
					      txmsg->reply.u.link_addr.ports[i].num_sdp_stream_sinks);

			drm_dp_check_mstb_guid(mstb, txmsg->reply.u.link_addr.guid);

			for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) {
				drm_dp_add_port(mstb, mgr->dev, &txmsg->reply.u.link_addr.ports[i]);
		mstb->link_address_sent = false;
		DRM_DEBUG_KMS("link address failed %d\n", ret);

static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
					   struct drm_dp_mst_branch *mstb,
					   struct drm_dp_mst_port *port)
	struct drm_dp_sideband_msg_tx *txmsg;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);

	len = build_enum_path_resources(txmsg, port->port_num);

	drm_dp_queue_down_tx(mgr, txmsg);

	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
		if (txmsg->reply.reply_type == 1)
			DRM_DEBUG_KMS("enum path resources nak received\n");
			if (port->port_num != txmsg->reply.u.path_resources.port_number)
				DRM_ERROR("got incorrect port in response\n");
			DRM_DEBUG_KMS("enum path resources %d: %d %d\n", txmsg->reply.u.path_resources.port_number, txmsg->reply.u.path_resources.full_payload_bw_number,
				      txmsg->reply.u.path_resources.avail_payload_bw_number);
			port->available_pbn = txmsg->reply.u.path_resources.avail_payload_bw_number;
static struct drm_dp_mst_port *drm_dp_get_last_connected_port_to_mstb(struct drm_dp_mst_branch *mstb)
	if (!mstb->port_parent)

	if (mstb->port_parent->mstb != mstb)
		return mstb->port_parent;

	return drm_dp_get_last_connected_port_to_mstb(mstb->port_parent->parent);

static struct drm_dp_mst_branch *drm_dp_get_last_connected_port_and_mstb(struct drm_dp_mst_topology_mgr *mgr,
									 struct drm_dp_mst_branch *mstb,
									 int *port_num)
	struct drm_dp_mst_branch *rmstb = NULL;
	struct drm_dp_mst_port *found_port;
	mutex_lock(&mgr->lock);
	if (mgr->mst_primary) {
		found_port = drm_dp_get_last_connected_port_to_mstb(mstb);
			rmstb = found_port->parent;
			kref_get(&rmstb->kref);
			*port_num = found_port->port_num;
	mutex_unlock(&mgr->lock);

static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
				   struct drm_dp_mst_port *port,
	struct drm_dp_sideband_msg_tx *txmsg;
	struct drm_dp_mst_branch *mstb;
	int len, ret, port_num;
	u8 sinks[DRM_DP_MAX_SDP_STREAMS];

	port_num = port->port_num;
	mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
		mstb = drm_dp_get_last_connected_port_and_mstb(mgr, port->parent, &port_num);

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);

	for (i = 0; i < port->num_sdp_streams; i++)

	len = build_allocate_payload(txmsg, port_num,
				     pbn, port->num_sdp_streams, sinks);

	drm_dp_queue_down_tx(mgr, txmsg);

	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
		if (txmsg->reply.reply_type == 1) {

	drm_dp_put_mst_branch_device(mstb);
static int drm_dp_create_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
				       int id,
				       struct drm_dp_payload *payload)
	ret = drm_dp_dpcd_write_payload(mgr, id, payload);
		payload->payload_state = 0;
	payload->payload_state = DP_PAYLOAD_LOCAL;

static int drm_dp_create_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
				       struct drm_dp_mst_port *port,
				       int id,
				       struct drm_dp_payload *payload)
	ret = drm_dp_payload_send_msg(mgr, port, id, port->vcpi.pbn);
	payload->payload_state = DP_PAYLOAD_REMOTE;

static int drm_dp_destroy_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
					struct drm_dp_mst_port *port,
					int id,
					struct drm_dp_payload *payload)
	DRM_DEBUG_KMS("\n");
	/* it's okay for these to fail */
		drm_dp_payload_send_msg(mgr, port, id, 0);

	drm_dp_dpcd_write_payload(mgr, id, payload);
	payload->payload_state = DP_PAYLOAD_DELETE_LOCAL;

static int drm_dp_destroy_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
					int id,
					struct drm_dp_payload *payload)
	payload->payload_state = 0;
/**
 * drm_dp_update_payload_part1() - Execute payload update part 1
 * @mgr: manager to use.
 *
 * This iterates over all proposed virtual channels, and tries to
 * allocate space in the link for them. For 0->slots transitions,
 * this step just writes the VCPI to the MST device. For slots->0
 * transitions, this writes the updated VCPIs and removes the
 * remote VC payloads.
 *
 * after calling this the driver should generate ACT and payload
 * packets.
 */
int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
	struct drm_dp_payload req_payload;
	struct drm_dp_mst_port *port;

	mutex_lock(&mgr->payload_lock);
	for (i = 0; i < mgr->max_payloads; i++) {
		/* solve the current payloads - compare to the hw ones
		   - update the hw view */
		req_payload.start_slot = cur_slots;
		if (mgr->proposed_vcpis[i]) {
			port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
			req_payload.num_slots = mgr->proposed_vcpis[i]->num_slots;
			req_payload.vcpi = mgr->proposed_vcpis[i]->vcpi;
			req_payload.num_slots = 0;

		if (mgr->payloads[i].start_slot != req_payload.start_slot) {
			mgr->payloads[i].start_slot = req_payload.start_slot;
		/* work out what is required to happen with this payload */
		if (mgr->payloads[i].num_slots != req_payload.num_slots) {

			/* need to push an update for this payload */
			if (req_payload.num_slots) {
				drm_dp_create_payload_step1(mgr, mgr->proposed_vcpis[i]->vcpi, &req_payload);
				mgr->payloads[i].num_slots = req_payload.num_slots;
				mgr->payloads[i].vcpi = req_payload.vcpi;
			} else if (mgr->payloads[i].num_slots) {
				mgr->payloads[i].num_slots = 0;
				drm_dp_destroy_payload_step1(mgr, port, port->vcpi.vcpi, &mgr->payloads[i]);
				req_payload.payload_state = mgr->payloads[i].payload_state;
				mgr->payloads[i].start_slot = 0;
			mgr->payloads[i].payload_state = req_payload.payload_state;
		cur_slots += req_payload.num_slots;

	for (i = 0; i < mgr->max_payloads; i++) {
		if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) {
			DRM_DEBUG_KMS("removing payload %d\n", i);
			for (j = i; j < mgr->max_payloads - 1; j++) {
				memcpy(&mgr->payloads[j], &mgr->payloads[j + 1], sizeof(struct drm_dp_payload));
				mgr->proposed_vcpis[j] = mgr->proposed_vcpis[j + 1];
				if (mgr->proposed_vcpis[j] && mgr->proposed_vcpis[j]->num_slots) {
					set_bit(j + 1, &mgr->payload_mask);
					clear_bit(j + 1, &mgr->payload_mask);
			memset(&mgr->payloads[mgr->max_payloads - 1], 0, sizeof(struct drm_dp_payload));
			mgr->proposed_vcpis[mgr->max_payloads - 1] = NULL;
			clear_bit(mgr->max_payloads, &mgr->payload_mask);
	mutex_unlock(&mgr->payload_lock);

EXPORT_SYMBOL(drm_dp_update_payload_part1);
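/*
 * Sketch of the intended call order from a driver's modeset path (the ACT
 * trigger in the middle is hardware specific and only illustrative):
 *
 *	drm_dp_update_payload_part1(mgr);	// write VCPI table updates
 *	// ... program the source's payload table and trigger/await ACT ...
 *	drm_dp_update_payload_part2(mgr);	// send ALLOCATE_PAYLOAD downstream
 */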
/**
 * drm_dp_update_payload_part2() - Execute payload update part 2
 * @mgr: manager to use.
 *
 * This iterates over all proposed virtual channels, and tries to
 * allocate space in the link for them. For 0->slots transitions,
 * this step writes the remote VC payload commands. For slots->0
 * this just resets some internal state.
 */
int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr)
	struct drm_dp_mst_port *port;

	mutex_lock(&mgr->payload_lock);
	for (i = 0; i < mgr->max_payloads; i++) {
		if (!mgr->proposed_vcpis[i])

		port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);

		DRM_DEBUG_KMS("payload %d %d\n", i, mgr->payloads[i].payload_state);
		if (mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL) {
			ret = drm_dp_create_payload_step2(mgr, port, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]);
		} else if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) {
			ret = drm_dp_destroy_payload_step2(mgr, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]);
			mutex_unlock(&mgr->payload_lock);
	mutex_unlock(&mgr->payload_lock);

EXPORT_SYMBOL(drm_dp_update_payload_part2);
#if 0 /* unused as of yet */
static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_dp_mst_port *port,
				 int offset, int size)
	struct drm_dp_sideband_msg_tx *txmsg;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);

	len = build_dpcd_read(txmsg, port->port_num, 0, 8);
	txmsg->dst = port->parent;

	drm_dp_queue_down_tx(mgr, txmsg);
#endif

static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
				  struct drm_dp_mst_port *port,
				  int offset, int size, u8 *bytes)
	struct drm_dp_sideband_msg_tx *txmsg;
	struct drm_dp_mst_branch *mstb;

	mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);

	len = build_dpcd_write(txmsg, port->port_num, offset, size, bytes);

	drm_dp_queue_down_tx(mgr, txmsg);

	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
		if (txmsg->reply.reply_type == 1) {

	drm_dp_put_mst_branch_device(mstb);
static int drm_dp_encode_up_ack_reply(struct drm_dp_sideband_msg_tx *msg, u8 req_type)
	struct drm_dp_sideband_msg_reply_body reply;

	reply.reply_type = 0;
	reply.req_type = req_type;
	drm_dp_encode_sideband_reply(&reply, msg);

static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
				    struct drm_dp_mst_branch *mstb,
				    int req_type, int seqno, bool broadcast)
	struct drm_dp_sideband_msg_tx *txmsg;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);

	txmsg->seqno = seqno;
	drm_dp_encode_up_ack_reply(txmsg, req_type);

	mutex_lock(&mgr->qlock);
	process_single_up_tx_qlock(mgr, txmsg);
	mutex_unlock(&mgr->qlock);
static bool drm_dp_get_vc_payload_bw(int dp_link_bw,
				     int dp_link_count,
				     int *out)
	switch (dp_link_bw) {
	default:
		DRM_DEBUG_KMS("invalid link bandwidth in DPCD: %x (link count: %d)\n",
			      dp_link_bw, dp_link_count);
	case DP_LINK_BW_1_62:
		*out = 3 * dp_link_count;
	case DP_LINK_BW_2_7:
		*out = 5 * dp_link_count;
	case DP_LINK_BW_5_4:
		*out = 10 * dp_link_count;
/**
 * drm_dp_mst_topology_mgr_set_mst() - Set the MST state for a topology manager
 * @mgr: manager to set state for
 * @mst_state: true to enable MST on this connector - false to disable.
 *
 * This is called by the driver when it detects an MST capable device plugged
 * into a DP MST capable port, or when a DP MST capable device is unplugged.
 */
int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state)
	struct drm_dp_mst_branch *mstb = NULL;

	mutex_lock(&mgr->lock);
	if (mst_state == mgr->mst_state)

	mgr->mst_state = mst_state;
	/* set the device into MST mode */
		WARN_ON(mgr->mst_primary);

		ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
		if (ret != DP_RECEIVER_CAP_SIZE) {
			DRM_DEBUG_KMS("failed to read DPCD\n");

		if (!drm_dp_get_vc_payload_bw(mgr->dpcd[1],
					      mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK,

		mgr->total_pbn = 2560;
		mgr->total_slots = DIV_ROUND_UP(mgr->total_pbn, mgr->pbn_div);
		mgr->avail_slots = mgr->total_slots;

		/* add initial branch device at LCT 1 */
		mstb = drm_dp_add_mst_branch_device(1, NULL);

		/* give this the main reference */
		mgr->mst_primary = mstb;
		kref_get(&mgr->mst_primary->kref);

		ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
					 DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);

			struct drm_dp_payload reset_pay;
			reset_pay.start_slot = 0;
			reset_pay.num_slots = 0x3f;
			drm_dp_dpcd_write_payload(mgr, 0, &reset_pay);

		queue_work(system_long_wq, &mgr->work);

		/* disable MST on the device */
		mstb = mgr->mst_primary;
		mgr->mst_primary = NULL;
		/* this can fail if the device is gone */
		drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0);
		memset(mgr->payloads, 0, mgr->max_payloads * sizeof(struct drm_dp_payload));
		mgr->payload_mask = 0;
		set_bit(0, &mgr->payload_mask);

	mutex_unlock(&mgr->lock);
		drm_dp_put_mst_branch_device(mstb);

EXPORT_SYMBOL(drm_dp_mst_topology_mgr_set_mst);
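/*
 * Illustrative caller, e.g. from a long-HPD/detect path. The helper
 * my_sink_is_mst() is hypothetical; it would typically check the sink's
 * DPCD revision and MST capability bit before enabling MST:
 *
 *	bool is_mst = my_sink_is_mst(mgr->aux);
 *	if (is_mst != mgr->mst_state)
 *		drm_dp_mst_topology_mgr_set_mst(mgr, is_mst);
 */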
/**
 * drm_dp_mst_topology_mgr_suspend() - suspend the MST manager
 * @mgr: manager to suspend
 *
 * This function tells the MST device that we can't handle UP messages
 * anymore. This should stop it from sending any since we are suspended.
 */
void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr)
	mutex_lock(&mgr->lock);
	drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
			   DP_MST_EN | DP_UPSTREAM_IS_SRC);
	mutex_unlock(&mgr->lock);
	flush_work(&mgr->work);
	flush_work(&mgr->destroy_connector_work);

EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend);

/**
 * drm_dp_mst_topology_mgr_resume() - resume the MST manager
 * @mgr: manager to resume
 *
 * This will fetch DPCD and see if the device is still there,
 * if it is, it will rewrite the MSTM control bits, and return.
 *
 * if the device fails this returns -1, and the driver should do
 * a full MST reprobe, in case we were undocked.
 */
int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr)
	mutex_lock(&mgr->lock);

	if (mgr->mst_primary) {
		sret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
		if (sret != DP_RECEIVER_CAP_SIZE) {
			DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");

		ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
					 DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
			DRM_DEBUG_KMS("mst write failed - undocked during suspend?\n");

	mutex_unlock(&mgr->lock);

EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume);
static void drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
	int replylen, origlen, curreply;
	struct drm_dp_sideband_msg_rx *msg;
	int basereg = up ? DP_SIDEBAND_MSG_UP_REQ_BASE : DP_SIDEBAND_MSG_DOWN_REP_BASE;
	msg = up ? &mgr->up_req_recv : &mgr->down_rep_recv;

	len = min(mgr->max_dpcd_transaction_bytes, 16);
	ret = drm_dp_dpcd_read(mgr->aux, basereg,
		DRM_DEBUG_KMS("failed to read DPCD down rep %d %d\n", len, ret);
	ret = drm_dp_sideband_msg_build(msg, replyblock, len, true);
		DRM_DEBUG_KMS("sideband msg build failed %d\n", replyblock[0]);
	replylen = msg->curchunk_len + msg->curchunk_hdrlen;

	while (replylen > 0) {
		len = min3(replylen, mgr->max_dpcd_transaction_bytes, 16);
		ret = drm_dp_dpcd_read(mgr->aux, basereg + curreply,
			DRM_DEBUG_KMS("failed to read a chunk\n");
		ret = drm_dp_sideband_msg_build(msg, replyblock, len, false);
			DRM_DEBUG_KMS("failed to build sideband msg\n");
static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
{
	int ret = 0;

	drm_dp_get_one_sb_msg(mgr, false);

	if (mgr->down_rep_recv.have_eomt) {
		struct drm_dp_sideband_msg_tx *txmsg;
		struct drm_dp_mst_branch *mstb;
		int slot = -1;

		mstb = drm_dp_get_mst_branch_device(mgr,
						    mgr->down_rep_recv.initial_hdr.lct,
						    mgr->down_rep_recv.initial_hdr.rad);
		if (!mstb) {
			DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->down_rep_recv.initial_hdr.lct);
			memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
			return 0;
		}

		/* find the message */
		slot = mgr->down_rep_recv.initial_hdr.seqno;
		mutex_lock(&mgr->qlock);
		txmsg = mstb->tx_slots[slot];
		/* remove from slots */
		mutex_unlock(&mgr->qlock);

		if (!txmsg) {
			DRM_DEBUG_KMS("Got MST reply with no msg %p %d %d %02x %02x\n",
				      mstb,
				      mgr->down_rep_recv.initial_hdr.seqno,
				      mgr->down_rep_recv.initial_hdr.lct,
				      mgr->down_rep_recv.initial_hdr.rad[0],
				      mgr->down_rep_recv.msg[0]);
			drm_dp_put_mst_branch_device(mstb);
			memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
			return 0;
		}

		drm_dp_sideband_parse_reply(&mgr->down_rep_recv, &txmsg->reply);
		if (txmsg->reply.reply_type == 1) {
			DRM_DEBUG_KMS("Got NAK reply: req 0x%02x, reason 0x%02x, nak data 0x%02x\n", txmsg->reply.req_type, txmsg->reply.u.nak.reason, txmsg->reply.u.nak.nak_data);
		}

		memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
		drm_dp_put_mst_branch_device(mstb);

		mutex_lock(&mgr->qlock);
		txmsg->state = DRM_DP_SIDEBAND_TX_RX;
		mstb->tx_slots[slot] = NULL;
		mutex_unlock(&mgr->qlock);

		wake_up(&mgr->tx_waitq);
	}
	return ret;
}
static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
{
	int ret = 0;

	drm_dp_get_one_sb_msg(mgr, true);

	if (mgr->up_req_recv.have_eomt) {
		struct drm_dp_sideband_msg_req_body msg;
		struct drm_dp_mst_branch *mstb = NULL;
		bool seqno;

		if (!mgr->up_req_recv.initial_hdr.broadcast) {
			mstb = drm_dp_get_mst_branch_device(mgr,
							    mgr->up_req_recv.initial_hdr.lct,
							    mgr->up_req_recv.initial_hdr.rad);
			if (!mstb) {
				DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
				memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
				return 0;
			}
		}

		seqno = mgr->up_req_recv.initial_hdr.seqno;
		drm_dp_sideband_parse_req(&mgr->up_req_recv, &msg);

		if (msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
			drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false);

			if (!mstb)
				mstb = drm_dp_get_mst_branch_device_by_guid(mgr, msg.u.conn_stat.guid);

			if (!mstb) {
				DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
				memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
				return 0;
			}

			drm_dp_update_port(mstb, &msg.u.conn_stat);

			DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", msg.u.conn_stat.port_number, msg.u.conn_stat.legacy_device_plug_status, msg.u.conn_stat.displayport_device_plug_status, msg.u.conn_stat.message_capability_status, msg.u.conn_stat.input_port, msg.u.conn_stat.peer_device_type);
		} else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
			drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false);

			if (!mstb)
				mstb = drm_dp_get_mst_branch_device_by_guid(mgr, msg.u.resource_stat.guid);

			if (!mstb) {
				DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
				memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
				return 0;
			}

			DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n", msg.u.resource_stat.port_number, msg.u.resource_stat.available_pbn);
		}

		drm_dp_put_mst_branch_device(mstb);
		memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
	}
	return ret;
}
/**
 * drm_dp_mst_hpd_irq() - MST hotplug IRQ notify
 * @mgr: manager to notify irq for.
 * @esi: 4 bytes from SINK_COUNT_ESI
 * @handled: whether the hpd interrupt was consumed or not
 *
 * This should be called from the driver when it detects a short IRQ,
 * along with the value of the DEVICE_SERVICE_IRQ_VECTOR_ESI0. The
 * topology manager will process the sideband messages received as a result
 * of this.
 */
int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled)
{
	int ret = 0;
	int sc;
	*handled = false;
	sc = esi[0] & 0x3f;

	if (sc != mgr->sink_count) {
		mgr->sink_count = sc;
		*handled = true;
	}

	if (esi[1] & DP_DOWN_REP_MSG_RDY) {
		ret = drm_dp_mst_handle_down_rep(mgr);
		*handled = true;
	}

	if (esi[1] & DP_UP_REQ_MSG_RDY) {
		ret |= drm_dp_mst_handle_up_req(mgr);
		*handled = true;
	}

	drm_dp_mst_kick_tx(mgr);
	return ret;
}
EXPORT_SYMBOL(drm_dp_mst_hpd_irq);
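
/*
 * Illustrative sketch only: a driver's short-pulse handler is expected to
 * fetch the ESI bytes itself, hand them to drm_dp_mst_hpd_irq() and then ack
 * the serviced bits, roughly along these lines (error handling omitted):
 *
 *	u8 esi[16];
 *	bool handled;
 *
 *	drm_dp_dpcd_read(mgr->aux, DP_SINK_COUNT_ESI, esi, 14);
 *	drm_dp_mst_hpd_irq(mgr, esi, &handled);
 *	if (handled)
 *		drm_dp_dpcd_write(mgr->aux, DP_SINK_COUNT_ESI + 1, &esi[1], 3);
 */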
/**
 * drm_dp_mst_detect_port() - get connection status for an MST port
 * @connector: connector for this port
 * @mgr: manager for this port
 * @port: unverified pointer to a port
 *
 * This returns the current connection state for a port. It validates the
 * port pointer still exists so the caller doesn't require a reference.
 */
enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector,
						 struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
{
	enum drm_connector_status status = connector_status_disconnected;

	/* we need to search for the port in the mgr in case it's gone */
	port = drm_dp_get_validated_port_ref(mgr, port);
	if (!port)
		return connector_status_disconnected;

	if (!port->ddps)
		goto out;

	switch (port->pdt) {
	case DP_PEER_DEVICE_NONE:
	case DP_PEER_DEVICE_MST_BRANCHING:
		break;

	case DP_PEER_DEVICE_SST_SINK:
		status = connector_status_connected;
		break;
	case DP_PEER_DEVICE_DP_LEGACY_CONV:
		if (port->ldps)
			status = connector_status_connected;
		break;
	}
out:
	drm_dp_put_port(port);
	return status;
}
EXPORT_SYMBOL(drm_dp_mst_detect_port);
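
/*
 * Illustrative sketch only: an MST connector's ->detect() hook usually just
 * forwards to this helper with its stored (unverified) port pointer:
 *
 *	static enum drm_connector_status
 *	my_mst_detect(struct drm_connector *connector, bool force)
 *	{
 *		struct my_mst_connector *c = to_my_mst_connector(connector);
 *
 *		return drm_dp_mst_detect_port(connector, c->mgr, c->port);
 *	}
 *
 * "my_mst_connector"/"to_my_mst_connector" are hypothetical driver types.
 */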
/**
 * drm_dp_mst_port_has_audio() - Check whether port has audio capability or not
 * @mgr: manager for this port
 * @port: unverified pointer to a port.
 *
 * This returns whether the port supports audio or not.
 */
bool drm_dp_mst_port_has_audio(struct drm_dp_mst_topology_mgr *mgr,
			       struct drm_dp_mst_port *port)
{
	bool ret = false;

	port = drm_dp_get_validated_port_ref(mgr, port);
	if (!port)
		return ret;
	ret = port->has_audio;
	drm_dp_put_port(port);
	return ret;
}
EXPORT_SYMBOL(drm_dp_mst_port_has_audio);
/**
 * drm_dp_mst_get_edid() - get EDID for an MST port
 * @connector: toplevel connector to get EDID for
 * @mgr: manager for this port
 * @port: unverified pointer to a port.
 *
 * This returns an EDID for the port connected to a connector. It validates
 * the pointer still exists so the caller doesn't require a reference.
 */
struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
{
	struct edid *edid = NULL;

	/* we need to search for the port in the mgr in case it's gone */
	port = drm_dp_get_validated_port_ref(mgr, port);
	if (!port)
		return NULL;

	if (port->cached_edid)
		edid = drm_edid_duplicate(port->cached_edid);
	else
		edid = drm_get_edid(connector, &port->aux.ddc);

	port->has_audio = drm_detect_monitor_audio(edid);
	drm_dp_put_port(port);
	return edid;
}
EXPORT_SYMBOL(drm_dp_mst_get_edid);
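
/*
 * Illustrative sketch only: a typical ->get_modes() implementation duplicates
 * the EDID via this helper, attaches it to the connector and frees it again:
 *
 *	edid = drm_dp_mst_get_edid(connector, mgr, port);
 *	if (!edid)
 *		return 0;
 *	drm_mode_connector_update_edid_property(connector, edid);
 *	ret = drm_add_edid_modes(connector, edid);
 *	kfree(edid);
 *	return ret;
 */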
/**
 * drm_dp_find_vcpi_slots() - find slots for this PBN value
 * @mgr: manager to use
 * @pbn: payload bandwidth to convert into slots.
 */
int drm_dp_find_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr,
			   int pbn)
{
	int num_slots;

	num_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);

	if (num_slots > mgr->avail_slots)
		return -ENOSPC;
	return num_slots;
}
EXPORT_SYMBOL(drm_dp_find_vcpi_slots);
static int drm_dp_init_vcpi(struct drm_dp_mst_topology_mgr *mgr,
			    struct drm_dp_vcpi *vcpi, int pbn)
{
	int ret;
	int num_slots;

	num_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);

	if (num_slots > mgr->avail_slots)
		return -ENOSPC;

	vcpi->pbn = pbn;
	vcpi->aligned_pbn = num_slots * mgr->pbn_div;
	vcpi->num_slots = num_slots;

	ret = drm_dp_mst_assign_payload_id(mgr, vcpi);
	if (ret < 0)
		return ret;
	return 0;
}
/**
 * drm_dp_mst_allocate_vcpi() - Allocate a virtual channel
 * @mgr: manager for this port
 * @port: port to allocate a virtual channel for.
 * @pbn: payload bandwidth number to request
 * @slots: returned number of slots for this PBN.
 */
bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, int pbn, int *slots)
{
	int ret;

	port = drm_dp_get_validated_port_ref(mgr, port);
	if (!port)
		return false;

	if (port->vcpi.vcpi > 0) {
		DRM_DEBUG_KMS("payload: vcpi %d already allocated for pbn %d - requested pbn %d\n", port->vcpi.vcpi, port->vcpi.pbn, pbn);
		if (pbn == port->vcpi.pbn) {
			*slots = port->vcpi.num_slots;
			drm_dp_put_port(port);
			return true;
		}
	}

	ret = drm_dp_init_vcpi(mgr, &port->vcpi, pbn);
	if (ret) {
		DRM_DEBUG_KMS("failed to init vcpi %d %d %d\n", DIV_ROUND_UP(pbn, mgr->pbn_div), mgr->avail_slots, ret);
		goto out;
	}
	DRM_DEBUG_KMS("initing vcpi for %d %d\n", pbn, port->vcpi.num_slots);
	*slots = port->vcpi.num_slots;

	drm_dp_put_port(port);
	return true;
out:
	return false;
}
EXPORT_SYMBOL(drm_dp_mst_allocate_vcpi);
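
/*
 * Illustrative sketch only: the usual enable sequence converts the mode into
 * a PBN value first and then asks for a virtual channel for that bandwidth:
 *
 *	pbn = drm_dp_calc_pbn_mode(adjusted_mode->clock, bpp);
 *	if (!drm_dp_mst_allocate_vcpi(mgr, port, pbn, &slots))
 *		return -EINVAL;
 *
 * "adjusted_mode", "bpp", "slots" and the error handling are up to the driver.
 */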
int drm_dp_mst_get_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
{
	int slots = 0;

	port = drm_dp_get_validated_port_ref(mgr, port);
	if (!port)
		return slots;

	slots = port->vcpi.num_slots;
	drm_dp_put_port(port);
	return slots;
}
EXPORT_SYMBOL(drm_dp_mst_get_vcpi_slots);
/**
 * drm_dp_mst_reset_vcpi_slots() - Reset number of slots to 0 for VCPI
 * @mgr: manager for this port
 * @port: unverified pointer to a port.
 *
 * This just resets the number of slots for the port's VCPI for later programming.
 */
void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
{
	port = drm_dp_get_validated_port_ref(mgr, port);
	if (!port)
		return;
	port->vcpi.num_slots = 0;
	drm_dp_put_port(port);
}
EXPORT_SYMBOL(drm_dp_mst_reset_vcpi_slots);
/**
 * drm_dp_mst_deallocate_vcpi() - deallocate a VCPI
 * @mgr: manager for this port
 * @port: unverified port to deallocate vcpi for
 */
void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
{
	port = drm_dp_get_validated_port_ref(mgr, port);
	if (!port)
		return;

	drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
	port->vcpi.num_slots = 0;
	port->vcpi.pbn = 0;
	port->vcpi.aligned_pbn = 0;
	port->vcpi.vcpi = 0;
	drm_dp_put_port(port);
}
EXPORT_SYMBOL(drm_dp_mst_deallocate_vcpi);
static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
				     int id, struct drm_dp_payload *payload)
{
	u8 payload_alloc[3], status;
	int ret;
	int retries = 0;

	drm_dp_dpcd_writeb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS,
			   DP_PAYLOAD_TABLE_UPDATED);

	payload_alloc[0] = id;
	payload_alloc[1] = payload->start_slot;
	payload_alloc[2] = payload->num_slots;

	ret = drm_dp_dpcd_write(mgr->aux, DP_PAYLOAD_ALLOCATE_SET, payload_alloc, 3);
	if (ret != 3) {
		DRM_DEBUG_KMS("failed to write payload allocation %d\n", ret);
		goto fail;
	}

retry:
	ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
	if (ret < 0) {
		DRM_DEBUG_KMS("failed to read payload table status %d\n", ret);
		goto fail;
	}

	if (!(status & DP_PAYLOAD_TABLE_UPDATED)) {
		retries++;
		if (retries < 20) {
			usleep_range(10000, 20000);
			goto retry;
		}
		DRM_DEBUG_KMS("status not set after read payload table status %d\n", status);
		ret = -EINVAL;
		goto fail;
	}
	ret = 0;
fail:
	return ret;
}
/**
 * drm_dp_check_act_status() - Check ACT handled status.
 * @mgr: manager to use
 *
 * Check the payload status bits in the DPCD for ACT handled completion.
 */
int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr)
{
	u8 status;
	int ret;
	int count = 0;

	do {
		ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
		if (ret < 0) {
			DRM_DEBUG_KMS("failed to read payload table status %d\n", ret);
			return ret;
		}

		if (status & DP_PAYLOAD_ACT_HANDLED)
			break;
		count++;
		udelay(100);
	} while (count < 30);

	if (!(status & DP_PAYLOAD_ACT_HANDLED)) {
		DRM_DEBUG_KMS("failed to get ACT bit %d after %d retries\n", status, count);
		return -EINVAL;
	}
	return 0;
}
EXPORT_SYMBOL(drm_dp_check_act_status);
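
/*
 * Illustrative sketch only: during a modeset the payload table is normally
 * programmed in two halves around the allocation-change trigger, with this
 * helper polling for ACT completion in between, roughly:
 *
 *	drm_dp_update_payload_part1(mgr);
 *	(driver triggers the ACT sequence in its own hardware)
 *	drm_dp_check_act_status(mgr);
 *	drm_dp_update_payload_part2(mgr);
 */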
/**
 * drm_dp_calc_pbn_mode() - Calculate the PBN for a mode.
 * @clock: dot clock for the mode
 * @bpp: bpp for the mode.
 *
 * This uses the formula in the spec to calculate the PBN value for a mode.
 */
int drm_dp_calc_pbn_mode(int clock, int bpp)
{
	u64 kbps;
	s64 peak_kbps;
	u32 numerator;
	u32 denominator;

	kbps = clock * bpp;

	/*
	 * margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006
	 * The unit of 54/64Mbytes/sec is an arbitrary unit chosen based on
	 * common multiplier to render an integer PBN for all link rate/lane
	 * counts combinations
	 * calculate
	 * peak_kbps *= (1006/1000)
	 * peak_kbps *= (64/54)
	 * peak_kbps *= 8    convert to bytes
	 */

	numerator = 64 * 1006;
	denominator = 54 * 8 * 1000 * 1000;

	kbps *= numerator;
	peak_kbps = drm_fixp_from_fraction(kbps, denominator);

	return drm_fixp2int_ceil(peak_kbps);
}
EXPORT_SYMBOL(drm_dp_calc_pbn_mode);
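
/*
 * Worked example (matching the self-test below): for a 154 MHz dot clock at
 * 30 bpp,
 *
 *	PBN = ceil(154000 * 30 * 64 * 1006 / (54 * 8 * 1000 * 1000))
 *	    = ceil(688.55...)
 *	    = 689
 */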
static int test_calc_pbn_mode(void)
{
	int ret;

	ret = drm_dp_calc_pbn_mode(154000, 30);
	if (ret != 689) {
		DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
			  154000, 30, 689, ret);
		return -EINVAL;
	}
	ret = drm_dp_calc_pbn_mode(234000, 30);
	if (ret != 1047) {
		DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
			  234000, 30, 1047, ret);
		return -EINVAL;
	}
	ret = drm_dp_calc_pbn_mode(297000, 24);
	if (ret != 1063) {
		DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
			  297000, 24, 1063, ret);
		return -EINVAL;
	}
	return 0;
}
/* we want to kick the TX after we've acked the up/down IRQs. */
static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr)
{
	queue_work(system_long_wq, &mgr->tx_work);
}
static void drm_dp_mst_dump_mstb(struct seq_file *m,
				 struct drm_dp_mst_branch *mstb)
{
	struct drm_dp_mst_port *port;
	int tabs = mstb->lct;
	char prefix[10];
	int i;

	for (i = 0; i < tabs; i++)
		prefix[i] = '\t';
	prefix[i] = '\0';

	seq_printf(m, "%smst: %p, %d\n", prefix, mstb, mstb->num_ports);
	list_for_each_entry(port, &mstb->ports, next) {
		seq_printf(m, "%sport: %d: ddps: %d ldps: %d, sdp: %d/%d, %p, conn: %p\n", prefix, port->port_num, port->ddps, port->ldps, port->num_sdp_streams, port->num_sdp_stream_sinks, port, port->connector);
		if (port->mstb)
			drm_dp_mst_dump_mstb(m, port->mstb);
	}
}
static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
				  u8 *buf)
{
	int ret;
	int i;

	for (i = 0; i < 4; i++) {
		ret = drm_dp_dpcd_read(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS + (i * 16), &buf[i * 16], 16);
		if (ret != 16)
			break;
	}
	if (i == 4)
		return true;
	return false;
}
/**
 * drm_dp_mst_dump_topology(): dump topology to seq file.
 * @m: seq_file to dump output to
 * @mgr: manager to dump current topology for.
 *
 * helper to dump MST topology to a seq file for debugfs.
 */
void drm_dp_mst_dump_topology(struct seq_file *m,
			      struct drm_dp_mst_topology_mgr *mgr)
{
	int i;
	struct drm_dp_mst_port *port;

	mutex_lock(&mgr->lock);
	if (mgr->mst_primary)
		drm_dp_mst_dump_mstb(m, mgr->mst_primary);
	mutex_unlock(&mgr->lock);

	mutex_lock(&mgr->payload_lock);
	seq_printf(m, "vcpi: %lx %lx\n", mgr->payload_mask, mgr->vcpi_mask);

	for (i = 0; i < mgr->max_payloads; i++) {
		if (mgr->proposed_vcpis[i]) {
			port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
			seq_printf(m, "vcpi %d: %d %d %d\n", i, port->port_num, port->vcpi.vcpi, port->vcpi.num_slots);
		} else
			seq_printf(m, "vcpi %d: unused\n", i);
	}
	for (i = 0; i < mgr->max_payloads; i++) {
		seq_printf(m, "payload %d: %d, %d, %d\n",
			   i,
			   mgr->payloads[i].payload_state,
			   mgr->payloads[i].start_slot,
			   mgr->payloads[i].num_slots);
	}
	mutex_unlock(&mgr->payload_lock);

	mutex_lock(&mgr->lock);
	if (mgr->mst_primary) {
		u8 buf[64];
		bool bret;
		int ret;

		ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, buf, DP_RECEIVER_CAP_SIZE);
		seq_printf(m, "dpcd: ");
		for (i = 0; i < DP_RECEIVER_CAP_SIZE; i++)
			seq_printf(m, "%02x ", buf[i]);
		seq_printf(m, "\n");
		ret = drm_dp_dpcd_read(mgr->aux, DP_FAUX_CAP, buf, 2);
		seq_printf(m, "faux/mst: ");
		for (i = 0; i < 2; i++)
			seq_printf(m, "%02x ", buf[i]);
		seq_printf(m, "\n");
		ret = drm_dp_dpcd_read(mgr->aux, DP_MSTM_CTRL, buf, 1);
		seq_printf(m, "mst ctrl: ");
		for (i = 0; i < 1; i++)
			seq_printf(m, "%02x ", buf[i]);
		seq_printf(m, "\n");

		/* dump the standard OUI branch header */
		ret = drm_dp_dpcd_read(mgr->aux, DP_BRANCH_OUI, buf, DP_BRANCH_OUI_HEADER_SIZE);
		seq_printf(m, "branch oui: ");
		for (i = 0; i < 0x3; i++)
			seq_printf(m, "%02x", buf[i]);
		seq_printf(m, " devid: ");
		for (i = 0x3; i < 0x8; i++)
			seq_printf(m, "%c", buf[i]);
		seq_printf(m, " revision: hw: %x.%x sw: %x.%x", buf[0x9] >> 4, buf[0x9] & 0xf, buf[0xa], buf[0xb]);
		seq_printf(m, "\n");

		bret = dump_dp_payload_table(mgr, buf);
		if (bret == true) {
			seq_printf(m, "payload table: ");
			for (i = 0; i < 63; i++)
				seq_printf(m, "%02x ", buf[i]);
			seq_printf(m, "\n");
		}
	}
	mutex_unlock(&mgr->lock);
}
EXPORT_SYMBOL(drm_dp_mst_dump_topology);
static void drm_dp_tx_work(struct work_struct *work)
{
	struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, tx_work);

	mutex_lock(&mgr->qlock);
	if (mgr->tx_down_in_progress)
		process_single_down_tx_qlock(mgr);
	mutex_unlock(&mgr->qlock);
}
static void drm_dp_free_mst_port(struct kref *kref)
{
	struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref);

	kref_put(&port->parent->kref, drm_dp_free_mst_branch_device);
	kfree(port);
}
static void drm_dp_destroy_connector_work(struct work_struct *work)
{
	struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work);
	struct drm_dp_mst_port *port;
	bool send_hotplug = false;
	/*
	 * Not a regular list traverse as we have to drop the destroy
	 * connector lock before destroying the connector, to avoid AB->BA
	 * ordering between this lock and the config mutex.
	 */
	for (;;) {
		mutex_lock(&mgr->destroy_connector_lock);
		port = list_first_entry_or_null(&mgr->destroy_connector_list, struct drm_dp_mst_port, next);
		if (!port) {
			mutex_unlock(&mgr->destroy_connector_lock);
			break;
		}
		list_del(&port->next);
		mutex_unlock(&mgr->destroy_connector_lock);

		kref_init(&port->kref);
		INIT_LIST_HEAD(&port->next);

		mgr->cbs->destroy_connector(mgr, port->connector);

		drm_dp_port_teardown_pdt(port, port->pdt);

		if (!port->input && port->vcpi.vcpi > 0) {
			if (mgr->mst_state) {
				drm_dp_mst_reset_vcpi_slots(mgr, port);
				drm_dp_update_payload_part1(mgr);
				drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
			}
		}

		kref_put(&port->kref, drm_dp_free_mst_port);
		send_hotplug = true;
	}
	if (send_hotplug)
		(*mgr->cbs->hotplug)(mgr);
}
/**
 * drm_dp_mst_topology_mgr_init - initialise a topology manager
 * @mgr: manager struct to initialise
 * @dev: device providing this structure - for i2c addition.
 * @aux: DP helper aux channel to talk to this device
 * @max_dpcd_transaction_bytes: hw specific DPCD transaction limit
 * @max_payloads: maximum number of payloads this GPU can source
 * @conn_base_id: the connector object ID the MST device is connected to.
 *
 * Return 0 for success, or negative error code on failure
 */
int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
				 struct device *dev, struct drm_dp_aux *aux,
				 int max_dpcd_transaction_bytes,
				 int max_payloads, int conn_base_id)
{
	mutex_init(&mgr->lock);
	mutex_init(&mgr->qlock);
	mutex_init(&mgr->payload_lock);
	mutex_init(&mgr->destroy_connector_lock);
	INIT_LIST_HEAD(&mgr->tx_msg_downq);
	INIT_LIST_HEAD(&mgr->destroy_connector_list);
	INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work);
	INIT_WORK(&mgr->tx_work, drm_dp_tx_work);
	INIT_WORK(&mgr->destroy_connector_work, drm_dp_destroy_connector_work);
	init_waitqueue_head(&mgr->tx_waitq);
	mgr->dev = dev;
	mgr->aux = aux;
	mgr->max_dpcd_transaction_bytes = max_dpcd_transaction_bytes;
	mgr->max_payloads = max_payloads;
	mgr->conn_base_id = conn_base_id;
	if (max_payloads + 1 > sizeof(mgr->payload_mask) * 8 ||
	    max_payloads + 1 > sizeof(mgr->vcpi_mask) * 8)
		return -EINVAL;
	mgr->payloads = kcalloc(max_payloads, sizeof(struct drm_dp_payload), GFP_KERNEL);
	if (!mgr->payloads)
		return -ENOMEM;
	mgr->proposed_vcpis = kcalloc(max_payloads, sizeof(struct drm_dp_vcpi *), GFP_KERNEL);
	if (!mgr->proposed_vcpis)
		return -ENOMEM;
	set_bit(0, &mgr->payload_mask);
	if (test_calc_pbn_mode() < 0)
		DRM_ERROR("MST PBN self-test failed\n");

	return 0;
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init);
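
/*
 * Illustrative sketch only: a driver typically calls this once per MST-capable
 * DP port while setting up its connector, for example:
 *
 *	ret = drm_dp_mst_topology_mgr_init(&my_port->mst_mgr, dev->dev,
 *					   &my_port->dp_aux, 16, 4,
 *					   connector->base.id);
 *
 * "my_port", the 16-byte transaction limit and the payload count of 4 are all
 * hypothetical; use values that match the hardware.
 */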
/**
 * drm_dp_mst_topology_mgr_destroy() - destroy topology manager.
 * @mgr: manager to destroy
 */
void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
{
	flush_work(&mgr->work);
	flush_work(&mgr->destroy_connector_work);
	mutex_lock(&mgr->payload_lock);
	kfree(mgr->payloads);
	mgr->payloads = NULL;
	kfree(mgr->proposed_vcpis);
	mgr->proposed_vcpis = NULL;
	mutex_unlock(&mgr->payload_lock);
	mgr->dev = NULL;
	mgr->aux = NULL;
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_destroy);
static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
			       int num)
{
	struct drm_dp_aux *aux = adapter->algo_data;
	struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port, aux);
	struct drm_dp_mst_branch *mstb;
	struct drm_dp_mst_topology_mgr *mgr = port->mgr;
	unsigned int i;
	bool reading = false;
	struct drm_dp_sideband_msg_req_body msg;
	struct drm_dp_sideband_msg_tx *txmsg = NULL;
	int ret;

	mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
	if (!mstb)
		return -EREMOTEIO;

	/* construct i2c msg */
	/* see if last msg is a read */
	if (msgs[num - 1].flags & I2C_M_RD)
		reading = true;

	if (!reading || (num - 1 > DP_REMOTE_I2C_READ_MAX_TRANSACTIONS)) {
		DRM_DEBUG_KMS("Unsupported I2C transaction for MST device\n");
		ret = -EIO;
		goto out;
	}

	memset(&msg, 0, sizeof(msg));
	msg.req_type = DP_REMOTE_I2C_READ;
	msg.u.i2c_read.num_transactions = num - 1;
	msg.u.i2c_read.port_number = port->port_num;
	for (i = 0; i < num - 1; i++) {
		msg.u.i2c_read.transactions[i].i2c_dev_id = msgs[i].addr;
		msg.u.i2c_read.transactions[i].num_bytes = msgs[i].len;
		msg.u.i2c_read.transactions[i].bytes = msgs[i].buf;
	}
	msg.u.i2c_read.read_i2c_device_id = msgs[num - 1].addr;
	msg.u.i2c_read.num_bytes_read = msgs[num - 1].len;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg) {
		ret = -ENOMEM;
		goto out;
	}

	txmsg->dst = mstb;
	drm_dp_encode_sideband_req(&msg, txmsg);

	drm_dp_queue_down_tx(mgr, txmsg);

	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
	if (ret > 0) {
		if (txmsg->reply.reply_type == 1) { /* got a NAK back */
			ret = -EREMOTEIO;
			goto out;
		}
		if (txmsg->reply.u.remote_i2c_read_ack.num_bytes != msgs[num - 1].len) {
			ret = -EIO;
			goto out;
		}
		memcpy(msgs[num - 1].buf, txmsg->reply.u.remote_i2c_read_ack.bytes, msgs[num - 1].len);
		ret = num;
	}
out:
	kfree(txmsg);
	drm_dp_put_mst_branch_device(mstb);
	return ret;
}
static u32 drm_dp_mst_i2c_functionality(struct i2c_adapter *adapter)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
	       I2C_FUNC_SMBUS_READ_BLOCK_DATA |
	       I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
	       I2C_FUNC_10BIT_ADDR;
}
static const struct i2c_algorithm drm_dp_mst_i2c_algo = {
	.functionality = drm_dp_mst_i2c_functionality,
	.master_xfer = drm_dp_mst_i2c_xfer,
};
/**
 * drm_dp_mst_register_i2c_bus() - register an I2C adapter for I2C-over-AUX
 * @aux: DisplayPort AUX channel
 *
 * Returns 0 on success or a negative error code on failure.
 */
static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux)
{
	aux->ddc.algo = &drm_dp_mst_i2c_algo;
	aux->ddc.algo_data = aux;
	aux->ddc.retries = 3;

	aux->ddc.class = I2C_CLASS_DDC;
	aux->ddc.owner = THIS_MODULE;
	aux->ddc.dev.parent = aux->dev;
	aux->ddc.dev.of_node = aux->dev->of_node;

	strlcpy(aux->ddc.name, aux->name ? aux->name : dev_name(aux->dev),
		sizeof(aux->ddc.name));

	return i2c_add_adapter(&aux->ddc);
}
/**
 * drm_dp_mst_unregister_i2c_bus() - unregister an I2C-over-AUX adapter
 * @aux: DisplayPort AUX channel
 */
static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux)
{
	i2c_del_adapter(&aux->ddc);
}