/*
 * Copyright 2012-15 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_dp_helper.h>
#include "dm_services.h"
#include "amdgpu.h"
#include "amdgpu_dm.h"
#include "amdgpu_dm_mst_types.h"

#include "dc.h"
#include "dm_helpers.h"

#include "dc_link_ddc.h"

#include "i2caux_interface.h"

#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif

#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "dc/dcn20/dcn20_resource.h"
#endif
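
/*
 * AUX transfer hook for the DRM AUX channel. The drm_dp_aux_msg request
 * bits are translated into a DC aux_payload (native vs. i2c-over-aux,
 * read vs. write, middle-of-transaction), the transfer is handed to
 * dc_link_aux_transfer_raw(), and the DC return code is mapped back to a
 * negative errno on failure. The 16-byte cap matches the maximum payload
 * of a single DP AUX transaction.
 */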
static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
				  struct drm_dp_aux_msg *msg)
{
	ssize_t result = 0;
	struct aux_payload payload;
	enum aux_return_code_type operation_result;

	if (WARN_ON(msg->size > 16))
		return -E2BIG;

	payload.address = msg->address;
	payload.data = msg->buffer;
	payload.length = msg->size;
	payload.reply = &msg->reply;
	payload.i2c_over_aux = (msg->request & DP_AUX_NATIVE_WRITE) == 0;
	payload.write = (msg->request & DP_AUX_I2C_READ) == 0;
	payload.mot = (msg->request & DP_AUX_I2C_MOT) != 0;
	payload.defer_delay = 0;

	result = dc_link_aux_transfer_raw(TO_DM_AUX(aux)->ddc_service, &payload,
					  &operation_result);

	if (payload.write && result >= 0)
		result = msg->size;

	if (result < 0)
		switch (operation_result) {
		case AUX_RET_SUCCESS:
			break;
		case AUX_RET_ERROR_HPD_DISCON:
		case AUX_RET_ERROR_UNKNOWN:
		case AUX_RET_ERROR_INVALID_OPERATION:
		case AUX_RET_ERROR_PROTOCOL_ERROR:
			result = -EIO;
			break;
		case AUX_RET_ERROR_INVALID_REPLY:
		case AUX_RET_ERROR_ENGINE_ACQUIRE:
			result = -EBUSY;
			break;
		case AUX_RET_ERROR_TIMEOUT:
			result = -ETIMEDOUT;
			break;
		}

	return result;
}
static void
dm_dp_mst_connector_destroy(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *aconnector =
		to_amdgpu_dm_connector(connector);

	if (aconnector->dc_sink) {
		dc_link_remove_remote_sink(aconnector->dc_link,
					   aconnector->dc_sink);
		dc_sink_release(aconnector->dc_sink);
	}

	kfree(aconnector->edid);

	drm_connector_cleanup(connector);
	drm_dp_mst_put_port_malloc(aconnector->port);
	kfree(aconnector);
}
static int
amdgpu_dm_mst_connector_late_register(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector =
		to_amdgpu_dm_connector(connector);
	int r;

	r = drm_dp_mst_connector_late_register(connector,
					       amdgpu_dm_connector->port);
	if (r < 0)
		return r;

#if defined(CONFIG_DEBUG_FS)
	connector_debugfs_init(amdgpu_dm_connector);
#endif

	return 0;
}
static void
amdgpu_dm_mst_connector_early_unregister(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector =
		to_amdgpu_dm_connector(connector);
	struct drm_dp_mst_port *port = amdgpu_dm_connector->port;

	drm_dp_mst_connector_early_unregister(connector, port);
}
static const struct drm_connector_funcs dm_dp_mst_connector_funcs = {
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = dm_dp_mst_connector_destroy,
	.reset = amdgpu_dm_connector_funcs_reset,
	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
	.late_register = amdgpu_dm_mst_connector_late_register,
	.early_unregister = amdgpu_dm_mst_connector_early_unregister,
};
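
/*
 * Read the DSC capability set (16 bytes starting at DP_DSC_SUPPORT) from
 * the decoder that will actually do the decompression for this port and
 * parse it into dc_sink->dsc_caps. Returns false when no usable DSC AUX
 * target exists or the DPCD read/parse fails, in which case the caller
 * clears the sink's DSC caps.
 */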
#if defined(CONFIG_DRM_AMD_DC_DCN)
static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnector)
{
	struct dc_sink *dc_sink = aconnector->dc_sink;
	struct drm_dp_mst_port *port = aconnector->port;
	u8 dsc_caps[16] = { 0 };

	aconnector->dsc_aux = drm_dp_mst_dsc_aux_for_port(port);
#if defined(CONFIG_HP_HOOK_WORKAROUND)
	/*
	 * drm_dp_mst_dsc_aux_for_port() will return NULL for certain configs
	 * because it only checks the dsc/fec caps of the "port variable" and not the dock.
	 *
	 * This case will return NULL: DSC-capable MST dock connected to a non-fec/dsc-capable display.
	 *
	 * Workaround: explicitly check the use case above and use the MST dock's aux as dsc_aux.
	 */
	if (!aconnector->dsc_aux && !port->parent->port_parent)
		aconnector->dsc_aux = &aconnector->mst_port->dm_dp_aux.aux;
#endif
	if (!aconnector->dsc_aux)
		return false;

	if (drm_dp_dpcd_read(aconnector->dsc_aux, DP_DSC_SUPPORT, dsc_caps, 16) < 0)
		return false;

	if (!dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
				   dsc_caps, NULL,
				   &dc_sink->dsc_caps.dsc_dec_caps))
		return false;

	return true;
}
#endif
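
/*
 * .get_modes for MST connectors: fetch the EDID through the MST topology
 * (sideband), create a DC remote sink from it if one does not exist yet,
 * refresh freesync and DSC capabilities, and finally report the probed
 * modes to DRM via drm_add_edid_modes().
 */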
static int dm_dp_mst_get_modes(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	int ret = 0;

	if (!aconnector)
		return drm_add_edid_modes(connector, NULL);

	if (!aconnector->edid) {
		struct edid *edid;

		edid = drm_dp_mst_get_edid(connector, &aconnector->mst_port->mst_mgr, aconnector->port);

		if (!edid) {
			drm_connector_update_edid_property(
				&aconnector->base,
				NULL);

			return ret;
		}

		aconnector->edid = edid;
	}

	if (aconnector->dc_sink && aconnector->dc_sink->sink_signal == SIGNAL_TYPE_VIRTUAL) {
		dc_sink_release(aconnector->dc_sink);
		aconnector->dc_sink = NULL;
	}

	if (!aconnector->dc_sink) {
		struct dc_sink *dc_sink;
		struct dc_sink_init_data init_params = {
				.link = aconnector->dc_link,
				.sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST };

		dc_sink = dc_link_add_remote_sink(
			aconnector->dc_link,
			(uint8_t *)aconnector->edid,
			(aconnector->edid->extensions + 1) * EDID_LENGTH,
			&init_params);

		if (!dc_sink) {
			DRM_ERROR("Unable to add a remote sink\n");
			return 0;
		}

		dc_sink->priv = aconnector;
		/* dc_link_add_remote_sink returns a new reference */
		aconnector->dc_sink = dc_sink;

		if (aconnector->dc_sink) {
			amdgpu_dm_update_freesync_caps(
					connector, aconnector->edid);

#if defined(CONFIG_DRM_AMD_DC_DCN)
			if (!validate_dsc_caps_on_connector(aconnector))
				memset(&aconnector->dc_sink->dsc_caps,
				       0, sizeof(aconnector->dc_sink->dsc_caps));
#endif
		}
	}

	drm_connector_update_edid_property(
					&aconnector->base, aconnector->edid);

	ret = drm_add_edid_modes(connector, aconnector->edid);

	return ret;
}
static struct drm_encoder *
dm_mst_atomic_best_encoder(struct drm_connector *connector,
			   struct drm_atomic_state *state)
{
	struct drm_connector_state *connector_state = drm_atomic_get_new_connector_state(state,
											 connector);
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(connector_state->crtc);

	return &adev->dm.mst_encoders[acrtc->crtc_id].base;
}
static int
dm_dp_mst_detect(struct drm_connector *connector,
		 struct drm_modeset_acquire_ctx *ctx, bool force)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct amdgpu_dm_connector *master = aconnector->mst_port;

	if (drm_connector_is_unregistered(connector))
		return connector_status_disconnected;

	return drm_dp_mst_detect_port(connector, ctx, &master->mst_mgr,
				      aconnector->port);
}
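
/*
 * Atomic check for MST connectors: the only thing to validate here is the
 * release of VCPI time slots when a connector that previously had a CRTC
 * is being disabled (it loses its CRTC, or keeps it through a modeset that
 * turns it off). Slot allocation for enabled streams happens in the
 * DSC/bandwidth computation paths.
 */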
static int dm_dp_mst_atomic_check(struct drm_connector *connector,
				  struct drm_atomic_state *state)
{
	struct drm_connector_state *new_conn_state =
			drm_atomic_get_new_connector_state(state, connector);
	struct drm_connector_state *old_conn_state =
			drm_atomic_get_old_connector_state(state, connector);
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct drm_crtc_state *new_crtc_state;
	struct drm_dp_mst_topology_mgr *mst_mgr;
	struct drm_dp_mst_port *mst_port;

	mst_port = aconnector->port;
	mst_mgr = &aconnector->mst_port->mst_mgr;

	if (!old_conn_state->crtc)
		return 0;

	if (new_conn_state->crtc) {
		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);
		if (!new_crtc_state ||
		    !drm_atomic_crtc_needs_modeset(new_crtc_state) ||
		    new_crtc_state->enable)
			return 0;
	}

	return drm_dp_atomic_release_vcpi_slots(state,
						mst_mgr,
						mst_port);
}
static const struct drm_connector_helper_funcs dm_dp_mst_connector_helper_funcs = {
	.get_modes = dm_dp_mst_get_modes,
	.mode_valid = amdgpu_dm_connector_mode_valid,
	.atomic_best_encoder = dm_mst_atomic_best_encoder,
	.detect_ctx = dm_dp_mst_detect,
	.atomic_check = dm_dp_mst_atomic_check,
};
static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
}

static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
	.destroy = amdgpu_dm_encoder_destroy,
};
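
/*
 * MST connectors do not get a dedicated encoder from the topology; instead
 * a fixed pool of "fake" DPMST encoders, one per display index, is created
 * up front and later attached to every MST connector, so any CRTC can be
 * routed to any MST stream.
 */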
void
dm_dp_create_fake_mst_encoders(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev_to_drm(adev);
	int i;

	for (i = 0; i < adev->dm.display_indexes_num; i++) {
		struct amdgpu_encoder *amdgpu_encoder = &adev->dm.mst_encoders[i];
		struct drm_encoder *encoder = &amdgpu_encoder->base;

		encoder->possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);

		drm_encoder_init(
			dev,
			&amdgpu_encoder->base,
			&amdgpu_dm_encoder_funcs,
			DRM_MODE_ENCODER_DPMST,
			NULL);

		drm_encoder_helper_add(encoder, &amdgpu_dm_encoder_helper_funcs);
	}
}
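
/*
 * .add_connector callback of the MST topology manager: called when a new
 * port is discovered on the branch device. A new amdgpu_dm_connector is
 * allocated, bound to the port and to the SST "master" connector that owns
 * the topology, attached to all fake DPMST encoders, and inherits the
 * max_bpc and vrr_capable properties from the master connector.
 */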
static struct drm_connector *
dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
			struct drm_dp_mst_port *port,
			const char *pathprop)
{
	struct amdgpu_dm_connector *master = container_of(mgr, struct amdgpu_dm_connector, mst_mgr);
	struct drm_device *dev = master->base.dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	int i;

	aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
	if (!aconnector)
		return NULL;

	connector = &aconnector->base;
	aconnector->port = port;
	aconnector->mst_port = master;

	if (drm_connector_init(
		dev,
		connector,
		&dm_dp_mst_connector_funcs,
		DRM_MODE_CONNECTOR_DisplayPort)) {
		kfree(aconnector);
		return NULL;
	}
	drm_connector_helper_add(connector, &dm_dp_mst_connector_helper_funcs);

	amdgpu_dm_connector_init_helper(
		&adev->dm,
		aconnector,
		DRM_MODE_CONNECTOR_DisplayPort,
		master->dc_link,
		master->connector_id);

	for (i = 0; i < adev->dm.display_indexes_num; i++) {
		drm_connector_attach_encoder(&aconnector->base,
					     &adev->dm.mst_encoders[i].base);
	}

	connector->max_bpc_property = master->base.max_bpc_property;
	if (connector->max_bpc_property)
		drm_connector_attach_max_bpc_property(connector, 8, 16);

	connector->vrr_capable_property = master->base.vrr_capable_property;
	if (connector->vrr_capable_property)
		drm_connector_attach_vrr_capable_property(connector);

	drm_object_attach_property(
		&connector->base,
		dev->mode_config.path_property,
		0);
	drm_object_attach_property(
		&connector->base,
		dev->mode_config.tile_property,
		0);

	drm_connector_set_path_property(connector, pathprop);

	/*
	 * Initialize connector state before adding the connector to drm and
	 * framebuffer lists
	 */
	amdgpu_dm_connector_funcs_reset(connector);

	drm_dp_mst_get_port_malloc(port);

	return connector;
}
static const struct drm_dp_mst_topology_cbs dm_mst_cbs = {
	.add_connector = dm_dp_add_mst_connector,
};
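
/*
 * Per-link DP setup: register the AUX channel (and CEC on top of it) with
 * DRM, and for external DP links initialize the MST topology manager,
 * sizing it with the maximum lane count and link rate the link encoder
 * supports. eDP links never carry MST, so they stop after AUX/CEC setup.
 */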
void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
				       struct amdgpu_dm_connector *aconnector,
				       int link_index)
{
	struct dc_link_settings max_link_enc_cap = {0};

	aconnector->dm_dp_aux.aux.name =
		kasprintf(GFP_KERNEL, "AMDGPU DM aux hw bus %d",
			  link_index);
	aconnector->dm_dp_aux.aux.transfer = dm_dp_aux_transfer;
	aconnector->dm_dp_aux.aux.drm_dev = dm->ddev;
	aconnector->dm_dp_aux.ddc_service = aconnector->dc_link->ddc;

	drm_dp_aux_init(&aconnector->dm_dp_aux.aux);
	drm_dp_cec_register_connector(&aconnector->dm_dp_aux.aux,
				      &aconnector->base);

	if (aconnector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
		return;

	dc_link_dp_get_max_link_enc_cap(aconnector->dc_link, &max_link_enc_cap);
	aconnector->mst_mgr.cbs = &dm_mst_cbs;
	drm_dp_mst_topology_mgr_init(
		&aconnector->mst_mgr,
		adev_to_drm(dm->adev),
		&aconnector->dm_dp_aux.aux,
		16,
		4,
		(u8)max_link_enc_cap.lane_count,
		(u8)max_link_enc_cap.link_rate,
		aconnector->connector_id);

	drm_connector_attach_dp_subconnector_property(&aconnector->base);
}
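
/*
 * PBN ("payload bandwidth number") divider: one MTP time slot carries
 * 1/64th of the link bandwidth, and one PBN corresponds to 54/64 MBps, so
 * the number of PBN a single time slot can carry is
 *
 *   link_bw_kbps / 8 / 1000 / 54  =  link_bw_kbps / (8 * 1000 * 54)
 *
 * (kbps -> kBps -> MBps -> PBN per slot).
 */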
int dm_mst_get_pbn_divider(struct dc_link *link)
{
	if (!link)
		return 0;

	return dc_link_bandwidth_kbps(link,
			dc_link_get_link_cap(link)) / (8 * 1000 * 54);
}
#if defined(CONFIG_DRM_AMD_DC_DCN)

struct dsc_mst_fairness_params {
	struct dc_crtc_timing *timing;
	struct dc_sink *sink;
	struct dc_dsc_bw_range bw_range;
	bool compression_possible;
	struct drm_dp_mst_port *port;
	enum dsc_clock_force_state clock_force_enable;
	uint32_t num_slices_h;
	uint32_t num_slices_v;
	uint32_t bpp_overwrite;
};

struct dsc_mst_fairness_vars {
	int pbn;
	bool dsc_enabled;
	int bpp_x16;
};
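
/*
 * Convert a stream's bandwidth in kbps to the peak PBN it needs:
 *
 *   PBN = ceil(kbps * 1.006 * 64 / (54 * 8 * 1000))
 *
 * The extra 0.6% appears to match the margin the DP spec folds into its PBN
 * arithmetic; it is undone again by the 994/1000 factor in
 * bpp_x16_from_pbn() below. As a rough illustration, a 1080p60 RGB 8bpc
 * stream (about 3.56 Gbps) works out to roughly 532 PBN.
 */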
static int kbps_to_peak_pbn(int kbps)
{
	u64 peak_kbps = kbps;

	peak_kbps *= 1006;
	peak_kbps = div_u64(peak_kbps, 1000);
	return (int) DIV64_U64_ROUND_UP(peak_kbps * 64, (54 * 8 * 1000));
}
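
/*
 * Write the solved fairness vars back into each stream's timing: ask DC to
 * compute a full DSC config at the chosen bits-per-pixel, then let the
 * debugfs overrides (forced bpp and slice counts) take precedence. Streams
 * whose vars ended up with DSC disabled simply get flags.DSC cleared.
 */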
static void set_dsc_configs_from_fairness_vars(struct dsc_mst_fairness_params *params,
					       struct dsc_mst_fairness_vars *vars,
					       int count)
{
	int i;

	for (i = 0; i < count; i++) {
		memset(&params[i].timing->dsc_cfg, 0, sizeof(params[i].timing->dsc_cfg));
		if (vars[i].dsc_enabled && dc_dsc_compute_config(
					params[i].sink->ctx->dc->res_pool->dscs[0],
					&params[i].sink->dsc_caps.dsc_dec_caps,
					params[i].sink->ctx->dc->debug.dsc_min_slice_height_override,
					params[i].sink->edid_caps.panel_patch.max_dsc_target_bpp_limit,
					0,
					params[i].timing,
					&params[i].timing->dsc_cfg)) {
			params[i].timing->flags.DSC = 1;

			if (params[i].bpp_overwrite)
				params[i].timing->dsc_cfg.bits_per_pixel = params[i].bpp_overwrite;
			else
				params[i].timing->dsc_cfg.bits_per_pixel = vars[i].bpp_x16;

			if (params[i].num_slices_h)
				params[i].timing->dsc_cfg.num_slices_h = params[i].num_slices_h;

			if (params[i].num_slices_v)
				params[i].timing->dsc_cfg.num_slices_v = params[i].num_slices_v;
		} else {
			params[i].timing->flags.DSC = 0;
		}
	}
}
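
/*
 * Inverse of kbps_to_peak_pbn(): given a PBN budget, convert it back to a
 * kbps figure (the 994/1000 factor takes the 0.6% margin back out) and ask
 * DC what DSC target bits-per-pixel (in units of 1/16th of a bit) fits
 * within it for this timing.
 */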
static int bpp_x16_from_pbn(struct dsc_mst_fairness_params param, int pbn)
{
	struct dc_dsc_config dsc_config;
	u64 kbps;

	kbps = div_u64((u64)pbn * 994 * 8 * 54, 64);
	dc_dsc_compute_config(
			param.sink->ctx->dc->res_pool->dscs[0],
			&param.sink->dsc_caps.dsc_dec_caps,
			param.sink->ctx->dc->debug.dsc_min_slice_height_override,
			param.sink->edid_caps.panel_patch.max_dsc_target_bpp_limit,
			(int) kbps, param.timing, &dsc_config);

	return dsc_config.bits_per_pixel;
}
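
/*
 * Greedy fairness pass: all DSC-enabled streams start at their minimum
 * compressed bandwidth, and the free link time slots are handed out in
 * rounds. Each round picks the stream with the least slack (distance
 * between its current PBN and the PBN of its maximum DSC target bpp),
 * grants it either an equal share of the free slots or all of its
 * remaining slack, whichever is smaller, and keeps the grant only if the
 * MST atomic check still passes; otherwise the grant is rolled back.
 */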
static void increase_dsc_bpp(struct drm_atomic_state *state,
			     struct dc_link *dc_link,
			     struct dsc_mst_fairness_params *params,
			     struct dsc_mst_fairness_vars *vars,
			     int count)
{
	int i;
	bool bpp_increased[MAX_PIPES];
	int initial_slack[MAX_PIPES];
	int min_initial_slack;
	int next_index;
	int remaining_to_increase = 0;
	int pbn_per_timeslot;
	int link_timeslots_used;
	int fair_pbn_alloc;

	pbn_per_timeslot = dm_mst_get_pbn_divider(dc_link);

	for (i = 0; i < count; i++) {
		if (vars[i].dsc_enabled) {
			initial_slack[i] = kbps_to_peak_pbn(params[i].bw_range.max_kbps) - vars[i].pbn;
			bpp_increased[i] = false;
			remaining_to_increase += 1;
		} else {
			initial_slack[i] = 0;
			bpp_increased[i] = true;
		}
	}

	while (remaining_to_increase) {
		next_index = -1;
		min_initial_slack = -1;
		for (i = 0; i < count; i++) {
			if (!bpp_increased[i]) {
				if (min_initial_slack == -1 || min_initial_slack > initial_slack[i]) {
					min_initial_slack = initial_slack[i];
					next_index = i;
				}
			}
		}

		if (next_index == -1)
			break;

		link_timeslots_used = 0;

		for (i = 0; i < count; i++)
			link_timeslots_used += DIV_ROUND_UP(vars[i].pbn, pbn_per_timeslot);

		fair_pbn_alloc = (63 - link_timeslots_used) / remaining_to_increase * pbn_per_timeslot;

		if (initial_slack[next_index] > fair_pbn_alloc) {
			vars[next_index].pbn += fair_pbn_alloc;
			if (drm_dp_atomic_find_vcpi_slots(state,
							  params[next_index].port->mgr,
							  params[next_index].port,
							  vars[next_index].pbn,
							  pbn_per_timeslot) < 0)
				return;
			if (!drm_dp_mst_atomic_check(state)) {
				vars[next_index].bpp_x16 = bpp_x16_from_pbn(params[next_index], vars[next_index].pbn);
			} else {
				vars[next_index].pbn -= fair_pbn_alloc;
				if (drm_dp_atomic_find_vcpi_slots(state,
								  params[next_index].port->mgr,
								  params[next_index].port,
								  vars[next_index].pbn,
								  pbn_per_timeslot) < 0)
					return;
			}
		} else {
			vars[next_index].pbn += initial_slack[next_index];
			if (drm_dp_atomic_find_vcpi_slots(state,
							  params[next_index].port->mgr,
							  params[next_index].port,
							  vars[next_index].pbn,
							  pbn_per_timeslot) < 0)
				return;
			if (!drm_dp_mst_atomic_check(state)) {
				vars[next_index].bpp_x16 = params[next_index].bw_range.max_target_bpp_x16;
			} else {
				vars[next_index].pbn -= initial_slack[next_index];
				if (drm_dp_atomic_find_vcpi_slots(state,
								  params[next_index].port->mgr,
								  params[next_index].port,
								  vars[next_index].pbn,
								  pbn_per_timeslot) < 0)
					return;
			}
		}

		bpp_increased[next_index] = true;
		remaining_to_increase--;
	}
}
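
/*
 * After bpp optimization, try to switch individual streams back to an
 * uncompressed configuration, starting with the stream that gains the most
 * bandwidth from dropping DSC. A stream stays uncompressed only if the MST
 * atomic check still passes with its full uncompressed PBN; otherwise its
 * previous compressed allocation is restored.
 */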
static void try_disable_dsc(struct drm_atomic_state *state,
			    struct dc_link *dc_link,
			    struct dsc_mst_fairness_params *params,
			    struct dsc_mst_fairness_vars *vars,
			    int count)
{
	int i;
	bool tried[MAX_PIPES];
	int kbps_increase[MAX_PIPES];
	int max_kbps_increase;
	int next_index;
	int remaining_to_try = 0;

	for (i = 0; i < count; i++) {
		if (vars[i].dsc_enabled
				&& vars[i].bpp_x16 == params[i].bw_range.max_target_bpp_x16
				&& params[i].clock_force_enable == DSC_CLK_FORCE_DEFAULT) {
			kbps_increase[i] = params[i].bw_range.stream_kbps - params[i].bw_range.max_kbps;
			tried[i] = false;
			remaining_to_try += 1;
		} else {
			kbps_increase[i] = 0;
			tried[i] = true;
		}
	}

	while (remaining_to_try) {
		next_index = -1;
		max_kbps_increase = -1;
		for (i = 0; i < count; i++) {
			if (!tried[i]) {
				if (max_kbps_increase == -1 || max_kbps_increase < kbps_increase[i]) {
					max_kbps_increase = kbps_increase[i];
					next_index = i;
				}
			}
		}

		if (next_index == -1)
			break;

		vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps);
		if (drm_dp_atomic_find_vcpi_slots(state,
						  params[next_index].port->mgr,
						  params[next_index].port,
						  vars[next_index].pbn,
						  dm_mst_get_pbn_divider(dc_link)) < 0)
			return;

		if (!drm_dp_mst_atomic_check(state)) {
			vars[next_index].dsc_enabled = false;
			vars[next_index].bpp_x16 = 0;
		} else {
			vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.max_kbps);
			if (drm_dp_atomic_find_vcpi_slots(state,
							  params[next_index].port->mgr,
							  params[next_index].port,
							  vars[next_index].pbn,
							  dm_mst_get_pbn_divider(dc_link)) < 0)
				return;
		}

		tried[next_index] = true;
		remaining_to_try--;
	}
}
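
/*
 * Per-link DSC/bandwidth solver, tried in three phases:
 *  1. No compression: if every stream fits uncompressed (and DSC is not
 *     forced on via debugfs), use that.
 *  2. Max compression: enable DSC at the minimum target bpp for every
 *     stream that supports it; if even that does not fit, fail.
 *  3. Optimization: increase_dsc_bpp() grows the compressed streams toward
 *     their maximum bpp and try_disable_dsc() turns DSC back off where the
 *     link has room, before the final configs are written back.
 */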
static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
					     struct dc_state *dc_state,
					     struct dc_link *dc_link)
{
	int i;
	struct dc_stream_state *stream;
	struct dsc_mst_fairness_params params[MAX_PIPES];
	struct dsc_mst_fairness_vars vars[MAX_PIPES];
	struct amdgpu_dm_connector *aconnector;
	int count = 0;
	bool debugfs_overwrite = false;

	memset(params, 0, sizeof(params));

	/* Set up params */
	for (i = 0; i < dc_state->stream_count; i++) {
		struct dc_dsc_policy dsc_policy = {0};

		stream = dc_state->streams[i];

		if (stream->link != dc_link)
			continue;

		stream->timing.flags.DSC = 0;

		params[count].timing = &stream->timing;
		params[count].sink = stream->sink;
		aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
		params[count].port = aconnector->port;
		params[count].clock_force_enable = aconnector->dsc_settings.dsc_force_enable;
		if (params[count].clock_force_enable == DSC_CLK_FORCE_ENABLE)
			debugfs_overwrite = true;
		params[count].num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
		params[count].num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
		params[count].bpp_overwrite = aconnector->dsc_settings.dsc_bits_per_pixel;
		params[count].compression_possible = stream->sink->dsc_caps.dsc_dec_caps.is_dsc_supported;
		dc_dsc_get_policy_for_timing(params[count].timing, 0, &dsc_policy);
		if (!dc_dsc_compute_bandwidth_range(
				stream->sink->ctx->dc->res_pool->dscs[0],
				stream->sink->ctx->dc->debug.dsc_min_slice_height_override,
				dsc_policy.min_target_bpp * 16,
				dsc_policy.max_target_bpp * 16,
				&stream->sink->dsc_caps.dsc_dec_caps,
				&stream->timing, &params[count].bw_range))
			params[count].bw_range.stream_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing);

		count++;
	}
	/* Try no compression */
	for (i = 0; i < count; i++) {
		vars[i].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps);
		vars[i].dsc_enabled = false;
		vars[i].bpp_x16 = 0;
		if (drm_dp_atomic_find_vcpi_slots(state,
						  params[i].port->mgr,
						  params[i].port,
						  vars[i].pbn,
						  dm_mst_get_pbn_divider(dc_link)) < 0)
			return false;
	}
	if (!drm_dp_mst_atomic_check(state) && !debugfs_overwrite) {
		set_dsc_configs_from_fairness_vars(params, vars, count);
		return true;
	}

	/* Try max compression */
	for (i = 0; i < count; i++) {
		if (params[i].compression_possible && params[i].clock_force_enable != DSC_CLK_FORCE_DISABLE) {
			vars[i].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps);
			vars[i].dsc_enabled = true;
			vars[i].bpp_x16 = params[i].bw_range.min_target_bpp_x16;
			if (drm_dp_atomic_find_vcpi_slots(state,
							  params[i].port->mgr,
							  params[i].port,
							  vars[i].pbn,
							  dm_mst_get_pbn_divider(dc_link)) < 0)
				return false;
		} else {
			vars[i].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps);
			vars[i].dsc_enabled = false;
			vars[i].bpp_x16 = 0;
			if (drm_dp_atomic_find_vcpi_slots(state,
							  params[i].port->mgr,
							  params[i].port,
							  vars[i].pbn,
							  dm_mst_get_pbn_divider(dc_link)) < 0)
				return false;
		}
	}
	if (drm_dp_mst_atomic_check(state))
		return false;

	/* Optimize degree of compression */
	increase_dsc_bpp(state, dc_link, params, vars, count);

	try_disable_dsc(state, dc_link, params, vars, count);

	set_dsc_configs_from_fairness_vars(params, vars, count);

	return true;
}
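
/*
 * Entry point from atomic check: walk every MST stream in the new dc_state,
 * run the per-link DSC solver once per MST link (all streams sharing a link
 * are computed together under the topology manager lock), and finally
 * re-add DSC resources for the streams that ended up with DSC enabled.
 */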
bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
				       struct dc_state *dc_state)
{
	int i, j;
	struct dc_stream_state *stream;
	bool computed_streams[MAX_PIPES];
	struct amdgpu_dm_connector *aconnector;

	for (i = 0; i < dc_state->stream_count; i++)
		computed_streams[i] = false;

	for (i = 0; i < dc_state->stream_count; i++) {
		stream = dc_state->streams[i];

		if (stream->signal != SIGNAL_TYPE_DISPLAY_PORT_MST)
			continue;

		aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;

		if (!aconnector || !aconnector->dc_sink)
			continue;

		if (!aconnector->dc_sink->dsc_caps.dsc_dec_caps.is_dsc_supported)
			continue;

		if (computed_streams[i])
			continue;

		if (dcn20_remove_stream_from_ctx(stream->ctx->dc, dc_state, stream) != DC_OK)
			return false;

		mutex_lock(&aconnector->mst_mgr.lock);
		if (!compute_mst_dsc_configs_for_link(state, dc_state, stream->link)) {
			mutex_unlock(&aconnector->mst_mgr.lock);
			return false;
		}
		mutex_unlock(&aconnector->mst_mgr.lock);

		for (j = 0; j < dc_state->stream_count; j++) {
			if (dc_state->streams[j]->link == stream->link)
				computed_streams[j] = true;
		}
	}

	for (i = 0; i < dc_state->stream_count; i++) {
		stream = dc_state->streams[i];

		if (stream->timing.flags.DSC == 1)
			if (dc_stream_add_dsc_to_resource(stream->ctx->dc, dc_state, stream) != DC_OK)
				return false;
	}

	return true;
}
#endif