]> git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/blob - drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
Merge tag 'amd-drm-next-5.14-2021-05-19' of https://gitlab.freedesktop.org/agd5f...
[mirror_ubuntu-jammy-kernel.git] / drivers / gpu / drm / amd / display / amdgpu_dm / amdgpu_dm_mst_types.c
1 /*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26 #include <drm/drm_atomic.h>
27 #include <drm/drm_atomic_helper.h>
28 #include <drm/drm_dp_mst_helper.h>
29 #include <drm/drm_dp_helper.h>
30 #include "dm_services.h"
31 #include "amdgpu.h"
32 #include "amdgpu_dm.h"
33 #include "amdgpu_dm_mst_types.h"
34
35 #include "dc.h"
36 #include "dm_helpers.h"
37
38 #include "dc_link_ddc.h"
39
40 #include "i2caux_interface.h"
41 #include "dmub_cmd.h"
42 #if defined(CONFIG_DEBUG_FS)
43 #include "amdgpu_dm_debugfs.h"
44 #endif
45
46 #if defined(CONFIG_DRM_AMD_DC_DCN)
47 #include "dc/dcn20/dcn20_resource.h"
48 #endif
49
50 static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
51 struct drm_dp_aux_msg *msg)
52 {
53 ssize_t result = 0;
54 struct aux_payload payload;
55 enum aux_return_code_type operation_result;
56
57 if (WARN_ON(msg->size > 16))
58 return -E2BIG;
59
60 payload.address = msg->address;
61 payload.data = msg->buffer;
62 payload.length = msg->size;
63 payload.reply = &msg->reply;
64 payload.i2c_over_aux = (msg->request & DP_AUX_NATIVE_WRITE) == 0;
65 payload.write = (msg->request & DP_AUX_I2C_READ) == 0;
66 payload.mot = (msg->request & DP_AUX_I2C_MOT) != 0;
67 payload.defer_delay = 0;
68
69 result = dc_link_aux_transfer_raw(TO_DM_AUX(aux)->ddc_service, &payload,
70 &operation_result);
71
72 if (payload.write && result >= 0)
73 result = msg->size;
74
75 if (result < 0)
76 switch (operation_result) {
77 case AUX_RET_SUCCESS:
78 break;
79 case AUX_RET_ERROR_HPD_DISCON:
80 case AUX_RET_ERROR_UNKNOWN:
81 case AUX_RET_ERROR_INVALID_OPERATION:
82 case AUX_RET_ERROR_PROTOCOL_ERROR:
83 result = -EIO;
84 break;
85 case AUX_RET_ERROR_INVALID_REPLY:
86 case AUX_RET_ERROR_ENGINE_ACQUIRE:
87 result = -EBUSY;
88 break;
89 case AUX_RET_ERROR_TIMEOUT:
90 result = -ETIMEDOUT;
91 break;
92 }
93
94 return result;
95 }
96
97 static void
98 dm_dp_mst_connector_destroy(struct drm_connector *connector)
99 {
100 struct amdgpu_dm_connector *aconnector =
101 to_amdgpu_dm_connector(connector);
102
103 if (aconnector->dc_sink) {
104 dc_link_remove_remote_sink(aconnector->dc_link,
105 aconnector->dc_sink);
106 dc_sink_release(aconnector->dc_sink);
107 }
108
109 kfree(aconnector->edid);
110
111 drm_connector_cleanup(connector);
112 drm_dp_mst_put_port_malloc(aconnector->port);
113 kfree(aconnector);
114 }
115
116 static int
117 amdgpu_dm_mst_connector_late_register(struct drm_connector *connector)
118 {
119 struct amdgpu_dm_connector *amdgpu_dm_connector =
120 to_amdgpu_dm_connector(connector);
121 int r;
122
123 r = drm_dp_mst_connector_late_register(connector,
124 amdgpu_dm_connector->port);
125 if (r < 0)
126 return r;
127
128 #if defined(CONFIG_DEBUG_FS)
129 connector_debugfs_init(amdgpu_dm_connector);
130 #endif
131
132 return 0;
133 }
134
135 static void
136 amdgpu_dm_mst_connector_early_unregister(struct drm_connector *connector)
137 {
138 struct amdgpu_dm_connector *amdgpu_dm_connector =
139 to_amdgpu_dm_connector(connector);
140 struct drm_dp_mst_port *port = amdgpu_dm_connector->port;
141
142 drm_dp_mst_connector_early_unregister(connector, port);
143 }
144
/*
 * Connector funcs for MST connectors: property and state handling is shared
 * with the SST paths; destroy and (un)register use the MST-specific hooks
 * defined above.
 */
static const struct drm_connector_funcs dm_dp_mst_connector_funcs = {
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = dm_dp_mst_connector_destroy,
	.reset = amdgpu_dm_connector_funcs_reset,
	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
	.late_register = amdgpu_dm_mst_connector_late_register,
	.early_unregister = amdgpu_dm_mst_connector_early_unregister,
};
156
#if defined(CONFIG_DRM_AMD_DC_DCN)
/*
 * Read and parse the DSC decoder caps for an MST connector's sink.
 *
 * Picks the AUX channel that owns the sink's DSC DPCD registers, reads 16
 * bytes starting at DP_DSC_SUPPORT and parses them into
 * dc_sink->dsc_caps.dsc_dec_caps.
 *
 * Returns true on success; false if no suitable AUX exists or the read/parse
 * fails (the caller then zeroes the sink's dsc_caps).
 */
static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnector)
{
	struct dc_sink *dc_sink = aconnector->dc_sink;
	struct drm_dp_mst_port *port = aconnector->port;
	u8 dsc_caps[16] = { 0 };

	aconnector->dsc_aux = drm_dp_mst_dsc_aux_for_port(port);
#if defined(CONFIG_HP_HOOK_WORKAROUND)
	/*
	 * drm_dp_mst_dsc_aux_for_port() will return NULL for certain configs
	 * because it only checks the dsc/fec caps of the "port variable" and not the dock
	 *
	 * This case will return NULL: DSC capable MST dock connected to a non fec/dsc capable display
	 *
	 * Workaround: explicitly check the use case above and use the mst dock's aux as dsc_aux
	 *
	 */

	/* A port whose parent branch has no parent sits directly below the
	 * root, i.e. this is the first-hop dock itself.
	 */
	if (!aconnector->dsc_aux && !port->parent->port_parent)
		aconnector->dsc_aux = &aconnector->mst_port->dm_dp_aux.aux;
#endif
	if (!aconnector->dsc_aux)
		return false;

	/* NOTE(review): only hard AUX errors are rejected here; a short but
	 * successful read would leave the zero-initialized tail of dsc_caps
	 * to be parsed — confirm whether a != 16 check is wanted.
	 */
	if (drm_dp_dpcd_read(aconnector->dsc_aux, DP_DSC_SUPPORT, dsc_caps, 16) < 0)
		return false;

	if (!dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
				   dsc_caps, NULL,
				   &dc_sink->dsc_caps.dsc_dec_caps))
		return false;

	return true;
}
#endif
193
/*
 * .get_modes hook for MST connectors.
 *
 * Fetches (and caches) the EDID over the MST topology, (re)creates the DC
 * remote sink for this connector when needed, publishes the EDID property
 * and adds the EDID-derived modes to the connector.
 *
 * Returns the number of modes added (0 on failure or when no EDID exists).
 */
static int dm_dp_mst_get_modes(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	int ret = 0;

	if (!aconnector)
		return drm_add_edid_modes(connector, NULL);

	if (!aconnector->edid) {
		struct edid *edid;
		edid = drm_dp_mst_get_edid(connector, &aconnector->mst_port->mst_mgr, aconnector->port);

		if (!edid) {
			/* No EDID available: clear the property and report zero modes. */
			drm_connector_update_edid_property(
				&aconnector->base,
				NULL);
			return ret;
		}

		aconnector->edid = edid;
	}

	/* A virtual (placeholder) sink may have been installed while the real
	 * sink was unavailable; drop it so a real remote sink can be created.
	 */
	if (aconnector->dc_sink && aconnector->dc_sink->sink_signal == SIGNAL_TYPE_VIRTUAL) {
		dc_sink_release(aconnector->dc_sink);
		aconnector->dc_sink = NULL;
	}

	if (!aconnector->dc_sink) {
		struct dc_sink *dc_sink;
		struct dc_sink_init_data init_params = {
				.link = aconnector->dc_link,
				.sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST };
		/* The raw EDID (base block plus extensions) serves as the
		 * sink's capability blob.
		 */
		dc_sink = dc_link_add_remote_sink(
			aconnector->dc_link,
			(uint8_t *)aconnector->edid,
			(aconnector->edid->extensions + 1) * EDID_LENGTH,
			&init_params);

		if (!dc_sink) {
			DRM_ERROR("Unable to add a remote sink\n");
			return 0;
		}

		dc_sink->priv = aconnector;
		/* dc_link_add_remote_sink returns a new reference */
		aconnector->dc_sink = dc_sink;

		if (aconnector->dc_sink) {
			amdgpu_dm_update_freesync_caps(
					connector, aconnector->edid);

#if defined(CONFIG_DRM_AMD_DC_DCN)
			/* No (valid) DSC caps means DSC stays off for this sink. */
			if (!validate_dsc_caps_on_connector(aconnector))
				memset(&aconnector->dc_sink->dsc_caps,
				       0, sizeof(aconnector->dc_sink->dsc_caps));
#endif
		}
	}

	drm_connector_update_edid_property(
					&aconnector->base, aconnector->edid);

	ret = drm_add_edid_modes(connector, aconnector->edid);

	return ret;
}
260
261 static struct drm_encoder *
262 dm_mst_atomic_best_encoder(struct drm_connector *connector,
263 struct drm_atomic_state *state)
264 {
265 struct drm_connector_state *connector_state = drm_atomic_get_new_connector_state(state,
266 connector);
267 struct drm_device *dev = connector->dev;
268 struct amdgpu_device *adev = drm_to_adev(dev);
269 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(connector_state->crtc);
270
271 return &adev->dm.mst_encoders[acrtc->crtc_id].base;
272 }
273
274 static int
275 dm_dp_mst_detect(struct drm_connector *connector,
276 struct drm_modeset_acquire_ctx *ctx, bool force)
277 {
278 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
279 struct amdgpu_dm_connector *master = aconnector->mst_port;
280
281 if (drm_connector_is_unregistered(connector))
282 return connector_status_disconnected;
283
284 return drm_dp_mst_detect_port(connector, ctx, &master->mst_mgr,
285 aconnector->port);
286 }
287
288 static int dm_dp_mst_atomic_check(struct drm_connector *connector,
289 struct drm_atomic_state *state)
290 {
291 struct drm_connector_state *new_conn_state =
292 drm_atomic_get_new_connector_state(state, connector);
293 struct drm_connector_state *old_conn_state =
294 drm_atomic_get_old_connector_state(state, connector);
295 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
296 struct drm_crtc_state *new_crtc_state;
297 struct drm_dp_mst_topology_mgr *mst_mgr;
298 struct drm_dp_mst_port *mst_port;
299
300 mst_port = aconnector->port;
301 mst_mgr = &aconnector->mst_port->mst_mgr;
302
303 if (!old_conn_state->crtc)
304 return 0;
305
306 if (new_conn_state->crtc) {
307 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);
308 if (!new_crtc_state ||
309 !drm_atomic_crtc_needs_modeset(new_crtc_state) ||
310 new_crtc_state->enable)
311 return 0;
312 }
313
314 return drm_dp_atomic_release_vcpi_slots(state,
315 mst_mgr,
316 mst_port);
317 }
318
/* Probe/validation helpers wiring up the MST-specific hooks above. */
static const struct drm_connector_helper_funcs dm_dp_mst_connector_helper_funcs = {
	.get_modes = dm_dp_mst_get_modes,
	.mode_valid = amdgpu_dm_connector_mode_valid,
	.atomic_best_encoder = dm_mst_atomic_best_encoder,
	.detect_ctx = dm_dp_mst_detect,
	.atomic_check = dm_dp_mst_atomic_check,
};
326
/* .destroy for the fake MST encoders: unregister from DRM, then free. */
static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
	kfree(encoder);
}
332
/* Encoder vtable shared by all fake MST encoders. */
static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
	.destroy = amdgpu_dm_encoder_destroy,
};
336
337 void
338 dm_dp_create_fake_mst_encoders(struct amdgpu_device *adev)
339 {
340 struct drm_device *dev = adev_to_drm(adev);
341 int i;
342
343 for (i = 0; i < adev->dm.display_indexes_num; i++) {
344 struct amdgpu_encoder *amdgpu_encoder = &adev->dm.mst_encoders[i];
345 struct drm_encoder *encoder = &amdgpu_encoder->base;
346
347 encoder->possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
348
349 drm_encoder_init(
350 dev,
351 &amdgpu_encoder->base,
352 &amdgpu_dm_encoder_funcs,
353 DRM_MODE_ENCODER_DPMST,
354 NULL);
355
356 drm_encoder_helper_add(encoder, &amdgpu_dm_encoder_helper_funcs);
357 }
358 }
359
/*
 * .add_connector callback of the MST topology manager: create an
 * amdgpu_dm_connector for a newly discovered MST port.
 *
 * The new connector borrows its dc_link and display properties from the SST
 * "master" connector that owns the topology manager. Returns the new
 * drm_connector, or NULL on allocation/init failure.
 */
static struct drm_connector *
dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
			struct drm_dp_mst_port *port,
			const char *pathprop)
{
	struct amdgpu_dm_connector *master = container_of(mgr, struct amdgpu_dm_connector, mst_mgr);
	struct drm_device *dev = master->base.dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	int i;

	aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
	if (!aconnector)
		return NULL;

	connector = &aconnector->base;
	aconnector->port = port;
	aconnector->mst_port = master;

	if (drm_connector_init(
		dev,
		connector,
		&dm_dp_mst_connector_funcs,
		DRM_MODE_CONNECTOR_DisplayPort)) {
		kfree(aconnector);
		return NULL;
	}
	drm_connector_helper_add(connector, &dm_dp_mst_connector_helper_funcs);

	amdgpu_dm_connector_init_helper(
		&adev->dm,
		aconnector,
		DRM_MODE_CONNECTOR_DisplayPort,
		master->dc_link,
		master->connector_id);

	/* Any fake MST encoder (one per display index) may drive this connector. */
	for (i = 0; i < adev->dm.display_indexes_num; i++) {
		drm_connector_attach_encoder(&aconnector->base,
					     &adev->dm.mst_encoders[i].base);
	}

	/* Mirror the master's max_bpc and vrr properties, when present. */
	connector->max_bpc_property = master->base.max_bpc_property;
	if (connector->max_bpc_property)
		drm_connector_attach_max_bpc_property(connector, 8, 16);

	connector->vrr_capable_property = master->base.vrr_capable_property;
	if (connector->vrr_capable_property)
		drm_connector_attach_vrr_capable_property(connector);

	drm_object_attach_property(
		&connector->base,
		dev->mode_config.path_property,
		0);
	drm_object_attach_property(
		&connector->base,
		dev->mode_config.tile_property,
		0);

	drm_connector_set_path_property(connector, pathprop);

	/*
	 * Initialize connector state before adding the connector to drm and
	 * framebuffer lists
	 */
	amdgpu_dm_connector_funcs_reset(connector);

	/* Held until dm_dp_mst_connector_destroy() releases it. */
	drm_dp_mst_get_port_malloc(port);

	return connector;
}
431
/* Topology manager callbacks: only connector creation is needed here. */
static const struct drm_dp_mst_topology_cbs dm_mst_cbs = {
	.add_connector = dm_dp_add_mst_connector,
};
435
436 void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
437 struct amdgpu_dm_connector *aconnector,
438 int link_index)
439 {
440 struct dc_link_settings max_link_enc_cap = {0};
441
442 aconnector->dm_dp_aux.aux.name =
443 kasprintf(GFP_KERNEL, "AMDGPU DM aux hw bus %d",
444 link_index);
445 aconnector->dm_dp_aux.aux.transfer = dm_dp_aux_transfer;
446 aconnector->dm_dp_aux.aux.drm_dev = dm->ddev;
447 aconnector->dm_dp_aux.ddc_service = aconnector->dc_link->ddc;
448
449 drm_dp_aux_init(&aconnector->dm_dp_aux.aux);
450 drm_dp_cec_register_connector(&aconnector->dm_dp_aux.aux,
451 &aconnector->base);
452
453 if (aconnector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
454 return;
455
456 dc_link_dp_get_max_link_enc_cap(aconnector->dc_link, &max_link_enc_cap);
457 aconnector->mst_mgr.cbs = &dm_mst_cbs;
458 drm_dp_mst_topology_mgr_init(
459 &aconnector->mst_mgr,
460 adev_to_drm(dm->adev),
461 &aconnector->dm_dp_aux.aux,
462 16,
463 4,
464 (u8)max_link_enc_cap.lane_count,
465 (u8)max_link_enc_cap.link_rate,
466 aconnector->connector_id);
467
468 drm_connector_attach_dp_subconnector_property(&aconnector->base);
469 }
470
/*
 * Per-timeslot PBN granularity for a link: its bandwidth in kbps divided by
 * 54 * 8 * 1000 (one PBN unit is 54/64 MBps). Returns 0 for a NULL link.
 */
int dm_mst_get_pbn_divider(struct dc_link *link)
{
	return link ? dc_link_bandwidth_kbps(link, dc_link_get_link_cap(link)) /
			      (54 * 8 * 1000)
		    : 0;
}
479
480 #if defined(CONFIG_DRM_AMD_DC_DCN)
481
/* Per-stream inputs to the MST DSC fairness computation. */
struct dsc_mst_fairness_params {
	struct dc_crtc_timing *timing;		/* stream timing; dsc_cfg is written back here */
	struct dc_sink *sink;			/* sink providing the DSC decoder caps */
	struct dc_dsc_bw_range bw_range;	/* min/max/uncompressed kbps for this timing */
	bool compression_possible;		/* sink reports DSC decode support */
	struct drm_dp_mst_port *port;		/* MST port used for VCPI allocation */
	enum dsc_clock_force_state clock_force_enable;	/* forced on/off/default, from aconnector->dsc_settings */
	uint32_t num_slices_h;			/* slice-count override from dsc_settings (0 = none) */
	uint32_t num_slices_v;			/* slice-count override from dsc_settings (0 = none) */
	uint32_t bpp_overwrite;			/* bpp override from dsc_settings (0 = none) */
};
493
/* Per-stream outputs of the MST DSC fairness computation. */
struct dsc_mst_fairness_vars {
	int pbn;		/* PBN currently allocated to the stream */
	bool dsc_enabled;	/* whether DSC ends up enabled for the stream */
	int bpp_x16;		/* chosen DSC target bpp in 1/16 bpp units (0 when DSC is off) */
};
499
500 static int kbps_to_peak_pbn(int kbps)
501 {
502 u64 peak_kbps = kbps;
503
504 peak_kbps *= 1006;
505 peak_kbps = div_u64(peak_kbps, 1000);
506 return (int) DIV64_U64_ROUND_UP(peak_kbps * 64, (54 * 8 * 1000));
507 }
508
/*
 * Write the outcome of the fairness computation into each stream's timing.
 *
 * For every stream with DSC enabled in @vars, recompute the full DSC config
 * and set timing->flags.DSC; overrides from @params (bpp, slice counts) take
 * precedence over the computed values. Streams without DSC get their config
 * cleared and the flag reset.
 */
static void set_dsc_configs_from_fairness_vars(struct dsc_mst_fairness_params *params,
					       struct dsc_mst_fairness_vars *vars,
					       int count)
{
	int i;

	for (i = 0; i < count; i++) {
		memset(&params[i].timing->dsc_cfg, 0, sizeof(params[i].timing->dsc_cfg));
		if (vars[i].dsc_enabled && dc_dsc_compute_config(
					params[i].sink->ctx->dc->res_pool->dscs[0],
					&params[i].sink->dsc_caps.dsc_dec_caps,
					params[i].sink->ctx->dc->debug.dsc_min_slice_height_override,
					0,
					0,
					params[i].timing,
					&params[i].timing->dsc_cfg)) {
			params[i].timing->flags.DSC = 1;

			/* Explicit bpp override wins over the fairness result. */
			if (params[i].bpp_overwrite)
				params[i].timing->dsc_cfg.bits_per_pixel = params[i].bpp_overwrite;
			else
				params[i].timing->dsc_cfg.bits_per_pixel = vars[i].bpp_x16;

			if (params[i].num_slices_h)
				params[i].timing->dsc_cfg.num_slices_h = params[i].num_slices_h;

			if (params[i].num_slices_v)
				params[i].timing->dsc_cfg.num_slices_v = params[i].num_slices_v;
		} else {
			params[i].timing->flags.DSC = 0;
		}
	}
}
542
/*
 * Convert a PBN allocation back into a DSC target bpp (1/16 bpp units).
 *
 * The PBN is turned into a kbps budget (NOTE(review): the 994/1000 factor is
 * not the exact inverse of the 1006/1000 margin in kbps_to_peak_pbn() —
 * presumably chosen to stay safely inside the allocation; confirm) and fed
 * to dc_dsc_compute_config(), whose resulting bits_per_pixel is returned.
 */
static int bpp_x16_from_pbn(struct dsc_mst_fairness_params param, int pbn)
{
	struct dc_dsc_config dsc_config;
	u64 kbps;

	kbps = div_u64((u64)pbn * 994 * 8 * 54, 64);
	dc_dsc_compute_config(
			param.sink->ctx->dc->res_pool->dscs[0],
			&param.sink->dsc_caps.dsc_dec_caps,
			param.sink->ctx->dc->debug.dsc_min_slice_height_override,
			0,
			(int) kbps, param.timing, &dsc_config);

	return dsc_config.bits_per_pixel;
}
558
/*
 * Greedily raise the DSC bpp of the streams on @dc_link, least-slack first.
 *
 * Every DSC-enabled stream starts with an "initial slack": the PBN gap
 * between its max-kbps allocation and its current one. Each round, the
 * untried stream with the smallest slack is granted either a fair share of
 * the link's remaining timeslots or its full slack, whichever is smaller.
 * Each trial allocation is validated with drm_dp_mst_atomic_check() and
 * rolled back (re-allocating the previous PBN) if the topology rejects it.
 * Returns early if any VCPI trial allocation itself fails.
 */
static void increase_dsc_bpp(struct drm_atomic_state *state,
			     struct dc_link *dc_link,
			     struct dsc_mst_fairness_params *params,
			     struct dsc_mst_fairness_vars *vars,
			     int count)
{
	int i;
	bool bpp_increased[MAX_PIPES];
	int initial_slack[MAX_PIPES];
	int min_initial_slack;
	int next_index;
	int remaining_to_increase = 0;
	int pbn_per_timeslot;
	int link_timeslots_used;
	int fair_pbn_alloc;

	pbn_per_timeslot = dm_mst_get_pbn_divider(dc_link);

	/* Streams without DSC cannot be increased; mark them done up front. */
	for (i = 0; i < count; i++) {
		if (vars[i].dsc_enabled) {
			initial_slack[i] = kbps_to_peak_pbn(params[i].bw_range.max_kbps) - vars[i].pbn;
			bpp_increased[i] = false;
			remaining_to_increase += 1;
		} else {
			initial_slack[i] = 0;
			bpp_increased[i] = true;
		}
	}

	while (remaining_to_increase) {
		/* Pick the not-yet-bumped stream with the least slack. */
		next_index = -1;
		min_initial_slack = -1;
		for (i = 0; i < count; i++) {
			if (!bpp_increased[i]) {
				if (min_initial_slack == -1 || min_initial_slack > initial_slack[i]) {
					min_initial_slack = initial_slack[i];
					next_index = i;
				}
			}
		}

		if (next_index == -1)
			break;

		link_timeslots_used = 0;

		for (i = 0; i < count; i++)
			link_timeslots_used += DIV_ROUND_UP(vars[i].pbn, pbn_per_timeslot);

		/* 63: usable link timeslots (NOTE(review): presumably 64 minus
		 * one reserved slot — confirm against the MST helper docs).
		 */
		fair_pbn_alloc = (63 - link_timeslots_used) / remaining_to_increase * pbn_per_timeslot;

		if (initial_slack[next_index] > fair_pbn_alloc) {
			/* Grant only the fair share of the remaining slots. */
			vars[next_index].pbn += fair_pbn_alloc;
			if (drm_dp_atomic_find_vcpi_slots(state,
							  params[next_index].port->mgr,
							  params[next_index].port,
							  vars[next_index].pbn,
							  pbn_per_timeslot) < 0)
				return;
			if (!drm_dp_mst_atomic_check(state)) {
				vars[next_index].bpp_x16 = bpp_x16_from_pbn(params[next_index], vars[next_index].pbn);
			} else {
				/* Topology rejected it: roll the grant back. */
				vars[next_index].pbn -= fair_pbn_alloc;
				if (drm_dp_atomic_find_vcpi_slots(state,
								  params[next_index].port->mgr,
								  params[next_index].port,
								  vars[next_index].pbn,
								  pbn_per_timeslot) < 0)
					return;
			}
		} else {
			/* Enough room for the full slack: go to max bpp. */
			vars[next_index].pbn += initial_slack[next_index];
			if (drm_dp_atomic_find_vcpi_slots(state,
							  params[next_index].port->mgr,
							  params[next_index].port,
							  vars[next_index].pbn,
							  pbn_per_timeslot) < 0)
				return;
			if (!drm_dp_mst_atomic_check(state)) {
				vars[next_index].bpp_x16 = params[next_index].bw_range.max_target_bpp_x16;
			} else {
				/* Topology rejected it: roll the grant back. */
				vars[next_index].pbn -= initial_slack[next_index];
				if (drm_dp_atomic_find_vcpi_slots(state,
								  params[next_index].port->mgr,
								  params[next_index].port,
								  vars[next_index].pbn,
								  pbn_per_timeslot) < 0)
					return;
			}
		}

		bpp_increased[next_index] = true;
		remaining_to_increase--;
	}
}
654
/*
 * After bpp optimization, try to turn DSC off entirely where possible.
 *
 * Only streams that reached their maximum target bpp and are not forced
 * via clock_force_enable are candidates. Candidates are tried in order of
 * how much extra bandwidth going uncompressed would take (largest first);
 * each attempt allocates the full stream_kbps PBN and, if the topology
 * check rejects it, rolls back to the max_kbps PBN with DSC kept on.
 * Returns early if any VCPI trial allocation itself fails.
 */
static void try_disable_dsc(struct drm_atomic_state *state,
			    struct dc_link *dc_link,
			    struct dsc_mst_fairness_params *params,
			    struct dsc_mst_fairness_vars *vars,
			    int count)
{
	int i;
	bool tried[MAX_PIPES];
	int kbps_increase[MAX_PIPES];
	int max_kbps_increase;
	int next_index;
	int remaining_to_try = 0;

	/* Mark everything that is not a candidate as already tried. */
	for (i = 0; i < count; i++) {
		if (vars[i].dsc_enabled
				&& vars[i].bpp_x16 == params[i].bw_range.max_target_bpp_x16
				&& params[i].clock_force_enable == DSC_CLK_FORCE_DEFAULT) {
			kbps_increase[i] = params[i].bw_range.stream_kbps - params[i].bw_range.max_kbps;
			tried[i] = false;
			remaining_to_try += 1;
		} else {
			kbps_increase[i] = 0;
			tried[i] = true;
		}
	}

	while (remaining_to_try) {
		/* Pick the untried stream that gains the most from disabling DSC. */
		next_index = -1;
		max_kbps_increase = -1;
		for (i = 0; i < count; i++) {
			if (!tried[i]) {
				if (max_kbps_increase == -1 || max_kbps_increase < kbps_increase[i]) {
					max_kbps_increase = kbps_increase[i];
					next_index = i;
				}
			}
		}

		if (next_index == -1)
			break;

		/* Trial: allocate the full uncompressed bandwidth. */
		vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps);
		if (drm_dp_atomic_find_vcpi_slots(state,
						  params[next_index].port->mgr,
						  params[next_index].port,
						  vars[next_index].pbn,
						  dm_mst_get_pbn_divider(dc_link)) < 0)
			return;

		if (!drm_dp_mst_atomic_check(state)) {
			vars[next_index].dsc_enabled = false;
			vars[next_index].bpp_x16 = 0;
		} else {
			/* Rejected: restore the max-bpp DSC allocation. */
			vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.max_kbps);
			if (drm_dp_atomic_find_vcpi_slots(state,
							  params[next_index].port->mgr,
							  params[next_index].port,
							  vars[next_index].pbn,
							  dm_mst_get_pbn_divider(dc_link)) < 0)
				return;
		}

		tried[next_index] = true;
		remaining_to_try--;
	}
}
721
/*
 * Compute DSC configs for every stream on one MST link.
 *
 * Strategy, each phase validated via drm_dp_mst_atomic_check():
 *   1. try every stream uncompressed (accepted unless a debugfs force-enable
 *      is present);
 *   2. fall back to maximum compression for every DSC-capable stream;
 *   3. greedily raise bpp (increase_dsc_bpp), then try turning DSC back off
 *      where it is no longer needed (try_disable_dsc).
 * The final configs are written into the stream timings.
 * Returns false if VCPI allocation or the max-compression check fails.
 */
static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
					     struct dc_state *dc_state,
					     struct dc_link *dc_link)
{
	int i;
	struct dc_stream_state *stream;
	struct dsc_mst_fairness_params params[MAX_PIPES];
	struct dsc_mst_fairness_vars vars[MAX_PIPES];
	struct amdgpu_dm_connector *aconnector;
	int count = 0;
	bool debugfs_overwrite = false;

	memset(params, 0, sizeof(params));

	/* Set up params */
	for (i = 0; i < dc_state->stream_count; i++) {
		struct dc_dsc_policy dsc_policy = {0};

		stream = dc_state->streams[i];

		if (stream->link != dc_link)
			continue;

		/* Start from a clean slate; DSC is re-enabled below if chosen. */
		stream->timing.flags.DSC = 0;

		params[count].timing = &stream->timing;
		params[count].sink = stream->sink;
		aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
		params[count].port = aconnector->port;
		params[count].clock_force_enable = aconnector->dsc_settings.dsc_force_enable;
		if (params[count].clock_force_enable == DSC_CLK_FORCE_ENABLE)
			debugfs_overwrite = true;
		params[count].num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
		params[count].num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
		params[count].bpp_overwrite = aconnector->dsc_settings.dsc_bits_per_pixel;
		params[count].compression_possible = stream->sink->dsc_caps.dsc_dec_caps.is_dsc_supported;
		dc_dsc_get_policy_for_timing(params[count].timing, 0, &dsc_policy);
		/* If no bw range can be computed, at least record the
		 * uncompressed bandwidth of the timing.
		 */
		if (!dc_dsc_compute_bandwidth_range(
				stream->sink->ctx->dc->res_pool->dscs[0],
				stream->sink->ctx->dc->debug.dsc_min_slice_height_override,
				dsc_policy.min_target_bpp * 16,
				dsc_policy.max_target_bpp * 16,
				&stream->sink->dsc_caps.dsc_dec_caps,
				&stream->timing, &params[count].bw_range))
			params[count].bw_range.stream_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing);

		count++;
	}
	/* Try no compression */
	for (i = 0; i < count; i++) {
		vars[i].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps);
		vars[i].dsc_enabled = false;
		vars[i].bpp_x16 = 0;
		if (drm_dp_atomic_find_vcpi_slots(state,
						  params[i].port->mgr,
						  params[i].port,
						  vars[i].pbn,
						  dm_mst_get_pbn_divider(dc_link)) < 0)
			return false;
	}
	if (!drm_dp_mst_atomic_check(state) && !debugfs_overwrite) {
		set_dsc_configs_from_fairness_vars(params, vars, count);
		return true;
	}

	/* Try max compression */
	for (i = 0; i < count; i++) {
		if (params[i].compression_possible && params[i].clock_force_enable != DSC_CLK_FORCE_DISABLE) {
			vars[i].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps);
			vars[i].dsc_enabled = true;
			vars[i].bpp_x16 = params[i].bw_range.min_target_bpp_x16;
			if (drm_dp_atomic_find_vcpi_slots(state,
							  params[i].port->mgr,
							  params[i].port,
							  vars[i].pbn,
							  dm_mst_get_pbn_divider(dc_link)) < 0)
				return false;
		} else {
			/* Sink cannot decompress (or DSC forced off): keep the
			 * stream uncompressed.
			 */
			vars[i].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps);
			vars[i].dsc_enabled = false;
			vars[i].bpp_x16 = 0;
			if (drm_dp_atomic_find_vcpi_slots(state,
							  params[i].port->mgr,
							  params[i].port,
							  vars[i].pbn,
							  dm_mst_get_pbn_divider(dc_link)) < 0)
				return false;
		}
	}
	if (drm_dp_mst_atomic_check(state))
		return false;

	/* Optimize degree of compression */
	increase_dsc_bpp(state, dc_link, params, vars, count);

	try_disable_dsc(state, dc_link, params, vars, count);

	set_dsc_configs_from_fairness_vars(params, vars, count);

	return true;
}
823
/*
 * Entry point for MST DSC configuration during atomic check.
 *
 * For each MST stream whose sink supports DSC: remove the stream from the
 * DC context, compute DSC configs for its entire link under the topology
 * manager's lock, then mark all streams on that link as done so the link is
 * only processed once. Finally, DSC resources are added back for every
 * stream whose timing ended up with DSC enabled.
 *
 * Returns true on success, false on any failure along the way.
 */
bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
				       struct dc_state *dc_state)
{
	int i, j;
	struct dc_stream_state *stream;
	bool computed_streams[MAX_PIPES];
	struct amdgpu_dm_connector *aconnector;

	for (i = 0; i < dc_state->stream_count; i++)
		computed_streams[i] = false;

	for (i = 0; i < dc_state->stream_count; i++) {
		stream = dc_state->streams[i];

		if (stream->signal != SIGNAL_TYPE_DISPLAY_PORT_MST)
			continue;

		aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;

		if (!aconnector || !aconnector->dc_sink)
			continue;

		if (!aconnector->dc_sink->dsc_caps.dsc_dec_caps.is_dsc_supported)
			continue;

		/* Already handled as part of a previously computed link. */
		if (computed_streams[i])
			continue;

		if (dcn20_remove_stream_from_ctx(stream->ctx->dc, dc_state, stream) != DC_OK)
			return false;

		mutex_lock(&aconnector->mst_mgr.lock);
		if (!compute_mst_dsc_configs_for_link(state, dc_state, stream->link)) {
			mutex_unlock(&aconnector->mst_mgr.lock);
			return false;
		}
		mutex_unlock(&aconnector->mst_mgr.lock);

		/* The link computation covered every stream sharing this link. */
		for (j = 0; j < dc_state->stream_count; j++) {
			if (dc_state->streams[j]->link == stream->link)
				computed_streams[j] = true;
		}
	}

	for (i = 0; i < dc_state->stream_count; i++) {
		stream = dc_state->streams[i];

		if (stream->timing.flags.DSC == 1)
			if (dc_stream_add_dsc_to_resource(stream->ctx->dc, dc_state, stream) != DC_OK)
				return false;
	}

	return true;
}
878
879 #endif