1 /*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
28
29 #include "dm_services_types.h"
30 #include "dc.h"
31 #include "dc/inc/core_types.h"
32 #include "dal_asic_id.h"
33 #include "dmub/inc/dmub_srv.h"
34 #include "dc/inc/hw/dmcu.h"
35 #include "dc/inc/hw/abm.h"
36 #include "dc/dc_dmub_srv.h"
37
38 #include "vid.h"
39 #include "amdgpu.h"
40 #include "amdgpu_display.h"
41 #include "amdgpu_ucode.h"
42 #include "atom.h"
43 #include "amdgpu_dm.h"
44 #ifdef CONFIG_DRM_AMD_DC_HDCP
45 #include "amdgpu_dm_hdcp.h"
46 #include <drm/drm_hdcp.h>
47 #endif
48 #include "amdgpu_pm.h"
49
50 #include "amd_shared.h"
51 #include "amdgpu_dm_irq.h"
52 #include "dm_helpers.h"
53 #include "amdgpu_dm_mst_types.h"
54 #if defined(CONFIG_DEBUG_FS)
55 #include "amdgpu_dm_debugfs.h"
56 #endif
57
58 #include "ivsrcid/ivsrcid_vislands30.h"
59
60 #include <linux/module.h>
61 #include <linux/moduleparam.h>
62 #include <linux/version.h>
63 #include <linux/types.h>
64 #include <linux/pm_runtime.h>
65 #include <linux/pci.h>
66 #include <linux/firmware.h>
67 #include <linux/component.h>
68
69 #include <drm/drm_atomic.h>
70 #include <drm/drm_atomic_uapi.h>
71 #include <drm/drm_atomic_helper.h>
72 #include <drm/drm_dp_mst_helper.h>
73 #include <drm/drm_fb_helper.h>
74 #include <drm/drm_fourcc.h>
75 #include <drm/drm_edid.h>
76 #include <drm/drm_vblank.h>
77 #include <drm/drm_audio_component.h>
78 #include <drm/drm_hdcp.h>
79
80 #if defined(CONFIG_DRM_AMD_DC_DCN)
81 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
82
83 #include "dcn/dcn_1_0_offset.h"
84 #include "dcn/dcn_1_0_sh_mask.h"
85 #include "soc15_hw_ip.h"
86 #include "vega10_ip_offset.h"
87
88 #include "soc15_common.h"
89 #endif
90
91 #include "modules/inc/mod_freesync.h"
92 #include "modules/power/power_helpers.h"
93 #include "modules/inc/mod_info_packet.h"
94
95 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
96 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
97
98 #define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
99 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
100
101 #define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
102 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
103
104 /* Number of bytes in PSP header for firmware. */
105 #define PSP_HEADER_BYTES 0x100
106
107 /* Number of bytes in PSP footer for firmware. */
108 #define PSP_FOOTER_BYTES 0x100
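
/*
 * As the offset math in dm_dmub_sw_init() and dm_dmub_hw_init() below
 * implies, the DMUB ucode array is laid out as a PSP header
 * (PSP_HEADER_BYTES), the instruction/constant data, a PSP footer
 * (PSP_FOOTER_BYTES) and then the BSS/data section, with the firmware
 * header's inst_const_bytes covering the first three pieces.
 */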
109
110 /**
111 * DOC: overview
112 *
113 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
114 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
115 * requests into DC requests, and DC responses into DRM responses.
116 *
117 * The root control structure is &struct amdgpu_display_manager.
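 * It is embedded in &struct amdgpu_device and is accessed throughout this
 * file as ``adev->dm``.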
118 */
119
120 /* basic init/fini API */
121 static int amdgpu_dm_init(struct amdgpu_device *adev);
122 static void amdgpu_dm_fini(struct amdgpu_device *adev);
123
124 /*
125 * Initializes drm_device display-related structures, based on the information
126 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
127 * drm_encoder, drm_mode_config
128 *
129 * Returns 0 on success
130 */
131 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
132 /* removes and deallocates the drm structures created by the above function */
133 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
134
135 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
136 struct drm_plane *plane,
137 unsigned long possible_crtcs,
138 const struct dc_plane_cap *plane_cap);
139 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
140 struct drm_plane *plane,
141 uint32_t link_index);
142 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
143 struct amdgpu_dm_connector *amdgpu_dm_connector,
144 uint32_t link_index,
145 struct amdgpu_encoder *amdgpu_encoder);
146 static int amdgpu_dm_encoder_init(struct drm_device *dev,
147 struct amdgpu_encoder *aencoder,
148 uint32_t link_index);
149
150 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
151
152 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
153 struct drm_atomic_state *state,
154 bool nonblock);
155
156 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
157
158 static int amdgpu_dm_atomic_check(struct drm_device *dev,
159 struct drm_atomic_state *state);
160
161 static void handle_cursor_update(struct drm_plane *plane,
162 struct drm_plane_state *old_plane_state);
163
164 static void amdgpu_dm_set_psr_caps(struct dc_link *link);
165 static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
166 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
167 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
168
169
170 /*
171 * dm_vblank_get_counter
172 *
173 * @brief
174 * Get counter for number of vertical blanks
175 *
176 * @param
177 * struct amdgpu_device *adev - [in] desired amdgpu device
178 * int crtc - [in] which CRTC to get the counter from
179 *
180 * @return
181 * Counter for vertical blanks
182 */
183 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
184 {
185 if (crtc >= adev->mode_info.num_crtc)
186 return 0;
187 else {
188 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
189 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
190 acrtc->base.state);
191
192
193 if (acrtc_state->stream == NULL) {
194 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
195 crtc);
196 return 0;
197 }
198
199 return dc_stream_get_vblank_counter(acrtc_state->stream);
200 }
201 }
202
203 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
204 u32 *vbl, u32 *position)
205 {
206 uint32_t v_blank_start, v_blank_end, h_position, v_position;
207
208 if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
209 return -EINVAL;
210 else {
211 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
212 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
213 acrtc->base.state);
214
215 if (acrtc_state->stream == NULL) {
216 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
217 crtc);
218 return 0;
219 }
220
221 /*
222 * TODO rework base driver to use values directly.
223 * for now parse it back into reg-format
224 */
225 dc_stream_get_scanoutpos(acrtc_state->stream,
226 &v_blank_start,
227 &v_blank_end,
228 &h_position,
229 &v_position);
230
231 *position = v_position | (h_position << 16);
232 *vbl = v_blank_start | (v_blank_end << 16);
233 }
234
235 return 0;
236 }
237
238 static bool dm_is_idle(void *handle)
239 {
240 /* XXX todo */
241 return true;
242 }
243
244 static int dm_wait_for_idle(void *handle)
245 {
246 /* XXX todo */
247 return 0;
248 }
249
250 static bool dm_check_soft_reset(void *handle)
251 {
252 return false;
253 }
254
255 static int dm_soft_reset(void *handle)
256 {
257 /* XXX todo */
258 return 0;
259 }
260
261 static struct amdgpu_crtc *
262 get_crtc_by_otg_inst(struct amdgpu_device *adev,
263 int otg_inst)
264 {
265 struct drm_device *dev = adev->ddev;
266 struct drm_crtc *crtc;
267 struct amdgpu_crtc *amdgpu_crtc;
268
269 if (otg_inst == -1) {
270 WARN_ON(1);
271 return adev->mode_info.crtcs[0];
272 }
273
274 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
275 amdgpu_crtc = to_amdgpu_crtc(crtc);
276
277 if (amdgpu_crtc->otg_inst == otg_inst)
278 return amdgpu_crtc;
279 }
280
281 return NULL;
282 }
283
284 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
285 {
286 return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
287 dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
288 }
289
290 /**
291 * dm_pflip_high_irq() - Handle pageflip interrupt
292 * @interrupt_params: interrupt parameters
293 *
294 * Handles the pageflip interrupt by notifying all interested parties
295 * that the pageflip has been completed.
296 */
297 static void dm_pflip_high_irq(void *interrupt_params)
298 {
299 struct amdgpu_crtc *amdgpu_crtc;
300 struct common_irq_params *irq_params = interrupt_params;
301 struct amdgpu_device *adev = irq_params->adev;
302 unsigned long flags;
303 struct drm_pending_vblank_event *e;
304 struct dm_crtc_state *acrtc_state;
305 uint32_t vpos, hpos, v_blank_start, v_blank_end;
306 bool vrr_active;
307
308 amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
309
310 /* The IRQ can occur while we are still in the initial bring-up stage. */
311 /* TODO: work and BO cleanup */
312 if (amdgpu_crtc == NULL) {
313 DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
314 return;
315 }
316
317 spin_lock_irqsave(&adev->ddev->event_lock, flags);
318
319 if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
320 DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
321 amdgpu_crtc->pflip_status,
322 AMDGPU_FLIP_SUBMITTED,
323 amdgpu_crtc->crtc_id,
324 amdgpu_crtc);
325 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
326 return;
327 }
328
329 /* page flip completed. */
330 e = amdgpu_crtc->event;
331 amdgpu_crtc->event = NULL;
332
333 if (!e)
334 WARN_ON(1);
335
336 acrtc_state = to_dm_crtc_state(amdgpu_crtc->base.state);
337 vrr_active = amdgpu_dm_vrr_active(acrtc_state);
338
339 /* Fixed refresh rate, or VRR scanout position outside front-porch? */
340 if (!vrr_active ||
341 !dc_stream_get_scanoutpos(acrtc_state->stream, &v_blank_start,
342 &v_blank_end, &hpos, &vpos) ||
343 (vpos < v_blank_start)) {
344 /* Update to correct count and vblank timestamp if racing with
345 * vblank irq. This also updates to the correct vblank timestamp
346 * even in VRR mode, as scanout is past the front-porch atm.
347 */
348 drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
349
350 /* Wake up userspace by sending the pageflip event with proper
351 * count and timestamp of vblank of flip completion.
352 */
353 if (e) {
354 drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
355
356 /* Event sent, so done with vblank for this flip */
357 drm_crtc_vblank_put(&amdgpu_crtc->base);
358 }
359 } else if (e) {
360 /* VRR active and inside front-porch: vblank count and
361 * timestamp for pageflip event will only be up to date after
362 * drm_crtc_handle_vblank() has been executed from late vblank
363 * irq handler after start of back-porch (vline 0). We queue the
364 * pageflip event for send-out by drm_crtc_handle_vblank() with
365 * updated timestamp and count, once it runs after us.
366 *
367 * We need to open-code this instead of using the helper
368 * drm_crtc_arm_vblank_event(), as that helper would
369 * call drm_crtc_accurate_vblank_count(), which we must
370 * not call in VRR mode while we are in front-porch!
371 */
372
373 /* sequence will be replaced by real count during send-out. */
374 e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
375 e->pipe = amdgpu_crtc->crtc_id;
376
377 list_add_tail(&e->base.link, &adev->ddev->vblank_event_list);
378 e = NULL;
379 }
380
381 /* Keep track of vblank of this flip for flip throttling. We use the
382 * cooked hw counter, as that one was incremented at the start of this
383 * vblank of pageflip completion, so last_flip_vblank is the forbidden count
384 * for queueing new pageflips if vsync + VRR is enabled.
385 */
386 amdgpu_crtc->last_flip_vblank =
387 amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
388
389 amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
390 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
391
392 DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
393 amdgpu_crtc->crtc_id, amdgpu_crtc,
394 vrr_active, (int) !e);
395 }
396
397 static void dm_vupdate_high_irq(void *interrupt_params)
398 {
399 struct common_irq_params *irq_params = interrupt_params;
400 struct amdgpu_device *adev = irq_params->adev;
401 struct amdgpu_crtc *acrtc;
402 struct dm_crtc_state *acrtc_state;
403 unsigned long flags;
404
405 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
406
407 if (acrtc) {
408 acrtc_state = to_dm_crtc_state(acrtc->base.state);
409
410 DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
411 acrtc->crtc_id,
412 amdgpu_dm_vrr_active(acrtc_state));
413
414 /* Core vblank handling is done here after end of front-porch in
415 * vrr mode, as vblank timestamping only gives valid results
416 * now that we are past the front-porch. This will also deliver
417 * page-flip completion events that have been queued to us
418 * if a pageflip happened inside front-porch.
419 */
420 if (amdgpu_dm_vrr_active(acrtc_state)) {
421 drm_crtc_handle_vblank(&acrtc->base);
422
423 /* BTR processing for pre-DCE12 ASICs */
424 if (acrtc_state->stream &&
425 adev->family < AMDGPU_FAMILY_AI) {
426 spin_lock_irqsave(&adev->ddev->event_lock, flags);
427 mod_freesync_handle_v_update(
428 adev->dm.freesync_module,
429 acrtc_state->stream,
430 &acrtc_state->vrr_params);
431
432 dc_stream_adjust_vmin_vmax(
433 adev->dm.dc,
434 acrtc_state->stream,
435 &acrtc_state->vrr_params.adjust);
436 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
437 }
438 }
439 }
440 }
441
442 /**
443 * dm_crtc_high_irq() - Handles CRTC interrupt
444 * @interrupt_params: interrupt parameters
445 *
446 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
447 * event handler.
448 */
449 static void dm_crtc_high_irq(void *interrupt_params)
450 {
451 struct common_irq_params *irq_params = interrupt_params;
452 struct amdgpu_device *adev = irq_params->adev;
453 struct amdgpu_crtc *acrtc;
454 struct dm_crtc_state *acrtc_state;
455 unsigned long flags;
456
457 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
458
459 if (acrtc) {
460 acrtc_state = to_dm_crtc_state(acrtc->base.state);
461
462 DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
463 acrtc->crtc_id,
464 amdgpu_dm_vrr_active(acrtc_state));
465
466 /* Core vblank handling at start of front-porch is only possible
467 * in non-vrr mode, as only there vblank timestamping will give
468 * valid results while done in front-porch. Otherwise defer it
469 * to dm_vupdate_high_irq after end of front-porch.
470 */
471 if (!amdgpu_dm_vrr_active(acrtc_state))
472 drm_crtc_handle_vblank(&acrtc->base);
473
474 /* Following stuff must happen at start of vblank, for crc
475 * computation and below-the-range btr support in vrr mode.
476 */
477 amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
478
479 if (acrtc_state->stream && adev->family >= AMDGPU_FAMILY_AI &&
480 acrtc_state->vrr_params.supported &&
481 acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
482 spin_lock_irqsave(&adev->ddev->event_lock, flags);
483 mod_freesync_handle_v_update(
484 adev->dm.freesync_module,
485 acrtc_state->stream,
486 &acrtc_state->vrr_params);
487
488 dc_stream_adjust_vmin_vmax(
489 adev->dm.dc,
490 acrtc_state->stream,
491 &acrtc_state->vrr_params.adjust);
492 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
493 }
494 }
495 }
496
497 #if defined(CONFIG_DRM_AMD_DC_DCN)
498 /**
499 * dm_dcn_crtc_high_irq() - Handles VStartup interrupt for DCN generation ASICs
500 * @interrupt_params: interrupt parameters
501 *
502 * Notify DRM's vblank event handler at VSTARTUP
503 *
504 * Unlike DCE hardware, we trigger the handler at VSTARTUP, at which point:
505 * * We are close enough to VUPDATE - the point of no return for hw
506 * * We are in the fixed portion of variable front porch when vrr is enabled
507 * * We are before VUPDATE, where double-buffered vrr registers are swapped
508 *
509 * It is therefore the correct place to signal vblank, send user flip events,
510 * and update VRR.
511 */
512 static void dm_dcn_crtc_high_irq(void *interrupt_params)
513 {
514 struct common_irq_params *irq_params = interrupt_params;
515 struct amdgpu_device *adev = irq_params->adev;
516 struct amdgpu_crtc *acrtc;
517 struct dm_crtc_state *acrtc_state;
518 unsigned long flags;
519
520 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
521
522 if (!acrtc)
523 return;
524
525 acrtc_state = to_dm_crtc_state(acrtc->base.state);
526
527 DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
528 amdgpu_dm_vrr_active(acrtc_state),
529 acrtc_state->active_planes);
530
531 amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
532 drm_crtc_handle_vblank(&acrtc->base);
533
534 spin_lock_irqsave(&adev->ddev->event_lock, flags);
535
536 if (acrtc_state->vrr_params.supported &&
537 acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
538 mod_freesync_handle_v_update(
539 adev->dm.freesync_module,
540 acrtc_state->stream,
541 &acrtc_state->vrr_params);
542
543 dc_stream_adjust_vmin_vmax(
544 adev->dm.dc,
545 acrtc_state->stream,
546 &acrtc_state->vrr_params.adjust);
547 }
548
549 /*
550 * If there aren't any active_planes then the DCN HUBP may be clock-gated.
551 * In that case, pageflip completion interrupts won't fire and pageflip
552 * completion events won't get delivered. Prevent this by sending
553 * pending pageflip events from here if a flip is still pending.
554 *
555 * If any planes are enabled, use dm_pflip_high_irq() instead, to
556 * avoid race conditions between flip programming and completion,
557 * which could cause too early flip completion events.
558 */
559 if (acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
560 acrtc_state->active_planes == 0) {
561 if (acrtc->event) {
562 drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
563 acrtc->event = NULL;
564 drm_crtc_vblank_put(&acrtc->base);
565 }
566 acrtc->pflip_status = AMDGPU_FLIP_NONE;
567 }
568
569 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
570 }
571 #endif
572
573 static int dm_set_clockgating_state(void *handle,
574 enum amd_clockgating_state state)
575 {
576 return 0;
577 }
578
579 static int dm_set_powergating_state(void *handle,
580 enum amd_powergating_state state)
581 {
582 return 0;
583 }
584
585 /* Prototypes of private functions */
586 static int dm_early_init(void *handle);
587
588 /* Allocate memory for FBC compressed data */
589 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
590 {
591 struct drm_device *dev = connector->dev;
592 struct amdgpu_device *adev = dev->dev_private;
593 struct dm_comressor_info *compressor = &adev->dm.compressor;
594 struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
595 struct drm_display_mode *mode;
596 unsigned long max_size = 0;
597
598 if (adev->dm.dc->fbc_compressor == NULL)
599 return;
600
601 if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
602 return;
603
604 if (compressor->bo_ptr)
605 return;
606
607
608 list_for_each_entry(mode, &connector->modes, head) {
609 if (max_size < mode->htotal * mode->vtotal)
610 max_size = mode->htotal * mode->vtotal;
611 }
612
613 if (max_size) {
614 int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
615 AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
616 &compressor->gpu_addr, &compressor->cpu_addr);
617
618 if (r)
619 DRM_ERROR("DM: Failed to initialize FBC\n");
620 else {
621 adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
622 DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
623 }
624
625 }
626
627 }
628
629 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
630 int pipe, bool *enabled,
631 unsigned char *buf, int max_bytes)
632 {
633 struct drm_device *dev = dev_get_drvdata(kdev);
634 struct amdgpu_device *adev = dev->dev_private;
635 struct drm_connector *connector;
636 struct drm_connector_list_iter conn_iter;
637 struct amdgpu_dm_connector *aconnector;
638 int ret = 0;
639
640 *enabled = false;
641
642 mutex_lock(&adev->dm.audio_lock);
643
644 drm_connector_list_iter_begin(dev, &conn_iter);
645 drm_for_each_connector_iter(connector, &conn_iter) {
646 aconnector = to_amdgpu_dm_connector(connector);
647 if (aconnector->audio_inst != port)
648 continue;
649
650 *enabled = true;
651 ret = drm_eld_size(connector->eld);
652 memcpy(buf, connector->eld, min(max_bytes, ret));
653
654 break;
655 }
656 drm_connector_list_iter_end(&conn_iter);
657
658 mutex_unlock(&adev->dm.audio_lock);
659
660 DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
661
662 return ret;
663 }
664
665 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
666 .get_eld = amdgpu_dm_audio_component_get_eld,
667 };
668
669 static int amdgpu_dm_audio_component_bind(struct device *kdev,
670 struct device *hda_kdev, void *data)
671 {
672 struct drm_device *dev = dev_get_drvdata(kdev);
673 struct amdgpu_device *adev = dev->dev_private;
674 struct drm_audio_component *acomp = data;
675
676 acomp->ops = &amdgpu_dm_audio_component_ops;
677 acomp->dev = kdev;
678 adev->dm.audio_component = acomp;
679
680 return 0;
681 }
682
683 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
684 struct device *hda_kdev, void *data)
685 {
686 struct drm_device *dev = dev_get_drvdata(kdev);
687 struct amdgpu_device *adev = dev->dev_private;
688 struct drm_audio_component *acomp = data;
689
690 acomp->ops = NULL;
691 acomp->dev = NULL;
692 adev->dm.audio_component = NULL;
693 }
694
695 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
696 .bind = amdgpu_dm_audio_component_bind,
697 .unbind = amdgpu_dm_audio_component_unbind,
698 };
699
700 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
701 {
702 int i, ret;
703
704 if (!amdgpu_audio)
705 return 0;
706
707 adev->mode_info.audio.enabled = true;
708
709 adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
710
711 for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
712 adev->mode_info.audio.pin[i].channels = -1;
713 adev->mode_info.audio.pin[i].rate = -1;
714 adev->mode_info.audio.pin[i].bits_per_sample = -1;
715 adev->mode_info.audio.pin[i].status_bits = 0;
716 adev->mode_info.audio.pin[i].category_code = 0;
717 adev->mode_info.audio.pin[i].connected = false;
718 adev->mode_info.audio.pin[i].id =
719 adev->dm.dc->res_pool->audios[i]->inst;
720 adev->mode_info.audio.pin[i].offset = 0;
721 }
722
723 ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
724 if (ret < 0)
725 return ret;
726
727 adev->dm.audio_registered = true;
728
729 return 0;
730 }
731
732 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
733 {
734 if (!amdgpu_audio)
735 return;
736
737 if (!adev->mode_info.audio.enabled)
738 return;
739
740 if (adev->dm.audio_registered) {
741 component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
742 adev->dm.audio_registered = false;
743 }
744
745 /* TODO: Disable audio? */
746
747 adev->mode_info.audio.enabled = false;
748 }
749
750 void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
751 {
752 struct drm_audio_component *acomp = adev->dm.audio_component;
753
754 if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
755 DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
756
757 acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
758 pin, -1);
759 }
760 }
761
762 static int dm_dmub_hw_init(struct amdgpu_device *adev)
763 {
764 const struct dmcub_firmware_header_v1_0 *hdr;
765 struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
766 struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
767 const struct firmware *dmub_fw = adev->dm.dmub_fw;
768 struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
769 struct abm *abm = adev->dm.dc->res_pool->abm;
770 struct dmub_srv_hw_params hw_params;
771 enum dmub_status status;
772 const unsigned char *fw_inst_const, *fw_bss_data;
773 uint32_t i, fw_inst_const_size, fw_bss_data_size;
774 bool has_hw_support;
775
776 if (!dmub_srv)
777 /* DMUB isn't supported on the ASIC. */
778 return 0;
779
780 if (!fb_info) {
781 DRM_ERROR("No framebuffer info for DMUB service.\n");
782 return -EINVAL;
783 }
784
785 if (!dmub_fw) {
786 /* Firmware required for DMUB support. */
787 DRM_ERROR("No firmware provided for DMUB.\n");
788 return -EINVAL;
789 }
790
791 status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
792 if (status != DMUB_STATUS_OK) {
793 DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
794 return -EINVAL;
795 }
796
797 if (!has_hw_support) {
798 DRM_INFO("DMUB unsupported on ASIC\n");
799 return 0;
800 }
801
802 hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
803
804 fw_inst_const = dmub_fw->data +
805 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
806 PSP_HEADER_BYTES;
807
808 fw_bss_data = dmub_fw->data +
809 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
810 le32_to_cpu(hdr->inst_const_bytes);
811
812 /* Copy firmware and bios info into FB memory. */
813 fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
814 PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
815
816 fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
817
818 /* If adev->firmware.load_type == AMDGPU_FW_LOAD_PSP, then
819 * amdgpu_ucode_init_single_fw will load the dmub firmware's
820 * fw_inst_const part to cw0; otherwise, the firmware backdoor load
821 * is done here in dm_dmub_hw_init.
822 */
823 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
824 memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
825 fw_inst_const_size);
826 }
827
828 if (fw_bss_data_size)
829 memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
830 fw_bss_data, fw_bss_data_size);
831
832 /* Copy firmware bios info into FB memory. */
833 memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
834 adev->bios_size);
835
836 /* Reset regions that need to be reset. */
837 memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
838 fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
839
840 memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
841 fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
842
843 memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
844 fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
845
846 /* Initialize hardware. */
847 memset(&hw_params, 0, sizeof(hw_params));
848 hw_params.fb_base = adev->gmc.fb_start;
849 hw_params.fb_offset = adev->gmc.aper_base;
850
851 /* backdoor load firmware and trigger dmub running */
852 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
853 hw_params.load_inst_const = true;
854
855 if (dmcu)
856 hw_params.psp_version = dmcu->psp_version;
857
858 for (i = 0; i < fb_info->num_fb; ++i)
859 hw_params.fb[i] = &fb_info->fb[i];
860
861 status = dmub_srv_hw_init(dmub_srv, &hw_params);
862 if (status != DMUB_STATUS_OK) {
863 DRM_ERROR("Error initializing DMUB HW: %d\n", status);
864 return -EINVAL;
865 }
866
867 /* Wait for firmware load to finish. */
868 status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
869 if (status != DMUB_STATUS_OK)
870 DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
871
872 /* Init DMCU and ABM if available. */
873 if (dmcu && abm) {
874 dmcu->funcs->dmcu_init(dmcu);
875 abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
876 }
877
878 adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
879 if (!adev->dm.dc->ctx->dmub_srv) {
880 DRM_ERROR("Couldn't allocate DC DMUB server!\n");
881 return -ENOMEM;
882 }
883
884 DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
885 adev->dm.dmcub_fw_version);
886
887 return 0;
888 }
889
890 static int amdgpu_dm_init(struct amdgpu_device *adev)
891 {
892 struct dc_init_data init_data;
893 #ifdef CONFIG_DRM_AMD_DC_HDCP
894 struct dc_callback_init init_params;
895 #endif
896 int r;
897
898 adev->dm.ddev = adev->ddev;
899 adev->dm.adev = adev;
900
901 /* Zero all the fields */
902 memset(&init_data, 0, sizeof(init_data));
903 #ifdef CONFIG_DRM_AMD_DC_HDCP
904 memset(&init_params, 0, sizeof(init_params));
905 #endif
906
907 mutex_init(&adev->dm.dc_lock);
908 mutex_init(&adev->dm.audio_lock);
909
910 if (amdgpu_dm_irq_init(adev)) {
911 DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
912 goto error;
913 }
914
915 init_data.asic_id.chip_family = adev->family;
916
917 init_data.asic_id.pci_revision_id = adev->pdev->revision;
918 init_data.asic_id.hw_internal_rev = adev->external_rev_id;
919
920 init_data.asic_id.vram_width = adev->gmc.vram_width;
921 /* TODO: initialize init_data.asic_id.vram_type here!!!! */
922 init_data.asic_id.atombios_base_address =
923 adev->mode_info.atom_context->bios;
924
925 init_data.driver = adev;
926
927 adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
928
929 if (!adev->dm.cgs_device) {
930 DRM_ERROR("amdgpu: failed to create cgs device.\n");
931 goto error;
932 }
933
934 init_data.cgs_device = adev->dm.cgs_device;
935
936 init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
937
938 switch (adev->asic_type) {
939 case CHIP_CARRIZO:
940 case CHIP_STONEY:
941 case CHIP_RAVEN:
942 case CHIP_RENOIR:
943 init_data.flags.gpu_vm_support = true;
944 break;
945 default:
946 break;
947 }
948
949 if (amdgpu_dc_feature_mask & DC_FBC_MASK)
950 init_data.flags.fbc_support = true;
951
952 if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
953 init_data.flags.multi_mon_pp_mclk_switch = true;
954
955 if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
956 init_data.flags.disable_fractional_pwm = true;
957
958 init_data.flags.power_down_display_on_boot = true;
959
960 init_data.soc_bounding_box = adev->dm.soc_bounding_box;
961
962 /* Display Core create. */
963 adev->dm.dc = dc_create(&init_data);
964
965 if (adev->dm.dc) {
966 DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
967 } else {
968 DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
969 goto error;
970 }
971
972 r = dm_dmub_hw_init(adev);
973 if (r) {
974 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
975 goto error;
976 }
977
978 dc_hardware_init(adev->dm.dc);
979
980 adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
981 if (!adev->dm.freesync_module) {
982 DRM_ERROR(
983 "amdgpu: failed to initialize freesync_module.\n");
984 } else
985 DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
986 adev->dm.freesync_module);
987
988 amdgpu_dm_init_color_mod();
989
990 #ifdef CONFIG_DRM_AMD_DC_HDCP
991 if (adev->asic_type >= CHIP_RAVEN) {
992 adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
993
994 if (!adev->dm.hdcp_workqueue)
995 DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
996 else
997 DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
998
999 dc_init_callbacks(adev->dm.dc, &init_params);
1000 }
1001 #endif
1002 if (amdgpu_dm_initialize_drm_device(adev)) {
1003 DRM_ERROR(
1004 "amdgpu: failed to initialize sw for display support.\n");
1005 goto error;
1006 }
1007
1008 /* Update the actual used number of crtc */
1009 adev->mode_info.num_crtc = adev->dm.display_indexes_num;
1010
1011 /* TODO: Add_display_info? */
1012
1013 /* TODO use dynamic cursor width */
1014 adev->ddev->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1015 adev->ddev->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1016
1017 if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
1018 DRM_ERROR(
1019 "amdgpu: failed to initialize sw for display support.\n");
1020 goto error;
1021 }
1022
1023 DRM_DEBUG_DRIVER("KMS initialized.\n");
1024
1025 return 0;
1026 error:
1027 amdgpu_dm_fini(adev);
1028
1029 return -EINVAL;
1030 }
1031
1032 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1033 {
1034 amdgpu_dm_audio_fini(adev);
1035
1036 amdgpu_dm_destroy_drm_device(&adev->dm);
1037
1038 #ifdef CONFIG_DRM_AMD_DC_HDCP
1039 if (adev->dm.hdcp_workqueue) {
1040 hdcp_destroy(adev->dm.hdcp_workqueue);
1041 adev->dm.hdcp_workqueue = NULL;
1042 }
1043
1044 if (adev->dm.dc)
1045 dc_deinit_callbacks(adev->dm.dc);
1046 #endif
1047 if (adev->dm.dc->ctx->dmub_srv) {
1048 dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1049 adev->dm.dc->ctx->dmub_srv = NULL;
1050 }
1051
1052 if (adev->dm.dmub_bo)
1053 amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1054 &adev->dm.dmub_bo_gpu_addr,
1055 &adev->dm.dmub_bo_cpu_addr);
1056
1057 /* DC Destroy TODO: Replace destroy DAL */
1058 if (adev->dm.dc)
1059 dc_destroy(&adev->dm.dc);
1060 /*
1061 * TODO: pageflip, vblank interrupt
1062 *
1063 * amdgpu_dm_irq_fini(adev);
1064 */
1065
1066 if (adev->dm.cgs_device) {
1067 amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1068 adev->dm.cgs_device = NULL;
1069 }
1070 if (adev->dm.freesync_module) {
1071 mod_freesync_destroy(adev->dm.freesync_module);
1072 adev->dm.freesync_module = NULL;
1073 }
1074
1075 mutex_destroy(&adev->dm.audio_lock);
1076 mutex_destroy(&adev->dm.dc_lock);
1077
1078 return;
1079 }
1080
1081 static int load_dmcu_fw(struct amdgpu_device *adev)
1082 {
1083 const char *fw_name_dmcu = NULL;
1084 int r;
1085 const struct dmcu_firmware_header_v1_0 *hdr;
1086
1087 switch (adev->asic_type) {
1088 case CHIP_BONAIRE:
1089 case CHIP_HAWAII:
1090 case CHIP_KAVERI:
1091 case CHIP_KABINI:
1092 case CHIP_MULLINS:
1093 case CHIP_TONGA:
1094 case CHIP_FIJI:
1095 case CHIP_CARRIZO:
1096 case CHIP_STONEY:
1097 case CHIP_POLARIS11:
1098 case CHIP_POLARIS10:
1099 case CHIP_POLARIS12:
1100 case CHIP_VEGAM:
1101 case CHIP_VEGA10:
1102 case CHIP_VEGA12:
1103 case CHIP_VEGA20:
1104 case CHIP_NAVI10:
1105 case CHIP_NAVI14:
1106 case CHIP_RENOIR:
1107 return 0;
1108 case CHIP_NAVI12:
1109 fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1110 break;
1111 case CHIP_RAVEN:
1112 if (ASICREV_IS_PICASSO(adev->external_rev_id))
1113 fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1114 else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1115 fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1116 else
1117 return 0;
1118 break;
1119 default:
1120 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1121 return -EINVAL;
1122 }
1123
1124 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1125 DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1126 return 0;
1127 }
1128
1129 r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1130 if (r == -ENOENT) {
1131 /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1132 DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1133 adev->dm.fw_dmcu = NULL;
1134 return 0;
1135 }
1136 if (r) {
1137 dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1138 fw_name_dmcu);
1139 return r;
1140 }
1141
1142 r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1143 if (r) {
1144 dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1145 fw_name_dmcu);
1146 release_firmware(adev->dm.fw_dmcu);
1147 adev->dm.fw_dmcu = NULL;
1148 return r;
1149 }
1150
1151 hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1152 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1153 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1154 adev->firmware.fw_size +=
1155 ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1156
1157 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1158 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1159 adev->firmware.fw_size +=
1160 ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1161
1162 adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1163
1164 DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1165
1166 return 0;
1167 }
1168
1169 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1170 {
1171 struct amdgpu_device *adev = ctx;
1172
1173 return dm_read_reg(adev->dm.dc->ctx, address);
1174 }
1175
1176 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1177 uint32_t value)
1178 {
1179 struct amdgpu_device *adev = ctx;
1180
1181 return dm_write_reg(adev->dm.dc->ctx, address, value);
1182 }
1183
1184 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1185 {
1186 struct dmub_srv_create_params create_params;
1187 struct dmub_srv_region_params region_params;
1188 struct dmub_srv_region_info region_info;
1189 struct dmub_srv_fb_params fb_params;
1190 struct dmub_srv_fb_info *fb_info;
1191 struct dmub_srv *dmub_srv;
1192 const struct dmcub_firmware_header_v1_0 *hdr;
1193 const char *fw_name_dmub;
1194 enum dmub_asic dmub_asic;
1195 enum dmub_status status;
1196 int r;
1197
1198 switch (adev->asic_type) {
1199 case CHIP_RENOIR:
1200 dmub_asic = DMUB_ASIC_DCN21;
1201 fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1202 break;
1203
1204 default:
1205 /* ASIC doesn't support DMUB. */
1206 return 0;
1207 }
1208
1209 r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1210 if (r) {
1211 DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1212 return 0;
1213 }
1214
1215 r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1216 if (r) {
1217 DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1218 return 0;
1219 }
1220
1221 hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1222
1223 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1224 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1225 AMDGPU_UCODE_ID_DMCUB;
1226 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1227 adev->dm.dmub_fw;
1228 adev->firmware.fw_size +=
1229 ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1230
1231 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1232 adev->dm.dmcub_fw_version);
1233 }
1234
1235 adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1236
1237 adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1238 dmub_srv = adev->dm.dmub_srv;
1239
1240 if (!dmub_srv) {
1241 DRM_ERROR("Failed to allocate DMUB service!\n");
1242 return -ENOMEM;
1243 }
1244
1245 memset(&create_params, 0, sizeof(create_params));
1246 create_params.user_ctx = adev;
1247 create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1248 create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1249 create_params.asic = dmub_asic;
1250
1251 /* Create the DMUB service. */
1252 status = dmub_srv_create(dmub_srv, &create_params);
1253 if (status != DMUB_STATUS_OK) {
1254 DRM_ERROR("Error creating DMUB service: %d\n", status);
1255 return -EINVAL;
1256 }
1257
1258 /* Calculate the size of all the regions for the DMUB service. */
1259 memset(&region_params, 0, sizeof(region_params));
1260
1261 region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1262 PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1263 region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1264 region_params.vbios_size = adev->bios_size;
1265 region_params.fw_bss_data =
1266 adev->dm.dmub_fw->data +
1267 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1268 le32_to_cpu(hdr->inst_const_bytes);
1269 region_params.fw_inst_const =
1270 adev->dm.dmub_fw->data +
1271 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1272 PSP_HEADER_BYTES;
1273
1274 status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1275 &region_info);
1276
1277 if (status != DMUB_STATUS_OK) {
1278 DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1279 return -EINVAL;
1280 }
1281
1282 /*
1283 * Allocate a framebuffer based on the total size of all the regions.
1284 * TODO: Move this into GART.
1285 */
1286 r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1287 AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1288 &adev->dm.dmub_bo_gpu_addr,
1289 &adev->dm.dmub_bo_cpu_addr);
1290 if (r)
1291 return r;
1292
1293 /* Rebase the regions on the framebuffer address. */
1294 memset(&fb_params, 0, sizeof(fb_params));
1295 fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1296 fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1297 fb_params.region_info = &region_info;
1298
1299 adev->dm.dmub_fb_info =
1300 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1301 fb_info = adev->dm.dmub_fb_info;
1302
1303 if (!fb_info) {
1304 DRM_ERROR(
1305 "Failed to allocate framebuffer info for DMUB service!\n");
1306 return -ENOMEM;
1307 }
1308
1309 status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1310 if (status != DMUB_STATUS_OK) {
1311 DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1312 return -EINVAL;
1313 }
1314
1315 return 0;
1316 }
1317
1318 static int dm_sw_init(void *handle)
1319 {
1320 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1321 int r;
1322
1323 r = dm_dmub_sw_init(adev);
1324 if (r)
1325 return r;
1326
1327 return load_dmcu_fw(adev);
1328 }
1329
1330 static int dm_sw_fini(void *handle)
1331 {
1332 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1333
1334 kfree(adev->dm.dmub_fb_info);
1335 adev->dm.dmub_fb_info = NULL;
1336
1337 if (adev->dm.dmub_srv) {
1338 dmub_srv_destroy(adev->dm.dmub_srv);
1339 adev->dm.dmub_srv = NULL;
1340 }
1341
1342 if (adev->dm.dmub_fw) {
1343 release_firmware(adev->dm.dmub_fw);
1344 adev->dm.dmub_fw = NULL;
1345 }
1346
1347 if (adev->dm.fw_dmcu) {
1348 release_firmware(adev->dm.fw_dmcu);
1349 adev->dm.fw_dmcu = NULL;
1350 }
1351
1352 return 0;
1353 }
1354
1355 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
1356 {
1357 struct amdgpu_dm_connector *aconnector;
1358 struct drm_connector *connector;
1359 struct drm_connector_list_iter iter;
1360 int ret = 0;
1361
1362 drm_connector_list_iter_begin(dev, &iter);
1363 drm_for_each_connector_iter(connector, &iter) {
1364 aconnector = to_amdgpu_dm_connector(connector);
1365 if (aconnector->dc_link->type == dc_connection_mst_branch &&
1366 aconnector->mst_mgr.aux) {
1367 DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1368 aconnector,
1369 aconnector->base.base.id);
1370
1371 ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
1372 if (ret < 0) {
1373 DRM_ERROR("DM_MST: Failed to start MST\n");
1374 aconnector->dc_link->type =
1375 dc_connection_single;
1376 break;
1377 }
1378 }
1379 }
1380 drm_connector_list_iter_end(&iter);
1381
1382 return ret;
1383 }
1384
1385 static int dm_late_init(void *handle)
1386 {
1387 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1388
1389 struct dmcu_iram_parameters params;
1390 unsigned int linear_lut[16];
1391 int i;
1392 struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
1393 bool ret = false;
1394
1395 for (i = 0; i < 16; i++)
1396 linear_lut[i] = 0xFFFF * i / 15;
1397
1398 params.set = 0;
1399 params.backlight_ramping_start = 0xCCCC;
1400 params.backlight_ramping_reduction = 0xCCCCCCCC;
1401 params.backlight_lut_array_size = 16;
1402 params.backlight_lut_array = linear_lut;
1403
1404 /* Min backlight level after ABM reduction; don't allow below 1%:
1405 * 0xFFFF x 0.01 = 0x28F
1406 */
1407 params.min_abm_backlight = 0x28F;
1408
1409 /* todo will enable for navi10 */
1410 if (adev->asic_type <= CHIP_RAVEN) {
1411 ret = dmcu_load_iram(dmcu, params);
1412
1413 if (!ret)
1414 return -EINVAL;
1415 }
1416
1417 return detect_mst_link_for_all_connectors(adev->ddev);
1418 }
1419
1420 static void s3_handle_mst(struct drm_device *dev, bool suspend)
1421 {
1422 struct amdgpu_dm_connector *aconnector;
1423 struct drm_connector *connector;
1424 struct drm_connector_list_iter iter;
1425 struct drm_dp_mst_topology_mgr *mgr;
1426 int ret;
1427 bool need_hotplug = false;
1428
1429 drm_connector_list_iter_begin(dev, &iter);
1430 drm_for_each_connector_iter(connector, &iter) {
1431 aconnector = to_amdgpu_dm_connector(connector);
1432 if (aconnector->dc_link->type != dc_connection_mst_branch ||
1433 aconnector->mst_port)
1434 continue;
1435
1436 mgr = &aconnector->mst_mgr;
1437
1438 if (suspend) {
1439 drm_dp_mst_topology_mgr_suspend(mgr);
1440 } else {
1441 ret = drm_dp_mst_topology_mgr_resume(mgr, true);
1442 if (ret < 0) {
1443 drm_dp_mst_topology_mgr_set_mst(mgr, false);
1444 need_hotplug = true;
1445 }
1446 }
1447 }
1448 drm_connector_list_iter_end(&iter);
1449
1450 if (need_hotplug)
1451 drm_kms_helper_hotplug_event(dev);
1452 }
1453
1454 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1455 {
1456 struct smu_context *smu = &adev->smu;
1457 int ret = 0;
1458
1459 if (!is_support_sw_smu(adev))
1460 return 0;
1461
1462 /* This interface is for dGPU Navi1x. The Linux dc-pplib interface depends
1463 * on the Windows driver dc implementation.
1464 * For Navi1x, clock settings of dcn watermarks are fixed. The settings
1465 * should be passed to smu during boot up and resume from s3.
1466 * Boot up: dc calculates dcn watermark clock settings within dc_create,
1467 * dcn20_resource_construct
1468 * then call pplib functions below to pass the settings to smu:
1469 * smu_set_watermarks_for_clock_ranges
1470 * smu_set_watermarks_table
1471 * navi10_set_watermarks_table
1472 * smu_write_watermarks_table
1473 *
1474 * For Renoir, clock settings of dcn watermark are also fixed values.
1475 * dc has implemented a different flow for the Windows driver:
1476 * dc_hardware_init / dc_set_power_state
1477 * dcn10_init_hw
1478 * notify_wm_ranges
1479 * set_wm_ranges
1480 * -- Linux
1481 * smu_set_watermarks_for_clock_ranges
1482 * renoir_set_watermarks_table
1483 * smu_write_watermarks_table
1484 *
1485 * For Linux,
1486 * dc_hardware_init -> amdgpu_dm_init
1487 * dc_set_power_state --> dm_resume
1488 *
1489 * Therefore, this function applies to navi10/12/14 but not Renoir.
1490 *
1491 */
1492 switch (adev->asic_type) {
1493 case CHIP_NAVI10:
1494 case CHIP_NAVI14:
1495 case CHIP_NAVI12:
1496 break;
1497 default:
1498 return 0;
1499 }
1500
1501 mutex_lock(&smu->mutex);
1502
1503 /* pass data to smu controller */
1504 if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
1505 !(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
1506 ret = smu_write_watermarks_table(smu);
1507
1508 if (ret) {
1509 mutex_unlock(&smu->mutex);
1510 DRM_ERROR("Failed to update WMTABLE!\n");
1511 return ret;
1512 }
1513 smu->watermarks_bitmap |= WATERMARKS_LOADED;
1514 }
1515
1516 mutex_unlock(&smu->mutex);
1517
1518 return 0;
1519 }
1520
1521 /**
1522 * dm_hw_init() - Initialize DC device
1523 * @handle: The base driver device containing the amdgpu_dm device.
1524 *
1525 * Initialize the &struct amdgpu_display_manager device. This involves calling
1526 * the initializers of each DM component, then populating the struct with them.
1527 *
1528 * Although the function implies hardware initialization, both hardware and
1529 * software are initialized here. Splitting them out to their relevant init
1530 * hooks is a future TODO item.
1531 *
1532 * Some notable things that are initialized here:
1533 *
1534 * - Display Core, both software and hardware
1535 * - DC modules that we need (freesync and color management)
1536 * - DRM software states
1537 * - Interrupt sources and handlers
1538 * - Vblank support
1539 * - Debug FS entries, if enabled
1540 */
1541 static int dm_hw_init(void *handle)
1542 {
1543 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1544 /* Create DAL display manager */
1545 amdgpu_dm_init(adev);
1546 amdgpu_dm_hpd_init(adev);
1547
1548 return 0;
1549 }
1550
1551 /**
1552 * dm_hw_fini() - Teardown DC device
1553 * @handle: The base driver device containing the amdgpu_dm device.
1554 *
1555 * Teardown components within &struct amdgpu_display_manager that require
1556 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1557 * were loaded. Also flush IRQ workqueues and disable them.
1558 */
1559 static int dm_hw_fini(void *handle)
1560 {
1561 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1562
1563 amdgpu_dm_hpd_fini(adev);
1564
1565 amdgpu_dm_irq_fini(adev);
1566 amdgpu_dm_fini(adev);
1567 return 0;
1568 }
1569
1570 static int dm_suspend(void *handle)
1571 {
1572 struct amdgpu_device *adev = handle;
1573 struct amdgpu_display_manager *dm = &adev->dm;
1574 int ret = 0;
1575
1576 WARN_ON(adev->dm.cached_state);
1577 adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);
1578
1579 s3_handle_mst(adev->ddev, true);
1580
1581 amdgpu_dm_irq_suspend(adev);
1582
1583
1584 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
1585
1586 return ret;
1587 }
1588
1589 static struct amdgpu_dm_connector *
1590 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
1591 struct drm_crtc *crtc)
1592 {
1593 uint32_t i;
1594 struct drm_connector_state *new_con_state;
1595 struct drm_connector *connector;
1596 struct drm_crtc *crtc_from_state;
1597
1598 for_each_new_connector_in_state(state, connector, new_con_state, i) {
1599 crtc_from_state = new_con_state->crtc;
1600
1601 if (crtc_from_state == crtc)
1602 return to_amdgpu_dm_connector(connector);
1603 }
1604
1605 return NULL;
1606 }
1607
1608 static void emulated_link_detect(struct dc_link *link)
1609 {
1610 struct dc_sink_init_data sink_init_data = { 0 };
1611 struct display_sink_capability sink_caps = { 0 };
1612 enum dc_edid_status edid_status;
1613 struct dc_context *dc_ctx = link->ctx;
1614 struct dc_sink *sink = NULL;
1615 struct dc_sink *prev_sink = NULL;
1616
1617 link->type = dc_connection_none;
1618 prev_sink = link->local_sink;
1619
1620 if (prev_sink != NULL)
1621 dc_sink_retain(prev_sink);
1622
1623 switch (link->connector_signal) {
1624 case SIGNAL_TYPE_HDMI_TYPE_A: {
1625 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1626 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
1627 break;
1628 }
1629
1630 case SIGNAL_TYPE_DVI_SINGLE_LINK: {
1631 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1632 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
1633 break;
1634 }
1635
1636 case SIGNAL_TYPE_DVI_DUAL_LINK: {
1637 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1638 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
1639 break;
1640 }
1641
1642 case SIGNAL_TYPE_LVDS: {
1643 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1644 sink_caps.signal = SIGNAL_TYPE_LVDS;
1645 break;
1646 }
1647
1648 case SIGNAL_TYPE_EDP: {
1649 sink_caps.transaction_type =
1650 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1651 sink_caps.signal = SIGNAL_TYPE_EDP;
1652 break;
1653 }
1654
1655 case SIGNAL_TYPE_DISPLAY_PORT: {
1656 sink_caps.transaction_type =
1657 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1658 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
1659 break;
1660 }
1661
1662 default:
1663 DC_ERROR("Invalid connector type! signal:%d\n",
1664 link->connector_signal);
1665 return;
1666 }
1667
1668 sink_init_data.link = link;
1669 sink_init_data.sink_signal = sink_caps.signal;
1670
1671 sink = dc_sink_create(&sink_init_data);
1672 if (!sink) {
1673 DC_ERROR("Failed to create sink!\n");
1674 return;
1675 }
1676
1677 /* dc_sink_create returns a new reference */
1678 link->local_sink = sink;
1679
1680 edid_status = dm_helpers_read_local_edid(
1681 link->ctx,
1682 link,
1683 sink);
1684
1685 if (edid_status != EDID_OK)
1686 DC_ERROR("Failed to read EDID");
1687
1688 }
1689
1690 static int dm_resume(void *handle)
1691 {
1692 struct amdgpu_device *adev = handle;
1693 struct drm_device *ddev = adev->ddev;
1694 struct amdgpu_display_manager *dm = &adev->dm;
1695 struct amdgpu_dm_connector *aconnector;
1696 struct drm_connector *connector;
1697 struct drm_connector_list_iter iter;
1698 struct drm_crtc *crtc;
1699 struct drm_crtc_state *new_crtc_state;
1700 struct dm_crtc_state *dm_new_crtc_state;
1701 struct drm_plane *plane;
1702 struct drm_plane_state *new_plane_state;
1703 struct dm_plane_state *dm_new_plane_state;
1704 struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
1705 enum dc_connection_type new_connection_type = dc_connection_none;
1706 int i, r;
1707
1708 /* Recreate dc_state - DC invalidates it when setting power state to S3. */
1709 dc_release_state(dm_state->context);
1710 dm_state->context = dc_create_state(dm->dc);
1711 /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
1712 dc_resource_state_construct(dm->dc, dm_state->context);
1713
1714 /* Before powering on DC we need to re-initialize DMUB. */
1715 r = dm_dmub_hw_init(adev);
1716 if (r)
1717 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1718
1719 /* power on hardware */
1720 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
1721
1722 /* program HPD filter */
1723 dc_resume(dm->dc);
1724
1725 /*
1726 * early enable HPD Rx IRQ, should be done before set mode as short
1727 * pulse interrupts are used for MST
1728 */
1729 amdgpu_dm_irq_resume_early(adev);
1730
1731 /* On resume we need to rewrite the MSTM control bits to enable MST */
1732 s3_handle_mst(ddev, false);
1733
1734 /* Do detection */
1735 drm_connector_list_iter_begin(ddev, &iter);
1736 drm_for_each_connector_iter(connector, &iter) {
1737 aconnector = to_amdgpu_dm_connector(connector);
1738
1739 /*
1740 * this is the case when traversing through already created
1741 * MST connectors, should be skipped
1742 */
1743 if (aconnector->mst_port)
1744 continue;
1745
1746 mutex_lock(&aconnector->hpd_lock);
1747 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
1748 DRM_ERROR("KMS: Failed to detect connector\n");
1749
1750 if (aconnector->base.force && new_connection_type == dc_connection_none)
1751 emulated_link_detect(aconnector->dc_link);
1752 else
1753 dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
1754
1755 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
1756 aconnector->fake_enable = false;
1757
1758 if (aconnector->dc_sink)
1759 dc_sink_release(aconnector->dc_sink);
1760 aconnector->dc_sink = NULL;
1761 amdgpu_dm_update_connector_after_detect(aconnector);
1762 mutex_unlock(&aconnector->hpd_lock);
1763 }
1764 drm_connector_list_iter_end(&iter);
1765
1766 /* Force mode set in atomic commit */
1767 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
1768 new_crtc_state->active_changed = true;
1769
1770 /*
1771 * atomic_check is expected to create the dc states. We need to release
1772 * them here, since they were duplicated as part of the suspend
1773 * procedure.
1774 */
1775 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
1776 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
1777 if (dm_new_crtc_state->stream) {
1778 WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
1779 dc_stream_release(dm_new_crtc_state->stream);
1780 dm_new_crtc_state->stream = NULL;
1781 }
1782 }
1783
1784 for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
1785 dm_new_plane_state = to_dm_plane_state(new_plane_state);
1786 if (dm_new_plane_state->dc_state) {
1787 WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
1788 dc_plane_state_release(dm_new_plane_state->dc_state);
1789 dm_new_plane_state->dc_state = NULL;
1790 }
1791 }
1792
1793 drm_atomic_helper_resume(ddev, dm->cached_state);
1794
1795 dm->cached_state = NULL;
1796
1797 amdgpu_dm_irq_resume_late(adev);
1798
1799 amdgpu_dm_smu_write_watermarks_table(adev);
1800
1801 return 0;
1802 }
1803
1804 /**
1805 * DOC: DM Lifecycle
1806 *
1807 * DM (and consequently DC) is registered in the amdgpu base driver as a IP
1808 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
1809 * the base driver's device list to be initialized and torn down accordingly.
1810 *
1811 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
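 *
 * As a point of reference, the per-ASIC setup code in the base driver
 * typically registers this block with a call along the lines of
 * amdgpu_device_ip_block_add(adev, &dm_ip_block).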
1812 */
1813
1814 static const struct amd_ip_funcs amdgpu_dm_funcs = {
1815 .name = "dm",
1816 .early_init = dm_early_init,
1817 .late_init = dm_late_init,
1818 .sw_init = dm_sw_init,
1819 .sw_fini = dm_sw_fini,
1820 .hw_init = dm_hw_init,
1821 .hw_fini = dm_hw_fini,
1822 .suspend = dm_suspend,
1823 .resume = dm_resume,
1824 .is_idle = dm_is_idle,
1825 .wait_for_idle = dm_wait_for_idle,
1826 .check_soft_reset = dm_check_soft_reset,
1827 .soft_reset = dm_soft_reset,
1828 .set_clockgating_state = dm_set_clockgating_state,
1829 .set_powergating_state = dm_set_powergating_state,
1830 };
1831
1832 const struct amdgpu_ip_block_version dm_ip_block =
1833 {
1834 .type = AMD_IP_BLOCK_TYPE_DCE,
1835 .major = 1,
1836 .minor = 0,
1837 .rev = 0,
1838 .funcs = &amdgpu_dm_funcs,
1839 };
1840
1841
1842 /**
1843 * DOC: atomic
1844 *
1845 * *WIP*
1846 */
1847
1848 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
1849 .fb_create = amdgpu_display_user_framebuffer_create,
1850 .output_poll_changed = drm_fb_helper_output_poll_changed,
1851 .atomic_check = amdgpu_dm_atomic_check,
1852 .atomic_commit = amdgpu_dm_atomic_commit,
1853 };
1854
1855 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
1856 .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
1857 };
1858
1859 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
1860 {
1861 u32 max_cll, min_cll, max, min, q, r;
1862 struct amdgpu_dm_backlight_caps *caps;
1863 struct amdgpu_display_manager *dm;
1864 struct drm_connector *conn_base;
1865 struct amdgpu_device *adev;
1866 static const u8 pre_computed_values[] = {
1867 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
1868 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
1869
1870 if (!aconnector || !aconnector->dc_link)
1871 return;
1872
1873 conn_base = &aconnector->base;
1874 adev = conn_base->dev->dev_private;
1875 dm = &adev->dm;
1876 caps = &dm->backlight_caps;
1877 caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
1878 caps->aux_support = false;
1879 max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
1880 min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
1881
1882 if (caps->ext_caps->bits.oled == 1 ||
1883 caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
1884 caps->ext_caps->bits.hdr_aux_backlight_control == 1)
1885 caps->aux_support = true;
1886
1887 /* From the specification (CTA-861-G), the maximum luminance is
1888 * calculated as:
1889 * Luminance = 50*2**(CV/32)
1890 * where CV is a one-byte value.
1891 * Evaluating this expression directly would require floating-point
1892 * precision; to avoid that complexity, we take advantage of the fact
1893 * that CV is divided by a constant. From Euclid's division algorithm,
1894 * CV can be written as CV = 32*q + r. Substituting this into the
1895 * luminance expression gives 50*(2**q)*(2**(r/32)), so we only need
1896 * to pre-compute the values of 2**(r/32). The pre-computed values
1897 * were generated with the following Ruby line:
1898 * (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
1899 * The results of the above expression can be verified against
1900 * pre_computed_values.
1901 */
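/*
 * Worked example: max_cll = 100 gives q = 3 and r = 4, so
 * max = (1 << 3) * pre_computed_values[4] = 8 * 55 = 440,
 * close to the exact 50*2**(100/32) of about 436.
 */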
1902 q = max_cll >> 5;
1903 r = max_cll % 32;
1904 max = (1 << q) * pre_computed_values[r];
1905
1906 // min luminance: maxLum * (CV/255)^2 / 100
1907 q = DIV_ROUND_CLOSEST(min_cll, 255);
1908 min = max * DIV_ROUND_CLOSEST((q * q), 100);
1909
1910 caps->aux_max_input_signal = max;
1911 caps->aux_min_input_signal = min;
1912 }
1913
1914 void amdgpu_dm_update_connector_after_detect(
1915 struct amdgpu_dm_connector *aconnector)
1916 {
1917 struct drm_connector *connector = &aconnector->base;
1918 struct drm_device *dev = connector->dev;
1919 struct dc_sink *sink;
1920
1921 /* MST handled by drm_mst framework */
1922 if (aconnector->mst_mgr.mst_state == true)
1923 return;
1924
1925
1926 sink = aconnector->dc_link->local_sink;
1927 if (sink)
1928 dc_sink_retain(sink);
1929
1930 /*
1931 * An EDID-managed connector gets its first update only in the mode_valid hook,
1932 * and then the connector sink is set to either the fake or the physical sink,
1933 * depending on link status. Skip if this was already done during boot.
1934 */
1935 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
1936 && aconnector->dc_em_sink) {
1937
1938 /*
1939 * For S3 resume with a headless setup, use the emulated sink (dc_em_sink)
1940 * to fake a stream, because on resume connector->sink is set to NULL.
1941 */
1942 mutex_lock(&dev->mode_config.mutex);
1943
1944 if (sink) {
1945 if (aconnector->dc_sink) {
1946 amdgpu_dm_update_freesync_caps(connector, NULL);
1947 /*
1948 * The retain and release below are used to bump up the
1949 * refcount for the sink, because the link doesn't point to
1950 * it anymore after disconnect, so on the next crtc-to-connector
1951 * reshuffle by UMD we would otherwise get an unwanted dc_sink release.
1952 */
1953 dc_sink_release(aconnector->dc_sink);
1954 }
1955 aconnector->dc_sink = sink;
1956 dc_sink_retain(aconnector->dc_sink);
1957 amdgpu_dm_update_freesync_caps(connector,
1958 aconnector->edid);
1959 } else {
1960 amdgpu_dm_update_freesync_caps(connector, NULL);
1961 if (!aconnector->dc_sink) {
1962 aconnector->dc_sink = aconnector->dc_em_sink;
1963 dc_sink_retain(aconnector->dc_sink);
1964 }
1965 }
1966
1967 mutex_unlock(&dev->mode_config.mutex);
1968
1969 if (sink)
1970 dc_sink_release(sink);
1971 return;
1972 }
1973
1974 /*
1975 * TODO: temporary guard until a proper fix is found.
1976 * If this sink is an MST sink, we should not do anything.
1977 */
1978 if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
1979 dc_sink_release(sink);
1980 return;
1981 }
1982
1983 if (aconnector->dc_sink == sink) {
1984 /*
1985 * We got a DP short pulse (Link Loss, DP CTS, etc...).
1986 * Do nothing!!
1987 */
1988 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
1989 aconnector->connector_id);
1990 if (sink)
1991 dc_sink_release(sink);
1992 return;
1993 }
1994
1995 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
1996 aconnector->connector_id, aconnector->dc_sink, sink);
1997
1998 mutex_lock(&dev->mode_config.mutex);
1999
2000 /*
2001 * 1. Update status of the drm connector
2002 * 2. Send an event and let userspace tell us what to do
2003 */
2004 if (sink) {
2005 /*
2006 * TODO: check if we still need the S3 mode update workaround.
2007 * If yes, put it here.
2008 */
2009 if (aconnector->dc_sink)
2010 amdgpu_dm_update_freesync_caps(connector, NULL);
2011
2012 aconnector->dc_sink = sink;
2013 dc_sink_retain(aconnector->dc_sink);
2014 if (sink->dc_edid.length == 0) {
2015 aconnector->edid = NULL;
2016 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2017 } else {
2018 aconnector->edid =
2019 (struct edid *) sink->dc_edid.raw_edid;
2020
2021
2022 drm_connector_update_edid_property(connector,
2023 aconnector->edid);
2024 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2025 aconnector->edid);
2026 }
2027 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2028 update_connector_ext_caps(aconnector);
2029 } else {
2030 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2031 amdgpu_dm_update_freesync_caps(connector, NULL);
2032 drm_connector_update_edid_property(connector, NULL);
2033 aconnector->num_modes = 0;
2034 dc_sink_release(aconnector->dc_sink);
2035 aconnector->dc_sink = NULL;
2036 aconnector->edid = NULL;
2037 #ifdef CONFIG_DRM_AMD_DC_HDCP
2038 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2039 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2040 connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2041 #endif
2042 }
2043
2044 mutex_unlock(&dev->mode_config.mutex);
2045
2046 if (sink)
2047 dc_sink_release(sink);
2048 }
2049
2050 static void handle_hpd_irq(void *param)
2051 {
2052 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2053 struct drm_connector *connector = &aconnector->base;
2054 struct drm_device *dev = connector->dev;
2055 enum dc_connection_type new_connection_type = dc_connection_none;
2056 #ifdef CONFIG_DRM_AMD_DC_HDCP
2057 struct amdgpu_device *adev = dev->dev_private;
2058 #endif
2059
2060 /*
2061 * In case of failure or MST there is no need to update the connector status
2062 * or notify the OS, since (in the MST case) MST does this in its own context.
2063 */
2064 mutex_lock(&aconnector->hpd_lock);
2065
2066 #ifdef CONFIG_DRM_AMD_DC_HDCP
2067 if (adev->dm.hdcp_workqueue)
2068 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2069 #endif
2070 if (aconnector->fake_enable)
2071 aconnector->fake_enable = false;
2072
2073 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2074 DRM_ERROR("KMS: Failed to detect connector\n");
2075
2076 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2077 emulated_link_detect(aconnector->dc_link);
2078
2079
2080 drm_modeset_lock_all(dev);
2081 dm_restore_drm_connector_state(dev, connector);
2082 drm_modeset_unlock_all(dev);
2083
2084 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2085 drm_kms_helper_hotplug_event(dev);
2086
2087 } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2088 amdgpu_dm_update_connector_after_detect(aconnector);
2089
2090
2091 drm_modeset_lock_all(dev);
2092 dm_restore_drm_connector_state(dev, connector);
2093 drm_modeset_unlock_all(dev);
2094
2095 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2096 drm_kms_helper_hotplug_event(dev);
2097 }
2098 mutex_unlock(&aconnector->hpd_lock);
2099
2100 }
2101
2102 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2103 {
2104 uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2105 uint8_t dret;
2106 bool new_irq_handled = false;
2107 int dpcd_addr;
2108 int dpcd_bytes_to_read;
2109
2110 const int max_process_count = 30;
2111 int process_count = 0;
2112
2113 const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2114
2115 if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2116 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2117 /* DPCD 0x200 - 0x201 for downstream IRQ */
2118 dpcd_addr = DP_SINK_COUNT;
2119 } else {
2120 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2121 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
2122 dpcd_addr = DP_SINK_COUNT_ESI;
2123 }
2124
2125 dret = drm_dp_dpcd_read(
2126 &aconnector->dm_dp_aux.aux,
2127 dpcd_addr,
2128 esi,
2129 dpcd_bytes_to_read);
2130
2131 while (dret == dpcd_bytes_to_read &&
2132 process_count < max_process_count) {
2133 uint8_t retry;
2134 dret = 0;
2135
2136 process_count++;
2137
2138 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2139 /* handle HPD short pulse irq */
2140 if (aconnector->mst_mgr.mst_state)
2141 drm_dp_mst_hpd_irq(
2142 &aconnector->mst_mgr,
2143 esi,
2144 &new_irq_handled);
2145
2146 if (new_irq_handled) {
2147 /* ACK at DPCD to notify downstream */
2148 const int ack_dpcd_bytes_to_write =
2149 dpcd_bytes_to_read - 1;
2150
2151 for (retry = 0; retry < 3; retry++) {
2152 uint8_t wret;
2153
2154 wret = drm_dp_dpcd_write(
2155 &aconnector->dm_dp_aux.aux,
2156 dpcd_addr + 1,
2157 &esi[1],
2158 ack_dpcd_bytes_to_write);
2159 if (wret == ack_dpcd_bytes_to_write)
2160 break;
2161 }
2162
2163 /* check if there is new irq to be handled */
2164 dret = drm_dp_dpcd_read(
2165 &aconnector->dm_dp_aux.aux,
2166 dpcd_addr,
2167 esi,
2168 dpcd_bytes_to_read);
2169
2170 new_irq_handled = false;
2171 } else {
2172 break;
2173 }
2174 }
2175
2176 if (process_count == max_process_count)
2177 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2178 }
2179
2180 static void handle_hpd_rx_irq(void *param)
2181 {
2182 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2183 struct drm_connector *connector = &aconnector->base;
2184 struct drm_device *dev = connector->dev;
2185 struct dc_link *dc_link = aconnector->dc_link;
2186 bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2187 enum dc_connection_type new_connection_type = dc_connection_none;
2188 #ifdef CONFIG_DRM_AMD_DC_HDCP
2189 union hpd_irq_data hpd_irq_data;
2190 struct amdgpu_device *adev = dev->dev_private;
2191
2192 memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2193 #endif
2194
2195 /*
2196 * TODO: Temporarily take the mutex so the hpd interrupt does not hit a gpio
2197 * conflict; once the i2c helper is implemented, this mutex should be
2198 * retired.
2199 */
2200 if (dc_link->type != dc_connection_mst_branch)
2201 mutex_lock(&aconnector->hpd_lock);
2202
2203
2204 #ifdef CONFIG_DRM_AMD_DC_HDCP
2205 if (dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL) &&
2206 #else
2207 if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
2208 #endif
2209 !is_mst_root_connector) {
2210 /* Downstream Port status changed. */
2211 if (!dc_link_detect_sink(dc_link, &new_connection_type))
2212 DRM_ERROR("KMS: Failed to detect connector\n");
2213
2214 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2215 emulated_link_detect(dc_link);
2216
2217 if (aconnector->fake_enable)
2218 aconnector->fake_enable = false;
2219
2220 amdgpu_dm_update_connector_after_detect(aconnector);
2221
2222
2223 drm_modeset_lock_all(dev);
2224 dm_restore_drm_connector_state(dev, connector);
2225 drm_modeset_unlock_all(dev);
2226
2227 drm_kms_helper_hotplug_event(dev);
2228 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2229
2230 if (aconnector->fake_enable)
2231 aconnector->fake_enable = false;
2232
2233 amdgpu_dm_update_connector_after_detect(aconnector);
2234
2235
2236 drm_modeset_lock_all(dev);
2237 dm_restore_drm_connector_state(dev, connector);
2238 drm_modeset_unlock_all(dev);
2239
2240 drm_kms_helper_hotplug_event(dev);
2241 }
2242 }
2243 #ifdef CONFIG_DRM_AMD_DC_HDCP
2244 if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2245 if (adev->dm.hdcp_workqueue)
2246 hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
2247 }
2248 #endif
2249 if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2250 (dc_link->type == dc_connection_mst_branch))
2251 dm_handle_hpd_rx_irq(aconnector);
2252
2253 if (dc_link->type != dc_connection_mst_branch) {
2254 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2255 mutex_unlock(&aconnector->hpd_lock);
2256 }
2257 }
2258
2259 static void register_hpd_handlers(struct amdgpu_device *adev)
2260 {
2261 struct drm_device *dev = adev->ddev;
2262 struct drm_connector *connector;
2263 struct amdgpu_dm_connector *aconnector;
2264 const struct dc_link *dc_link;
2265 struct dc_interrupt_params int_params = {0};
2266
2267 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2268 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2269
2270 list_for_each_entry(connector,
2271 &dev->mode_config.connector_list, head) {
2272
2273 aconnector = to_amdgpu_dm_connector(connector);
2274 dc_link = aconnector->dc_link;
2275
2276 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2277 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2278 int_params.irq_source = dc_link->irq_source_hpd;
2279
2280 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2281 handle_hpd_irq,
2282 (void *) aconnector);
2283 }
2284
2285 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2286
2287 /* Also register for DP short pulse (hpd_rx). */
2288 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2289 int_params.irq_source = dc_link->irq_source_hpd_rx;
2290
2291 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2292 handle_hpd_rx_irq,
2293 (void *) aconnector);
2294 }
2295 }
2296 }
2297
2298 /* Register IRQ sources and initialize IRQ callbacks */
2299 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2300 {
2301 struct dc *dc = adev->dm.dc;
2302 struct common_irq_params *c_irq_params;
2303 struct dc_interrupt_params int_params = {0};
2304 int r;
2305 int i;
2306 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2307
2308 if (adev->asic_type >= CHIP_VEGA10)
2309 client_id = SOC15_IH_CLIENTID_DCE;
2310
2311 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2312 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2313
2314 /*
2315 * Actions of amdgpu_irq_add_id():
2316 * 1. Register a set() function with base driver.
2317 * Base driver will call set() function to enable/disable an
2318 * interrupt in DC hardware.
2319 * 2. Register amdgpu_dm_irq_handler().
2320 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2321 * coming from DC hardware.
2322 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2323 * for acknowledging and handling. */
2324
2325 /* Use VBLANK interrupt */
2326 for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2327 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2328 if (r) {
2329 DRM_ERROR("Failed to add crtc irq id!\n");
2330 return r;
2331 }
2332
2333 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2334 int_params.irq_source =
2335 dc_interrupt_to_irq_source(dc, i, 0);
2336
2337 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2338
2339 c_irq_params->adev = adev;
2340 c_irq_params->irq_src = int_params.irq_source;
2341
2342 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2343 dm_crtc_high_irq, c_irq_params);
2344 }
2345
2346 /* Use VUPDATE interrupt */
2347 for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2348 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2349 if (r) {
2350 DRM_ERROR("Failed to add vupdate irq id!\n");
2351 return r;
2352 }
2353
2354 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2355 int_params.irq_source =
2356 dc_interrupt_to_irq_source(dc, i, 0);
2357
2358 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2359
2360 c_irq_params->adev = adev;
2361 c_irq_params->irq_src = int_params.irq_source;
2362
2363 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2364 dm_vupdate_high_irq, c_irq_params);
2365 }
2366
2367 /* Use GRPH_PFLIP interrupt */
2368 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2369 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2370 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2371 if (r) {
2372 DRM_ERROR("Failed to add page flip irq id!\n");
2373 return r;
2374 }
2375
2376 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2377 int_params.irq_source =
2378 dc_interrupt_to_irq_source(dc, i, 0);
2379
2380 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2381
2382 c_irq_params->adev = adev;
2383 c_irq_params->irq_src = int_params.irq_source;
2384
2385 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2386 dm_pflip_high_irq, c_irq_params);
2387
2388 }
2389
2390 /* HPD */
2391 r = amdgpu_irq_add_id(adev, client_id,
2392 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2393 if (r) {
2394 DRM_ERROR("Failed to add hpd irq id!\n");
2395 return r;
2396 }
2397
2398 register_hpd_handlers(adev);
2399
2400 return 0;
2401 }
2402
2403 #if defined(CONFIG_DRM_AMD_DC_DCN)
2404 /* Register IRQ sources and initialize IRQ callbacks */
2405 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
2406 {
2407 struct dc *dc = adev->dm.dc;
2408 struct common_irq_params *c_irq_params;
2409 struct dc_interrupt_params int_params = {0};
2410 int r;
2411 int i;
2412
2413 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2414 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2415
2416 /*
2417 * Actions of amdgpu_irq_add_id():
2418 * 1. Register a set() function with base driver.
2419 * Base driver will call set() function to enable/disable an
2420 * interrupt in DC hardware.
2421 * 2. Register amdgpu_dm_irq_handler().
2422 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2423 * coming from DC hardware.
2424 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2425 * for acknowledging and handling.
2426 */
2427
2428 /* Use VSTARTUP interrupt */
2429 for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
2430 i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
2431 i++) {
2432 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
2433
2434 if (r) {
2435 DRM_ERROR("Failed to add crtc irq id!\n");
2436 return r;
2437 }
2438
2439 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2440 int_params.irq_source =
2441 dc_interrupt_to_irq_source(dc, i, 0);
2442
2443 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2444
2445 c_irq_params->adev = adev;
2446 c_irq_params->irq_src = int_params.irq_source;
2447
2448 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2449 dm_dcn_crtc_high_irq, c_irq_params);
2450 }
2451
2452 /* Use GRPH_PFLIP interrupt */
2453 for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
2454 i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
2455 i++) {
2456 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
2457 if (r) {
2458 DRM_ERROR("Failed to add page flip irq id!\n");
2459 return r;
2460 }
2461
2462 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2463 int_params.irq_source =
2464 dc_interrupt_to_irq_source(dc, i, 0);
2465
2466 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2467
2468 c_irq_params->adev = adev;
2469 c_irq_params->irq_src = int_params.irq_source;
2470
2471 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2472 dm_pflip_high_irq, c_irq_params);
2473
2474 }
2475
2476 /* HPD */
2477 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
2478 &adev->hpd_irq);
2479 if (r) {
2480 DRM_ERROR("Failed to add hpd irq id!\n");
2481 return r;
2482 }
2483
2484 register_hpd_handlers(adev);
2485
2486 return 0;
2487 }
2488 #endif
2489
2490 /*
2491 * Acquires the lock for the atomic state object and returns
2492 * the new atomic state.
2493 *
2494 * This should only be called during atomic check.
2495 */
2496 static int dm_atomic_get_state(struct drm_atomic_state *state,
2497 struct dm_atomic_state **dm_state)
2498 {
2499 struct drm_device *dev = state->dev;
2500 struct amdgpu_device *adev = dev->dev_private;
2501 struct amdgpu_display_manager *dm = &adev->dm;
2502 struct drm_private_state *priv_state;
2503
2504 if (*dm_state)
2505 return 0;
2506
2507 priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
2508 if (IS_ERR(priv_state))
2509 return PTR_ERR(priv_state);
2510
2511 *dm_state = to_dm_atomic_state(priv_state);
2512
2513 return 0;
2514 }
2515
2516 struct dm_atomic_state *
2517 dm_atomic_get_new_state(struct drm_atomic_state *state)
2518 {
2519 struct drm_device *dev = state->dev;
2520 struct amdgpu_device *adev = dev->dev_private;
2521 struct amdgpu_display_manager *dm = &adev->dm;
2522 struct drm_private_obj *obj;
2523 struct drm_private_state *new_obj_state;
2524 int i;
2525
2526 for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
2527 if (obj->funcs == dm->atomic_obj.funcs)
2528 return to_dm_atomic_state(new_obj_state);
2529 }
2530
2531 return NULL;
2532 }
2533
2534 struct dm_atomic_state *
2535 dm_atomic_get_old_state(struct drm_atomic_state *state)
2536 {
2537 struct drm_device *dev = state->dev;
2538 struct amdgpu_device *adev = dev->dev_private;
2539 struct amdgpu_display_manager *dm = &adev->dm;
2540 struct drm_private_obj *obj;
2541 struct drm_private_state *old_obj_state;
2542 int i;
2543
2544 for_each_old_private_obj_in_state(state, obj, old_obj_state, i) {
2545 if (obj->funcs == dm->atomic_obj.funcs)
2546 return to_dm_atomic_state(old_obj_state);
2547 }
2548
2549 return NULL;
2550 }
2551
2552 static struct drm_private_state *
2553 dm_atomic_duplicate_state(struct drm_private_obj *obj)
2554 {
2555 struct dm_atomic_state *old_state, *new_state;
2556
2557 new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
2558 if (!new_state)
2559 return NULL;
2560
2561 __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
2562
2563 old_state = to_dm_atomic_state(obj->state);
2564
2565 if (old_state && old_state->context)
2566 new_state->context = dc_copy_state(old_state->context);
2567
2568 if (!new_state->context) {
2569 kfree(new_state);
2570 return NULL;
2571 }
2572
2573 return &new_state->base;
2574 }
2575
2576 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
2577 struct drm_private_state *state)
2578 {
2579 struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
2580
2581 if (dm_state && dm_state->context)
2582 dc_release_state(dm_state->context);
2583
2584 kfree(dm_state);
2585 }
2586
2587 static struct drm_private_state_funcs dm_atomic_state_funcs = {
2588 .atomic_duplicate_state = dm_atomic_duplicate_state,
2589 .atomic_destroy_state = dm_atomic_destroy_state,
2590 };
2591
2592 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
2593 {
2594 struct dm_atomic_state *state;
2595 int r;
2596
2597 adev->mode_info.mode_config_initialized = true;
2598
2599 adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
2600 adev->ddev->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
2601
2602 adev->ddev->mode_config.max_width = 16384;
2603 adev->ddev->mode_config.max_height = 16384;
2604
2605 adev->ddev->mode_config.preferred_depth = 24;
2606 adev->ddev->mode_config.prefer_shadow = 1;
2607 /* indicates support for immediate flip */
2608 adev->ddev->mode_config.async_page_flip = true;
2609
2610 adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
2611
2612 state = kzalloc(sizeof(*state), GFP_KERNEL);
2613 if (!state)
2614 return -ENOMEM;
2615
2616 state->context = dc_create_state(adev->dm.dc);
2617 if (!state->context) {
2618 kfree(state);
2619 return -ENOMEM;
2620 }
2621
2622 dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
2623
2624 drm_atomic_private_obj_init(adev->ddev,
2625 &adev->dm.atomic_obj,
2626 &state->base,
2627 &dm_atomic_state_funcs);
2628
2629 r = amdgpu_display_modeset_create_props(adev);
2630 if (r)
2631 return r;
2632
2633 r = amdgpu_dm_audio_init(adev);
2634 if (r)
2635 return r;
2636
2637 return 0;
2638 }
2639
2640 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
2641 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
2642 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
2643
2644 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
2645 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
2646
2647 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
2648 {
2649 #if defined(CONFIG_ACPI)
2650 struct amdgpu_dm_backlight_caps caps;
2651
2652 if (dm->backlight_caps.caps_valid)
2653 return;
2654
2655 amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
2656 if (caps.caps_valid) {
2657 dm->backlight_caps.caps_valid = true;
2658 if (caps.aux_support)
2659 return;
2660 dm->backlight_caps.min_input_signal = caps.min_input_signal;
2661 dm->backlight_caps.max_input_signal = caps.max_input_signal;
2662 } else {
2663 dm->backlight_caps.min_input_signal =
2664 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
2665 dm->backlight_caps.max_input_signal =
2666 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
2667 }
2668 #else
2669 if (dm->backlight_caps.aux_support)
2670 return;
2671
2672 dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
2673 dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
2674 #endif
2675 }
2676
2677 static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness)
2678 {
2679 bool rc;
2680
2681 if (!link)
2682 return 1;
2683
2684 rc = dc_link_set_backlight_level_nits(link, true, brightness,
2685 AUX_BL_DEFAULT_TRANSITION_TIME_MS);
2686
2687 return rc ? 0 : 1;
2688 }
2689
2690 static u32 convert_brightness(const struct amdgpu_dm_backlight_caps *caps,
2691 const uint32_t user_brightness)
2692 {
2693 u32 min, max, conversion_pace;
2694 u32 brightness = user_brightness;
2695
2696 if (!caps)
2697 goto out;
2698
2699 if (!caps->aux_support) {
2700 max = caps->max_input_signal;
2701 min = caps->min_input_signal;
2702 /*
2703 * The brightness input is in the range 0-255.
2704 * It needs to be rescaled to lie between the
2705 * requested min and max input signal.
2706 * It also needs to be scaled up by 0x101 to
2707 * match the DC interface, which has a range of
2708 * 0 to 0xffff.
2709 */
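/*
 * Worked example with the driver defaults (min = 12, max = 255):
 * user_brightness = 0   maps to 12 * 0x101 = 3084
 * user_brightness = 255 maps to 257 * 243 + 3084 = 65535 (0xffff)
 * so the 0-255 input maps onto [min * 0x101, 0xffff].
 */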
2710 conversion_pace = 0x101;
2711 brightness =
2712 user_brightness
2713 * conversion_pace
2714 * (max - min)
2715 / AMDGPU_MAX_BL_LEVEL
2716 + min * conversion_pace;
2717 } else {
2718 /* TODO
2719 * We are doing a linear interpolation here, which is OK but
2720 * does not provide the optimal result. We probably want
2721 * something close to the Perceptual Quantizer (PQ) curve.
2722 */
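/*
 * This is a plain linear blend between the AUX limits:
 * user_brightness = 0                   yields min * 1000 millinits
 * user_brightness = AMDGPU_MAX_BL_LEVEL yields max * 1000 millinits
 */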
2723 max = caps->aux_max_input_signal;
2724 min = caps->aux_min_input_signal;
2725
2726 brightness = (AMDGPU_MAX_BL_LEVEL - user_brightness) * min
2727 + user_brightness * max;
2728 // Multiply the value by 1000 since we use millinits
2729 brightness *= 1000;
2730 brightness = DIV_ROUND_CLOSEST(brightness, AMDGPU_MAX_BL_LEVEL);
2731 }
2732
2733 out:
2734 return brightness;
2735 }
2736
2737 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
2738 {
2739 struct amdgpu_display_manager *dm = bl_get_data(bd);
2740 struct amdgpu_dm_backlight_caps caps;
2741 struct dc_link *link = NULL;
2742 u32 brightness;
2743 bool rc;
2744
2745 amdgpu_dm_update_backlight_caps(dm);
2746 caps = dm->backlight_caps;
2747
2748 link = (struct dc_link *)dm->backlight_link;
2749
2750 brightness = convert_brightness(&caps, bd->props.brightness);
2751 // Change brightness based on AUX property
2752 if (caps.aux_support)
2753 return set_backlight_via_aux(link, brightness);
2754
2755 rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
2756
2757 return rc ? 0 : 1;
2758 }
2759
2760 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
2761 {
2762 struct amdgpu_display_manager *dm = bl_get_data(bd);
2763 int ret = dc_link_get_backlight_level(dm->backlight_link);
2764
2765 if (ret == DC_ERROR_UNEXPECTED)
2766 return bd->props.brightness;
2767 return ret;
2768 }
2769
2770 static const struct backlight_ops amdgpu_dm_backlight_ops = {
2771 .options = BL_CORE_SUSPENDRESUME,
2772 .get_brightness = amdgpu_dm_backlight_get_brightness,
2773 .update_status = amdgpu_dm_backlight_update_status,
2774 };
2775
2776 static void
2777 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
2778 {
2779 char bl_name[16];
2780 struct backlight_properties props = { 0 };
2781
2782 amdgpu_dm_update_backlight_caps(dm);
2783
2784 props.max_brightness = AMDGPU_MAX_BL_LEVEL;
2785 props.brightness = AMDGPU_MAX_BL_LEVEL;
2786 props.type = BACKLIGHT_RAW;
2787
2788 snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
2789 dm->adev->ddev->primary->index);
2790
2791 dm->backlight_dev = backlight_device_register(bl_name,
2792 dm->adev->ddev->dev,
2793 dm,
2794 &amdgpu_dm_backlight_ops,
2795 &props);
2796
2797 if (IS_ERR(dm->backlight_dev))
2798 DRM_ERROR("DM: Backlight registration failed!\n");
2799 else
2800 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
2801 }
2802
2803 #endif
2804
2805 static int initialize_plane(struct amdgpu_display_manager *dm,
2806 struct amdgpu_mode_info *mode_info, int plane_id,
2807 enum drm_plane_type plane_type,
2808 const struct dc_plane_cap *plane_cap)
2809 {
2810 struct drm_plane *plane;
2811 unsigned long possible_crtcs;
2812 int ret = 0;
2813
2814 plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
2815 if (!plane) {
2816 DRM_ERROR("KMS: Failed to allocate plane\n");
2817 return -ENOMEM;
2818 }
2819 plane->type = plane_type;
2820
2821 /*
2822 * HACK: IGT tests expect that the primary plane for a CRTC
2823 * can only have one possible CRTC. Only expose support for
2824 * any CRTC if the plane is not going to be used as a primary
2825 * plane for a CRTC - like overlay or underlay planes.
2826 */
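/*
 * possible_crtcs is a bitmask of CRTC indexes: a primary plane with
 * plane_id N is bound to CRTC N only (1 << N), while overlay/underlay
 * planes (plane_id >= max_streams) may be placed on any CRTC (0xff).
 */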
2827 possible_crtcs = 1 << plane_id;
2828 if (plane_id >= dm->dc->caps.max_streams)
2829 possible_crtcs = 0xff;
2830
2831 ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
2832
2833 if (ret) {
2834 DRM_ERROR("KMS: Failed to initialize plane\n");
2835 kfree(plane);
2836 return ret;
2837 }
2838
2839 if (mode_info)
2840 mode_info->planes[plane_id] = plane;
2841
2842 return ret;
2843 }
2844
2845
2846 static void register_backlight_device(struct amdgpu_display_manager *dm,
2847 struct dc_link *link)
2848 {
2849 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
2850 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
2851
2852 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
2853 link->type != dc_connection_none) {
2854 /*
2855 * Even if registration fails, we should continue with
2856 * DM initialization, because not having backlight control
2857 * is better than a black screen.
2858 */
2859 amdgpu_dm_register_backlight_device(dm);
2860
2861 if (dm->backlight_dev)
2862 dm->backlight_link = link;
2863 }
2864 #endif
2865 }
2866
2867
2868 /*
2869 * In this architecture, the association
2870 * connector -> encoder -> crtc
2871 * is not really required. The crtc and connector will hold the
2872 * display_index as an abstraction to use with the DAL component.
2873 *
2874 * Returns 0 on success
2875 */
2876 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
2877 {
2878 struct amdgpu_display_manager *dm = &adev->dm;
2879 int32_t i;
2880 struct amdgpu_dm_connector *aconnector = NULL;
2881 struct amdgpu_encoder *aencoder = NULL;
2882 struct amdgpu_mode_info *mode_info = &adev->mode_info;
2883 uint32_t link_cnt;
2884 int32_t primary_planes;
2885 enum dc_connection_type new_connection_type = dc_connection_none;
2886 const struct dc_plane_cap *plane;
2887
2888 link_cnt = dm->dc->caps.max_links;
2889 if (amdgpu_dm_mode_config_init(dm->adev)) {
2890 DRM_ERROR("DM: Failed to initialize mode config\n");
2891 return -EINVAL;
2892 }
2893
2894 /* There is one primary plane per CRTC */
2895 primary_planes = dm->dc->caps.max_streams;
2896 ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
2897
2898 /*
2899 * Initialize primary planes, implicit planes for legacy IOCTLS.
2900 * Order is reversed to match iteration order in atomic check.
2901 */
2902 for (i = (primary_planes - 1); i >= 0; i--) {
2903 plane = &dm->dc->caps.planes[i];
2904
2905 if (initialize_plane(dm, mode_info, i,
2906 DRM_PLANE_TYPE_PRIMARY, plane)) {
2907 DRM_ERROR("KMS: Failed to initialize primary plane\n");
2908 goto fail;
2909 }
2910 }
2911
2912 /*
2913 * Initialize overlay planes, index starting after primary planes.
2914 * These planes have a higher DRM index than the primary planes since
2915 * they should be considered as having a higher z-order.
2916 * Order is reversed to match iteration order in atomic check.
2917 *
2918 * Only support DCN for now, and only expose one so we don't encourage
2919 * userspace to use up all the pipes.
2920 */
2921 for (i = 0; i < dm->dc->caps.max_planes; ++i) {
2922 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
2923
2924 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
2925 continue;
2926
2927 if (!plane->blends_with_above || !plane->blends_with_below)
2928 continue;
2929
2930 if (!plane->pixel_format_support.argb8888)
2931 continue;
2932
2933 if (initialize_plane(dm, NULL, primary_planes + i,
2934 DRM_PLANE_TYPE_OVERLAY, plane)) {
2935 DRM_ERROR("KMS: Failed to initialize overlay plane\n");
2936 goto fail;
2937 }
2938
2939 /* Only create one overlay plane. */
2940 break;
2941 }
2942
2943 for (i = 0; i < dm->dc->caps.max_streams; i++)
2944 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
2945 DRM_ERROR("KMS: Failed to initialize crtc\n");
2946 goto fail;
2947 }
2948
2949 dm->display_indexes_num = dm->dc->caps.max_streams;
2950
2951 /* Loop over all connectors on the board */
2952 for (i = 0; i < link_cnt; i++) {
2953 struct dc_link *link = NULL;
2954
2955 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
2956 DRM_ERROR(
2957 "KMS: Cannot support more than %d display indexes\n",
2958 AMDGPU_DM_MAX_DISPLAY_INDEX);
2959 continue;
2960 }
2961
2962 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
2963 if (!aconnector)
2964 goto fail;
2965
2966 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
2967 if (!aencoder)
2968 goto fail;
2969
2970 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
2971 DRM_ERROR("KMS: Failed to initialize encoder\n");
2972 goto fail;
2973 }
2974
2975 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
2976 DRM_ERROR("KMS: Failed to initialize connector\n");
2977 goto fail;
2978 }
2979
2980 link = dc_get_link_at_index(dm->dc, i);
2981
2982 if (!dc_link_detect_sink(link, &new_connection_type))
2983 DRM_ERROR("KMS: Failed to detect connector\n");
2984
2985 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2986 emulated_link_detect(link);
2987 amdgpu_dm_update_connector_after_detect(aconnector);
2988
2989 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
2990 amdgpu_dm_update_connector_after_detect(aconnector);
2991 register_backlight_device(dm, link);
2992 if (amdgpu_dc_feature_mask & DC_PSR_MASK)
2993 amdgpu_dm_set_psr_caps(link);
2994 }
2995
2996
2997 }
2998
2999 /* Software is initialized. Now we can register interrupt handlers. */
3000 switch (adev->asic_type) {
3001 case CHIP_BONAIRE:
3002 case CHIP_HAWAII:
3003 case CHIP_KAVERI:
3004 case CHIP_KABINI:
3005 case CHIP_MULLINS:
3006 case CHIP_TONGA:
3007 case CHIP_FIJI:
3008 case CHIP_CARRIZO:
3009 case CHIP_STONEY:
3010 case CHIP_POLARIS11:
3011 case CHIP_POLARIS10:
3012 case CHIP_POLARIS12:
3013 case CHIP_VEGAM:
3014 case CHIP_VEGA10:
3015 case CHIP_VEGA12:
3016 case CHIP_VEGA20:
3017 if (dce110_register_irq_handlers(dm->adev)) {
3018 DRM_ERROR("DM: Failed to initialize IRQ\n");
3019 goto fail;
3020 }
3021 break;
3022 #if defined(CONFIG_DRM_AMD_DC_DCN)
3023 case CHIP_RAVEN:
3024 case CHIP_NAVI12:
3025 case CHIP_NAVI10:
3026 case CHIP_NAVI14:
3027 case CHIP_RENOIR:
3028 if (dcn10_register_irq_handlers(dm->adev)) {
3029 DRM_ERROR("DM: Failed to initialize IRQ\n");
3030 goto fail;
3031 }
3032 break;
3033 #endif
3034 default:
3035 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3036 goto fail;
3037 }
3038
3039 if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
3040 dm->dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
3041
3042 /* No userspace support. */
3043 dm->dc->debug.disable_tri_buf = true;
3044
3045 return 0;
3046 fail:
3047 kfree(aencoder);
3048 kfree(aconnector);
3049
3050 return -EINVAL;
3051 }
3052
3053 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3054 {
3055 drm_mode_config_cleanup(dm->ddev);
3056 drm_atomic_private_obj_fini(&dm->atomic_obj);
3057 return;
3058 }
3059
3060 /******************************************************************************
3061 * amdgpu_display_funcs functions
3062 *****************************************************************************/
3063
3064 /*
3065 * dm_bandwidth_update - program display watermarks
3066 *
3067 * @adev: amdgpu_device pointer
3068 *
3069 * Calculate and program the display watermarks and line buffer allocation.
3070 */
3071 static void dm_bandwidth_update(struct amdgpu_device *adev)
3072 {
3073 /* TODO: implement later */
3074 }
3075
3076 static const struct amdgpu_display_funcs dm_display_funcs = {
3077 .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3078 .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3079 .backlight_set_level = NULL, /* never called for DC */
3080 .backlight_get_level = NULL, /* never called for DC */
3081 .hpd_sense = NULL,/* called unconditionally */
3082 .hpd_set_polarity = NULL, /* called unconditionally */
3083 .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3084 .page_flip_get_scanoutpos =
3085 dm_crtc_get_scanoutpos,/* called unconditionally */
3086 .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3087 .add_connector = NULL, /* VBIOS parsing. DAL does it. */
3088 };
3089
3090 #if defined(CONFIG_DEBUG_KERNEL_DC)
3091
3092 static ssize_t s3_debug_store(struct device *device,
3093 struct device_attribute *attr,
3094 const char *buf,
3095 size_t count)
3096 {
3097 int ret;
3098 int s3_state;
3099 struct drm_device *drm_dev = dev_get_drvdata(device);
3100 struct amdgpu_device *adev = drm_dev->dev_private;
3101
3102 ret = kstrtoint(buf, 0, &s3_state);
3103
3104 if (ret == 0) {
3105 if (s3_state) {
3106 dm_resume(adev);
3107 drm_kms_helper_hotplug_event(adev->ddev);
3108 } else
3109 dm_suspend(adev);
3110 }
3111
3112 return ret == 0 ? count : 0;
3113 }
3114
3115 DEVICE_ATTR_WO(s3_debug);
3116
3117 #endif
3118
3119 static int dm_early_init(void *handle)
3120 {
3121 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3122
3123 switch (adev->asic_type) {
3124 case CHIP_BONAIRE:
3125 case CHIP_HAWAII:
3126 adev->mode_info.num_crtc = 6;
3127 adev->mode_info.num_hpd = 6;
3128 adev->mode_info.num_dig = 6;
3129 break;
3130 case CHIP_KAVERI:
3131 adev->mode_info.num_crtc = 4;
3132 adev->mode_info.num_hpd = 6;
3133 adev->mode_info.num_dig = 7;
3134 break;
3135 case CHIP_KABINI:
3136 case CHIP_MULLINS:
3137 adev->mode_info.num_crtc = 2;
3138 adev->mode_info.num_hpd = 6;
3139 adev->mode_info.num_dig = 6;
3140 break;
3141 case CHIP_FIJI:
3142 case CHIP_TONGA:
3143 adev->mode_info.num_crtc = 6;
3144 adev->mode_info.num_hpd = 6;
3145 adev->mode_info.num_dig = 7;
3146 break;
3147 case CHIP_CARRIZO:
3148 adev->mode_info.num_crtc = 3;
3149 adev->mode_info.num_hpd = 6;
3150 adev->mode_info.num_dig = 9;
3151 break;
3152 case CHIP_STONEY:
3153 adev->mode_info.num_crtc = 2;
3154 adev->mode_info.num_hpd = 6;
3155 adev->mode_info.num_dig = 9;
3156 break;
3157 case CHIP_POLARIS11:
3158 case CHIP_POLARIS12:
3159 adev->mode_info.num_crtc = 5;
3160 adev->mode_info.num_hpd = 5;
3161 adev->mode_info.num_dig = 5;
3162 break;
3163 case CHIP_POLARIS10:
3164 case CHIP_VEGAM:
3165 adev->mode_info.num_crtc = 6;
3166 adev->mode_info.num_hpd = 6;
3167 adev->mode_info.num_dig = 6;
3168 break;
3169 case CHIP_VEGA10:
3170 case CHIP_VEGA12:
3171 case CHIP_VEGA20:
3172 adev->mode_info.num_crtc = 6;
3173 adev->mode_info.num_hpd = 6;
3174 adev->mode_info.num_dig = 6;
3175 break;
3176 #if defined(CONFIG_DRM_AMD_DC_DCN)
3177 case CHIP_RAVEN:
3178 adev->mode_info.num_crtc = 4;
3179 adev->mode_info.num_hpd = 4;
3180 adev->mode_info.num_dig = 4;
3181 break;
3182 #endif
3183 case CHIP_NAVI10:
3184 case CHIP_NAVI12:
3185 adev->mode_info.num_crtc = 6;
3186 adev->mode_info.num_hpd = 6;
3187 adev->mode_info.num_dig = 6;
3188 break;
3189 case CHIP_NAVI14:
3190 adev->mode_info.num_crtc = 5;
3191 adev->mode_info.num_hpd = 5;
3192 adev->mode_info.num_dig = 5;
3193 break;
3194 case CHIP_RENOIR:
3195 adev->mode_info.num_crtc = 4;
3196 adev->mode_info.num_hpd = 4;
3197 adev->mode_info.num_dig = 4;
3198 break;
3199 default:
3200 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3201 return -EINVAL;
3202 }
3203
3204 amdgpu_dm_set_irq_funcs(adev);
3205
3206 if (adev->mode_info.funcs == NULL)
3207 adev->mode_info.funcs = &dm_display_funcs;
3208
3209 /*
3210 * Note: Do NOT change adev->audio_endpt_rreg and
3211 * adev->audio_endpt_wreg because they are initialised in
3212 * amdgpu_device_init()
3213 */
3214 #if defined(CONFIG_DEBUG_KERNEL_DC)
3215 device_create_file(
3216 adev->ddev->dev,
3217 &dev_attr_s3_debug);
3218 #endif
3219
3220 return 0;
3221 }
3222
3223 static bool modeset_required(struct drm_crtc_state *crtc_state,
3224 struct dc_stream_state *new_stream,
3225 struct dc_stream_state *old_stream)
3226 {
3227 if (!drm_atomic_crtc_needs_modeset(crtc_state))
3228 return false;
3229
3230 if (!crtc_state->enable)
3231 return false;
3232
3233 return crtc_state->active;
3234 }
3235
3236 static bool modereset_required(struct drm_crtc_state *crtc_state)
3237 {
3238 if (!drm_atomic_crtc_needs_modeset(crtc_state))
3239 return false;
3240
3241 return !crtc_state->enable || !crtc_state->active;
3242 }
3243
3244 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
3245 {
3246 drm_encoder_cleanup(encoder);
3247 kfree(encoder);
3248 }
3249
3250 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3251 .destroy = amdgpu_dm_encoder_destroy,
3252 };
3253
3254
3255 static int fill_dc_scaling_info(const struct drm_plane_state *state,
3256 struct dc_scaling_info *scaling_info)
3257 {
3258 int scale_w, scale_h;
3259
3260 memset(scaling_info, 0, sizeof(*scaling_info));
3261
3262 /* Source is fixed 16.16 but we ignore mantissa for now... */
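/*
 * For example, a src_x of (20 << 16) | 0x8000 encodes 20.5 in 16.16
 * fixed point; the shift below keeps 20 and drops the fraction.
 */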
3263 scaling_info->src_rect.x = state->src_x >> 16;
3264 scaling_info->src_rect.y = state->src_y >> 16;
3265
3266 scaling_info->src_rect.width = state->src_w >> 16;
3267 if (scaling_info->src_rect.width == 0)
3268 return -EINVAL;
3269
3270 scaling_info->src_rect.height = state->src_h >> 16;
3271 if (scaling_info->src_rect.height == 0)
3272 return -EINVAL;
3273
3274 scaling_info->dst_rect.x = state->crtc_x;
3275 scaling_info->dst_rect.y = state->crtc_y;
3276
3277 if (state->crtc_w == 0)
3278 return -EINVAL;
3279
3280 scaling_info->dst_rect.width = state->crtc_w;
3281
3282 if (state->crtc_h == 0)
3283 return -EINVAL;
3284
3285 scaling_info->dst_rect.height = state->crtc_h;
3286
3287 /* DRM doesn't specify clipping on destination output. */
3288 scaling_info->clip_rect = scaling_info->dst_rect;
3289
3290 /* TODO: Validate scaling per-format with DC plane caps */
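/*
 * The ratios below are in thousandths of the source size: the accepted
 * range of 250-16000 corresponds to a 1/4x downscale up to a 16x upscale.
 */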
3291 scale_w = scaling_info->dst_rect.width * 1000 /
3292 scaling_info->src_rect.width;
3293
3294 if (scale_w < 250 || scale_w > 16000)
3295 return -EINVAL;
3296
3297 scale_h = scaling_info->dst_rect.height * 1000 /
3298 scaling_info->src_rect.height;
3299
3300 if (scale_h < 250 || scale_h > 16000)
3301 return -EINVAL;
3302
3303 /*
3304 * The "scaling_quality" can be ignored for now; quality = 0 makes DC
3305 * assume reasonable defaults based on the format.
3306 */
3307
3308 return 0;
3309 }
3310
3311 static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
3312 uint64_t *tiling_flags, bool *tmz_surface)
3313 {
3314 struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
3315 int r = amdgpu_bo_reserve(rbo, false);
3316
3317 if (unlikely(r)) {
3318 /* Don't show error message when returning -ERESTARTSYS */
3319 if (r != -ERESTARTSYS)
3320 DRM_ERROR("Unable to reserve buffer: %d\n", r);
3321 return r;
3322 }
3323
3324 if (tiling_flags)
3325 amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
3326
3327 if (tmz_surface)
3328 *tmz_surface = amdgpu_bo_encrypted(rbo);
3329
3330 amdgpu_bo_unreserve(rbo);
3331
3332 return r;
3333 }
3334
3335 static inline uint64_t get_dcc_address(uint64_t address, uint64_t tiling_flags)
3336 {
3337 uint32_t offset = AMDGPU_TILING_GET(tiling_flags, DCC_OFFSET_256B);
3338
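/*
 * DCC_OFFSET_256B holds the offset of the DCC metadata in units of
 * 256 bytes; an offset of 0 means the buffer has no DCC metadata.
 */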
3339 return offset ? (address + offset * 256) : 0;
3340 }
3341
3342 static int
3343 fill_plane_dcc_attributes(struct amdgpu_device *adev,
3344 const struct amdgpu_framebuffer *afb,
3345 const enum surface_pixel_format format,
3346 const enum dc_rotation_angle rotation,
3347 const struct plane_size *plane_size,
3348 const union dc_tiling_info *tiling_info,
3349 const uint64_t info,
3350 struct dc_plane_dcc_param *dcc,
3351 struct dc_plane_address *address,
3352 bool force_disable_dcc)
3353 {
3354 struct dc *dc = adev->dm.dc;
3355 struct dc_dcc_surface_param input;
3356 struct dc_surface_dcc_cap output;
3357 uint32_t offset = AMDGPU_TILING_GET(info, DCC_OFFSET_256B);
3358 uint32_t i64b = AMDGPU_TILING_GET(info, DCC_INDEPENDENT_64B) != 0;
3359 uint64_t dcc_address;
3360
3361 memset(&input, 0, sizeof(input));
3362 memset(&output, 0, sizeof(output));
3363
3364 if (force_disable_dcc)
3365 return 0;
3366
3367 if (!offset)
3368 return 0;
3369
3370 if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3371 return 0;
3372
3373 if (!dc->cap_funcs.get_dcc_compression_cap)
3374 return -EINVAL;
3375
3376 input.format = format;
3377 input.surface_size.width = plane_size->surface_size.width;
3378 input.surface_size.height = plane_size->surface_size.height;
3379 input.swizzle_mode = tiling_info->gfx9.swizzle;
3380
3381 if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
3382 input.scan = SCAN_DIRECTION_HORIZONTAL;
3383 else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
3384 input.scan = SCAN_DIRECTION_VERTICAL;
3385
3386 if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
3387 return -EINVAL;
3388
3389 if (!output.capable)
3390 return -EINVAL;
3391
3392 if (i64b == 0 && output.grph.rgb.independent_64b_blks != 0)
3393 return -EINVAL;
3394
3395 dcc->enable = 1;
3396 dcc->meta_pitch =
3397 AMDGPU_TILING_GET(info, DCC_PITCH_MAX) + 1;
3398 dcc->independent_64b_blks = i64b;
3399
3400 dcc_address = get_dcc_address(afb->address, info);
3401 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
3402 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
3403
3404 return 0;
3405 }
3406
3407 static int
3408 fill_plane_buffer_attributes(struct amdgpu_device *adev,
3409 const struct amdgpu_framebuffer *afb,
3410 const enum surface_pixel_format format,
3411 const enum dc_rotation_angle rotation,
3412 const uint64_t tiling_flags,
3413 union dc_tiling_info *tiling_info,
3414 struct plane_size *plane_size,
3415 struct dc_plane_dcc_param *dcc,
3416 struct dc_plane_address *address,
3417 bool tmz_surface,
3418 bool force_disable_dcc)
3419 {
3420 const struct drm_framebuffer *fb = &afb->base;
3421 int ret;
3422
3423 memset(tiling_info, 0, sizeof(*tiling_info));
3424 memset(plane_size, 0, sizeof(*plane_size));
3425 memset(dcc, 0, sizeof(*dcc));
3426 memset(address, 0, sizeof(*address));
3427
3428 address->tmz_surface = tmz_surface;
3429
3430 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
3431 plane_size->surface_size.x = 0;
3432 plane_size->surface_size.y = 0;
3433 plane_size->surface_size.width = fb->width;
3434 plane_size->surface_size.height = fb->height;
3435 plane_size->surface_pitch =
3436 fb->pitches[0] / fb->format->cpp[0];
3437
3438 address->type = PLN_ADDR_TYPE_GRAPHICS;
3439 address->grph.addr.low_part = lower_32_bits(afb->address);
3440 address->grph.addr.high_part = upper_32_bits(afb->address);
3441 } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
3442 uint64_t chroma_addr = afb->address + fb->offsets[1];
3443
3444 plane_size->surface_size.x = 0;
3445 plane_size->surface_size.y = 0;
3446 plane_size->surface_size.width = fb->width;
3447 plane_size->surface_size.height = fb->height;
3448 plane_size->surface_pitch =
3449 fb->pitches[0] / fb->format->cpp[0];
3450
3451 plane_size->chroma_size.x = 0;
3452 plane_size->chroma_size.y = 0;
3453 /* TODO: set these based on surface format */
3454 plane_size->chroma_size.width = fb->width / 2;
3455 plane_size->chroma_size.height = fb->height / 2;
3456
3457 plane_size->chroma_pitch =
3458 fb->pitches[1] / fb->format->cpp[1];
3459
3460 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
3461 address->video_progressive.luma_addr.low_part =
3462 lower_32_bits(afb->address);
3463 address->video_progressive.luma_addr.high_part =
3464 upper_32_bits(afb->address);
3465 address->video_progressive.chroma_addr.low_part =
3466 lower_32_bits(chroma_addr);
3467 address->video_progressive.chroma_addr.high_part =
3468 upper_32_bits(chroma_addr);
3469 }
3470
3471 /* Fill GFX8 params */
3472 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
3473 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
3474
3475 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
3476 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
3477 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
3478 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
3479 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
3480
3481 /* XXX fix me for VI */
3482 tiling_info->gfx8.num_banks = num_banks;
3483 tiling_info->gfx8.array_mode =
3484 DC_ARRAY_2D_TILED_THIN1;
3485 tiling_info->gfx8.tile_split = tile_split;
3486 tiling_info->gfx8.bank_width = bankw;
3487 tiling_info->gfx8.bank_height = bankh;
3488 tiling_info->gfx8.tile_aspect = mtaspect;
3489 tiling_info->gfx8.tile_mode =
3490 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
3491 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
3492 == DC_ARRAY_1D_TILED_THIN1) {
3493 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
3494 }
3495
3496 tiling_info->gfx8.pipe_config =
3497 AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
3498
3499 if (adev->asic_type == CHIP_VEGA10 ||
3500 adev->asic_type == CHIP_VEGA12 ||
3501 adev->asic_type == CHIP_VEGA20 ||
3502 adev->asic_type == CHIP_NAVI10 ||
3503 adev->asic_type == CHIP_NAVI14 ||
3504 adev->asic_type == CHIP_NAVI12 ||
3505 adev->asic_type == CHIP_RENOIR ||
3506 adev->asic_type == CHIP_RAVEN) {
3507 /* Fill GFX9 params */
3508 tiling_info->gfx9.num_pipes =
3509 adev->gfx.config.gb_addr_config_fields.num_pipes;
3510 tiling_info->gfx9.num_banks =
3511 adev->gfx.config.gb_addr_config_fields.num_banks;
3512 tiling_info->gfx9.pipe_interleave =
3513 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
3514 tiling_info->gfx9.num_shader_engines =
3515 adev->gfx.config.gb_addr_config_fields.num_se;
3516 tiling_info->gfx9.max_compressed_frags =
3517 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
3518 tiling_info->gfx9.num_rb_per_se =
3519 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
3520 tiling_info->gfx9.swizzle =
3521 AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
3522 tiling_info->gfx9.shaderEnable = 1;
3523
3524 ret = fill_plane_dcc_attributes(adev, afb, format, rotation,
3525 plane_size, tiling_info,
3526 tiling_flags, dcc, address,
3527 force_disable_dcc);
3528 if (ret)
3529 return ret;
3530 }
3531
3532 return 0;
3533 }
3534
3535 static void
3536 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
3537 bool *per_pixel_alpha, bool *global_alpha,
3538 int *global_alpha_value)
3539 {
3540 *per_pixel_alpha = false;
3541 *global_alpha = false;
3542 *global_alpha_value = 0xff;
3543
3544 if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
3545 return;
3546
3547 if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
3548 static const uint32_t alpha_formats[] = {
3549 DRM_FORMAT_ARGB8888,
3550 DRM_FORMAT_RGBA8888,
3551 DRM_FORMAT_ABGR8888,
3552 };
3553 uint32_t format = plane_state->fb->format->format;
3554 unsigned int i;
3555
3556 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
3557 if (format == alpha_formats[i]) {
3558 *per_pixel_alpha = true;
3559 break;
3560 }
3561 }
3562 }
3563
3564 if (plane_state->alpha < 0xffff) {
3565 *global_alpha = true;
3566 *global_alpha_value = plane_state->alpha >> 8;
3567 }
3568 }
3569
3570 static int
3571 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
3572 const enum surface_pixel_format format,
3573 enum dc_color_space *color_space)
3574 {
3575 bool full_range;
3576
3577 *color_space = COLOR_SPACE_SRGB;
3578
3579 /* DRM color properties only affect non-RGB formats. */
3580 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3581 return 0;
3582
3583 full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
3584
3585 switch (plane_state->color_encoding) {
3586 case DRM_COLOR_YCBCR_BT601:
3587 if (full_range)
3588 *color_space = COLOR_SPACE_YCBCR601;
3589 else
3590 *color_space = COLOR_SPACE_YCBCR601_LIMITED;
3591 break;
3592
3593 case DRM_COLOR_YCBCR_BT709:
3594 if (full_range)
3595 *color_space = COLOR_SPACE_YCBCR709;
3596 else
3597 *color_space = COLOR_SPACE_YCBCR709_LIMITED;
3598 break;
3599
3600 case DRM_COLOR_YCBCR_BT2020:
3601 if (full_range)
3602 *color_space = COLOR_SPACE_2020_YCBCR;
3603 else
3604 return -EINVAL;
3605 break;
3606
3607 default:
3608 return -EINVAL;
3609 }
3610
3611 return 0;
3612 }
3613
3614 static int
3615 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
3616 const struct drm_plane_state *plane_state,
3617 const uint64_t tiling_flags,
3618 struct dc_plane_info *plane_info,
3619 struct dc_plane_address *address,
3620 bool tmz_surface,
3621 bool force_disable_dcc)
3622 {
3623 const struct drm_framebuffer *fb = plane_state->fb;
3624 const struct amdgpu_framebuffer *afb =
3625 to_amdgpu_framebuffer(plane_state->fb);
3626 struct drm_format_name_buf format_name;
3627 int ret;
3628
3629 memset(plane_info, 0, sizeof(*plane_info));
3630
3631 switch (fb->format->format) {
3632 case DRM_FORMAT_C8:
3633 plane_info->format =
3634 SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
3635 break;
3636 case DRM_FORMAT_RGB565:
3637 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
3638 break;
3639 case DRM_FORMAT_XRGB8888:
3640 case DRM_FORMAT_ARGB8888:
3641 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
3642 break;
3643 case DRM_FORMAT_XRGB2101010:
3644 case DRM_FORMAT_ARGB2101010:
3645 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
3646 break;
3647 case DRM_FORMAT_XBGR2101010:
3648 case DRM_FORMAT_ABGR2101010:
3649 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
3650 break;
3651 case DRM_FORMAT_XBGR8888:
3652 case DRM_FORMAT_ABGR8888:
3653 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
3654 break;
3655 case DRM_FORMAT_NV21:
3656 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
3657 break;
3658 case DRM_FORMAT_NV12:
3659 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
3660 break;
3661 case DRM_FORMAT_P010:
3662 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
3663 break;
3664 default:
3665 DRM_ERROR(
3666 "Unsupported screen format %s\n",
3667 drm_get_format_name(fb->format->format, &format_name));
3668 return -EINVAL;
3669 }
3670
3671 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
3672 case DRM_MODE_ROTATE_0:
3673 plane_info->rotation = ROTATION_ANGLE_0;
3674 break;
3675 case DRM_MODE_ROTATE_90:
3676 plane_info->rotation = ROTATION_ANGLE_90;
3677 break;
3678 case DRM_MODE_ROTATE_180:
3679 plane_info->rotation = ROTATION_ANGLE_180;
3680 break;
3681 case DRM_MODE_ROTATE_270:
3682 plane_info->rotation = ROTATION_ANGLE_270;
3683 break;
3684 default:
3685 plane_info->rotation = ROTATION_ANGLE_0;
3686 break;
3687 }
3688
3689 plane_info->visible = true;
3690 plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
3691
3692 plane_info->layer_index = 0;
3693
3694 ret = fill_plane_color_attributes(plane_state, plane_info->format,
3695 &plane_info->color_space);
3696 if (ret)
3697 return ret;
3698
3699 ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
3700 plane_info->rotation, tiling_flags,
3701 &plane_info->tiling_info,
3702 &plane_info->plane_size,
3703 &plane_info->dcc, address, tmz_surface,
3704 force_disable_dcc);
3705 if (ret)
3706 return ret;
3707
3708 fill_blending_from_plane_state(
3709 plane_state, &plane_info->per_pixel_alpha,
3710 &plane_info->global_alpha, &plane_info->global_alpha_value);
3711
3712 return 0;
3713 }
3714
3715 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
3716 struct dc_plane_state *dc_plane_state,
3717 struct drm_plane_state *plane_state,
3718 struct drm_crtc_state *crtc_state)
3719 {
3720 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
3721 const struct amdgpu_framebuffer *amdgpu_fb =
3722 to_amdgpu_framebuffer(plane_state->fb);
3723 struct dc_scaling_info scaling_info;
3724 struct dc_plane_info plane_info;
3725 uint64_t tiling_flags;
3726 int ret;
3727 bool tmz_surface = false;
3728 bool force_disable_dcc = false;
3729
3730 ret = fill_dc_scaling_info(plane_state, &scaling_info);
3731 if (ret)
3732 return ret;
3733
3734 dc_plane_state->src_rect = scaling_info.src_rect;
3735 dc_plane_state->dst_rect = scaling_info.dst_rect;
3736 dc_plane_state->clip_rect = scaling_info.clip_rect;
3737 dc_plane_state->scaling_quality = scaling_info.scaling_quality;
3738
3739 ret = get_fb_info(amdgpu_fb, &tiling_flags, &tmz_surface);
3740 if (ret)
3741 return ret;
3742
3743 force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
3744 ret = fill_dc_plane_info_and_addr(adev, plane_state, tiling_flags,
3745 &plane_info,
3746 &dc_plane_state->address,
3747 tmz_surface,
3748 force_disable_dcc);
3749 if (ret)
3750 return ret;
3751
3752 dc_plane_state->format = plane_info.format;
3753 dc_plane_state->color_space = plane_info.color_space;
3755 dc_plane_state->plane_size = plane_info.plane_size;
3756 dc_plane_state->rotation = plane_info.rotation;
3757 dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
3758 dc_plane_state->stereo_format = plane_info.stereo_format;
3759 dc_plane_state->tiling_info = plane_info.tiling_info;
3760 dc_plane_state->visible = plane_info.visible;
3761 dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
3762 dc_plane_state->global_alpha = plane_info.global_alpha;
3763 dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
3764 dc_plane_state->dcc = plane_info.dcc;
3765 	dc_plane_state->layer_index = plane_info.layer_index; /* currently always 0 */
3766
3767 /*
3768 * Always set input transfer function, since plane state is refreshed
3769 * every time.
3770 */
3771 ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
3772 if (ret)
3773 return ret;
3774
3775 return 0;
3776 }
3777
3778 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
3779 const struct dm_connector_state *dm_state,
3780 struct dc_stream_state *stream)
3781 {
3782 enum amdgpu_rmx_type rmx_type;
3783
3784 	struct rect src = { 0 }; /* viewport in composition space */
3785 struct rect dst = { 0 }; /* stream addressable area */
3786
3787 /* no mode. nothing to be done */
3788 if (!mode)
3789 return;
3790
3791 /* Full screen scaling by default */
3792 src.width = mode->hdisplay;
3793 src.height = mode->vdisplay;
3794 dst.width = stream->timing.h_addressable;
3795 dst.height = stream->timing.v_addressable;
3796
3797 if (dm_state) {
3798 rmx_type = dm_state->scaling;
3799 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
3800 if (src.width * dst.height <
3801 src.height * dst.width) {
3802 /* height needs less upscaling/more downscaling */
3803 dst.width = src.width *
3804 dst.height / src.height;
3805 } else {
3806 /* width needs less upscaling/more downscaling */
3807 dst.height = src.height *
3808 dst.width / src.width;
3809 }
3810 } else if (rmx_type == RMX_CENTER) {
3811 dst = src;
3812 }
3813
3814 dst.x = (stream->timing.h_addressable - dst.width) / 2;
3815 dst.y = (stream->timing.v_addressable - dst.height) / 2;
3816
3817 if (dm_state->underscan_enable) {
3818 dst.x += dm_state->underscan_hborder / 2;
3819 dst.y += dm_state->underscan_vborder / 2;
3820 dst.width -= dm_state->underscan_hborder;
3821 dst.height -= dm_state->underscan_vborder;
3822 }
3823 }
3824
3825 stream->src = src;
3826 stream->dst = dst;
3827
3828 DRM_DEBUG_DRIVER("Destination Rectangle x:%d y:%d width:%d height:%d\n",
3829 dst.x, dst.y, dst.width, dst.height);
3830
3831 }
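/*
 * Worked example for the scaling math above, assuming RMX_ASPECT and a
 * 1920x1080 mode on a stream whose addressable area is 2560x1600:
 * 1920 * 1600 >= 1080 * 2560, so the width needs less upscaling and
 * dst.height = 1080 * 2560 / 1920 = 1440. The result is a centered
 * 2560x1440 destination rectangle with dst.x = 0 and
 * dst.y = (1600 - 1440) / 2 = 80, i.e. 80 blank lines above and below.
 */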
3832
3833 static enum dc_color_depth
3834 convert_color_depth_from_display_info(const struct drm_connector *connector,
3835 const struct drm_connector_state *state,
3836 bool is_y420)
3837 {
3838 uint8_t bpc;
3839
3840 if (is_y420) {
3841 bpc = 8;
3842
3843 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
3844 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
3845 bpc = 16;
3846 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
3847 bpc = 12;
3848 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
3849 bpc = 10;
3850 } else {
3851 bpc = (uint8_t)connector->display_info.bpc;
3852 /* Assume 8 bpc by default if no bpc is specified. */
3853 bpc = bpc ? bpc : 8;
3854 }
3855
3856 if (!state)
3857 state = connector->state;
3858
3859 if (state) {
3860 /*
3861 * Cap display bpc based on the user requested value.
3862 *
3863 		 * The value for state->max_bpc may not be correctly updated
3864 * depending on when the connector gets added to the state
3865 * or if this was called outside of atomic check, so it
3866 * can't be used directly.
3867 */
3868 bpc = min(bpc, state->max_requested_bpc);
3869
3870 /* Round down to the nearest even number. */
3871 bpc = bpc - (bpc & 1);
3872 }
3873
3874 switch (bpc) {
3875 case 0:
3876 /*
3877 		 * Temporary workaround: DRM doesn't parse color depth for
3878 		 * EDID revisions before 1.4.
3879 * TODO: Fix edid parsing
3880 */
3881 return COLOR_DEPTH_888;
3882 case 6:
3883 return COLOR_DEPTH_666;
3884 case 8:
3885 return COLOR_DEPTH_888;
3886 case 10:
3887 return COLOR_DEPTH_101010;
3888 case 12:
3889 return COLOR_DEPTH_121212;
3890 case 14:
3891 return COLOR_DEPTH_141414;
3892 case 16:
3893 return COLOR_DEPTH_161616;
3894 default:
3895 return COLOR_DEPTH_UNDEFINED;
3896 }
3897 }
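/*
 * Example of the bpc capping above (values assumed for illustration):
 * a sink whose EDID reports 10 bpc combined with a connector max bpc
 * property of 8 yields min(10, 8) = 8 and therefore COLOR_DEPTH_888,
 * and an odd intermediate value such as 9 is first rounded down to the
 * nearest even number (9 - (9 & 1) = 8) before being mapped to a
 * dc_color_depth.
 */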
3898
3899 static enum dc_aspect_ratio
3900 get_aspect_ratio(const struct drm_display_mode *mode_in)
3901 {
3902 /* 1-1 mapping, since both enums follow the HDMI spec. */
3903 return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
3904 }
3905
3906 static enum dc_color_space
3907 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
3908 {
3909 enum dc_color_space color_space = COLOR_SPACE_SRGB;
3910
3911 switch (dc_crtc_timing->pixel_encoding) {
3912 case PIXEL_ENCODING_YCBCR422:
3913 case PIXEL_ENCODING_YCBCR444:
3914 case PIXEL_ENCODING_YCBCR420:
3915 {
3916 /*
3917 		 * 27.03 MHz is the separation point between HDTV and SDTV
3918 		 * according to the HDMI spec; we use YCbCr709 above it and
3919 		 * YCbCr601 below it.
3920 */
3921 if (dc_crtc_timing->pix_clk_100hz > 270300) {
3922 if (dc_crtc_timing->flags.Y_ONLY)
3923 color_space =
3924 COLOR_SPACE_YCBCR709_LIMITED;
3925 else
3926 color_space = COLOR_SPACE_YCBCR709;
3927 } else {
3928 if (dc_crtc_timing->flags.Y_ONLY)
3929 color_space =
3930 COLOR_SPACE_YCBCR601_LIMITED;
3931 else
3932 color_space = COLOR_SPACE_YCBCR601;
3933 }
3934
3935 }
3936 break;
3937 case PIXEL_ENCODING_RGB:
3938 color_space = COLOR_SPACE_SRGB;
3939 break;
3940
3941 default:
3942 WARN_ON(1);
3943 break;
3944 }
3945
3946 return color_space;
3947 }
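/*
 * Example for the HD/SD split above, using standard CEA timings: 720x480p60
 * has a ~27.00 MHz pixel clock (pix_clk_100hz ~= 270000), which is not above
 * the 27.03 MHz threshold, so YCbCr output uses BT.601; 1280x720p60 at
 * 74.25 MHz (742500) is above it and uses BT.709.
 */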
3948
3949 static bool adjust_colour_depth_from_display_info(
3950 struct dc_crtc_timing *timing_out,
3951 const struct drm_display_info *info)
3952 {
3953 enum dc_color_depth depth = timing_out->display_color_depth;
3954 int normalized_clk;
3955 do {
3956 normalized_clk = timing_out->pix_clk_100hz / 10;
3957 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
3958 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
3959 normalized_clk /= 2;
3960 		/* Adjust the pixel clock per the HDMI spec based on colour depth */
3961 switch (depth) {
3962 case COLOR_DEPTH_888:
3963 break;
3964 case COLOR_DEPTH_101010:
3965 normalized_clk = (normalized_clk * 30) / 24;
3966 break;
3967 case COLOR_DEPTH_121212:
3968 normalized_clk = (normalized_clk * 36) / 24;
3969 break;
3970 case COLOR_DEPTH_161616:
3971 normalized_clk = (normalized_clk * 48) / 24;
3972 break;
3973 default:
3974 /* The above depths are the only ones valid for HDMI. */
3975 return false;
3976 }
3977 if (normalized_clk <= info->max_tmds_clock) {
3978 timing_out->display_color_depth = depth;
3979 return true;
3980 }
3981 } while (--depth > COLOR_DEPTH_666);
3982 return false;
3983 }
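/*
 * Worked example for the loop above, assuming a 3840x2160@60 RGB mode
 * (pix_clk_100hz = 5940000) and a sink advertising max_tmds_clock =
 * 600000 kHz (HDMI 2.0): 12 bpc needs 594000 * 36 / 24 = 891000 kHz and
 * 10 bpc needs 742500 kHz, both above the limit, so the colour depth is
 * stepped down until 8 bpc fits at 594000 kHz and COLOR_DEPTH_888 is kept.
 */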
3984
3985 static void fill_stream_properties_from_drm_display_mode(
3986 struct dc_stream_state *stream,
3987 const struct drm_display_mode *mode_in,
3988 const struct drm_connector *connector,
3989 const struct drm_connector_state *connector_state,
3990 const struct dc_stream_state *old_stream)
3991 {
3992 struct dc_crtc_timing *timing_out = &stream->timing;
3993 const struct drm_display_info *info = &connector->display_info;
3994 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
3995 struct hdmi_vendor_infoframe hv_frame;
3996 struct hdmi_avi_infoframe avi_frame;
3997
3998 memset(&hv_frame, 0, sizeof(hv_frame));
3999 memset(&avi_frame, 0, sizeof(avi_frame));
4000
4001 timing_out->h_border_left = 0;
4002 timing_out->h_border_right = 0;
4003 timing_out->v_border_top = 0;
4004 timing_out->v_border_bottom = 0;
4005 /* TODO: un-hardcode */
4006 if (drm_mode_is_420_only(info, mode_in)
4007 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4008 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4009 else if (drm_mode_is_420_also(info, mode_in)
4010 && aconnector->force_yuv420_output)
4011 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4012 else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
4013 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4014 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
4015 else
4016 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
4017
4018 timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
4019 timing_out->display_color_depth = convert_color_depth_from_display_info(
4020 connector, connector_state,
4021 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420));
4022 timing_out->scan_type = SCANNING_TYPE_NODATA;
4023 timing_out->hdmi_vic = 0;
4024
4025 	if (old_stream) {
4026 timing_out->vic = old_stream->timing.vic;
4027 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
4028 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
4029 } else {
4030 timing_out->vic = drm_match_cea_mode(mode_in);
4031 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
4032 timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
4033 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
4034 timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
4035 }
4036
4037 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4038 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
4039 timing_out->vic = avi_frame.video_code;
4040 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
4041 timing_out->hdmi_vic = hv_frame.vic;
4042 }
4043
4044 timing_out->h_addressable = mode_in->crtc_hdisplay;
4045 timing_out->h_total = mode_in->crtc_htotal;
4046 timing_out->h_sync_width =
4047 mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
4048 timing_out->h_front_porch =
4049 mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
4050 timing_out->v_total = mode_in->crtc_vtotal;
4051 timing_out->v_addressable = mode_in->crtc_vdisplay;
4052 timing_out->v_front_porch =
4053 mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
4054 timing_out->v_sync_width =
4055 mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
4056 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
4057 timing_out->aspect_ratio = get_aspect_ratio(mode_in);
4058
4059 stream->output_color_space = get_output_color_space(timing_out);
4060
4061 stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
4062 stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
4063 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4064 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
4065 drm_mode_is_420_also(info, mode_in) &&
4066 timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
4067 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4068 adjust_colour_depth_from_display_info(timing_out, info);
4069 }
4070 }
4071 }
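/*
 * Worked example of the timing conversion above, using the standard CEA
 * 1920x1080p60 mode (hdisplay 1920, hsync_start 2008, hsync_end 2052,
 * htotal 2200, vdisplay 1080, vsync_start 1084, vsync_end 1089,
 * vtotal 1125, clock 148500 kHz): h_front_porch = 2008 - 1920 = 88,
 * h_sync_width = 2052 - 2008 = 44, v_front_porch = 1084 - 1080 = 4,
 * v_sync_width = 1089 - 1084 = 5, and pix_clk_100hz = 148500 * 10 = 1485000.
 */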
4072
4073 static void fill_audio_info(struct audio_info *audio_info,
4074 const struct drm_connector *drm_connector,
4075 const struct dc_sink *dc_sink)
4076 {
4077 int i = 0;
4078 int cea_revision = 0;
4079 const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
4080
4081 audio_info->manufacture_id = edid_caps->manufacturer_id;
4082 audio_info->product_id = edid_caps->product_id;
4083
4084 cea_revision = drm_connector->display_info.cea_rev;
4085
4086 strscpy(audio_info->display_name,
4087 edid_caps->display_name,
4088 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
4089
4090 if (cea_revision >= 3) {
4091 audio_info->mode_count = edid_caps->audio_mode_count;
4092
4093 for (i = 0; i < audio_info->mode_count; ++i) {
4094 audio_info->modes[i].format_code =
4095 (enum audio_format_code)
4096 (edid_caps->audio_modes[i].format_code);
4097 audio_info->modes[i].channel_count =
4098 edid_caps->audio_modes[i].channel_count;
4099 audio_info->modes[i].sample_rates.all =
4100 edid_caps->audio_modes[i].sample_rate;
4101 audio_info->modes[i].sample_size =
4102 edid_caps->audio_modes[i].sample_size;
4103 }
4104 }
4105
4106 audio_info->flags.all = edid_caps->speaker_flags;
4107
4108 	/* TODO: We only check the progressive mode; check the interlaced mode too */
4109 if (drm_connector->latency_present[0]) {
4110 audio_info->video_latency = drm_connector->video_latency[0];
4111 audio_info->audio_latency = drm_connector->audio_latency[0];
4112 }
4113
4114 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
4115
4116 }
4117
4118 static void
4119 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
4120 struct drm_display_mode *dst_mode)
4121 {
4122 dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
4123 dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
4124 dst_mode->crtc_clock = src_mode->crtc_clock;
4125 dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
4126 dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
4127 dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
4128 dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
4129 dst_mode->crtc_htotal = src_mode->crtc_htotal;
4130 dst_mode->crtc_hskew = src_mode->crtc_hskew;
4131 dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
4132 dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
4133 dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
4134 dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
4135 dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
4136 }
4137
4138 static void
4139 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
4140 const struct drm_display_mode *native_mode,
4141 bool scale_enabled)
4142 {
4143 if (scale_enabled) {
4144 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4145 } else if (native_mode->clock == drm_mode->clock &&
4146 native_mode->htotal == drm_mode->htotal &&
4147 native_mode->vtotal == drm_mode->vtotal) {
4148 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4149 } else {
4150 		/* no scaling and no amdgpu-inserted mode, no need to patch */
4151 }
4152 }
4153
4154 static struct dc_sink *
4155 create_fake_sink(struct amdgpu_dm_connector *aconnector)
4156 {
4157 struct dc_sink_init_data sink_init_data = { 0 };
4158 struct dc_sink *sink = NULL;
4159 sink_init_data.link = aconnector->dc_link;
4160 sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
4161
4162 sink = dc_sink_create(&sink_init_data);
4163 if (!sink) {
4164 DRM_ERROR("Failed to create sink!\n");
4165 return NULL;
4166 }
4167 sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
4168
4169 return sink;
4170 }
4171
4172 static void set_multisync_trigger_params(
4173 struct dc_stream_state *stream)
4174 {
4175 if (stream->triggered_crtc_reset.enabled) {
4176 stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
4177 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
4178 }
4179 }
4180
4181 static void set_master_stream(struct dc_stream_state *stream_set[],
4182 int stream_count)
4183 {
4184 int j, highest_rfr = 0, master_stream = 0;
4185
4186 for (j = 0; j < stream_count; j++) {
4187 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
4188 int refresh_rate = 0;
4189
4190 refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
4191 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
4192 if (refresh_rate > highest_rfr) {
4193 highest_rfr = refresh_rate;
4194 master_stream = j;
4195 }
4196 }
4197 }
4198 for (j = 0; j < stream_count; j++) {
4199 if (stream_set[j])
4200 stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
4201 }
4202 }
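/*
 * The master selection above compares integer refresh rates computed as
 * pix_clk_100hz * 100 / (h_total * v_total); for the 1080p60 timing shown
 * in the example above, (1485000 * 100) / (2200 * 1125) = 60 Hz. Between a
 * 60 Hz and a 144 Hz stream, the 144 Hz one becomes the trigger event
 * source for every stream in the set.
 */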
4203
4204 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
4205 {
4206 int i = 0;
4207
4208 if (context->stream_count < 2)
4209 return;
4210 for (i = 0; i < context->stream_count ; i++) {
4211 if (!context->streams[i])
4212 continue;
4213 /*
4214 * TODO: add a function to read AMD VSDB bits and set
4215 * crtc_sync_master.multi_sync_enabled flag
4216 * For now it's set to false
4217 */
4218 set_multisync_trigger_params(context->streams[i]);
4219 }
4220 set_master_stream(context->streams, context->stream_count);
4221 }
4222
4223 static struct dc_stream_state *
4224 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
4225 const struct drm_display_mode *drm_mode,
4226 const struct dm_connector_state *dm_state,
4227 const struct dc_stream_state *old_stream)
4228 {
4229 struct drm_display_mode *preferred_mode = NULL;
4230 struct drm_connector *drm_connector;
4231 const struct drm_connector_state *con_state =
4232 dm_state ? &dm_state->base : NULL;
4233 struct dc_stream_state *stream = NULL;
4234 struct drm_display_mode mode = *drm_mode;
4235 bool native_mode_found = false;
4236 bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
4237 int mode_refresh;
4238 int preferred_refresh = 0;
4239 #if defined(CONFIG_DRM_AMD_DC_DCN)
4240 struct dsc_dec_dpcd_caps dsc_caps;
4241 #endif
4242 uint32_t link_bandwidth_kbps;
4243
4244 struct dc_sink *sink = NULL;
4245 if (aconnector == NULL) {
4246 DRM_ERROR("aconnector is NULL!\n");
4247 return stream;
4248 }
4249
4250 drm_connector = &aconnector->base;
4251
4252 if (!aconnector->dc_sink) {
4253 sink = create_fake_sink(aconnector);
4254 if (!sink)
4255 return stream;
4256 } else {
4257 sink = aconnector->dc_sink;
4258 dc_sink_retain(sink);
4259 }
4260
4261 stream = dc_create_stream_for_sink(sink);
4262
4263 if (stream == NULL) {
4264 DRM_ERROR("Failed to create stream for sink!\n");
4265 goto finish;
4266 }
4267
4268 stream->dm_stream_context = aconnector;
4269
4270 stream->timing.flags.LTE_340MCSC_SCRAMBLE =
4271 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
4272
4273 list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
4274 /* Search for preferred mode */
4275 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
4276 native_mode_found = true;
4277 break;
4278 }
4279 }
4280 if (!native_mode_found)
4281 preferred_mode = list_first_entry_or_null(
4282 &aconnector->base.modes,
4283 struct drm_display_mode,
4284 head);
4285
4286 mode_refresh = drm_mode_vrefresh(&mode);
4287
4288 if (preferred_mode == NULL) {
4289 /*
4290 		 * This may not be an error: the use case is when there are no
4291 		 * usermode calls to reset and set the mode upon hotplug. In that
4292 		 * case we set the mode ourselves to restore the previous mode,
4293 		 * and the mode list may not have been filled in yet.
4294 */
4295 DRM_DEBUG_DRIVER("No preferred mode found\n");
4296 } else {
4297 decide_crtc_timing_for_drm_display_mode(
4298 &mode, preferred_mode,
4299 dm_state ? (dm_state->scaling != RMX_OFF) : false);
4300 preferred_refresh = drm_mode_vrefresh(preferred_mode);
4301 }
4302
4303 if (!dm_state)
4304 drm_mode_set_crtcinfo(&mode, 0);
4305
4306 /*
4307 * If scaling is enabled and refresh rate didn't change
4308 * we copy the vic and polarities of the old timings
4309 */
4310 if (!scale || mode_refresh != preferred_refresh)
4311 fill_stream_properties_from_drm_display_mode(stream,
4312 &mode, &aconnector->base, con_state, NULL);
4313 else
4314 fill_stream_properties_from_drm_display_mode(stream,
4315 &mode, &aconnector->base, con_state, old_stream);
4316
4317 stream->timing.flags.DSC = 0;
4318
4319 if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
4320 #if defined(CONFIG_DRM_AMD_DC_DCN)
4321 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
4322 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
4323 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_ext_caps.raw,
4324 &dsc_caps);
4325 #endif
4326 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
4327 dc_link_get_link_cap(aconnector->dc_link));
4328
4329 #if defined(CONFIG_DRM_AMD_DC_DCN)
4330 if (dsc_caps.is_dsc_supported)
4331 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
4332 &dsc_caps,
4333 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
4334 link_bandwidth_kbps,
4335 &stream->timing,
4336 &stream->timing.dsc_cfg))
4337 stream->timing.flags.DSC = 1;
4338 #endif
4339 }
4340
4341 update_stream_scaling_settings(&mode, dm_state, stream);
4342
4343 fill_audio_info(
4344 &stream->audio_info,
4345 drm_connector,
4346 sink);
4347
4348 update_stream_signal(stream, sink);
4349
4350 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4351 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket, false, false);
4352 if (stream->link->psr_settings.psr_feature_enabled) {
4353 struct dc *core_dc = stream->link->ctx->dc;
4354
4355 if (dc_is_dmcu_initialized(core_dc)) {
4356 			/*
4357 			 * Decide whether the stream supports VSC SDP colorimetry
4358 			 * before building the VSC info packet.
4359 			 */
4360 stream->use_vsc_sdp_for_colorimetry = false;
4361 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
4362 stream->use_vsc_sdp_for_colorimetry =
4363 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
4364 } else {
4365 if (stream->link->dpcd_caps.dpcd_rev.raw >= 0x14 &&
4366 stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED) {
4367 stream->use_vsc_sdp_for_colorimetry = true;
4368 }
4369 }
4370 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
4371 }
4372 }
4373 finish:
4374 dc_sink_release(sink);
4375
4376 return stream;
4377 }
4378
4379 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
4380 {
4381 drm_crtc_cleanup(crtc);
4382 kfree(crtc);
4383 }
4384
4385 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
4386 struct drm_crtc_state *state)
4387 {
4388 struct dm_crtc_state *cur = to_dm_crtc_state(state);
4389
4390 	/* TODO: Destroy dc_stream objects once the stream object is flattened */
4391 if (cur->stream)
4392 dc_stream_release(cur->stream);
4393
4394
4395 __drm_atomic_helper_crtc_destroy_state(state);
4396
4397
4398 kfree(state);
4399 }
4400
4401 static void dm_crtc_reset_state(struct drm_crtc *crtc)
4402 {
4403 struct dm_crtc_state *state;
4404
4405 if (crtc->state)
4406 dm_crtc_destroy_state(crtc, crtc->state);
4407
4408 state = kzalloc(sizeof(*state), GFP_KERNEL);
4409 if (WARN_ON(!state))
4410 return;
4411
4412 crtc->state = &state->base;
4413 crtc->state->crtc = crtc;
4414
4415 }
4416
4417 static struct drm_crtc_state *
4418 dm_crtc_duplicate_state(struct drm_crtc *crtc)
4419 {
4420 struct dm_crtc_state *state, *cur;
4421
4422 cur = to_dm_crtc_state(crtc->state);
4423
4424 if (WARN_ON(!crtc->state))
4425 return NULL;
4426
4427 state = kzalloc(sizeof(*state), GFP_KERNEL);
4428 if (!state)
4429 return NULL;
4430
4431 __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
4432
4433 if (cur->stream) {
4434 state->stream = cur->stream;
4435 dc_stream_retain(state->stream);
4436 }
4437
4438 state->active_planes = cur->active_planes;
4439 state->interrupts_enabled = cur->interrupts_enabled;
4440 state->vrr_params = cur->vrr_params;
4441 state->vrr_infopacket = cur->vrr_infopacket;
4442 state->abm_level = cur->abm_level;
4443 state->vrr_supported = cur->vrr_supported;
4444 state->freesync_config = cur->freesync_config;
4445 state->crc_src = cur->crc_src;
4446 state->cm_has_degamma = cur->cm_has_degamma;
4447 state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
4448
4449 	/* TODO: Duplicate dc_stream once the stream object is flattened */
4450
4451 return &state->base;
4452 }
4453
4454 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
4455 {
4456 enum dc_irq_source irq_source;
4457 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4458 struct amdgpu_device *adev = crtc->dev->dev_private;
4459 int rc;
4460
4461 /* Do not set vupdate for DCN hardware */
4462 if (adev->family > AMDGPU_FAMILY_AI)
4463 return 0;
4464
4465 irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
4466
4467 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4468
4469 DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
4470 acrtc->crtc_id, enable ? "en" : "dis", rc);
4471 return rc;
4472 }
4473
4474 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
4475 {
4476 enum dc_irq_source irq_source;
4477 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4478 struct amdgpu_device *adev = crtc->dev->dev_private;
4479 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
4480 int rc = 0;
4481
4482 if (enable) {
4483 /* vblank irq on -> Only need vupdate irq in vrr mode */
4484 if (amdgpu_dm_vrr_active(acrtc_state))
4485 rc = dm_set_vupdate_irq(crtc, true);
4486 } else {
4487 /* vblank irq off -> vupdate irq off */
4488 rc = dm_set_vupdate_irq(crtc, false);
4489 }
4490
4491 if (rc)
4492 return rc;
4493
4494 irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
4495 return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4496 }
4497
4498 static int dm_enable_vblank(struct drm_crtc *crtc)
4499 {
4500 return dm_set_vblank(crtc, true);
4501 }
4502
4503 static void dm_disable_vblank(struct drm_crtc *crtc)
4504 {
4505 dm_set_vblank(crtc, false);
4506 }
4507
4508 /* Only the options currently available for the driver are implemented */
4509 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
4510 .reset = dm_crtc_reset_state,
4511 .destroy = amdgpu_dm_crtc_destroy,
4512 .gamma_set = drm_atomic_helper_legacy_gamma_set,
4513 .set_config = drm_atomic_helper_set_config,
4514 .page_flip = drm_atomic_helper_page_flip,
4515 .atomic_duplicate_state = dm_crtc_duplicate_state,
4516 .atomic_destroy_state = dm_crtc_destroy_state,
4517 .set_crc_source = amdgpu_dm_crtc_set_crc_source,
4518 .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
4519 .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
4520 .get_vblank_counter = amdgpu_get_vblank_counter_kms,
4521 .enable_vblank = dm_enable_vblank,
4522 .disable_vblank = dm_disable_vblank,
4523 .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
4524 };
4525
4526 static enum drm_connector_status
4527 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
4528 {
4529 bool connected;
4530 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4531
4532 /*
4533 * Notes:
4534 	 * 1. This interface is NOT called in the context of the HPD irq.
4535 	 * 2. This interface *is called* in the context of a user-mode ioctl, which
4536 	 *    makes it a bad place for *any* MST-related activity.
4537 */
4538
4539 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
4540 !aconnector->fake_enable)
4541 connected = (aconnector->dc_sink != NULL);
4542 else
4543 connected = (aconnector->base.force == DRM_FORCE_ON);
4544
4545 return (connected ? connector_status_connected :
4546 connector_status_disconnected);
4547 }
4548
4549 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
4550 struct drm_connector_state *connector_state,
4551 struct drm_property *property,
4552 uint64_t val)
4553 {
4554 struct drm_device *dev = connector->dev;
4555 struct amdgpu_device *adev = dev->dev_private;
4556 struct dm_connector_state *dm_old_state =
4557 to_dm_connector_state(connector->state);
4558 struct dm_connector_state *dm_new_state =
4559 to_dm_connector_state(connector_state);
4560
4561 int ret = -EINVAL;
4562
4563 if (property == dev->mode_config.scaling_mode_property) {
4564 enum amdgpu_rmx_type rmx_type;
4565
4566 switch (val) {
4567 case DRM_MODE_SCALE_CENTER:
4568 rmx_type = RMX_CENTER;
4569 break;
4570 case DRM_MODE_SCALE_ASPECT:
4571 rmx_type = RMX_ASPECT;
4572 break;
4573 case DRM_MODE_SCALE_FULLSCREEN:
4574 rmx_type = RMX_FULL;
4575 break;
4576 case DRM_MODE_SCALE_NONE:
4577 default:
4578 rmx_type = RMX_OFF;
4579 break;
4580 }
4581
4582 if (dm_old_state->scaling == rmx_type)
4583 return 0;
4584
4585 dm_new_state->scaling = rmx_type;
4586 ret = 0;
4587 } else if (property == adev->mode_info.underscan_hborder_property) {
4588 dm_new_state->underscan_hborder = val;
4589 ret = 0;
4590 } else if (property == adev->mode_info.underscan_vborder_property) {
4591 dm_new_state->underscan_vborder = val;
4592 ret = 0;
4593 } else if (property == adev->mode_info.underscan_property) {
4594 dm_new_state->underscan_enable = val;
4595 ret = 0;
4596 } else if (property == adev->mode_info.abm_level_property) {
4597 dm_new_state->abm_level = val;
4598 ret = 0;
4599 }
4600
4601 return ret;
4602 }
4603
4604 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
4605 const struct drm_connector_state *state,
4606 struct drm_property *property,
4607 uint64_t *val)
4608 {
4609 struct drm_device *dev = connector->dev;
4610 struct amdgpu_device *adev = dev->dev_private;
4611 struct dm_connector_state *dm_state =
4612 to_dm_connector_state(state);
4613 int ret = -EINVAL;
4614
4615 if (property == dev->mode_config.scaling_mode_property) {
4616 switch (dm_state->scaling) {
4617 case RMX_CENTER:
4618 *val = DRM_MODE_SCALE_CENTER;
4619 break;
4620 case RMX_ASPECT:
4621 *val = DRM_MODE_SCALE_ASPECT;
4622 break;
4623 case RMX_FULL:
4624 *val = DRM_MODE_SCALE_FULLSCREEN;
4625 break;
4626 case RMX_OFF:
4627 default:
4628 *val = DRM_MODE_SCALE_NONE;
4629 break;
4630 }
4631 ret = 0;
4632 } else if (property == adev->mode_info.underscan_hborder_property) {
4633 *val = dm_state->underscan_hborder;
4634 ret = 0;
4635 } else if (property == adev->mode_info.underscan_vborder_property) {
4636 *val = dm_state->underscan_vborder;
4637 ret = 0;
4638 } else if (property == adev->mode_info.underscan_property) {
4639 *val = dm_state->underscan_enable;
4640 ret = 0;
4641 } else if (property == adev->mode_info.abm_level_property) {
4642 *val = dm_state->abm_level;
4643 ret = 0;
4644 }
4645
4646 return ret;
4647 }
4648
4649 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
4650 {
4651 struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
4652
4653 drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
4654 }
4655
4656 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
4657 {
4658 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4659 const struct dc_link *link = aconnector->dc_link;
4660 struct amdgpu_device *adev = connector->dev->dev_private;
4661 struct amdgpu_display_manager *dm = &adev->dm;
4662
4663 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
4664 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
4665
4666 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
4667 link->type != dc_connection_none &&
4668 dm->backlight_dev) {
4669 backlight_device_unregister(dm->backlight_dev);
4670 dm->backlight_dev = NULL;
4671 }
4672 #endif
4673
4674 if (aconnector->dc_em_sink)
4675 dc_sink_release(aconnector->dc_em_sink);
4676 aconnector->dc_em_sink = NULL;
4677 if (aconnector->dc_sink)
4678 dc_sink_release(aconnector->dc_sink);
4679 aconnector->dc_sink = NULL;
4680
4681 drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
4682 drm_connector_unregister(connector);
4683 drm_connector_cleanup(connector);
4684 if (aconnector->i2c) {
4685 i2c_del_adapter(&aconnector->i2c->base);
4686 kfree(aconnector->i2c);
4687 }
4688 kfree(aconnector->dm_dp_aux.aux.name);
4689
4690 kfree(connector);
4691 }
4692
4693 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
4694 {
4695 struct dm_connector_state *state =
4696 to_dm_connector_state(connector->state);
4697
4698 if (connector->state)
4699 __drm_atomic_helper_connector_destroy_state(connector->state);
4700
4701 kfree(state);
4702
4703 state = kzalloc(sizeof(*state), GFP_KERNEL);
4704
4705 if (state) {
4706 state->scaling = RMX_OFF;
4707 state->underscan_enable = false;
4708 state->underscan_hborder = 0;
4709 state->underscan_vborder = 0;
4710 state->base.max_requested_bpc = 8;
4711 state->vcpi_slots = 0;
4712 state->pbn = 0;
4713 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4714 state->abm_level = amdgpu_dm_abm_level;
4715
4716 __drm_atomic_helper_connector_reset(connector, &state->base);
4717 }
4718 }
4719
4720 struct drm_connector_state *
4721 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
4722 {
4723 struct dm_connector_state *state =
4724 to_dm_connector_state(connector->state);
4725
4726 struct dm_connector_state *new_state =
4727 kmemdup(state, sizeof(*state), GFP_KERNEL);
4728
4729 if (!new_state)
4730 return NULL;
4731
4732 __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
4733
4734 new_state->freesync_capable = state->freesync_capable;
4735 new_state->abm_level = state->abm_level;
4736 new_state->scaling = state->scaling;
4737 new_state->underscan_enable = state->underscan_enable;
4738 new_state->underscan_hborder = state->underscan_hborder;
4739 new_state->underscan_vborder = state->underscan_vborder;
4740 new_state->vcpi_slots = state->vcpi_slots;
4741 new_state->pbn = state->pbn;
4742 return &new_state->base;
4743 }
4744
4745 static int
4746 amdgpu_dm_connector_late_register(struct drm_connector *connector)
4747 {
4748 #if defined(CONFIG_DEBUG_FS)
4749 struct amdgpu_dm_connector *amdgpu_dm_connector =
4750 to_amdgpu_dm_connector(connector);
4751 int r;
4752
4753 if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
4754 (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
4755 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
4756 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
4757 if (r)
4758 return r;
4759 }
4760
4761 connector_debugfs_init(amdgpu_dm_connector);
4762 #endif
4763
4764 return 0;
4765 }
4766
4767 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
4768 .reset = amdgpu_dm_connector_funcs_reset,
4769 .detect = amdgpu_dm_connector_detect,
4770 .fill_modes = drm_helper_probe_single_connector_modes,
4771 .destroy = amdgpu_dm_connector_destroy,
4772 .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
4773 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
4774 .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
4775 .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
4776 .late_register = amdgpu_dm_connector_late_register,
4777 .early_unregister = amdgpu_dm_connector_unregister
4778 };
4779
4780 static int get_modes(struct drm_connector *connector)
4781 {
4782 return amdgpu_dm_connector_get_modes(connector);
4783 }
4784
4785 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
4786 {
4787 struct dc_sink_init_data init_params = {
4788 .link = aconnector->dc_link,
4789 .sink_signal = SIGNAL_TYPE_VIRTUAL
4790 };
4791 struct edid *edid;
4792
4793 if (!aconnector->base.edid_blob_ptr) {
4794 DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
4795 aconnector->base.name);
4796
4797 aconnector->base.force = DRM_FORCE_OFF;
4798 aconnector->base.override_edid = false;
4799 return;
4800 }
4801
4802 edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
4803
4804 aconnector->edid = edid;
4805
4806 aconnector->dc_em_sink = dc_link_add_remote_sink(
4807 aconnector->dc_link,
4808 (uint8_t *)edid,
4809 (edid->extensions + 1) * EDID_LENGTH,
4810 &init_params);
4811
4812 if (aconnector->base.force == DRM_FORCE_ON) {
4813 aconnector->dc_sink = aconnector->dc_link->local_sink ?
4814 aconnector->dc_link->local_sink :
4815 aconnector->dc_em_sink;
4816 dc_sink_retain(aconnector->dc_sink);
4817 }
4818 }
4819
4820 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
4821 {
4822 struct dc_link *link = (struct dc_link *)aconnector->dc_link;
4823
4824 /*
4825 * In case of headless boot with force on for DP managed connector
4826 * Those settings have to be != 0 to get initial modeset
4827 */
4828 if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
4829 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
4830 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
4831 }
4832
4833
4834 aconnector->base.override_edid = true;
4835 create_eml_sink(aconnector);
4836 }
4837
4838 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
4839 struct drm_display_mode *mode)
4840 {
4841 int result = MODE_ERROR;
4842 struct dc_sink *dc_sink;
4843 struct amdgpu_device *adev = connector->dev->dev_private;
4844 /* TODO: Unhardcode stream count */
4845 struct dc_stream_state *stream;
4846 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4847 enum dc_status dc_result = DC_OK;
4848
4849 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
4850 (mode->flags & DRM_MODE_FLAG_DBLSCAN))
4851 return result;
4852
4853 /*
4854 	 * Only run this the first time mode_valid is called to initialize
4855 * EDID mgmt
4856 */
4857 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
4858 !aconnector->dc_em_sink)
4859 handle_edid_mgmt(aconnector);
4860
4861 dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
4862
4863 if (dc_sink == NULL) {
4864 DRM_ERROR("dc_sink is NULL!\n");
4865 goto fail;
4866 }
4867
4868 stream = create_stream_for_sink(aconnector, mode, NULL, NULL);
4869 if (stream == NULL) {
4870 DRM_ERROR("Failed to create stream for sink!\n");
4871 goto fail;
4872 }
4873
4874 dc_result = dc_validate_stream(adev->dm.dc, stream);
4875
4876 if (dc_result == DC_OK)
4877 result = MODE_OK;
4878 else
4879 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d\n",
4880 mode->hdisplay,
4881 mode->vdisplay,
4882 mode->clock,
4883 dc_result);
4884
4885 dc_stream_release(stream);
4886
4887 fail:
4888 	/* TODO: error handling */
4889 return result;
4890 }
4891
4892 static int fill_hdr_info_packet(const struct drm_connector_state *state,
4893 struct dc_info_packet *out)
4894 {
4895 struct hdmi_drm_infoframe frame;
4896 unsigned char buf[30]; /* 26 + 4 */
4897 ssize_t len;
4898 int ret, i;
4899
4900 memset(out, 0, sizeof(*out));
4901
4902 if (!state->hdr_output_metadata)
4903 return 0;
4904
4905 ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
4906 if (ret)
4907 return ret;
4908
4909 len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
4910 if (len < 0)
4911 return (int)len;
4912
4913 /* Static metadata is a fixed 26 bytes + 4 byte header. */
4914 if (len != 30)
4915 return -EINVAL;
4916
4917 /* Prepare the infopacket for DC. */
4918 switch (state->connector->connector_type) {
4919 case DRM_MODE_CONNECTOR_HDMIA:
4920 out->hb0 = 0x87; /* type */
4921 out->hb1 = 0x01; /* version */
4922 out->hb2 = 0x1A; /* length */
4923 out->sb[0] = buf[3]; /* checksum */
4924 i = 1;
4925 break;
4926
4927 case DRM_MODE_CONNECTOR_DisplayPort:
4928 case DRM_MODE_CONNECTOR_eDP:
4929 out->hb0 = 0x00; /* sdp id, zero */
4930 out->hb1 = 0x87; /* type */
4931 out->hb2 = 0x1D; /* payload len - 1 */
4932 out->hb3 = (0x13 << 2); /* sdp version */
4933 out->sb[0] = 0x01; /* version */
4934 out->sb[1] = 0x1A; /* length */
4935 i = 2;
4936 break;
4937
4938 default:
4939 return -EINVAL;
4940 }
4941
4942 memcpy(&out->sb[i], &buf[4], 26);
4943 out->valid = true;
4944
4945 print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
4946 sizeof(out->sb), false);
4947
4948 return 0;
4949 }
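/*
 * Layout assumed for the packed buffer above: hdmi_drm_infoframe_pack_only()
 * emits a 4-byte header (type 0x87, version, length 26, checksum) followed
 * by the 26-byte static metadata payload, which is why buf[3] is treated as
 * the checksum byte and buf[4..29] is copied into the DC infopacket body.
 */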
4950
4951 static bool
4952 is_hdr_metadata_different(const struct drm_connector_state *old_state,
4953 const struct drm_connector_state *new_state)
4954 {
4955 struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
4956 struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
4957
4958 if (old_blob != new_blob) {
4959 if (old_blob && new_blob &&
4960 old_blob->length == new_blob->length)
4961 return memcmp(old_blob->data, new_blob->data,
4962 old_blob->length);
4963
4964 return true;
4965 }
4966
4967 return false;
4968 }
4969
4970 static int
4971 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
4972 struct drm_atomic_state *state)
4973 {
4974 struct drm_connector_state *new_con_state =
4975 drm_atomic_get_new_connector_state(state, conn);
4976 struct drm_connector_state *old_con_state =
4977 drm_atomic_get_old_connector_state(state, conn);
4978 struct drm_crtc *crtc = new_con_state->crtc;
4979 struct drm_crtc_state *new_crtc_state;
4980 int ret;
4981
4982 if (!crtc)
4983 return 0;
4984
4985 if (is_hdr_metadata_different(old_con_state, new_con_state)) {
4986 struct dc_info_packet hdr_infopacket;
4987
4988 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
4989 if (ret)
4990 return ret;
4991
4992 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
4993 if (IS_ERR(new_crtc_state))
4994 return PTR_ERR(new_crtc_state);
4995
4996 /*
4997 * DC considers the stream backends changed if the
4998 * static metadata changes. Forcing the modeset also
4999 * gives a simple way for userspace to switch from
5000 * 8bpc to 10bpc when setting the metadata to enter
5001 * or exit HDR.
5002 *
5003 * Changing the static metadata after it's been
5004 * set is permissible, however. So only force a
5005 * modeset if we're entering or exiting HDR.
5006 */
5007 new_crtc_state->mode_changed =
5008 !old_con_state->hdr_output_metadata ||
5009 !new_con_state->hdr_output_metadata;
5010 }
5011
5012 return 0;
5013 }
5014
5015 static const struct drm_connector_helper_funcs
5016 amdgpu_dm_connector_helper_funcs = {
5017 /*
5018 	 * When hotplugging a second, bigger display in FB console mode, the higher
5019 	 * resolution modes are filtered out by drm_mode_validate_size() and are
5020 	 * missing after the user starts lightdm. So we need to renew the mode list
5021 	 * in the get_modes callback, not just return the mode count.
5022 */
5023 .get_modes = get_modes,
5024 .mode_valid = amdgpu_dm_connector_mode_valid,
5025 .atomic_check = amdgpu_dm_connector_atomic_check,
5026 };
5027
5028 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
5029 {
5030 }
5031
5032 static bool does_crtc_have_active_cursor(struct drm_crtc_state *new_crtc_state)
5033 {
5034 struct drm_device *dev = new_crtc_state->crtc->dev;
5035 struct drm_plane *plane;
5036
5037 drm_for_each_plane_mask(plane, dev, new_crtc_state->plane_mask) {
5038 if (plane->type == DRM_PLANE_TYPE_CURSOR)
5039 return true;
5040 }
5041
5042 return false;
5043 }
5044
5045 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
5046 {
5047 struct drm_atomic_state *state = new_crtc_state->state;
5048 struct drm_plane *plane;
5049 int num_active = 0;
5050
5051 drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
5052 struct drm_plane_state *new_plane_state;
5053
5054 /* Cursor planes are "fake". */
5055 if (plane->type == DRM_PLANE_TYPE_CURSOR)
5056 continue;
5057
5058 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
5059
5060 if (!new_plane_state) {
5061 /*
5062 			 * The plane is enabled on the CRTC and hasn't changed
5063 * state. This means that it previously passed
5064 * validation and is therefore enabled.
5065 */
5066 num_active += 1;
5067 continue;
5068 }
5069
5070 /* We need a framebuffer to be considered enabled. */
5071 num_active += (new_plane_state->fb != NULL);
5072 }
5073
5074 return num_active;
5075 }
5076
5077 /*
5078 * Sets whether interrupts should be enabled on a specific CRTC.
5079 * We require that the stream be enabled and that there exist active
5080 * DC planes on the stream.
5081 */
5082 static void
5083 dm_update_crtc_interrupt_state(struct drm_crtc *crtc,
5084 struct drm_crtc_state *new_crtc_state)
5085 {
5086 struct dm_crtc_state *dm_new_crtc_state =
5087 to_dm_crtc_state(new_crtc_state);
5088
5089 dm_new_crtc_state->active_planes = 0;
5090 dm_new_crtc_state->interrupts_enabled = false;
5091
5092 if (!dm_new_crtc_state->stream)
5093 return;
5094
5095 dm_new_crtc_state->active_planes =
5096 count_crtc_active_planes(new_crtc_state);
5097
5098 dm_new_crtc_state->interrupts_enabled =
5099 dm_new_crtc_state->active_planes > 0;
5100 }
5101
5102 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
5103 struct drm_crtc_state *state)
5104 {
5105 struct amdgpu_device *adev = crtc->dev->dev_private;
5106 struct dc *dc = adev->dm.dc;
5107 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
5108 int ret = -EINVAL;
5109
5110 /*
5111 * Update interrupt state for the CRTC. This needs to happen whenever
5112 * the CRTC has changed or whenever any of its planes have changed.
5113 * Atomic check satisfies both of these requirements since the CRTC
5114 * is added to the state by DRM during drm_atomic_helper_check_planes.
5115 */
5116 dm_update_crtc_interrupt_state(crtc, state);
5117
5118 if (unlikely(!dm_crtc_state->stream &&
5119 modeset_required(state, NULL, dm_crtc_state->stream))) {
5120 WARN_ON(1);
5121 return ret;
5122 }
5123
5124 /* In some use cases, like reset, no stream is attached */
5125 if (!dm_crtc_state->stream)
5126 return 0;
5127
5128 /*
5129 * We want at least one hardware plane enabled to use
5130 * the stream with a cursor enabled.
5131 */
5132 if (state->enable && state->active &&
5133 does_crtc_have_active_cursor(state) &&
5134 dm_crtc_state->active_planes == 0)
5135 return -EINVAL;
5136
5137 if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
5138 return 0;
5139
5140 return ret;
5141 }
5142
5143 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
5144 const struct drm_display_mode *mode,
5145 struct drm_display_mode *adjusted_mode)
5146 {
5147 return true;
5148 }
5149
5150 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
5151 .disable = dm_crtc_helper_disable,
5152 .atomic_check = dm_crtc_helper_atomic_check,
5153 .mode_fixup = dm_crtc_helper_mode_fixup,
5154 .get_scanout_position = amdgpu_crtc_get_scanout_position,
5155 };
5156
5157 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
5158 {
5159
5160 }
5161
5162 static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
5163 {
5164 switch (display_color_depth) {
5165 case COLOR_DEPTH_666:
5166 return 6;
5167 case COLOR_DEPTH_888:
5168 return 8;
5169 case COLOR_DEPTH_101010:
5170 return 10;
5171 case COLOR_DEPTH_121212:
5172 return 12;
5173 case COLOR_DEPTH_141414:
5174 return 14;
5175 case COLOR_DEPTH_161616:
5176 return 16;
5177 default:
5178 break;
5179 }
5180 return 0;
5181 }
5182
5183 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
5184 struct drm_crtc_state *crtc_state,
5185 struct drm_connector_state *conn_state)
5186 {
5187 struct drm_atomic_state *state = crtc_state->state;
5188 struct drm_connector *connector = conn_state->connector;
5189 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5190 struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
5191 const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
5192 struct drm_dp_mst_topology_mgr *mst_mgr;
5193 struct drm_dp_mst_port *mst_port;
5194 enum dc_color_depth color_depth;
5195 int clock, bpp = 0;
5196 bool is_y420 = false;
5197
5198 if (!aconnector->port || !aconnector->dc_sink)
5199 return 0;
5200
5201 mst_port = aconnector->port;
5202 mst_mgr = &aconnector->mst_port->mst_mgr;
5203
5204 if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
5205 return 0;
5206
5207 if (!state->duplicated) {
5208 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
5209 aconnector->force_yuv420_output;
5210 color_depth = convert_color_depth_from_display_info(connector, conn_state,
5211 is_y420);
5212 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
5213 clock = adjusted_mode->clock;
5214 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
5215 }
5216 dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
5217 mst_mgr,
5218 mst_port,
5219 dm_new_connector_state->pbn,
5220 0);
5221 if (dm_new_connector_state->vcpi_slots < 0) {
5222 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
5223 return dm_new_connector_state->vcpi_slots;
5224 }
5225 return 0;
5226 }
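/*
 * In the MST check above, bpp is the uncompressed stream rate in bits per
 * pixel, i.e. 3 components * bpc (COLOR_DEPTH_888 -> 24 bpp), and
 * drm_dp_calc_pbn_mode() turns the (clock, bpp) pair into the PBN value
 * used to reserve VCPI slots on the MST link.
 */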
5227
5228 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
5229 .disable = dm_encoder_helper_disable,
5230 .atomic_check = dm_encoder_helper_atomic_check
5231 };
5232
5233 #if defined(CONFIG_DRM_AMD_DC_DCN)
5234 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
5235 struct dc_state *dc_state)
5236 {
5237 struct dc_stream_state *stream = NULL;
5238 struct drm_connector *connector;
5239 struct drm_connector_state *new_con_state, *old_con_state;
5240 struct amdgpu_dm_connector *aconnector;
5241 struct dm_connector_state *dm_conn_state;
5242 int i, j, clock, bpp;
5243 int vcpi, pbn_div, pbn = 0;
5244
5245 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
5246
5247 aconnector = to_amdgpu_dm_connector(connector);
5248
5249 if (!aconnector->port)
5250 continue;
5251
5252 if (!new_con_state || !new_con_state->crtc)
5253 continue;
5254
5255 dm_conn_state = to_dm_connector_state(new_con_state);
5256
5257 for (j = 0; j < dc_state->stream_count; j++) {
5258 stream = dc_state->streams[j];
5259 if (!stream)
5260 continue;
5261
5262 if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
5263 break;
5264
5265 stream = NULL;
5266 }
5267
5268 if (!stream)
5269 continue;
5270
5271 if (stream->timing.flags.DSC != 1) {
5272 drm_dp_mst_atomic_enable_dsc(state,
5273 aconnector->port,
5274 dm_conn_state->pbn,
5275 0,
5276 false);
5277 continue;
5278 }
5279
5280 pbn_div = dm_mst_get_pbn_divider(stream->link);
5281 bpp = stream->timing.dsc_cfg.bits_per_pixel;
5282 clock = stream->timing.pix_clk_100hz / 10;
5283 pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
5284 vcpi = drm_dp_mst_atomic_enable_dsc(state,
5285 aconnector->port,
5286 pbn, pbn_div,
5287 true);
5288 if (vcpi < 0)
5289 return vcpi;
5290
5291 dm_conn_state->pbn = pbn;
5292 dm_conn_state->vcpi_slots = vcpi;
5293 }
5294 return 0;
5295 }
5296 #endif
5297
5298 static void dm_drm_plane_reset(struct drm_plane *plane)
5299 {
5300 struct dm_plane_state *amdgpu_state = NULL;
5301
5302 if (plane->state)
5303 plane->funcs->atomic_destroy_state(plane, plane->state);
5304
5305 amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
5306 WARN_ON(amdgpu_state == NULL);
5307
5308 if (amdgpu_state)
5309 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
5310 }
5311
5312 static struct drm_plane_state *
5313 dm_drm_plane_duplicate_state(struct drm_plane *plane)
5314 {
5315 struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
5316
5317 old_dm_plane_state = to_dm_plane_state(plane->state);
5318 dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
5319 if (!dm_plane_state)
5320 return NULL;
5321
5322 __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
5323
5324 if (old_dm_plane_state->dc_state) {
5325 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
5326 dc_plane_state_retain(dm_plane_state->dc_state);
5327 }
5328
5329 return &dm_plane_state->base;
5330 }
5331
5332 void dm_drm_plane_destroy_state(struct drm_plane *plane,
5333 struct drm_plane_state *state)
5334 {
5335 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
5336
5337 if (dm_plane_state->dc_state)
5338 dc_plane_state_release(dm_plane_state->dc_state);
5339
5340 drm_atomic_helper_plane_destroy_state(plane, state);
5341 }
5342
5343 static const struct drm_plane_funcs dm_plane_funcs = {
5344 .update_plane = drm_atomic_helper_update_plane,
5345 .disable_plane = drm_atomic_helper_disable_plane,
5346 .destroy = drm_primary_helper_destroy,
5347 .reset = dm_drm_plane_reset,
5348 .atomic_duplicate_state = dm_drm_plane_duplicate_state,
5349 .atomic_destroy_state = dm_drm_plane_destroy_state,
5350 };
5351
5352 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
5353 struct drm_plane_state *new_state)
5354 {
5355 struct amdgpu_framebuffer *afb;
5356 struct drm_gem_object *obj;
5357 struct amdgpu_device *adev;
5358 struct amdgpu_bo *rbo;
5359 struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
5360 struct list_head list;
5361 struct ttm_validate_buffer tv;
5362 struct ww_acquire_ctx ticket;
5363 uint64_t tiling_flags;
5364 uint32_t domain;
5365 int r;
5366 bool tmz_surface = false;
5367 bool force_disable_dcc = false;
5368
5369 dm_plane_state_old = to_dm_plane_state(plane->state);
5370 dm_plane_state_new = to_dm_plane_state(new_state);
5371
5372 if (!new_state->fb) {
5373 DRM_DEBUG_DRIVER("No FB bound\n");
5374 return 0;
5375 }
5376
5377 afb = to_amdgpu_framebuffer(new_state->fb);
5378 obj = new_state->fb->obj[0];
5379 rbo = gem_to_amdgpu_bo(obj);
5380 adev = amdgpu_ttm_adev(rbo->tbo.bdev);
5381 INIT_LIST_HEAD(&list);
5382
5383 tv.bo = &rbo->tbo;
5384 tv.num_shared = 1;
5385 list_add(&tv.head, &list);
5386
5387 r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
5388 if (r) {
5389 dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
5390 return r;
5391 }
5392
5393 if (plane->type != DRM_PLANE_TYPE_CURSOR)
5394 domain = amdgpu_display_supported_domains(adev, rbo->flags);
5395 else
5396 domain = AMDGPU_GEM_DOMAIN_VRAM;
5397
5398 r = amdgpu_bo_pin(rbo, domain);
5399 if (unlikely(r != 0)) {
5400 if (r != -ERESTARTSYS)
5401 DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
5402 ttm_eu_backoff_reservation(&ticket, &list);
5403 return r;
5404 }
5405
5406 r = amdgpu_ttm_alloc_gart(&rbo->tbo);
5407 if (unlikely(r != 0)) {
5408 amdgpu_bo_unpin(rbo);
5409 ttm_eu_backoff_reservation(&ticket, &list);
5410 DRM_ERROR("%p bind failed\n", rbo);
5411 return r;
5412 }
5413
5414 amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);
5415
5416 tmz_surface = amdgpu_bo_encrypted(rbo);
5417
5418 ttm_eu_backoff_reservation(&ticket, &list);
5419
5420 afb->address = amdgpu_bo_gpu_offset(rbo);
5421
5422 amdgpu_bo_ref(rbo);
5423
5424 if (dm_plane_state_new->dc_state &&
5425 dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
5426 struct dc_plane_state *plane_state = dm_plane_state_new->dc_state;
5427
5428 force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5429 fill_plane_buffer_attributes(
5430 adev, afb, plane_state->format, plane_state->rotation,
5431 tiling_flags, &plane_state->tiling_info,
5432 &plane_state->plane_size, &plane_state->dcc,
5433 &plane_state->address, tmz_surface,
5434 force_disable_dcc);
5435 }
5436
5437 return 0;
5438 }
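/*
 * Summary of the prepare_fb sequence above: reserve the BO via TTM, pin it
 * in a display-capable domain (VRAM for cursor planes), make sure it has a
 * valid GPU address, read its tiling flags and TMZ (encryption) state while
 * still reserved, then drop the reservation and record the scanout address
 * in afb->address for later use as the DC plane address.
 */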
5439
5440 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
5441 struct drm_plane_state *old_state)
5442 {
5443 struct amdgpu_bo *rbo;
5444 int r;
5445
5446 if (!old_state->fb)
5447 return;
5448
5449 rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
5450 r = amdgpu_bo_reserve(rbo, false);
5451 if (unlikely(r)) {
5452 DRM_ERROR("failed to reserve rbo before unpin\n");
5453 return;
5454 }
5455
5456 amdgpu_bo_unpin(rbo);
5457 amdgpu_bo_unreserve(rbo);
5458 amdgpu_bo_unref(&rbo);
5459 }
5460
5461 static int dm_plane_atomic_check(struct drm_plane *plane,
5462 struct drm_plane_state *state)
5463 {
5464 struct amdgpu_device *adev = plane->dev->dev_private;
5465 struct dc *dc = adev->dm.dc;
5466 struct dm_plane_state *dm_plane_state;
5467 struct dc_scaling_info scaling_info;
5468 int ret;
5469
5470 dm_plane_state = to_dm_plane_state(state);
5471
5472 if (!dm_plane_state->dc_state)
5473 return 0;
5474
5475 ret = fill_dc_scaling_info(state, &scaling_info);
5476 if (ret)
5477 return ret;
5478
5479 if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
5480 return 0;
5481
5482 return -EINVAL;
5483 }
5484
5485 static int dm_plane_atomic_async_check(struct drm_plane *plane,
5486 struct drm_plane_state *new_plane_state)
5487 {
5488 /* Only support async updates on cursor planes. */
5489 if (plane->type != DRM_PLANE_TYPE_CURSOR)
5490 return -EINVAL;
5491
5492 return 0;
5493 }
5494
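/*
 * Async (cursor) updates bypass the full atomic commit: copy the new
 * framebuffer and coordinates straight into the current plane state and
 * program the cursor hardware immediately.
 */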
5495 static void dm_plane_atomic_async_update(struct drm_plane *plane,
5496 struct drm_plane_state *new_state)
5497 {
5498 struct drm_plane_state *old_state =
5499 drm_atomic_get_old_plane_state(new_state->state, plane);
5500
5501 swap(plane->state->fb, new_state->fb);
5502
5503 plane->state->src_x = new_state->src_x;
5504 plane->state->src_y = new_state->src_y;
5505 plane->state->src_w = new_state->src_w;
5506 plane->state->src_h = new_state->src_h;
5507 plane->state->crtc_x = new_state->crtc_x;
5508 plane->state->crtc_y = new_state->crtc_y;
5509 plane->state->crtc_w = new_state->crtc_w;
5510 plane->state->crtc_h = new_state->crtc_h;
5511
5512 handle_cursor_update(plane, old_state);
5513 }
5514
5515 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
5516 .prepare_fb = dm_plane_helper_prepare_fb,
5517 .cleanup_fb = dm_plane_helper_cleanup_fb,
5518 .atomic_check = dm_plane_atomic_check,
5519 .atomic_async_check = dm_plane_atomic_async_check,
5520 .atomic_async_update = dm_plane_atomic_async_update
5521 };
5522
5523 /*
5524  * TODO: these are currently initialized to RGB formats only.
5525  * For future use cases we should either initialize them dynamically based on
5526  * plane capabilities, or initialize this array to all formats so the internal
5527  * DRM check succeeds, and let DC implement the proper check.
5528 */
5529 static const uint32_t rgb_formats[] = {
5530 DRM_FORMAT_XRGB8888,
5531 DRM_FORMAT_ARGB8888,
5532 DRM_FORMAT_RGBA8888,
5533 DRM_FORMAT_XRGB2101010,
5534 DRM_FORMAT_XBGR2101010,
5535 DRM_FORMAT_ARGB2101010,
5536 DRM_FORMAT_ABGR2101010,
5537 DRM_FORMAT_XBGR8888,
5538 DRM_FORMAT_ABGR8888,
5539 DRM_FORMAT_RGB565,
5540 };
5541
5542 static const uint32_t overlay_formats[] = {
5543 DRM_FORMAT_XRGB8888,
5544 DRM_FORMAT_ARGB8888,
5545 DRM_FORMAT_RGBA8888,
5546 DRM_FORMAT_XBGR8888,
5547 DRM_FORMAT_ABGR8888,
5548 DRM_FORMAT_RGB565
5549 };
5550
5551 static const u32 cursor_formats[] = {
5552 DRM_FORMAT_ARGB8888
5553 };
5554
5555 static int get_plane_formats(const struct drm_plane *plane,
5556 const struct dc_plane_cap *plane_cap,
5557 uint32_t *formats, int max_formats)
5558 {
5559 int i, num_formats = 0;
5560
5561 /*
5562 * TODO: Query support for each group of formats directly from
5563 * DC plane caps. This will require adding more formats to the
5564 * caps list.
5565 */
5566
5567 switch (plane->type) {
5568 case DRM_PLANE_TYPE_PRIMARY:
5569 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
5570 if (num_formats >= max_formats)
5571 break;
5572
5573 formats[num_formats++] = rgb_formats[i];
5574 }
5575
5576 if (plane_cap && plane_cap->pixel_format_support.nv12)
5577 formats[num_formats++] = DRM_FORMAT_NV12;
5578 if (plane_cap && plane_cap->pixel_format_support.p010)
5579 formats[num_formats++] = DRM_FORMAT_P010;
5580 break;
5581
5582 case DRM_PLANE_TYPE_OVERLAY:
5583 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
5584 if (num_formats >= max_formats)
5585 break;
5586
5587 formats[num_formats++] = overlay_formats[i];
5588 }
5589 break;
5590
5591 case DRM_PLANE_TYPE_CURSOR:
5592 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
5593 if (num_formats >= max_formats)
5594 break;
5595
5596 formats[num_formats++] = cursor_formats[i];
5597 }
5598 break;
5599 }
5600
5601 return num_formats;
5602 }
5603
5604 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
5605 struct drm_plane *plane,
5606 unsigned long possible_crtcs,
5607 const struct dc_plane_cap *plane_cap)
5608 {
5609 uint32_t formats[32];
5610 int num_formats;
5611 int res = -EPERM;
5612
5613 num_formats = get_plane_formats(plane, plane_cap, formats,
5614 ARRAY_SIZE(formats));
5615
5616 res = drm_universal_plane_init(dm->adev->ddev, plane, possible_crtcs,
5617 &dm_plane_funcs, formats, num_formats,
5618 NULL, plane->type, NULL);
5619 if (res)
5620 return res;
5621
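/* Expose alpha and blend-mode properties only on overlays that support per-pixel alpha. */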
5622 if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
5623 plane_cap && plane_cap->per_pixel_alpha) {
5624 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
5625 BIT(DRM_MODE_BLEND_PREMULTI);
5626
5627 drm_plane_create_alpha_property(plane);
5628 drm_plane_create_blend_mode_property(plane, blend_caps);
5629 }
5630
5631 if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
5632 plane_cap &&
5633 (plane_cap->pixel_format_support.nv12 ||
5634 plane_cap->pixel_format_support.p010)) {
5635 /* This only affects YUV formats. */
5636 drm_plane_create_color_properties(
5637 plane,
5638 BIT(DRM_COLOR_YCBCR_BT601) |
5639 BIT(DRM_COLOR_YCBCR_BT709) |
5640 BIT(DRM_COLOR_YCBCR_BT2020),
5641 BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
5642 BIT(DRM_COLOR_YCBCR_FULL_RANGE),
5643 DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
5644 }
5645
5646 drm_plane_helper_add(plane, &dm_plane_helper_funcs);
5647
5648 /* Create (reset) the plane state */
5649 if (plane->funcs->reset)
5650 plane->funcs->reset(plane);
5651
5652 return 0;
5653 }
5654
5655 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
5656 struct drm_plane *plane,
5657 uint32_t crtc_index)
5658 {
5659 struct amdgpu_crtc *acrtc = NULL;
5660 struct drm_plane *cursor_plane;
5661
5662 int res = -ENOMEM;
5663
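/* Each CRTC gets its own dedicated cursor plane, created here with no DC plane caps. */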
5664 cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
5665 if (!cursor_plane)
5666 goto fail;
5667
5668 cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
5669 res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
5670
5671 acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
5672 if (!acrtc)
5673 goto fail;
5674
5675 res = drm_crtc_init_with_planes(
5676 dm->ddev,
5677 &acrtc->base,
5678 plane,
5679 cursor_plane,
5680 &amdgpu_dm_crtc_funcs, NULL);
5681
5682 if (res)
5683 goto fail;
5684
5685 drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
5686
5687 	/* Create (reset) the CRTC state */
5688 if (acrtc->base.funcs->reset)
5689 acrtc->base.funcs->reset(&acrtc->base);
5690
5691 acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
5692 acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
5693
5694 acrtc->crtc_id = crtc_index;
5695 acrtc->base.enabled = false;
5696 acrtc->otg_inst = -1;
5697
5698 dm->adev->mode_info.crtcs[crtc_index] = acrtc;
5699 drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
5700 true, MAX_COLOR_LUT_ENTRIES);
5701 drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
5702
5703 return 0;
5704
5705 fail:
5706 kfree(acrtc);
5707 kfree(cursor_plane);
5708 return res;
5709 }
5710
5711
5712 static int to_drm_connector_type(enum signal_type st)
5713 {
5714 switch (st) {
5715 case SIGNAL_TYPE_HDMI_TYPE_A:
5716 return DRM_MODE_CONNECTOR_HDMIA;
5717 case SIGNAL_TYPE_EDP:
5718 return DRM_MODE_CONNECTOR_eDP;
5719 case SIGNAL_TYPE_LVDS:
5720 return DRM_MODE_CONNECTOR_LVDS;
5721 case SIGNAL_TYPE_RGB:
5722 return DRM_MODE_CONNECTOR_VGA;
5723 case SIGNAL_TYPE_DISPLAY_PORT:
5724 case SIGNAL_TYPE_DISPLAY_PORT_MST:
5725 return DRM_MODE_CONNECTOR_DisplayPort;
5726 case SIGNAL_TYPE_DVI_DUAL_LINK:
5727 case SIGNAL_TYPE_DVI_SINGLE_LINK:
5728 return DRM_MODE_CONNECTOR_DVID;
5729 case SIGNAL_TYPE_VIRTUAL:
5730 return DRM_MODE_CONNECTOR_VIRTUAL;
5731
5732 default:
5733 return DRM_MODE_CONNECTOR_Unknown;
5734 }
5735 }
5736
5737 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
5738 {
5739 struct drm_encoder *encoder;
5740
5741 /* There is only one encoder per connector */
5742 drm_connector_for_each_possible_encoder(connector, encoder)
5743 return encoder;
5744
5745 return NULL;
5746 }
5747
5748 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
5749 {
5750 struct drm_encoder *encoder;
5751 struct amdgpu_encoder *amdgpu_encoder;
5752
5753 encoder = amdgpu_dm_connector_to_encoder(connector);
5754
5755 if (encoder == NULL)
5756 return;
5757
5758 amdgpu_encoder = to_amdgpu_encoder(encoder);
5759
5760 amdgpu_encoder->native_mode.clock = 0;
5761
5762 if (!list_empty(&connector->probed_modes)) {
5763 struct drm_display_mode *preferred_mode = NULL;
5764
5765 list_for_each_entry(preferred_mode,
5766 &connector->probed_modes,
5767 head) {
5768 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
5769 amdgpu_encoder->native_mode = *preferred_mode;
5770
5771 break;
5772 }
5773
5774 }
5775 }
5776
5777 static struct drm_display_mode *
5778 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
5779 char *name,
5780 int hdisplay, int vdisplay)
5781 {
5782 struct drm_device *dev = encoder->dev;
5783 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
5784 struct drm_display_mode *mode = NULL;
5785 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
5786
5787 mode = drm_mode_duplicate(dev, native_mode);
5788
5789 if (mode == NULL)
5790 return NULL;
5791
5792 mode->hdisplay = hdisplay;
5793 mode->vdisplay = vdisplay;
5794 mode->type &= ~DRM_MODE_TYPE_PREFERRED;
5795 strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
5796
5797 return mode;
5798
5799 }
5800
5801 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
5802 struct drm_connector *connector)
5803 {
5804 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
5805 struct drm_display_mode *mode = NULL;
5806 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
5807 struct amdgpu_dm_connector *amdgpu_dm_connector =
5808 to_amdgpu_dm_connector(connector);
5809 int i;
5810 int n;
5811 struct mode_size {
5812 char name[DRM_DISPLAY_MODE_LEN];
5813 int w;
5814 int h;
5815 } common_modes[] = {
5816 { "640x480", 640, 480},
5817 { "800x600", 800, 600},
5818 { "1024x768", 1024, 768},
5819 { "1280x720", 1280, 720},
5820 { "1280x800", 1280, 800},
5821 {"1280x1024", 1280, 1024},
5822 { "1440x900", 1440, 900},
5823 {"1680x1050", 1680, 1050},
5824 {"1600x1200", 1600, 1200},
5825 {"1920x1080", 1920, 1080},
5826 {"1920x1200", 1920, 1200}
5827 };
5828
5829 n = ARRAY_SIZE(common_modes);
5830
5831 for (i = 0; i < n; i++) {
5832 struct drm_display_mode *curmode = NULL;
5833 bool mode_existed = false;
5834
5835 if (common_modes[i].w > native_mode->hdisplay ||
5836 common_modes[i].h > native_mode->vdisplay ||
5837 (common_modes[i].w == native_mode->hdisplay &&
5838 common_modes[i].h == native_mode->vdisplay))
5839 continue;
5840
5841 list_for_each_entry(curmode, &connector->probed_modes, head) {
5842 if (common_modes[i].w == curmode->hdisplay &&
5843 common_modes[i].h == curmode->vdisplay) {
5844 mode_existed = true;
5845 break;
5846 }
5847 }
5848
5849 if (mode_existed)
5850 continue;
5851
5852 mode = amdgpu_dm_create_common_mode(encoder,
5853 common_modes[i].name, common_modes[i].w,
5854 common_modes[i].h);
5855 drm_mode_probed_add(connector, mode);
5856 amdgpu_dm_connector->num_modes++;
5857 }
5858 }
5859
5860 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
5861 struct edid *edid)
5862 {
5863 struct amdgpu_dm_connector *amdgpu_dm_connector =
5864 to_amdgpu_dm_connector(connector);
5865
5866 if (edid) {
5867 /* empty probed_modes */
5868 INIT_LIST_HEAD(&connector->probed_modes);
5869 amdgpu_dm_connector->num_modes =
5870 drm_add_edid_modes(connector, edid);
5871
5872 		/* Sort the probed modes before calling
5873 		 * amdgpu_dm_get_native_mode(), since an EDID can have
5874 		 * more than one preferred mode. Modes that appear later
5875 		 * in the probed-mode list may have a higher preferred
5876 		 * resolution: for example, a 3840x2160 preferred timing
5877 		 * in the base EDID and a 4096x2160 preferred resolution
5878 		 * in a later DisplayID extension block.
5879 		 */
5880 drm_mode_sort(&connector->probed_modes);
5881 amdgpu_dm_get_native_mode(connector);
5882 } else {
5883 amdgpu_dm_connector->num_modes = 0;
5884 }
5885 }
5886
5887 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
5888 {
5889 struct amdgpu_dm_connector *amdgpu_dm_connector =
5890 to_amdgpu_dm_connector(connector);
5891 struct drm_encoder *encoder;
5892 struct edid *edid = amdgpu_dm_connector->edid;
5893
5894 encoder = amdgpu_dm_connector_to_encoder(connector);
5895
5896 if (!edid || !drm_edid_is_valid(edid)) {
5897 amdgpu_dm_connector->num_modes =
5898 drm_add_modes_noedid(connector, 640, 480);
5899 } else {
5900 amdgpu_dm_connector_ddc_get_modes(connector, edid);
5901 amdgpu_dm_connector_add_common_modes(encoder, connector);
5902 }
5903 amdgpu_dm_fbc_init(connector);
5904
5905 return amdgpu_dm_connector->num_modes;
5906 }
5907
5908 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
5909 struct amdgpu_dm_connector *aconnector,
5910 int connector_type,
5911 struct dc_link *link,
5912 int link_index)
5913 {
5914 struct amdgpu_device *adev = dm->ddev->dev_private;
5915
5916 /*
5917 * Some of the properties below require access to state, like bpc.
5918 * Allocate some default initial connector state with our reset helper.
5919 */
5920 if (aconnector->base.funcs->reset)
5921 aconnector->base.funcs->reset(&aconnector->base);
5922
5923 aconnector->connector_id = link_index;
5924 aconnector->dc_link = link;
5925 aconnector->base.interlace_allowed = false;
5926 aconnector->base.doublescan_allowed = false;
5927 aconnector->base.stereo_allowed = false;
5928 aconnector->base.dpms = DRM_MODE_DPMS_OFF;
5929 aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
5930 aconnector->audio_inst = -1;
5931 mutex_init(&aconnector->hpd_lock);
5932
5933 /*
5934 	 * Configure HPD hot plug support: connector->polled defaults to 0,
5935 	 * which means HPD hot plug is not supported.
5936 */
5937 switch (connector_type) {
5938 case DRM_MODE_CONNECTOR_HDMIA:
5939 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
5940 aconnector->base.ycbcr_420_allowed =
5941 link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
5942 break;
5943 case DRM_MODE_CONNECTOR_DisplayPort:
5944 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
5945 aconnector->base.ycbcr_420_allowed =
5946 link->link_enc->features.dp_ycbcr420_supported ? true : false;
5947 break;
5948 case DRM_MODE_CONNECTOR_DVID:
5949 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
5950 break;
5951 default:
5952 break;
5953 }
5954
5955 drm_object_attach_property(&aconnector->base.base,
5956 dm->ddev->mode_config.scaling_mode_property,
5957 DRM_MODE_SCALE_NONE);
5958
5959 drm_object_attach_property(&aconnector->base.base,
5960 adev->mode_info.underscan_property,
5961 UNDERSCAN_OFF);
5962 drm_object_attach_property(&aconnector->base.base,
5963 adev->mode_info.underscan_hborder_property,
5964 0);
5965 drm_object_attach_property(&aconnector->base.base,
5966 adev->mode_info.underscan_vborder_property,
5967 0);
5968
5969 if (!aconnector->mst_port)
5970 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
5971
5972 /* This defaults to the max in the range, but we want 8bpc for non-edp. */
5973 aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
5974 aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
5975
5976 if (connector_type == DRM_MODE_CONNECTOR_eDP &&
5977 dc_is_dmcu_initialized(adev->dm.dc)) {
5978 drm_object_attach_property(&aconnector->base.base,
5979 adev->mode_info.abm_level_property, 0);
5980 }
5981
5982 if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
5983 connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
5984 connector_type == DRM_MODE_CONNECTOR_eDP) {
5985 drm_object_attach_property(
5986 &aconnector->base.base,
5987 dm->ddev->mode_config.hdr_output_metadata_property, 0);
5988
5989 if (!aconnector->mst_port)
5990 drm_connector_attach_vrr_capable_property(&aconnector->base);
5991
5992 #ifdef CONFIG_DRM_AMD_DC_HDCP
5993 if (adev->dm.hdcp_workqueue)
5994 drm_connector_attach_content_protection_property(&aconnector->base, true);
5995 #endif
5996 }
5997 }
5998
5999 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
6000 struct i2c_msg *msgs, int num)
6001 {
6002 struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
6003 struct ddc_service *ddc_service = i2c->ddc_service;
6004 struct i2c_command cmd;
6005 int i;
6006 int result = -EIO;
6007
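/*
 * Translate the Linux i2c_msg array into a DC i2c_command and let DC drive
 * the transfer over the link's DDC channel.
 */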
6008 cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
6009
6010 if (!cmd.payloads)
6011 return result;
6012
6013 cmd.number_of_payloads = num;
6014 cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
6015 cmd.speed = 100;
6016
6017 for (i = 0; i < num; i++) {
6018 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
6019 cmd.payloads[i].address = msgs[i].addr;
6020 cmd.payloads[i].length = msgs[i].len;
6021 cmd.payloads[i].data = msgs[i].buf;
6022 }
6023
6024 if (dc_submit_i2c(
6025 ddc_service->ctx->dc,
6026 ddc_service->ddc_pin->hw_info.ddc_channel,
6027 &cmd))
6028 result = num;
6029
6030 kfree(cmd.payloads);
6031 return result;
6032 }
6033
6034 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
6035 {
6036 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
6037 }
6038
6039 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
6040 .master_xfer = amdgpu_dm_i2c_xfer,
6041 .functionality = amdgpu_dm_i2c_func,
6042 };
6043
6044 static struct amdgpu_i2c_adapter *
6045 create_i2c(struct ddc_service *ddc_service,
6046 int link_index,
6047 int *res)
6048 {
6049 struct amdgpu_device *adev = ddc_service->ctx->driver_context;
6050 struct amdgpu_i2c_adapter *i2c;
6051
6052 i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
6053 if (!i2c)
6054 return NULL;
6055 i2c->base.owner = THIS_MODULE;
6056 i2c->base.class = I2C_CLASS_DDC;
6057 i2c->base.dev.parent = &adev->pdev->dev;
6058 i2c->base.algo = &amdgpu_dm_i2c_algo;
6059 snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
6060 i2c_set_adapdata(&i2c->base, i2c);
6061 i2c->ddc_service = ddc_service;
6062 i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
6063
6064 return i2c;
6065 }
6066
6067
6068 /*
6069 * Note: this function assumes that dc_link_detect() was called for the
6070 * dc_link which will be represented by this aconnector.
6071 */
6072 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
6073 struct amdgpu_dm_connector *aconnector,
6074 uint32_t link_index,
6075 struct amdgpu_encoder *aencoder)
6076 {
6077 int res = 0;
6078 int connector_type;
6079 struct dc *dc = dm->dc;
6080 struct dc_link *link = dc_get_link_at_index(dc, link_index);
6081 struct amdgpu_i2c_adapter *i2c;
6082
6083 link->priv = aconnector;
6084
6085 DRM_DEBUG_DRIVER("%s()\n", __func__);
6086
6087 i2c = create_i2c(link->ddc, link->link_index, &res);
6088 if (!i2c) {
6089 DRM_ERROR("Failed to create i2c adapter data\n");
6090 return -ENOMEM;
6091 }
6092
6093 aconnector->i2c = i2c;
6094 res = i2c_add_adapter(&i2c->base);
6095
6096 if (res) {
6097 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
6098 goto out_free;
6099 }
6100
6101 connector_type = to_drm_connector_type(link->connector_signal);
6102
6103 res = drm_connector_init_with_ddc(
6104 dm->ddev,
6105 &aconnector->base,
6106 &amdgpu_dm_connector_funcs,
6107 connector_type,
6108 &i2c->base);
6109
6110 if (res) {
6111 DRM_ERROR("connector_init failed\n");
6112 aconnector->connector_id = -1;
6113 goto out_free;
6114 }
6115
6116 drm_connector_helper_add(
6117 &aconnector->base,
6118 &amdgpu_dm_connector_helper_funcs);
6119
6120 amdgpu_dm_connector_init_helper(
6121 dm,
6122 aconnector,
6123 connector_type,
6124 link,
6125 link_index);
6126
6127 drm_connector_attach_encoder(
6128 &aconnector->base, &aencoder->base);
6129
6130 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
6131 || connector_type == DRM_MODE_CONNECTOR_eDP)
6132 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
6133
6134 out_free:
6135 if (res) {
6136 kfree(i2c);
6137 aconnector->i2c = NULL;
6138 }
6139 return res;
6140 }
6141
6142 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
6143 {
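/* Any encoder may drive any CRTC: build a mask with one bit set per CRTC. */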
6144 switch (adev->mode_info.num_crtc) {
6145 case 1:
6146 return 0x1;
6147 case 2:
6148 return 0x3;
6149 case 3:
6150 return 0x7;
6151 case 4:
6152 return 0xf;
6153 case 5:
6154 return 0x1f;
6155 case 6:
6156 default:
6157 return 0x3f;
6158 }
6159 }
6160
6161 static int amdgpu_dm_encoder_init(struct drm_device *dev,
6162 struct amdgpu_encoder *aencoder,
6163 uint32_t link_index)
6164 {
6165 struct amdgpu_device *adev = dev->dev_private;
6166
6167 int res = drm_encoder_init(dev,
6168 &aencoder->base,
6169 &amdgpu_dm_encoder_funcs,
6170 DRM_MODE_ENCODER_TMDS,
6171 NULL);
6172
6173 aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
6174
6175 if (!res)
6176 aencoder->encoder_id = link_index;
6177 else
6178 aencoder->encoder_id = -1;
6179
6180 drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
6181
6182 return res;
6183 }
6184
6185 static void manage_dm_interrupts(struct amdgpu_device *adev,
6186 struct amdgpu_crtc *acrtc,
6187 bool enable)
6188 {
6189 /*
6190 	 * This is not a correct translation, but it works as long as the
6191 	 * VBLANK constant is the same as the PFLIP one.
6192 */
6193 int irq_type =
6194 amdgpu_display_crtc_idx_to_irq_type(
6195 adev,
6196 acrtc->crtc_id);
6197
6198 if (enable) {
6199 drm_crtc_vblank_on(&acrtc->base);
6200 amdgpu_irq_get(
6201 adev,
6202 &adev->pageflip_irq,
6203 irq_type);
6204 } else {
6205
6206 amdgpu_irq_put(
6207 adev,
6208 &adev->pageflip_irq,
6209 irq_type);
6210 drm_crtc_vblank_off(&acrtc->base);
6211 }
6212 }
6213
6214 static bool
6215 is_scaling_state_different(const struct dm_connector_state *dm_state,
6216 const struct dm_connector_state *old_dm_state)
6217 {
6218 if (dm_state->scaling != old_dm_state->scaling)
6219 return true;
6220 if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
6221 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
6222 return true;
6223 } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
6224 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
6225 return true;
6226 } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
6227 dm_state->underscan_vborder != old_dm_state->underscan_vborder)
6228 return true;
6229 return false;
6230 }
6231
6232 #ifdef CONFIG_DRM_AMD_DC_HDCP
6233 static bool is_content_protection_different(struct drm_connector_state *state,
6234 const struct drm_connector_state *old_state,
6235 const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
6236 {
6237 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6238
6239 if (old_state->hdcp_content_type != state->hdcp_content_type &&
6240 state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
6241 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6242 return true;
6243 }
6244
6245 	/* CP is being re-enabled, so ignore this transition. */
6246 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
6247 state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
6248 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
6249 return false;
6250 }
6251
6252 /* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED */
6253 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
6254 state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
6255 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6256
6257 	/* Check that something is actually connected and enabled; otherwise we would
6258 	 * start HDCP with nothing to protect (hot-plug, headless S3, DPMS).
6259 	 */
6260 if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && connector->dpms == DRM_MODE_DPMS_ON &&
6261 aconnector->dc_sink != NULL)
6262 return true;
6263
6264 if (old_state->content_protection == state->content_protection)
6265 return false;
6266
6267 if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
6268 return true;
6269
6270 return false;
6271 }
6272
6273 #endif
6274 static void remove_stream(struct amdgpu_device *adev,
6275 struct amdgpu_crtc *acrtc,
6276 struct dc_stream_state *stream)
6277 {
6278 /* this is the update mode case */
6279
6280 acrtc->otg_inst = -1;
6281 acrtc->enabled = false;
6282 }
6283
6284 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
6285 struct dc_cursor_position *position)
6286 {
6287 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6288 int x, y;
6289 int xorigin = 0, yorigin = 0;
6290
6291 position->enable = false;
6292 position->x = 0;
6293 position->y = 0;
6294
6295 if (!crtc || !plane->state->fb)
6296 return 0;
6297
6298 if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
6299 (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
6300 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
6301 __func__,
6302 plane->state->crtc_w,
6303 plane->state->crtc_h);
6304 return -EINVAL;
6305 }
6306
6307 x = plane->state->crtc_x;
6308 y = plane->state->crtc_y;
6309
6310 if (x <= -amdgpu_crtc->max_cursor_width ||
6311 y <= -amdgpu_crtc->max_cursor_height)
6312 return 0;
6313
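/*
 * For negative coordinates, clamp the position to 0 and shift the hotspot
 * instead, so the cursor can still slide partially off the top/left edge.
 */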
6314 if (x < 0) {
6315 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
6316 x = 0;
6317 }
6318 if (y < 0) {
6319 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
6320 y = 0;
6321 }
6322 position->enable = true;
6323 position->translate_by_source = true;
6324 position->x = x;
6325 position->y = y;
6326 position->x_hotspot = xorigin;
6327 position->y_hotspot = yorigin;
6328
6329 return 0;
6330 }
6331
6332 static void handle_cursor_update(struct drm_plane *plane,
6333 struct drm_plane_state *old_plane_state)
6334 {
6335 struct amdgpu_device *adev = plane->dev->dev_private;
6336 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
6337 struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
6338 struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
6339 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6340 uint64_t address = afb ? afb->address : 0;
6341 struct dc_cursor_position position;
6342 struct dc_cursor_attributes attributes;
6343 int ret;
6344
6345 if (!plane->state->fb && !old_plane_state->fb)
6346 return;
6347
6348 DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
6349 __func__,
6350 amdgpu_crtc->crtc_id,
6351 plane->state->crtc_w,
6352 plane->state->crtc_h);
6353
6354 ret = get_cursor_position(plane, crtc, &position);
6355 if (ret)
6356 return;
6357
6358 if (!position.enable) {
6359 /* turn off cursor */
6360 if (crtc_state && crtc_state->stream) {
6361 mutex_lock(&adev->dm.dc_lock);
6362 dc_stream_set_cursor_position(crtc_state->stream,
6363 &position);
6364 mutex_unlock(&adev->dm.dc_lock);
6365 }
6366 return;
6367 }
6368
6369 amdgpu_crtc->cursor_width = plane->state->crtc_w;
6370 amdgpu_crtc->cursor_height = plane->state->crtc_h;
6371
6372 memset(&attributes, 0, sizeof(attributes));
6373 attributes.address.high_part = upper_32_bits(address);
6374 attributes.address.low_part = lower_32_bits(address);
6375 attributes.width = plane->state->crtc_w;
6376 attributes.height = plane->state->crtc_h;
6377 attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
6378 attributes.rotation_angle = 0;
6379 attributes.attribute_flags.value = 0;
6380
6381 attributes.pitch = attributes.width;
6382
6383 if (crtc_state->stream) {
6384 mutex_lock(&adev->dm.dc_lock);
6385 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
6386 &attributes))
6387 DRM_ERROR("DC failed to set cursor attributes\n");
6388
6389 if (!dc_stream_set_cursor_position(crtc_state->stream,
6390 &position))
6391 DRM_ERROR("DC failed to set cursor position\n");
6392 mutex_unlock(&adev->dm.dc_lock);
6393 }
6394 }
6395
6396 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
6397 {
6398
6399 assert_spin_locked(&acrtc->base.dev->event_lock);
6400 WARN_ON(acrtc->event);
6401
6402 acrtc->event = acrtc->base.state->event;
6403
6404 /* Set the flip status */
6405 acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
6406
6407 /* Mark this event as consumed */
6408 acrtc->base.state->event = NULL;
6409
6410 DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
6411 acrtc->crtc_id);
6412 }
6413
6414 static void update_freesync_state_on_stream(
6415 struct amdgpu_display_manager *dm,
6416 struct dm_crtc_state *new_crtc_state,
6417 struct dc_stream_state *new_stream,
6418 struct dc_plane_state *surface,
6419 u32 flip_timestamp_in_us)
6420 {
6421 struct mod_vrr_params vrr_params;
6422 struct dc_info_packet vrr_infopacket = {0};
6423 struct amdgpu_device *adev = dm->adev;
6424 unsigned long flags;
6425
6426 if (!new_stream)
6427 return;
6428
6429 /*
6430 * TODO: Determine why min/max totals and vrefresh can be 0 here.
6431 * For now it's sufficient to just guard against these conditions.
6432 */
6433
6434 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
6435 return;
6436
6437 spin_lock_irqsave(&adev->ddev->event_lock, flags);
6438 vrr_params = new_crtc_state->vrr_params;
6439
6440 if (surface) {
6441 mod_freesync_handle_preflip(
6442 dm->freesync_module,
6443 surface,
6444 new_stream,
6445 flip_timestamp_in_us,
6446 &vrr_params);
6447
6448 if (adev->family < AMDGPU_FAMILY_AI &&
6449 amdgpu_dm_vrr_active(new_crtc_state)) {
6450 mod_freesync_handle_v_update(dm->freesync_module,
6451 new_stream, &vrr_params);
6452
6453 /* Need to call this before the frame ends. */
6454 dc_stream_adjust_vmin_vmax(dm->dc,
6455 new_crtc_state->stream,
6456 &vrr_params.adjust);
6457 }
6458 }
6459
6460 mod_freesync_build_vrr_infopacket(
6461 dm->freesync_module,
6462 new_stream,
6463 &vrr_params,
6464 PACKET_TYPE_VRR,
6465 TRANSFER_FUNC_UNKNOWN,
6466 &vrr_infopacket);
6467
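/* Record whether the VRR timing adjustment or infopacket actually changed compared to the previous parameters. */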
6468 new_crtc_state->freesync_timing_changed |=
6469 (memcmp(&new_crtc_state->vrr_params.adjust,
6470 &vrr_params.adjust,
6471 sizeof(vrr_params.adjust)) != 0);
6472
6473 new_crtc_state->freesync_vrr_info_changed |=
6474 (memcmp(&new_crtc_state->vrr_infopacket,
6475 &vrr_infopacket,
6476 sizeof(vrr_infopacket)) != 0);
6477
6478 new_crtc_state->vrr_params = vrr_params;
6479 new_crtc_state->vrr_infopacket = vrr_infopacket;
6480
6481 new_stream->adjust = new_crtc_state->vrr_params.adjust;
6482 new_stream->vrr_infopacket = vrr_infopacket;
6483
6484 if (new_crtc_state->freesync_vrr_info_changed)
6485 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
6486 new_crtc_state->base.crtc->base.id,
6487 (int)new_crtc_state->base.vrr_enabled,
6488 (int)vrr_params.state);
6489
6490 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
6491 }
6492
6493 static void pre_update_freesync_state_on_stream(
6494 struct amdgpu_display_manager *dm,
6495 struct dm_crtc_state *new_crtc_state)
6496 {
6497 struct dc_stream_state *new_stream = new_crtc_state->stream;
6498 struct mod_vrr_params vrr_params;
6499 struct mod_freesync_config config = new_crtc_state->freesync_config;
6500 struct amdgpu_device *adev = dm->adev;
6501 unsigned long flags;
6502
6503 if (!new_stream)
6504 return;
6505
6506 /*
6507 * TODO: Determine why min/max totals and vrefresh can be 0 here.
6508 * For now it's sufficient to just guard against these conditions.
6509 */
6510 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
6511 return;
6512
6513 spin_lock_irqsave(&adev->ddev->event_lock, flags);
6514 vrr_params = new_crtc_state->vrr_params;
6515
6516 if (new_crtc_state->vrr_supported &&
6517 config.min_refresh_in_uhz &&
6518 config.max_refresh_in_uhz) {
6519 config.state = new_crtc_state->base.vrr_enabled ?
6520 VRR_STATE_ACTIVE_VARIABLE :
6521 VRR_STATE_INACTIVE;
6522 } else {
6523 config.state = VRR_STATE_UNSUPPORTED;
6524 }
6525
6526 mod_freesync_build_vrr_params(dm->freesync_module,
6527 new_stream,
6528 &config, &vrr_params);
6529
6530 new_crtc_state->freesync_timing_changed |=
6531 (memcmp(&new_crtc_state->vrr_params.adjust,
6532 &vrr_params.adjust,
6533 sizeof(vrr_params.adjust)) != 0);
6534
6535 new_crtc_state->vrr_params = vrr_params;
6536 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
6537 }
6538
6539 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
6540 struct dm_crtc_state *new_state)
6541 {
6542 bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
6543 bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
6544
6545 if (!old_vrr_active && new_vrr_active) {
6546 /* Transition VRR inactive -> active:
6547 		 * While VRR is active, we must not disable the vblank irq, as a
6548 		 * re-enable after a disable would compute bogus vblank/pflip
6549 		 * timestamps if it happened inside the display front-porch.
6550 *
6551 * We also need vupdate irq for the actual core vblank handling
6552 * at end of vblank.
6553 */
6554 dm_set_vupdate_irq(new_state->base.crtc, true);
6555 drm_crtc_vblank_get(new_state->base.crtc);
6556 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
6557 __func__, new_state->base.crtc->base.id);
6558 } else if (old_vrr_active && !new_vrr_active) {
6559 /* Transition VRR active -> inactive:
6560 * Allow vblank irq disable again for fixed refresh rate.
6561 */
6562 dm_set_vupdate_irq(new_state->base.crtc, false);
6563 drm_crtc_vblank_put(new_state->base.crtc);
6564 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
6565 __func__, new_state->base.crtc->base.id);
6566 }
6567 }
6568
6569 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
6570 {
6571 struct drm_plane *plane;
6572 struct drm_plane_state *old_plane_state, *new_plane_state;
6573 int i;
6574
6575 /*
6576 * TODO: Make this per-stream so we don't issue redundant updates for
6577 * commits with multiple streams.
6578 */
6579 for_each_oldnew_plane_in_state(state, plane, old_plane_state,
6580 new_plane_state, i)
6581 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6582 handle_cursor_update(plane, old_plane_state);
6583 }
6584
6585 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
6586 struct dc_state *dc_state,
6587 struct drm_device *dev,
6588 struct amdgpu_display_manager *dm,
6589 struct drm_crtc *pcrtc,
6590 bool wait_for_vblank)
6591 {
6592 uint32_t i;
6593 uint64_t timestamp_ns;
6594 struct drm_plane *plane;
6595 struct drm_plane_state *old_plane_state, *new_plane_state;
6596 struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
6597 struct drm_crtc_state *new_pcrtc_state =
6598 drm_atomic_get_new_crtc_state(state, pcrtc);
6599 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
6600 struct dm_crtc_state *dm_old_crtc_state =
6601 to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
6602 int planes_count = 0, vpos, hpos;
6603 long r;
6604 unsigned long flags;
6605 struct amdgpu_bo *abo;
6606 uint64_t tiling_flags;
6607 bool tmz_surface = false;
6608 uint32_t target_vblank, last_flip_vblank;
6609 bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
6610 bool pflip_present = false;
6611 struct {
6612 struct dc_surface_update surface_updates[MAX_SURFACES];
6613 struct dc_plane_info plane_infos[MAX_SURFACES];
6614 struct dc_scaling_info scaling_infos[MAX_SURFACES];
6615 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
6616 struct dc_stream_update stream_update;
6617 } *bundle;
6618
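/* The bundle holds several MAX_SURFACES-sized arrays, so keep it off the stack. */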
6619 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
6620
6621 if (!bundle) {
6622 dm_error("Failed to allocate update bundle\n");
6623 goto cleanup;
6624 }
6625
6626 /*
6627 * Disable the cursor first if we're disabling all the planes.
6628 * It'll remain on the screen after the planes are re-enabled
6629 * if we don't.
6630 */
6631 if (acrtc_state->active_planes == 0)
6632 amdgpu_dm_commit_cursors(state);
6633
6634 /* update planes when needed */
6635 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
6636 struct drm_crtc *crtc = new_plane_state->crtc;
6637 struct drm_crtc_state *new_crtc_state;
6638 struct drm_framebuffer *fb = new_plane_state->fb;
6639 bool plane_needs_flip;
6640 struct dc_plane_state *dc_plane;
6641 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
6642
6643 /* Cursor plane is handled after stream updates */
6644 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6645 continue;
6646
6647 if (!fb || !crtc || pcrtc != crtc)
6648 continue;
6649
6650 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
6651 if (!new_crtc_state->active)
6652 continue;
6653
6654 dc_plane = dm_new_plane_state->dc_state;
6655
6656 bundle->surface_updates[planes_count].surface = dc_plane;
6657 if (new_pcrtc_state->color_mgmt_changed) {
6658 bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
6659 bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
6660 bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
6661 }
6662
6663 fill_dc_scaling_info(new_plane_state,
6664 &bundle->scaling_infos[planes_count]);
6665
6666 bundle->surface_updates[planes_count].scaling_info =
6667 &bundle->scaling_infos[planes_count];
6668
6669 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
6670
6671 pflip_present = pflip_present || plane_needs_flip;
6672
6673 if (!plane_needs_flip) {
6674 planes_count += 1;
6675 continue;
6676 }
6677
6678 abo = gem_to_amdgpu_bo(fb->obj[0]);
6679
6680 /*
6681 		 * Wait for all fences on this FB. Do a limited wait to avoid
6682 		 * deadlock during GPU reset, when this fence will not signal
6683 		 * but we hold the reservation lock for the BO.
6684 */
6685 r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
6686 false,
6687 msecs_to_jiffies(5000));
6688 if (unlikely(r <= 0))
6689 DRM_ERROR("Waiting for fences timed out!");
6690
6691 /*
6692 		 * TODO: This might fail and is therefore better not used; wait
6693 		 * explicitly on fences instead. In general this should only
6694 		 * be called for a blocking commit,
6695 		 * as per the framework helpers.
6696 */
6697 r = amdgpu_bo_reserve(abo, true);
6698 if (unlikely(r != 0))
6699 DRM_ERROR("failed to reserve buffer before flip\n");
6700
6701 amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
6702
6703 tmz_surface = amdgpu_bo_encrypted(abo);
6704
6705 amdgpu_bo_unreserve(abo);
6706
6707 fill_dc_plane_info_and_addr(
6708 dm->adev, new_plane_state, tiling_flags,
6709 &bundle->plane_infos[planes_count],
6710 &bundle->flip_addrs[planes_count].address,
6711 tmz_surface,
6712 false);
6713
6714 DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
6715 new_plane_state->plane->index,
6716 bundle->plane_infos[planes_count].dcc.enable);
6717
6718 bundle->surface_updates[planes_count].plane_info =
6719 &bundle->plane_infos[planes_count];
6720
6721 /*
6722 * Only allow immediate flips for fast updates that don't
6723 		 * change FB pitch, DCC state, rotation or mirroring.
6724 */
6725 bundle->flip_addrs[planes_count].flip_immediate =
6726 crtc->state->async_flip &&
6727 acrtc_state->update_type == UPDATE_TYPE_FAST;
6728
6729 timestamp_ns = ktime_get_ns();
6730 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
6731 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
6732 bundle->surface_updates[planes_count].surface = dc_plane;
6733
6734 if (!bundle->surface_updates[planes_count].surface) {
6735 DRM_ERROR("No surface for CRTC: id=%d\n",
6736 acrtc_attach->crtc_id);
6737 continue;
6738 }
6739
6740 if (plane == pcrtc->primary)
6741 update_freesync_state_on_stream(
6742 dm,
6743 acrtc_state,
6744 acrtc_state->stream,
6745 dc_plane,
6746 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
6747
6748 DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
6749 __func__,
6750 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
6751 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
6752
6753 planes_count += 1;
6754
6755 }
6756
6757 if (pflip_present) {
6758 if (!vrr_active) {
6759 /* Use old throttling in non-vrr fixed refresh rate mode
6760 * to keep flip scheduling based on target vblank counts
6761 * working in a backwards compatible way, e.g., for
6762 * clients using the GLX_OML_sync_control extension or
6763 * DRI3/Present extension with defined target_msc.
6764 */
6765 last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
6766 }
6767 else {
6768 /* For variable refresh rate mode only:
6769 * Get vblank of last completed flip to avoid > 1 vrr
6770 * flips per video frame by use of throttling, but allow
6771 * flip programming anywhere in the possibly large
6772 * variable vrr vblank interval for fine-grained flip
6773 * timing control and more opportunity to avoid stutter
6774 * on late submission of flips.
6775 */
6776 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
6777 last_flip_vblank = acrtc_attach->last_flip_vblank;
6778 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
6779 }
6780
6781 target_vblank = last_flip_vblank + wait_for_vblank;
6782
6783 /*
6784 * Wait until we're out of the vertical blank period before the one
6785 * targeted by the flip
6786 */
6787 while ((acrtc_attach->enabled &&
6788 (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
6789 0, &vpos, &hpos, NULL,
6790 NULL, &pcrtc->hwmode)
6791 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
6792 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
6793 (int)(target_vblank -
6794 amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
6795 usleep_range(1000, 1100);
6796 }
6797
6798 if (acrtc_attach->base.state->event) {
6799 drm_crtc_vblank_get(pcrtc);
6800
6801 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
6802
6803 WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
6804 prepare_flip_isr(acrtc_attach);
6805
6806 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
6807 }
6808
6809 if (acrtc_state->stream) {
6810 if (acrtc_state->freesync_vrr_info_changed)
6811 bundle->stream_update.vrr_infopacket =
6812 &acrtc_state->stream->vrr_infopacket;
6813 }
6814 }
6815
6816 /* Update the planes if changed or disable if we don't have any. */
6817 if ((planes_count || acrtc_state->active_planes == 0) &&
6818 acrtc_state->stream) {
6819 bundle->stream_update.stream = acrtc_state->stream;
6820 if (new_pcrtc_state->mode_changed) {
6821 bundle->stream_update.src = acrtc_state->stream->src;
6822 bundle->stream_update.dst = acrtc_state->stream->dst;
6823 }
6824
6825 if (new_pcrtc_state->color_mgmt_changed) {
6826 /*
6827 * TODO: This isn't fully correct since we've actually
6828 * already modified the stream in place.
6829 */
6830 bundle->stream_update.gamut_remap =
6831 &acrtc_state->stream->gamut_remap_matrix;
6832 bundle->stream_update.output_csc_transform =
6833 &acrtc_state->stream->csc_color_matrix;
6834 bundle->stream_update.out_transfer_func =
6835 acrtc_state->stream->out_transfer_func;
6836 }
6837
6838 acrtc_state->stream->abm_level = acrtc_state->abm_level;
6839 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
6840 bundle->stream_update.abm_level = &acrtc_state->abm_level;
6841
6842 /*
6843 * If FreeSync state on the stream has changed then we need to
6844 * re-adjust the min/max bounds now that DC doesn't handle this
6845 * as part of commit.
6846 */
6847 if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
6848 amdgpu_dm_vrr_active(acrtc_state)) {
6849 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
6850 dc_stream_adjust_vmin_vmax(
6851 dm->dc, acrtc_state->stream,
6852 &acrtc_state->vrr_params.adjust);
6853 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
6854 }
6855 mutex_lock(&dm->dc_lock);
6856 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
6857 acrtc_state->stream->link->psr_settings.psr_allow_active)
6858 amdgpu_dm_psr_disable(acrtc_state->stream);
6859
6860 dc_commit_updates_for_stream(dm->dc,
6861 bundle->surface_updates,
6862 planes_count,
6863 acrtc_state->stream,
6864 &bundle->stream_update,
6865 dc_state);
6866
6867 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
6868 acrtc_state->stream->link->psr_settings.psr_version != PSR_VERSION_UNSUPPORTED &&
6869 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
6870 amdgpu_dm_link_setup_psr(acrtc_state->stream);
6871 else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
6872 acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
6873 !acrtc_state->stream->link->psr_settings.psr_allow_active) {
6874 amdgpu_dm_psr_enable(acrtc_state->stream);
6875 }
6876
6877 mutex_unlock(&dm->dc_lock);
6878 }
6879
6880 /*
6881 * Update cursor state *after* programming all the planes.
6882 * This avoids redundant programming in the case where we're going
6883 * to be disabling a single plane - those pipes are being disabled.
6884 */
6885 if (acrtc_state->active_planes)
6886 amdgpu_dm_commit_cursors(state);
6887
6888 cleanup:
6889 kfree(bundle);
6890 }
6891
6892 static void amdgpu_dm_commit_audio(struct drm_device *dev,
6893 struct drm_atomic_state *state)
6894 {
6895 struct amdgpu_device *adev = dev->dev_private;
6896 struct amdgpu_dm_connector *aconnector;
6897 struct drm_connector *connector;
6898 struct drm_connector_state *old_con_state, *new_con_state;
6899 struct drm_crtc_state *new_crtc_state;
6900 struct dm_crtc_state *new_dm_crtc_state;
6901 const struct dc_stream_status *status;
6902 int i, inst;
6903
6904 	/* Notify audio device removals. */
6905 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
6906 if (old_con_state->crtc != new_con_state->crtc) {
6907 /* CRTC changes require notification. */
6908 goto notify;
6909 }
6910
6911 if (!new_con_state->crtc)
6912 continue;
6913
6914 new_crtc_state = drm_atomic_get_new_crtc_state(
6915 state, new_con_state->crtc);
6916
6917 if (!new_crtc_state)
6918 continue;
6919
6920 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
6921 continue;
6922
6923 notify:
6924 aconnector = to_amdgpu_dm_connector(connector);
6925
6926 mutex_lock(&adev->dm.audio_lock);
6927 inst = aconnector->audio_inst;
6928 aconnector->audio_inst = -1;
6929 mutex_unlock(&adev->dm.audio_lock);
6930
6931 amdgpu_dm_audio_eld_notify(adev, inst);
6932 }
6933
6934 /* Notify audio device additions. */
6935 for_each_new_connector_in_state(state, connector, new_con_state, i) {
6936 if (!new_con_state->crtc)
6937 continue;
6938
6939 new_crtc_state = drm_atomic_get_new_crtc_state(
6940 state, new_con_state->crtc);
6941
6942 if (!new_crtc_state)
6943 continue;
6944
6945 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
6946 continue;
6947
6948 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
6949 if (!new_dm_crtc_state->stream)
6950 continue;
6951
6952 status = dc_stream_get_status(new_dm_crtc_state->stream);
6953 if (!status)
6954 continue;
6955
6956 aconnector = to_amdgpu_dm_connector(connector);
6957
6958 mutex_lock(&adev->dm.audio_lock);
6959 inst = status->audio_inst;
6960 aconnector->audio_inst = inst;
6961 mutex_unlock(&adev->dm.audio_lock);
6962
6963 amdgpu_dm_audio_eld_notify(adev, inst);
6964 }
6965 }
6966
6967 /*
6968 * Enable interrupts on CRTCs that are newly active, undergone
6969 * a modeset, or have active planes again.
6970 *
6971 * Done in two passes, based on the for_modeset flag:
6972 * Pass 1: For CRTCs going through modeset
6973 * Pass 2: For CRTCs going from 0 to n active planes
6974 *
6975 * Interrupts can only be enabled after the planes are programmed,
6976 * so this requires a two-pass approach since we don't want to
6977 * just defer the interrupts until after commit planes every time.
6978 */
6979 static void amdgpu_dm_enable_crtc_interrupts(struct drm_device *dev,
6980 struct drm_atomic_state *state,
6981 bool for_modeset)
6982 {
6983 struct amdgpu_device *adev = dev->dev_private;
6984 struct drm_crtc *crtc;
6985 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
6986 int i;
6987 #ifdef CONFIG_DEBUG_FS
6988 enum amdgpu_dm_pipe_crc_source source;
6989 #endif
6990
6991 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
6992 new_crtc_state, i) {
6993 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6994 struct dm_crtc_state *dm_new_crtc_state =
6995 to_dm_crtc_state(new_crtc_state);
6996 struct dm_crtc_state *dm_old_crtc_state =
6997 to_dm_crtc_state(old_crtc_state);
6998 bool modeset = drm_atomic_crtc_needs_modeset(new_crtc_state);
6999 bool run_pass;
7000
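/*
 * Pass 1 (for_modeset) handles CRTCs undergoing a modeset; pass 2 handles
 * CRTCs that kept their mode but previously had interrupts disabled.
 */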
7001 run_pass = (for_modeset && modeset) ||
7002 (!for_modeset && !modeset &&
7003 !dm_old_crtc_state->interrupts_enabled);
7004
7005 if (!run_pass)
7006 continue;
7007
7008 if (!dm_new_crtc_state->interrupts_enabled)
7009 continue;
7010
7011 manage_dm_interrupts(adev, acrtc, true);
7012
7013 #ifdef CONFIG_DEBUG_FS
7014 		/* The stream has changed so CRC capture needs to be re-enabled. */
7015 source = dm_new_crtc_state->crc_src;
7016 if (amdgpu_dm_is_valid_crc_source(source)) {
7017 amdgpu_dm_crtc_configure_crc_source(
7018 crtc, dm_new_crtc_state,
7019 dm_new_crtc_state->crc_src);
7020 }
7021 #endif
7022 }
7023 }
7024
7025 /*
7026 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
7027 * @crtc_state: the DRM CRTC state
7028 * @stream_state: the DC stream state.
7029 *
7030  * Copy the mirrored transient state flags from DRM to DC. It is used to bring
7031 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
7032 */
7033 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
7034 struct dc_stream_state *stream_state)
7035 {
7036 stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
7037 }
7038
7039 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
7040 struct drm_atomic_state *state,
7041 bool nonblock)
7042 {
7043 struct drm_crtc *crtc;
7044 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7045 struct amdgpu_device *adev = dev->dev_private;
7046 int i;
7047
7048 /*
7049 	 * We disable vblank and pflip interrupts on CRTCs that are undergoing
7050 	 * a modeset, being disabled, or that have no active planes.
7051 	 *
7052 	 * It's done in atomic commit rather than commit tail for now since
7053 	 * some of these interrupt handlers access the current CRTC state and
7054 	 * potentially the stream pointer itself.
7055 	 *
7056 	 * Since the atomic state is swapped within atomic commit and not within
7057 	 * commit tail, this would lead to the new state (that hasn't been
7058 	 * committed yet) being accessed from within the handlers.
7059 *
7060 * TODO: Fix this so we can do this in commit tail and not have to block
7061 * in atomic check.
7062 */
7063 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7064 struct dm_crtc_state *dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7065 struct dm_crtc_state *dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7066 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7067
7068 if (dm_old_crtc_state->interrupts_enabled &&
7069 (!dm_new_crtc_state->interrupts_enabled ||
7070 drm_atomic_crtc_needs_modeset(new_crtc_state)))
7071 manage_dm_interrupts(adev, acrtc, false);
7072 }
7073 /*
7074 	 * Add a check here for SoCs that support a hardware cursor plane, to
7075 	 * unset legacy_cursor_update.
7076 */
7077
7078 return drm_atomic_helper_commit(dev, state, nonblock);
7079
7080 	/* TODO: Handle EINTR, re-enable IRQ */
7081 }
7082
7083 /**
7084 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
7085 * @state: The atomic state to commit
7086 *
7087 * This will tell DC to commit the constructed DC state from atomic_check,
7088  * programming the hardware. Any failure here implies a hardware failure, since
7089  * atomic check should have filtered out anything non-kosher.
7090 */
7091 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
7092 {
7093 struct drm_device *dev = state->dev;
7094 struct amdgpu_device *adev = dev->dev_private;
7095 struct amdgpu_display_manager *dm = &adev->dm;
7096 struct dm_atomic_state *dm_state;
7097 struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
7098 uint32_t i, j;
7099 struct drm_crtc *crtc;
7100 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7101 unsigned long flags;
7102 bool wait_for_vblank = true;
7103 struct drm_connector *connector;
7104 struct drm_connector_state *old_con_state, *new_con_state;
7105 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
7106 int crtc_disable_count = 0;
7107
7108 drm_atomic_helper_update_legacy_modeset_state(dev, state);
7109
7110 dm_state = dm_atomic_get_new_state(state);
7111 if (dm_state && dm_state->context) {
7112 dc_state = dm_state->context;
7113 } else {
7114 /* No state changes, retain current state. */
7115 dc_state_temp = dc_create_state(dm->dc);
7116 ASSERT(dc_state_temp);
7117 dc_state = dc_state_temp;
7118 dc_resource_state_copy_construct_current(dm->dc, dc_state);
7119 }
7120
7121 /* update changed items */
7122 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7123 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7124
7125 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7126 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7127
7128 DRM_DEBUG_DRIVER(
7129 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
7130 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
7131 "connectors_changed:%d\n",
7132 acrtc->crtc_id,
7133 new_crtc_state->enable,
7134 new_crtc_state->active,
7135 new_crtc_state->planes_changed,
7136 new_crtc_state->mode_changed,
7137 new_crtc_state->active_changed,
7138 new_crtc_state->connectors_changed);
7139
7140 /* Copy all transient state flags into dc state */
7141 if (dm_new_crtc_state->stream) {
7142 amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
7143 dm_new_crtc_state->stream);
7144 }
7145
7146 /* handles headless hotplug case, updating new_state and
7147 * aconnector as needed
7148 */
7149
7150 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
7151
7152 DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
7153
7154 if (!dm_new_crtc_state->stream) {
7155 /*
7156 				 * This could happen because of issues with the
7157 				 * delivery of userspace notifications.
7158 				 * In that case userspace tries to set a mode on a
7159 				 * display which is in fact disconnected, and
7160 				 * dc_sink is NULL on the aconnector.
7161 				 * We expect a mode reset to come soon.
7162 				 *
7163 				 * This can also happen when an unplug occurs
7164 				 * during the resume sequence.
7165 				 *
7166 				 * In this case, we want to pretend we still
7167 				 * have a sink to keep the pipe running so that
7168 				 * hw state is consistent with the sw state.
7169 */
7170 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
7171 __func__, acrtc->base.base.id);
7172 continue;
7173 }
7174
7175 if (dm_old_crtc_state->stream)
7176 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7177
7178 pm_runtime_get_noresume(dev->dev);
7179
7180 acrtc->enabled = true;
7181 acrtc->hw_mode = new_crtc_state->mode;
7182 crtc->hwmode = new_crtc_state->mode;
7183 } else if (modereset_required(new_crtc_state)) {
7184 DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
7185 /* i.e. reset mode */
7186 if (dm_old_crtc_state->stream) {
7187 if (dm_old_crtc_state->stream->link->psr_settings.psr_allow_active)
7188 amdgpu_dm_psr_disable(dm_old_crtc_state->stream);
7189
7190 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7191 }
7192 }
7193 } /* for_each_crtc_in_state() */
7194
7195 if (dc_state) {
7196 dm_enable_per_frame_crtc_master_sync(dc_state);
7197 mutex_lock(&dm->dc_lock);
7198 WARN_ON(!dc_commit_state(dm->dc, dc_state));
7199 mutex_unlock(&dm->dc_lock);
7200 }
7201
7202 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7203 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7204
7205 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7206
7207 if (dm_new_crtc_state->stream != NULL) {
7208 const struct dc_stream_status *status =
7209 dc_stream_get_status(dm_new_crtc_state->stream);
7210
7211 if (!status)
7212 status = dc_stream_get_status_from_state(dc_state,
7213 dm_new_crtc_state->stream);
7214
7215 if (!status)
7216 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
7217 else
7218 acrtc->otg_inst = status->primary_otg_inst;
7219 }
7220 }
7221 #ifdef CONFIG_DRM_AMD_DC_HDCP
7222 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7223 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7224 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7225 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7226
7227 new_crtc_state = NULL;
7228
7229 if (acrtc)
7230 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7231
7232 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7233
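/*
 * If the CRTC backing this connector lost its stream while HDCP was
 * still ENABLED, tear down HDCP for the link and fall back to DESIRED
 * so it can be re-enabled on the next successful modeset.
 */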
7234 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
7235 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
7236 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
7237 new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7238 continue;
7239 }
7240
7241 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
7242 hdcp_update_display(
7243 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
7244 new_con_state->hdcp_content_type,
7245 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? true
7246 : false);
7247 }
7248 #endif
7249
7250 /* Handle connector state changes */
7251 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7252 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7253 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
7254 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7255 struct dc_surface_update dummy_updates[MAX_SURFACES];
7256 struct dc_stream_update stream_update;
7257 struct dc_info_packet hdr_packet;
7258 struct dc_stream_status *status = NULL;
7259 bool abm_changed, hdr_changed, scaling_changed;
7260
7261 memset(&dummy_updates, 0, sizeof(dummy_updates));
7262 memset(&stream_update, 0, sizeof(stream_update));
7263
7264 if (acrtc) {
7265 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7266 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
7267 }
7268
7269 /* Skip any modesets/resets */
7270 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
7271 continue;
7272
7273 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7274 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7275
7276 scaling_changed = is_scaling_state_different(dm_new_con_state,
7277 dm_old_con_state);
7278
7279 abm_changed = dm_new_crtc_state->abm_level !=
7280 dm_old_crtc_state->abm_level;
7281
7282 hdr_changed =
7283 is_hdr_metadata_different(old_con_state, new_con_state);
7284
7285 if (!scaling_changed && !abm_changed && !hdr_changed)
7286 continue;
7287
7288 stream_update.stream = dm_new_crtc_state->stream;
7289 if (scaling_changed) {
7290 update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
7291 dm_new_con_state, dm_new_crtc_state->stream);
7292
7293 stream_update.src = dm_new_crtc_state->stream->src;
7294 stream_update.dst = dm_new_crtc_state->stream->dst;
7295 }
7296
7297 if (abm_changed) {
7298 dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
7299
7300 stream_update.abm_level = &dm_new_crtc_state->abm_level;
7301 }
7302
7303 if (hdr_changed) {
7304 fill_hdr_info_packet(new_con_state, &hdr_packet);
7305 stream_update.hdr_static_metadata = &hdr_packet;
7306 }
7307
7308 status = dc_stream_get_status(dm_new_crtc_state->stream);
7309 WARN_ON(!status);
7310 WARN_ON(!status->plane_count);
7311
7312 /*
7313 * TODO: DC refuses to perform stream updates without a dc_surface_update.
7314 * Here we create an empty update on each plane.
7315 * To fix this, DC should permit updating only stream properties.
7316 */
7317 for (j = 0; j < status->plane_count; j++)
7318 dummy_updates[j].surface = status->plane_states[0];
7319
7320
7321 mutex_lock(&dm->dc_lock);
7322 dc_commit_updates_for_stream(dm->dc,
7323 dummy_updates,
7324 status->plane_count,
7325 dm_new_crtc_state->stream,
7326 &stream_update,
7327 dc_state);
7328 mutex_unlock(&dm->dc_lock);
7329 }
7330
7331 /* Count number of newly disabled CRTCs for dropping PM refs later. */
7332 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
7333 new_crtc_state, i) {
7334 if (old_crtc_state->active && !new_crtc_state->active)
7335 crtc_disable_count++;
7336
7337 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7338 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7339
7340 /* Update freesync active state. */
7341 pre_update_freesync_state_on_stream(dm, dm_new_crtc_state);
7342
7343 /* Handle vrr on->off / off->on transitions */
7344 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
7345 dm_new_crtc_state);
7346 }
7347
7348 /* Enable interrupts for CRTCs going through a modeset. */
7349 amdgpu_dm_enable_crtc_interrupts(dev, state, true);
7350
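/*
 * If any CRTC in this commit requested an async page flip, do not block
 * the commit on vblank/flip completion below.
 */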
7351 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
7352 if (new_crtc_state->async_flip)
7353 wait_for_vblank = false;
7354
7355 /* Update planes when needed, per CRTC */
7356 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
7357 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7358
7359 if (dm_new_crtc_state->stream)
7360 amdgpu_dm_commit_planes(state, dc_state, dev,
7361 dm, crtc, wait_for_vblank);
7362 }
7363
7364 /* Enable interrupts for CRTCs going from 0 to n active planes. */
7365 amdgpu_dm_enable_crtc_interrupts(dev, state, false);
7366
7367 /* Update audio instances for each connector. */
7368 amdgpu_dm_commit_audio(dev, state);
7369
7370 /*
7371 * Send a vblank event for every CRTC event not handled in the flip path,
7372 * and mark the event as consumed for drm_atomic_helper_commit_hw_done().
7373 */
7374 spin_lock_irqsave(&adev->ddev->event_lock, flags);
7375 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7376
7377 if (new_crtc_state->event)
7378 drm_send_event_locked(dev, &new_crtc_state->event->base);
7379
7380 new_crtc_state->event = NULL;
7381 }
7382 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
7383
7384 /* Signal HW programming completion */
7385 drm_atomic_helper_commit_hw_done(state);
7386
7387 if (wait_for_vblank)
7388 drm_atomic_helper_wait_for_flip_done(dev, state);
7389
7390 drm_atomic_helper_cleanup_planes(dev, state);
7391
7392 /*
7393 * Finally, drop a runtime PM reference for each newly disabled CRTC,
7394 * so we can put the GPU into runtime suspend if we're not driving any
7395 * displays anymore
7396 */
7397 for (i = 0; i < crtc_disable_count; i++)
7398 pm_runtime_put_autosuspend(dev->dev);
7399 pm_runtime_mark_last_busy(dev->dev);
7400
7401 if (dc_state_temp)
7402 dc_release_state(dc_state_temp);
7403 }
7404
7405
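/*
 * Build and commit a minimal atomic state (connector + CRTC + primary plane)
 * that forces a modeset on the given connector, restoring the previous
 * display configuration without waiting for userspace.
 */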
7406 static int dm_force_atomic_commit(struct drm_connector *connector)
7407 {
7408 int ret = 0;
7409 struct drm_device *ddev = connector->dev;
7410 struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
7411 struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7412 struct drm_plane *plane = disconnected_acrtc->base.primary;
7413 struct drm_connector_state *conn_state;
7414 struct drm_crtc_state *crtc_state;
7415 struct drm_plane_state *plane_state;
7416
7417 if (!state)
7418 return -ENOMEM;
7419
7420 state->acquire_ctx = ddev->mode_config.acquire_ctx;
7421
7422 /* Construct an atomic state to restore previous display setting */
7423
7424 /*
7425 * Attach connectors to drm_atomic_state
7426 */
7427 conn_state = drm_atomic_get_connector_state(state, connector);
7428
7429 ret = PTR_ERR_OR_ZERO(conn_state);
7430 if (ret)
7431 goto err;
7432
7433 /* Attach crtc to drm_atomic_state*/
7434 crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
7435
7436 ret = PTR_ERR_OR_ZERO(crtc_state);
7437 if (ret)
7438 goto err;
7439
7440 /* force a restore */
7441 crtc_state->mode_changed = true;
7442
7443 /* Attach plane to drm_atomic_state */
7444 plane_state = drm_atomic_get_plane_state(state, plane);
7445
7446 ret = PTR_ERR_OR_ZERO(plane_state);
7447 if (ret)
7448 goto err;
7449
7450
7451 /* Call commit internally with the state we just constructed */
7452 ret = drm_atomic_commit(state);
7453 if (!ret)
7454 return 0;
7455
7456 err:
7457 DRM_ERROR("Restoring old state failed with %i\n", ret);
7458 drm_atomic_state_put(state);
7459
7460 return ret;
7461 }
7462
7463 /*
7464 * This function handles all cases when set mode does not come upon hotplug.
7465 * This includes when a display is unplugged then plugged back into the
7466 * same port and when running without usermode desktop manager support.
7467 */
7468 void dm_restore_drm_connector_state(struct drm_device *dev,
7469 struct drm_connector *connector)
7470 {
7471 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7472 struct amdgpu_crtc *disconnected_acrtc;
7473 struct dm_crtc_state *acrtc_state;
7474
7475 if (!aconnector->dc_sink || !connector->state || !connector->encoder)
7476 return;
7477
7478 disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7479 if (!disconnected_acrtc)
7480 return;
7481
7482 acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
7483 if (!acrtc_state->stream)
7484 return;
7485
7486 /*
7487 * If the previous sink is not released and different from the current,
7488 * we deduce we are in a state where we cannot rely on a usermode call
7489 * to turn on the display, so we do it here
7490 */
7491 if (acrtc_state->stream->sink != aconnector->dc_sink)
7492 dm_force_atomic_commit(&aconnector->base);
7493 }
7494
7495 /*
7496 * Grabs all modesetting locks to serialize against any blocking commits,
7497 * and waits for completion of all non-blocking commits.
7498 */
7499 static int do_aquire_global_lock(struct drm_device *dev,
7500 struct drm_atomic_state *state)
7501 {
7502 struct drm_crtc *crtc;
7503 struct drm_crtc_commit *commit;
7504 long ret;
7505
7506 /*
7507 * Adding all modeset locks to acquire_ctx will
7508 * ensure that when the framework releases it, the
7509 * extra locks we are locking here will get released too
7510 */
7511 ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
7512 if (ret)
7513 return ret;
7514
7515 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
7516 spin_lock(&crtc->commit_lock);
7517 commit = list_first_entry_or_null(&crtc->commit_list,
7518 struct drm_crtc_commit, commit_entry);
7519 if (commit)
7520 drm_crtc_commit_get(commit);
7521 spin_unlock(&crtc->commit_lock);
7522
7523 if (!commit)
7524 continue;
7525
7526 /*
7527 * Make sure all pending HW programming has completed and
7528 * all page flips are done
7529 */
7530 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
7531
7532 if (ret > 0)
7533 ret = wait_for_completion_interruptible_timeout(
7534 &commit->flip_done, 10*HZ);
7535
7536 if (ret == 0)
7537 DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
7538 "timed out\n", crtc->base.id, crtc->name);
7539
7540 drm_crtc_commit_put(commit);
7541 }
7542
7543 return ret < 0 ? ret : 0;
7544 }
7545
7546 static void get_freesync_config_for_crtc(
7547 struct dm_crtc_state *new_crtc_state,
7548 struct dm_connector_state *new_con_state)
7549 {
7550 struct mod_freesync_config config = {0};
7551 struct amdgpu_dm_connector *aconnector =
7552 to_amdgpu_dm_connector(new_con_state->base.connector);
7553 struct drm_display_mode *mode = &new_crtc_state->base.mode;
7554 int vrefresh = drm_mode_vrefresh(mode);
7555
7556 new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
7557 vrefresh >= aconnector->min_vfreq &&
7558 vrefresh <= aconnector->max_vfreq;
7559
7560 if (new_crtc_state->vrr_supported) {
7561 new_crtc_state->stream->ignore_msa_timing_param = true;
7562 config.state = new_crtc_state->base.vrr_enabled ?
7563 VRR_STATE_ACTIVE_VARIABLE :
7564 VRR_STATE_INACTIVE;
7565 config.min_refresh_in_uhz =
7566 aconnector->min_vfreq * 1000000;
7567 config.max_refresh_in_uhz =
7568 aconnector->max_vfreq * 1000000;
7569 config.vsif_supported = true;
7570 config.btr = true;
7571 }
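/*
 * Note: the freesync module takes refresh rates in micro-Hertz, hence the
 * 1000000 scaling above; e.g. a hypothetical 48-144 Hz panel would program
 * min = 48000000 uHz and max = 144000000 uHz.
 */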
7572
7573 new_crtc_state->freesync_config = config;
7574 }
7575
7576 static void reset_freesync_config_for_crtc(
7577 struct dm_crtc_state *new_crtc_state)
7578 {
7579 new_crtc_state->vrr_supported = false;
7580
7581 memset(&new_crtc_state->vrr_params, 0,
7582 sizeof(new_crtc_state->vrr_params));
7583 memset(&new_crtc_state->vrr_infopacket, 0,
7584 sizeof(new_crtc_state->vrr_infopacket));
7585 }
7586
7587 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
7588 struct drm_atomic_state *state,
7589 struct drm_crtc *crtc,
7590 struct drm_crtc_state *old_crtc_state,
7591 struct drm_crtc_state *new_crtc_state,
7592 bool enable,
7593 bool *lock_and_validation_needed)
7594 {
7595 struct dm_atomic_state *dm_state = NULL;
7596 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
7597 struct dc_stream_state *new_stream;
7598 int ret = 0;
7599
7600 /*
7601 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
7602 * update changed items
7603 */
7604 struct amdgpu_crtc *acrtc = NULL;
7605 struct amdgpu_dm_connector *aconnector = NULL;
7606 struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
7607 struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
7608
7609 new_stream = NULL;
7610
7611 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7612 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7613 acrtc = to_amdgpu_crtc(crtc);
7614 aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
7615
7616 /* TODO This hack should go away */
7617 if (aconnector && enable) {
7618 /* Make sure fake sink is created in plug-in scenario */
7619 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
7620 &aconnector->base);
7621 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
7622 &aconnector->base);
7623
7624 if (IS_ERR(drm_new_conn_state)) {
7625 ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
7626 goto fail;
7627 }
7628
7629 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
7630 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
7631
7632 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7633 goto skip_modeset;
7634
7635 new_stream = create_stream_for_sink(aconnector,
7636 &new_crtc_state->mode,
7637 dm_new_conn_state,
7638 dm_old_crtc_state->stream);
7639
7640 /*
7641 * We can have no stream on ACTION_SET if a display
7642 * was disconnected during S3; in this case it is not an
7643 * error, the OS will be updated after detection and
7644 * will do the right thing on the next atomic commit.
7645 */
7646
7647 if (!new_stream) {
7648 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
7649 __func__, acrtc->base.base.id);
7650 ret = -ENOMEM;
7651 goto fail;
7652 }
7653
7654 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
7655
7656 ret = fill_hdr_info_packet(drm_new_conn_state,
7657 &new_stream->hdr_static_metadata);
7658 if (ret)
7659 goto fail;
7660
7661 /*
7662 * If we already removed the old stream from the context
7663 * (and set the new stream to NULL) then we can't reuse
7664 * the old stream even if the stream and scaling are unchanged.
7665 * We'll hit the BUG_ON and black screen.
7666 *
7667 * TODO: Refactor this function to allow this check to work
7668 * in all conditions.
7669 */
7670 if (dm_new_crtc_state->stream &&
7671 dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
7672 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
7673 new_crtc_state->mode_changed = false;
7674 DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
7675 new_crtc_state->mode_changed);
7676 }
7677 }
7678
7679 /* mode_changed flag may get updated above, need to check again */
7680 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7681 goto skip_modeset;
7682
7683 DRM_DEBUG_DRIVER(
7684 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
7685 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
7686 "connectors_changed:%d\n",
7687 acrtc->crtc_id,
7688 new_crtc_state->enable,
7689 new_crtc_state->active,
7690 new_crtc_state->planes_changed,
7691 new_crtc_state->mode_changed,
7692 new_crtc_state->active_changed,
7693 new_crtc_state->connectors_changed);
7694
7695 /* Remove stream for any changed/disabled CRTC */
7696 if (!enable) {
7697
7698 if (!dm_old_crtc_state->stream)
7699 goto skip_modeset;
7700
7701 ret = dm_atomic_get_state(state, &dm_state);
7702 if (ret)
7703 goto fail;
7704
7705 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
7706 crtc->base.id);
7707
7708 /* i.e. reset mode */
7709 if (dc_remove_stream_from_ctx(
7710 dm->dc,
7711 dm_state->context,
7712 dm_old_crtc_state->stream) != DC_OK) {
7713 ret = -EINVAL;
7714 goto fail;
7715 }
7716
7717 dc_stream_release(dm_old_crtc_state->stream);
7718 dm_new_crtc_state->stream = NULL;
7719
7720 reset_freesync_config_for_crtc(dm_new_crtc_state);
7721
7722 *lock_and_validation_needed = true;
7723
7724 } else {/* Add stream for any updated/enabled CRTC */
7725 /*
7726 * Quick fix to prevent a NULL pointer dereference on new_stream when
7727 * added MST connectors are not found in the existing crtc_state in chained mode.
7728 * TODO: need to dig out the root cause of this.
7729 */
7730 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
7731 goto skip_modeset;
7732
7733 if (modereset_required(new_crtc_state))
7734 goto skip_modeset;
7735
7736 if (modeset_required(new_crtc_state, new_stream,
7737 dm_old_crtc_state->stream)) {
7738
7739 WARN_ON(dm_new_crtc_state->stream);
7740
7741 ret = dm_atomic_get_state(state, &dm_state);
7742 if (ret)
7743 goto fail;
7744
7745 dm_new_crtc_state->stream = new_stream;
7746
7747 dc_stream_retain(new_stream);
7748
7749 DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
7750 crtc->base.id);
7751
7752 if (dc_add_stream_to_ctx(
7753 dm->dc,
7754 dm_state->context,
7755 dm_new_crtc_state->stream) != DC_OK) {
7756 ret = -EINVAL;
7757 goto fail;
7758 }
7759
7760 *lock_and_validation_needed = true;
7761 }
7762 }
7763
7764 skip_modeset:
7765 /* Release extra reference */
7766 if (new_stream)
7767 dc_stream_release(new_stream);
7768
7769 /*
7770 * We want to do dc stream updates that do not require a
7771 * full modeset below.
7772 */
7773 if (!(enable && aconnector && new_crtc_state->enable &&
7774 new_crtc_state->active))
7775 return 0;
7776 /*
7777 * Given above conditions, the dc state cannot be NULL because:
7778 * 1. We're in the process of enabling CRTCs (just been added
7779 * to the dc context, or already is on the context)
7780 * 2. Has a valid connector attached, and
7781 * 3. Is currently active and enabled.
7782 * => The dc stream state currently exists.
7783 */
7784 BUG_ON(dm_new_crtc_state->stream == NULL);
7785
7786 /* Scaling or underscan settings */
7787 if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
7788 update_stream_scaling_settings(
7789 &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
7790
7791 /* ABM settings */
7792 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
7793
7794 /*
7795 * Color management settings. We also update color properties
7796 * when a modeset is needed, to ensure it gets reprogrammed.
7797 */
7798 if (dm_new_crtc_state->base.color_mgmt_changed ||
7799 drm_atomic_crtc_needs_modeset(new_crtc_state)) {
7800 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
7801 if (ret)
7802 goto fail;
7803 }
7804
7805 /* Update Freesync settings. */
7806 get_freesync_config_for_crtc(dm_new_crtc_state,
7807 dm_new_conn_state);
7808
7809 return ret;
7810
7811 fail:
7812 if (new_stream)
7813 dc_stream_release(new_stream);
7814 return ret;
7815 }
7816
7817 static bool should_reset_plane(struct drm_atomic_state *state,
7818 struct drm_plane *plane,
7819 struct drm_plane_state *old_plane_state,
7820 struct drm_plane_state *new_plane_state)
7821 {
7822 struct drm_plane *other;
7823 struct drm_plane_state *old_other_state, *new_other_state;
7824 struct drm_crtc_state *new_crtc_state;
7825 int i;
7826
7827 /*
7828 * TODO: Remove this hack once the checks below are sufficient
7829 * to determine when we need to reset all the planes on
7830 * the stream.
7831 */
7832 if (state->allow_modeset)
7833 return true;
7834
7835 /* Exit early if we know that we're adding or removing the plane. */
7836 if (old_plane_state->crtc != new_plane_state->crtc)
7837 return true;
7838
7839 /* old crtc == new_crtc == NULL, plane not in context. */
7840 if (!new_plane_state->crtc)
7841 return false;
7842
7843 new_crtc_state =
7844 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
7845
7846 if (!new_crtc_state)
7847 return true;
7848
7849 /* CRTC Degamma changes currently require us to recreate planes. */
7850 if (new_crtc_state->color_mgmt_changed)
7851 return true;
7852
7853 if (drm_atomic_crtc_needs_modeset(new_crtc_state))
7854 return true;
7855
7856 /*
7857 * If there are any new primary or overlay planes being added or
7858 * removed then the z-order can potentially change. To ensure
7859 * correct z-order and pipe acquisition the current DC architecture
7860 * requires us to remove and recreate all existing planes.
7861 *
7862 * TODO: Come up with a more elegant solution for this.
7863 */
7864 for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
7865 if (other->type == DRM_PLANE_TYPE_CURSOR)
7866 continue;
7867
7868 if (old_other_state->crtc != new_plane_state->crtc &&
7869 new_other_state->crtc != new_plane_state->crtc)
7870 continue;
7871
7872 if (old_other_state->crtc != new_other_state->crtc)
7873 return true;
7874
7875 /* TODO: Remove this once we can handle fast format changes. */
7876 if (old_other_state->fb && new_other_state->fb &&
7877 old_other_state->fb->format != new_other_state->fb->format)
7878 return true;
7879 }
7880
7881 return false;
7882 }
7883
7884 static int dm_update_plane_state(struct dc *dc,
7885 struct drm_atomic_state *state,
7886 struct drm_plane *plane,
7887 struct drm_plane_state *old_plane_state,
7888 struct drm_plane_state *new_plane_state,
7889 bool enable,
7890 bool *lock_and_validation_needed)
7891 {
7892
7893 struct dm_atomic_state *dm_state = NULL;
7894 struct drm_crtc *new_plane_crtc, *old_plane_crtc;
7895 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7896 struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
7897 struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
7898 bool needs_reset;
7899 int ret = 0;
7900
7901
7902 new_plane_crtc = new_plane_state->crtc;
7903 old_plane_crtc = old_plane_state->crtc;
7904 dm_new_plane_state = to_dm_plane_state(new_plane_state);
7905 dm_old_plane_state = to_dm_plane_state(old_plane_state);
7906
7907 /* TODO: Implement atomic check for cursor plane */
7908 if (plane->type == DRM_PLANE_TYPE_CURSOR)
7909 return 0;
7910
7911 needs_reset = should_reset_plane(state, plane, old_plane_state,
7912 new_plane_state);
7913
7914 /* Remove any changed/removed planes */
7915 if (!enable) {
7916 if (!needs_reset)
7917 return 0;
7918
7919 if (!old_plane_crtc)
7920 return 0;
7921
7922 old_crtc_state = drm_atomic_get_old_crtc_state(
7923 state, old_plane_crtc);
7924 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7925
7926 if (!dm_old_crtc_state->stream)
7927 return 0;
7928
7929 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
7930 plane->base.id, old_plane_crtc->base.id);
7931
7932 ret = dm_atomic_get_state(state, &dm_state);
7933 if (ret)
7934 return ret;
7935
7936 if (!dc_remove_plane_from_context(
7937 dc,
7938 dm_old_crtc_state->stream,
7939 dm_old_plane_state->dc_state,
7940 dm_state->context)) {
7941
7942 ret = -EINVAL;
7943 return ret;
7944 }
7945
7946
7947 dc_plane_state_release(dm_old_plane_state->dc_state);
7948 dm_new_plane_state->dc_state = NULL;
7949
7950 *lock_and_validation_needed = true;
7951
7952 } else { /* Add new planes */
7953 struct dc_plane_state *dc_new_plane_state;
7954
7955 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
7956 return 0;
7957
7958 if (!new_plane_crtc)
7959 return 0;
7960
7961 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
7962 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7963
7964 if (!dm_new_crtc_state->stream)
7965 return 0;
7966
7967 if (!needs_reset)
7968 return 0;
7969
7970 WARN_ON(dm_new_plane_state->dc_state);
7971
7972 dc_new_plane_state = dc_create_plane_state(dc);
7973 if (!dc_new_plane_state)
7974 return -ENOMEM;
7975
7976 DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
7977 plane->base.id, new_plane_crtc->base.id);
7978
7979 ret = fill_dc_plane_attributes(
7980 new_plane_crtc->dev->dev_private,
7981 dc_new_plane_state,
7982 new_plane_state,
7983 new_crtc_state);
7984 if (ret) {
7985 dc_plane_state_release(dc_new_plane_state);
7986 return ret;
7987 }
7988
7989 ret = dm_atomic_get_state(state, &dm_state);
7990 if (ret) {
7991 dc_plane_state_release(dc_new_plane_state);
7992 return ret;
7993 }
7994
7995 /*
7996 * Any atomic check errors that occur after this will
7997 * not need a release. The plane state will be attached
7998 * to the stream, and therefore part of the atomic
7999 * state. It'll be released when the atomic state is
8000 * cleaned.
8001 */
8002 if (!dc_add_plane_to_context(
8003 dc,
8004 dm_new_crtc_state->stream,
8005 dc_new_plane_state,
8006 dm_state->context)) {
8007
8008 dc_plane_state_release(dc_new_plane_state);
8009 return -EINVAL;
8010 }
8011
8012 dm_new_plane_state->dc_state = dc_new_plane_state;
8013
8014 /* Tell DC to do a full surface update every time there
8015 * is a plane change. Inefficient, but works for now.
8016 */
8017 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
8018
8019 *lock_and_validation_needed = true;
8020 }
8021
8022
8023 return ret;
8024 }
8025
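/*
 * Walk the new atomic state, build a surface update for every plane on each
 * stream and ask DC, via dc_check_update_surfaces_for_stream(), how invasive
 * the commit would be (FAST, MED or FULL). FAST updates can later skip the
 * global lock and full context validation.
 */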
8026 static int
8027 dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
8028 struct drm_atomic_state *state,
8029 enum surface_update_type *out_type)
8030 {
8031 struct dc *dc = dm->dc;
8032 struct dm_atomic_state *dm_state = NULL, *old_dm_state = NULL;
8033 int i, j, num_plane, ret = 0;
8034 struct drm_plane_state *old_plane_state, *new_plane_state;
8035 struct dm_plane_state *new_dm_plane_state, *old_dm_plane_state;
8036 struct drm_crtc *new_plane_crtc;
8037 struct drm_plane *plane;
8038
8039 struct drm_crtc *crtc;
8040 struct drm_crtc_state *new_crtc_state, *old_crtc_state;
8041 struct dm_crtc_state *new_dm_crtc_state, *old_dm_crtc_state;
8042 struct dc_stream_status *status = NULL;
8043 enum surface_update_type update_type = UPDATE_TYPE_FAST;
8044 struct surface_info_bundle {
8045 struct dc_surface_update surface_updates[MAX_SURFACES];
8046 struct dc_plane_info plane_infos[MAX_SURFACES];
8047 struct dc_scaling_info scaling_infos[MAX_SURFACES];
8048 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
8049 struct dc_stream_update stream_update;
8050 } *bundle;
8051
8052 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8053
8054 if (!bundle) {
8055 DRM_ERROR("Failed to allocate update bundle\n");
8056 /* Set type to FULL to avoid crashing in DC */
8057 update_type = UPDATE_TYPE_FULL;
8058 goto cleanup;
8059 }
8060
8061 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8062
8063 memset(bundle, 0, sizeof(struct surface_info_bundle));
8064
8065 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8066 old_dm_crtc_state = to_dm_crtc_state(old_crtc_state);
8067 num_plane = 0;
8068
8069 if (new_dm_crtc_state->stream != old_dm_crtc_state->stream) {
8070 update_type = UPDATE_TYPE_FULL;
8071 goto cleanup;
8072 }
8073
8074 if (!new_dm_crtc_state->stream)
8075 continue;
8076
8077 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, j) {
8078 const struct amdgpu_framebuffer *amdgpu_fb =
8079 to_amdgpu_framebuffer(new_plane_state->fb);
8080 struct dc_plane_info *plane_info = &bundle->plane_infos[num_plane];
8081 struct dc_flip_addrs *flip_addr = &bundle->flip_addrs[num_plane];
8082 struct dc_scaling_info *scaling_info = &bundle->scaling_infos[num_plane];
8083 uint64_t tiling_flags;
8084 bool tmz_surface = false;
8085
8086 new_plane_crtc = new_plane_state->crtc;
8087 new_dm_plane_state = to_dm_plane_state(new_plane_state);
8088 old_dm_plane_state = to_dm_plane_state(old_plane_state);
8089
8090 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8091 continue;
8092
8093 if (new_dm_plane_state->dc_state != old_dm_plane_state->dc_state) {
8094 update_type = UPDATE_TYPE_FULL;
8095 goto cleanup;
8096 }
8097
8098 if (crtc != new_plane_crtc)
8099 continue;
8100
8101 bundle->surface_updates[num_plane].surface =
8102 new_dm_plane_state->dc_state;
8103
8104 if (new_crtc_state->mode_changed) {
8105 bundle->stream_update.dst = new_dm_crtc_state->stream->dst;
8106 bundle->stream_update.src = new_dm_crtc_state->stream->src;
8107 }
8108
8109 if (new_crtc_state->color_mgmt_changed) {
8110 bundle->surface_updates[num_plane].gamma =
8111 new_dm_plane_state->dc_state->gamma_correction;
8112 bundle->surface_updates[num_plane].in_transfer_func =
8113 new_dm_plane_state->dc_state->in_transfer_func;
8114 bundle->surface_updates[num_plane].gamut_remap_matrix =
8115 &new_dm_plane_state->dc_state->gamut_remap_matrix;
8116 bundle->stream_update.gamut_remap =
8117 &new_dm_crtc_state->stream->gamut_remap_matrix;
8118 bundle->stream_update.output_csc_transform =
8119 &new_dm_crtc_state->stream->csc_color_matrix;
8120 bundle->stream_update.out_transfer_func =
8121 new_dm_crtc_state->stream->out_transfer_func;
8122 }
8123
8124 ret = fill_dc_scaling_info(new_plane_state,
8125 scaling_info);
8126 if (ret)
8127 goto cleanup;
8128
8129 bundle->surface_updates[num_plane].scaling_info = scaling_info;
8130
8131 if (amdgpu_fb) {
8132 ret = get_fb_info(amdgpu_fb, &tiling_flags, &tmz_surface);
8133 if (ret)
8134 goto cleanup;
8135
8136 ret = fill_dc_plane_info_and_addr(
8137 dm->adev, new_plane_state, tiling_flags,
8138 plane_info,
8139 &flip_addr->address, tmz_surface,
8140 false);
8141 if (ret)
8142 goto cleanup;
8143
8144 bundle->surface_updates[num_plane].plane_info = plane_info;
8145 bundle->surface_updates[num_plane].flip_addr = flip_addr;
8146 }
8147
8148 num_plane++;
8149 }
8150
8151 if (num_plane == 0)
8152 continue;
8153
8154 ret = dm_atomic_get_state(state, &dm_state);
8155 if (ret)
8156 goto cleanup;
8157
8158 old_dm_state = dm_atomic_get_old_state(state);
8159 if (!old_dm_state) {
8160 ret = -EINVAL;
8161 goto cleanup;
8162 }
8163
8164 status = dc_stream_get_status_from_state(old_dm_state->context,
8165 new_dm_crtc_state->stream);
8166 bundle->stream_update.stream = new_dm_crtc_state->stream;
8167 /*
8168 * TODO: DC modifies the surface during this call so we need
8169 * to lock here - find a way to do this without locking.
8170 */
8171 mutex_lock(&dm->dc_lock);
8172 update_type = dc_check_update_surfaces_for_stream(
8173 dc, bundle->surface_updates, num_plane,
8174 &bundle->stream_update, status);
8175 mutex_unlock(&dm->dc_lock);
8176
8177 if (update_type > UPDATE_TYPE_MED) {
8178 update_type = UPDATE_TYPE_FULL;
8179 goto cleanup;
8180 }
8181 }
8182
8183 cleanup:
8184 kfree(bundle);
8185
8186 *out_type = update_type;
8187 return ret;
8188 }
8189
8190 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
8191 {
8192 struct drm_connector *connector;
8193 struct drm_connector_state *conn_state;
8194 struct amdgpu_dm_connector *aconnector = NULL;
8195 int i;
8196 for_each_new_connector_in_state(state, connector, conn_state, i) {
8197 if (conn_state->crtc != crtc)
8198 continue;
8199
8200 aconnector = to_amdgpu_dm_connector(connector);
8201 if (!aconnector->port || !aconnector->mst_port)
8202 aconnector = NULL;
8203 else
8204 break;
8205 }
8206
8207 if (!aconnector)
8208 return 0;
8209
8210 return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
8211 }
8212
8213 /**
8214 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
8215 * @dev: The DRM device
8216 * @state: The atomic state to commit
8217 *
8218 * Validate that the given atomic state is programmable by DC into hardware.
8219 * This involves constructing a &struct dc_state reflecting the new hardware
8220 * state we wish to commit, then querying DC to see if it is programmable. It's
8221 * important not to modify the existing DC state. Otherwise, atomic_check
8222 * may unexpectedly commit hardware changes.
8223 *
8224 * When validating the DC state, it's important that the right locks are
8225 * acquired. For full updates case which removes/adds/updates streams on one
8226 * CRTC while flipping on another CRTC, acquiring global lock will guarantee
8227 * that any such full update commit will wait for completion of any outstanding
8228 * flip using DRMs synchronization events. See
8229 * dm_determine_update_type_for_commit()
8230 *
8231 * Note that DM adds the affected connectors for all CRTCs in state, when that
8232 * might not seem necessary. This is because DC stream creation requires the
8233 * DC sink, which is tied to the DRM connector state. Cleaning this up should
8234 * be possible but non-trivial - a possible TODO item.
8235 *
8236 * Return: 0 on success or a negative error code if validation failed.
8237 */
8238 static int amdgpu_dm_atomic_check(struct drm_device *dev,
8239 struct drm_atomic_state *state)
8240 {
8241 struct amdgpu_device *adev = dev->dev_private;
8242 struct dm_atomic_state *dm_state = NULL;
8243 struct dc *dc = adev->dm.dc;
8244 struct drm_connector *connector;
8245 struct drm_connector_state *old_con_state, *new_con_state;
8246 struct drm_crtc *crtc;
8247 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8248 struct drm_plane *plane;
8249 struct drm_plane_state *old_plane_state, *new_plane_state;
8250 enum surface_update_type update_type = UPDATE_TYPE_FAST;
8251 enum surface_update_type overall_update_type = UPDATE_TYPE_FAST;
8252
8253 int ret, i;
8254
8255 /*
8256 * This bool will be set to true for any modeset/reset
8257 * or plane update which implies a non-fast surface update.
8258 */
8259 bool lock_and_validation_needed = false;
8260
8261 ret = drm_atomic_helper_check_modeset(dev, state);
8262 if (ret)
8263 goto fail;
8264
8265 if (adev->asic_type >= CHIP_NAVI10) {
8266 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8267 if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8268 ret = add_affected_mst_dsc_crtcs(state, crtc);
8269 if (ret)
8270 goto fail;
8271 }
8272 }
8273 }
8274
8275 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8276 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
8277 !new_crtc_state->color_mgmt_changed &&
8278 old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled)
8279 continue;
8280
8281 if (!new_crtc_state->enable)
8282 continue;
8283
8284 ret = drm_atomic_add_affected_connectors(state, crtc);
8285 if (ret)
8286 return ret;
8287
8288 ret = drm_atomic_add_affected_planes(state, crtc);
8289 if (ret)
8290 goto fail;
8291 }
8292
8293 /*
8294 * Add all primary and overlay planes on the CRTC to the state
8295 * whenever a plane is enabled to maintain correct z-ordering
8296 * and to enable fast surface updates.
8297 */
8298 drm_for_each_crtc(crtc, dev) {
8299 bool modified = false;
8300
8301 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8302 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8303 continue;
8304
8305 if (new_plane_state->crtc == crtc ||
8306 old_plane_state->crtc == crtc) {
8307 modified = true;
8308 break;
8309 }
8310 }
8311
8312 if (!modified)
8313 continue;
8314
8315 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
8316 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8317 continue;
8318
8319 new_plane_state =
8320 drm_atomic_get_plane_state(state, plane);
8321
8322 if (IS_ERR(new_plane_state)) {
8323 ret = PTR_ERR(new_plane_state);
8324 goto fail;
8325 }
8326 }
8327 }
8328
8329 /* Remove existing planes if they are modified */
8330 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8331 ret = dm_update_plane_state(dc, state, plane,
8332 old_plane_state,
8333 new_plane_state,
8334 false,
8335 &lock_and_validation_needed);
8336 if (ret)
8337 goto fail;
8338 }
8339
8340 /* Disable all crtcs which require disable */
8341 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8342 ret = dm_update_crtc_state(&adev->dm, state, crtc,
8343 old_crtc_state,
8344 new_crtc_state,
8345 false,
8346 &lock_and_validation_needed);
8347 if (ret)
8348 goto fail;
8349 }
8350
8351 /* Enable all crtcs which require enable */
8352 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8353 ret = dm_update_crtc_state(&adev->dm, state, crtc,
8354 old_crtc_state,
8355 new_crtc_state,
8356 true,
8357 &lock_and_validation_needed);
8358 if (ret)
8359 goto fail;
8360 }
8361
8362 /* Add new/modified planes */
8363 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8364 ret = dm_update_plane_state(dc, state, plane,
8365 old_plane_state,
8366 new_plane_state,
8367 true,
8368 &lock_and_validation_needed);
8369 if (ret)
8370 goto fail;
8371 }
8372
8373 /* Run this here since we want to validate the streams we created */
8374 ret = drm_atomic_helper_check_planes(dev, state);
8375 if (ret)
8376 goto fail;
8377
8378 if (state->legacy_cursor_update) {
8379 /*
8380 * This is a fast cursor update coming from the plane update
8381 * helper, check if it can be done asynchronously for better
8382 * performance.
8383 */
8384 state->async_update =
8385 !drm_atomic_helper_async_check(dev, state);
8386
8387 /*
8388 * Skip the remaining global validation if this is an async
8389 * update. Cursor updates can be done without affecting
8390 * state or bandwidth calcs and this avoids the performance
8391 * penalty of locking the private state object and
8392 * allocating a new dc_state.
8393 */
8394 if (state->async_update)
8395 return 0;
8396 }
8397
8398 /* Check scaling and underscan changes */
8399 /* TODO: Removed scaling changes validation due to inability to commit
8400 * new stream into context w/o causing a full reset. Need to
8401 * decide how to handle.
8402 */
8403 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8404 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8405 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8406 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8407
8408 /* Skip any modesets/resets */
8409 if (!acrtc || drm_atomic_crtc_needs_modeset(
8410 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
8411 continue;
8412
8413 /* Skip anything that is not a scaling or underscan change */
8414 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
8415 continue;
8416
8417 overall_update_type = UPDATE_TYPE_FULL;
8418 lock_and_validation_needed = true;
8419 }
8420
8421 ret = dm_determine_update_type_for_commit(&adev->dm, state, &update_type);
8422 if (ret)
8423 goto fail;
8424
8425 if (overall_update_type < update_type)
8426 overall_update_type = update_type;
8427
8428 /*
8429 * lock_and_validation_needed was an old way to determine if we need to set
8430 * the global lock. Leaving it in to check if we broke any corner cases:
8431 * lock_and_validation_needed true = UPDATE_TYPE_FULL or UPDATE_TYPE_MED
8432 * lock_and_validation_needed false = UPDATE_TYPE_FAST
8433 */
8434 if (lock_and_validation_needed && overall_update_type <= UPDATE_TYPE_FAST)
8435 WARN(1, "Global lock should be set, overall_update_type should be UPDATE_TYPE_MED or UPDATE_TYPE_FULL");
8436
8437 if (overall_update_type > UPDATE_TYPE_FAST) {
8438 ret = dm_atomic_get_state(state, &dm_state);
8439 if (ret)
8440 goto fail;
8441
8442 ret = do_aquire_global_lock(dev, state);
8443 if (ret)
8444 goto fail;
8445
8446 #if defined(CONFIG_DRM_AMD_DC_DCN)
8447 if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
8448 goto fail;
8449
8450 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
8451 if (ret)
8452 goto fail;
8453 #endif
8454
8455 /*
8456 * Perform validation of MST topology in the state:
8457 * We need to perform MST atomic check before calling
8458 * dc_validate_global_state(), or there is a chance
8459 * to get stuck in an infinite loop and hang eventually.
8460 */
8461 ret = drm_dp_mst_atomic_check(state);
8462 if (ret)
8463 goto fail;
8464
8465 if (dc_validate_global_state(dc, dm_state->context, false) != DC_OK) {
8466 ret = -EINVAL;
8467 goto fail;
8468 }
8469 } else {
8470 /*
8471 * The commit is a fast update. Fast updates shouldn't change
8472 * the DC context, affect global validation, and can have their
8473 * commit work done in parallel with other commits not touching
8474 * the same resource. If we have a new DC context as part of
8475 * the DM atomic state from validation we need to free it and
8476 * retain the existing one instead.
8477 */
8478 struct dm_atomic_state *new_dm_state, *old_dm_state;
8479
8480 new_dm_state = dm_atomic_get_new_state(state);
8481 old_dm_state = dm_atomic_get_old_state(state);
8482
8483 if (new_dm_state && old_dm_state) {
8484 if (new_dm_state->context)
8485 dc_release_state(new_dm_state->context);
8486
8487 new_dm_state->context = old_dm_state->context;
8488
8489 if (old_dm_state->context)
8490 dc_retain_state(old_dm_state->context);
8491 }
8492 }
8493
8494 /* Store the overall update type for use later in atomic check. */
8495 for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
8496 struct dm_crtc_state *dm_new_crtc_state =
8497 to_dm_crtc_state(new_crtc_state);
8498
8499 dm_new_crtc_state->update_type = (int)overall_update_type;
8500 }
8501
8502 /* Must be success */
8503 WARN_ON(ret);
8504 return ret;
8505
8506 fail:
8507 if (ret == -EDEADLK)
8508 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
8509 else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
8510 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
8511 else
8512 DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
8513
8514 return ret;
8515 }
8516
8517 static bool is_dp_capable_without_timing_msa(struct dc *dc,
8518 struct amdgpu_dm_connector *amdgpu_dm_connector)
8519 {
8520 uint8_t dpcd_data;
8521 bool capable = false;
8522
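/*
 * DPCD register DP_DOWN_STREAM_PORT_COUNT (0x007) carries the
 * MSA_TIMING_PAR_IGNORED bit; a sink that sets it can ignore the MSA
 * timing parameters, which the driver treats as a prerequisite for
 * FreeSync over DP/eDP.
 */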
8523 if (amdgpu_dm_connector->dc_link &&
8524 dm_helpers_dp_read_dpcd(
8525 NULL,
8526 amdgpu_dm_connector->dc_link,
8527 DP_DOWN_STREAM_PORT_COUNT,
8528 &dpcd_data,
8529 sizeof(dpcd_data))) {
8530 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true:false;
8531 }
8532
8533 return capable;
8534 }
8535 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
8536 struct edid *edid)
8537 {
8538 int i;
8539 bool edid_check_required;
8540 struct detailed_timing *timing;
8541 struct detailed_non_pixel *data;
8542 struct detailed_data_monitor_range *range;
8543 struct amdgpu_dm_connector *amdgpu_dm_connector =
8544 to_amdgpu_dm_connector(connector);
8545 struct dm_connector_state *dm_con_state = NULL;
8546
8547 struct drm_device *dev = connector->dev;
8548 struct amdgpu_device *adev = dev->dev_private;
8549 bool freesync_capable = false;
8550
8551 if (!connector->state) {
8552 DRM_ERROR("%s - Connector has no state\n", __func__);
8553 goto update;
8554 }
8555
8556 if (!edid) {
8557 dm_con_state = to_dm_connector_state(connector->state);
8558
8559 amdgpu_dm_connector->min_vfreq = 0;
8560 amdgpu_dm_connector->max_vfreq = 0;
8561 amdgpu_dm_connector->pixel_clock_mhz = 0;
8562
8563 goto update;
8564 }
8565
8566 dm_con_state = to_dm_connector_state(connector->state);
8567
8568 edid_check_required = false;
8569 if (!amdgpu_dm_connector->dc_sink) {
8570 DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
8571 goto update;
8572 }
8573 if (!adev->dm.freesync_module)
8574 goto update;
8575 /*
8576 * If EDID is non-NULL, restrict freesync support to DP and eDP only
8577 */
8578 if (edid) {
8579 if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
8580 || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
8581 edid_check_required = is_dp_capable_without_timing_msa(
8582 adev->dm.dc,
8583 amdgpu_dm_connector);
8584 }
8585 }
8586 if (edid_check_required == true && (edid->version > 1 ||
8587 (edid->version == 1 && edid->revision > 1))) {
8588 for (i = 0; i < 4; i++) {
8589
8590 timing = &edid->detailed_timings[i];
8591 data = &timing->data.other_data;
8592 range = &data->data.range;
8593 /*
8594 * Check if monitor has continuous frequency mode
8595 */
8596 if (data->type != EDID_DETAIL_MONITOR_RANGE)
8597 continue;
8598 /*
8599 * Check for flag range limits only. If flag == 1 then
8600 * no additional timing information is provided.
8601 * Default GTF, GTF Secondary curve and CVT are not
8602 * supported
8603 */
8604 if (range->flags != 1)
8605 continue;
8606
8607 amdgpu_dm_connector->min_vfreq = range->min_vfreq;
8608 amdgpu_dm_connector->max_vfreq = range->max_vfreq;
8609 amdgpu_dm_connector->pixel_clock_mhz =
8610 range->pixel_clock_mhz * 10;
8611 break;
8612 }
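/*
 * The EDID monitor range descriptor reports the min/max vertical rate in
 * Hz and the maximum pixel clock in 10 MHz units, hence the multiply by
 * 10 above to store the value in MHz.
 */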
8613
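/* Only advertise FreeSync when the reported range spans more than 10 Hz. */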
8614 if (amdgpu_dm_connector->max_vfreq -
8615 amdgpu_dm_connector->min_vfreq > 10) {
8616
8617 freesync_capable = true;
8618 }
8619 }
8620
8621 update:
8622 if (dm_con_state)
8623 dm_con_state->freesync_capable = freesync_capable;
8624
8625 if (connector->vrr_capable_property)
8626 drm_connector_set_vrr_capable_property(connector,
8627 freesync_capable);
8628 }
8629
8630 static void amdgpu_dm_set_psr_caps(struct dc_link *link)
8631 {
8632 uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
8633
8634 if (!(link->connector_signal & SIGNAL_TYPE_EDP))
8635 return;
8636 if (link->type == dc_connection_none)
8637 return;
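/*
 * DPCD register DP_PSR_SUPPORT (0x070) reports the PSR version supported
 * by the panel: 0 means PSR is not supported, 1 means PSR1, and so on.
 */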
8638 if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
8639 dpcd_data, sizeof(dpcd_data))) {
8640 link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
8641
8642 if (dpcd_data[0] == 0) {
8643 link->psr_settings.psr_version = PSR_VERSION_UNSUPPORTED;
8644 link->psr_settings.psr_feature_enabled = false;
8645 } else {
8646 link->psr_settings.psr_version = PSR_VERSION_1;
8647 link->psr_settings.psr_feature_enabled = true;
8648 }
8649
8650 DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
8651 }
8652 }
8653
8654 /*
8655 * amdgpu_dm_link_setup_psr() - configure psr link
8656 * @stream: stream state
8657 *
8658 * Return: true if success
8659 */
8660 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
8661 {
8662 struct dc_link *link = NULL;
8663 struct psr_config psr_config = {0};
8664 struct psr_context psr_context = {0};
8665 struct dc *dc = NULL;
8666 bool ret = false;
8667
8668 if (stream == NULL)
8669 return false;
8670
8671 link = stream->link;
8672 dc = link->ctx->dc;
8673
8674 psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
8675
8676 if (psr_config.psr_version > 0) {
8677 psr_config.psr_exit_link_training_required = 0x1;
8678 psr_config.psr_frame_capture_indication_req = 0;
8679 psr_config.psr_rfb_setup_time = 0x37;
8680 psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
8681 psr_config.allow_smu_optimizations = 0x0;
8682
8683 ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
8684
8685 }
8686 DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);
8687
8688 return ret;
8689 }
8690
8691 /*
8692 * amdgpu_dm_psr_enable() - enable psr f/w
8693 * @stream: stream state
8694 *
8695 * Return: true if success
8696 */
8697 bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
8698 {
8699 struct dc_link *link = stream->link;
8700 unsigned int vsync_rate_hz = 0;
8701 struct dc_static_screen_params params = {0};
8702 /* Calculate number of static frames before generating interrupt to
8703 * enter PSR.
8704 */
8705 /* Init fail-safe of 2 static frames */
8706 unsigned int num_frames_static = 2;
8707
8708 DRM_DEBUG_DRIVER("Enabling psr...\n");
8709
8710 vsync_rate_hz = div64_u64(div64_u64((
8711 stream->timing.pix_clk_100hz * 100),
8712 stream->timing.v_total),
8713 stream->timing.h_total);
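/*
 * vsync_rate_hz = pixel clock / (h_total * v_total). For example, an
 * illustrative 1080p60 CEA timing with pix_clk_100hz = 1485000 (148.5 MHz),
 * h_total = 2200 and v_total = 1125 gives 148500000 / 1125 / 2200 = 60 Hz.
 */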
8714
8715 /* Round up.
8716 * Calculate the number of frames such that at least 30 ms of time has
8717 * passed.
8718 */
8719 if (vsync_rate_hz != 0) {
8720 unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
8721 num_frames_static = (30000 / frame_time_microsec) + 1;
8722 }
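/*
 * For example, at 60 Hz frame_time_microsec is 16666 us, so
 * num_frames_static = 30000 / 16666 + 1 = 2 frames (about 33 ms of
 * static screen time before PSR entry).
 */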
8723
8724 params.triggers.cursor_update = true;
8725 params.triggers.overlay_update = true;
8726 params.triggers.surface_update = true;
8727 params.num_frames = num_frames_static;
8728
8729 dc_stream_set_static_screen_params(link->ctx->dc,
8730 &stream, 1,
8731 &params);
8732
8733 return dc_link_set_psr_allow_active(link, true, false);
8734 }
8735
8736 /*
8737 * amdgpu_dm_psr_disable() - disable psr f/w
8738 * @stream: stream state
8739 *
8740 * Return: true if success
8741 */
8742 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
8743 {
8744
8745 DRM_DEBUG_DRIVER("Disabling psr...\n");
8746
8747 return dc_link_set_psr_allow_active(stream->link, false, true);
8748 }