/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/drm_hdcp.h>
#endif
#include "amdgpu_pm.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif

#include "ivsrcid/ivsrcid_vislands30.h"

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/version.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>
#include <drm/drm_hdcp.h>

#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"
#endif

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"

#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
#endif

#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100

/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */

/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);

/*
 * initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder, drm_mode_config
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct drm_plane *plane,
				unsigned long possible_crtcs,
				const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			       struct drm_plane *plane,
			       uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *amdgpu_dm_connector,
				    uint32_t link_index,
				    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static int amdgpu_dm_atomic_commit(struct drm_device *dev,
				   struct drm_atomic_state *state,
				   bool nonblock);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state);

static void amdgpu_dm_set_psr_caps(struct dc_link *link);
static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);


/*
 * dm_vblank_get_counter
 *
 * @brief
 * Get counter for number of vertical blanks
 *
 * @param
 * struct amdgpu_device *adev - [in] desired amdgpu device
 * int disp_idx - [in] which CRTC to get the counter from
 *
 * @return
 * Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
		struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
				acrtc->base.state);


		if (acrtc_state->stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		return dc_stream_get_vblank_counter(acrtc_state->stream);
	}
}

static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	uint32_t v_blank_start, v_blank_end, h_position, v_position;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
		struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
						acrtc->base.state);

		if (acrtc_state->stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		/*
		 * TODO rework base driver to use values directly.
		 * for now parse it back into reg-format
		 */
		dc_stream_get_scanoutpos(acrtc_state->stream,
					 &v_blank_start,
					 &v_blank_end,
					 &h_position,
					 &v_position);

		*position = v_position | (h_position << 16);
		*vbl = v_blank_start | (v_blank_end << 16);
	}

	return 0;
}
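
/*
 * Illustrative sketch (not part of the driver): a hypothetical caller of
 * dm_crtc_get_scanoutpos() would unpack the reg-format results like so:
 *
 *	u32 vbl, position;
 *
 *	if (!dm_crtc_get_scanoutpos(adev, 0, &vbl, &position)) {
 *		u32 v_position    = position & 0xffff;
 *		u32 h_position    = position >> 16;
 *		u32 v_blank_start = vbl & 0xffff;
 *		u32 v_blank_end   = vbl >> 16;
 *	}
 */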

static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}

static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}

static bool dm_check_soft_reset(void *handle)
{
	return false;
}

static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}

static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
		     int otg_inst)
{
	struct drm_device *dev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	if (otg_inst == -1) {
		WARN_ON(1);
		return adev->mode_info.crtcs[0];
	}

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}

static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}

/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: ignored
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;
	struct drm_pending_vblank_event *e;
	struct dm_crtc_state *acrtc_state;
	uint32_t vpos, hpos, v_blank_start, v_blank_end;
	bool vrr_active;

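	/* The IRQ source enums are laid out per OTG, so subtracting the base
	 * entry (IRQ_TYPE_PFLIP) recovers the OTG instance that raised this
	 * interrupt. */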
	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev->ddev->event_lock, flags);

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
				 amdgpu_crtc->pflip_status,
				 AMDGPU_FLIP_SUBMITTED,
				 amdgpu_crtc->crtc_id,
				 amdgpu_crtc);
		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
		return;
	}

	/* page flip completed. */
	e = amdgpu_crtc->event;
	amdgpu_crtc->event = NULL;

	if (!e)
		WARN_ON(1);

	acrtc_state = to_dm_crtc_state(amdgpu_crtc->base.state);
	vrr_active = amdgpu_dm_vrr_active(acrtc_state);

	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
	if (!vrr_active ||
	    !dc_stream_get_scanoutpos(acrtc_state->stream, &v_blank_start,
				      &v_blank_end, &hpos, &vpos) ||
	    (vpos < v_blank_start)) {
		/* Update to correct count and vblank timestamp if racing with
		 * vblank irq. This also updates to the correct vblank timestamp
		 * even in VRR mode, as scanout is past the front-porch atm.
		 */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		/* Wake up userspace by sending the pageflip event with proper
		 * count and timestamp of vblank of flip completion.
		 */
		if (e) {
			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

			/* Event sent, so done with vblank for this flip */
			drm_crtc_vblank_put(&amdgpu_crtc->base);
		}
	} else if (e) {
		/* VRR active and inside front-porch: vblank count and
		 * timestamp for pageflip event will only be up to date after
		 * drm_crtc_handle_vblank() has been executed from late vblank
		 * irq handler after start of back-porch (vline 0). We queue the
		 * pageflip event for send-out by drm_crtc_handle_vblank() with
		 * updated timestamp and count, once it runs after us.
		 *
		 * We need to open-code this instead of using the helper
		 * drm_crtc_arm_vblank_event(), as that helper would
		 * call drm_crtc_accurate_vblank_count(), which we must
		 * not call in VRR mode while we are in front-porch!
		 */

		/* sequence will be replaced by real count during send-out. */
		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
		e->pipe = amdgpu_crtc->crtc_id;

		list_add_tail(&e->base.link, &adev->ddev->vblank_event_list);
		e = NULL;
	}

	/* Keep track of vblank of this flip for flip throttling. We use the
	 * cooked hw counter, as that one incremented at start of this vblank
	 * of pageflip completion, so last_flip_vblank is the forbidden count
	 * for queueing new pageflips if vsync + VRR is enabled.
	 */
	amdgpu_crtc->last_flip_vblank =
		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);

	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
			 amdgpu_crtc->crtc_id, amdgpu_crtc,
			 vrr_active, (int) !e);
}

static void dm_vupdate_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct dm_crtc_state *acrtc_state;
	unsigned long flags;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

	if (acrtc) {
		acrtc_state = to_dm_crtc_state(acrtc->base.state);

		DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
			      acrtc->crtc_id,
			      amdgpu_dm_vrr_active(acrtc_state));

		/* Core vblank handling is done here after end of front-porch in
		 * vrr mode, as vblank timestamping will give valid results
		 * while now done after front-porch. This will also deliver
		 * page-flip completion events that have been queued to us
		 * if a pageflip happened inside front-porch.
		 */
		if (amdgpu_dm_vrr_active(acrtc_state)) {
			drm_crtc_handle_vblank(&acrtc->base);

			/* BTR processing for pre-DCE12 ASICs */
			if (acrtc_state->stream &&
			    adev->family < AMDGPU_FAMILY_AI) {
				spin_lock_irqsave(&adev->ddev->event_lock, flags);
				mod_freesync_handle_v_update(
					adev->dm.freesync_module,
					acrtc_state->stream,
					&acrtc_state->vrr_params);

				dc_stream_adjust_vmin_vmax(
					adev->dm.dc,
					acrtc_state->stream,
					&acrtc_state->vrr_params.adjust);
				spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
			}
		}
	}
}

/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct dm_crtc_state *acrtc_state;
	unsigned long flags;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
	if (!acrtc)
		return;

	acrtc_state = to_dm_crtc_state(acrtc->base.state);

	DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
		      amdgpu_dm_vrr_active(acrtc_state),
		      acrtc_state->active_planes);

	/*
	 * Core vblank handling at start of front-porch is only possible
	 * in non-vrr mode, as only there vblank timestamping will give
	 * valid results while done in front-porch. Otherwise defer it
	 * to dm_vupdate_high_irq after end of front-porch.
	 */
	if (!amdgpu_dm_vrr_active(acrtc_state))
		drm_crtc_handle_vblank(&acrtc->base);

	/*
	 * Following stuff must happen at start of vblank, for crc
	 * computation and below-the-range btr support in vrr mode.
	 */
	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

	/* BTR updates need to happen before VUPDATE on Vega and above. */
	if (adev->family < AMDGPU_FAMILY_AI)
		return;

	spin_lock_irqsave(&adev->ddev->event_lock, flags);

	if (acrtc_state->stream && acrtc_state->vrr_params.supported &&
	    acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
		mod_freesync_handle_v_update(adev->dm.freesync_module,
					     acrtc_state->stream,
					     &acrtc_state->vrr_params);

		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc_state->stream,
					   &acrtc_state->vrr_params.adjust);
	}

	/*
	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
	 * In that case, pageflip completion interrupts won't fire and pageflip
	 * completion events won't get delivered. Prevent this by sending
	 * pending pageflip events from here if a flip is still pending.
	 *
	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
	 * avoid race conditions between flip programming and completion,
	 * which could cause too early flip completion events.
	 */
	if (adev->family >= AMDGPU_FAMILY_RV &&
	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
	    acrtc_state->active_planes == 0) {
		if (acrtc->event) {
			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
			acrtc->event = NULL;
			drm_crtc_vblank_put(&acrtc->base);
		}
		acrtc->pflip_status = AMDGPU_FLIP_NONE;
	}

	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
}

static int dm_set_clockgating_state(void *handle,
				    enum amd_clockgating_state state)
{
	return 0;
}

static int dm_set_powergating_state(void *handle,
				    enum amd_powergating_state state)
{
	return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct dm_comressor_info *compressor = &adev->dm.compressor;
	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
	struct drm_display_mode *mode;
	unsigned long max_size = 0;

	if (adev->dm.dc->fbc_compressor == NULL)
		return;

	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	if (compressor->bo_ptr)
		return;


	list_for_each_entry(mode, &connector->modes, head) {
		if (max_size < mode->htotal * mode->vtotal)
			max_size = mode->htotal * mode->vtotal;
	}

	if (max_size) {
		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
			    &compressor->gpu_addr, &compressor->cpu_addr);

		if (r)
			DRM_ERROR("DM: Failed to initialize FBC\n");
		else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
		}

	}

}

static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
					     int pipe, bool *enabled,
					     unsigned char *buf, int max_bytes)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct amdgpu_dm_connector *aconnector;
	int ret = 0;

	*enabled = false;

	mutex_lock(&adev->dm.audio_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->audio_inst != port)
			continue;

		*enabled = true;
		ret = drm_eld_size(connector->eld);
		memcpy(buf, connector->eld, min(max_bytes, ret));

		break;
	}
	drm_connector_list_iter_end(&conn_iter);

	mutex_unlock(&adev->dm.audio_lock);

	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

	return ret;
}

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
	.get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
					  struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_audio_component *acomp = data;

	acomp->ops = &amdgpu_dm_audio_component_ops;
	acomp->dev = kdev;
	adev->dm.audio_component = acomp;

	return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
					     struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_audio_component *acomp = data;

	acomp->ops = NULL;
	acomp->dev = NULL;
	adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
	.bind	= amdgpu_dm_audio_component_bind,
	.unbind	= amdgpu_dm_audio_component_unbind,
};

static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
	int i, ret;

	if (!amdgpu_audio)
		return 0;

	adev->mode_info.audio.enabled = true;

	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].id =
			adev->dm.dc->res_pool->audios[i]->inst;
		adev->mode_info.audio.pin[i].offset = 0;
	}

	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
	if (ret < 0)
		return ret;

	adev->dm.audio_registered = true;

	return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
	if (!amdgpu_audio)
		return;

	if (!adev->mode_info.audio.enabled)
		return;

	if (adev->dm.audio_registered) {
		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
		adev->dm.audio_registered = false;
	}

	/* TODO: Disable audio? */

	adev->mode_info.audio.enabled = false;
}

static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
	struct drm_audio_component *acomp = adev->dm.audio_component;

	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
						 pin, -1);
	}
}

static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
	const struct dmcub_firmware_header_v1_0 *hdr;
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
	const struct firmware *dmub_fw = adev->dm.dmub_fw;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	struct abm *abm = adev->dm.dc->res_pool->abm;
	struct dmub_srv_hw_params hw_params;
	enum dmub_status status;
	const unsigned char *fw_inst_const, *fw_bss_data;
	uint32_t i, fw_inst_const_size, fw_bss_data_size;
	bool has_hw_support;

	if (!dmub_srv)
		/* DMUB isn't supported on the ASIC. */
		return 0;

	if (!fb_info) {
		DRM_ERROR("No framebuffer info for DMUB service.\n");
		return -EINVAL;
	}

	if (!dmub_fw) {
		/* Firmware required for DMUB support. */
		DRM_ERROR("No firmware provided for DMUB.\n");
		return -EINVAL;
	}

	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
		return -EINVAL;
	}

	if (!has_hw_support) {
		DRM_INFO("DMUB unsupported on ASIC\n");
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

	fw_inst_const = dmub_fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
			PSP_HEADER_BYTES;

	fw_bss_data = dmub_fw->data +
		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		      le32_to_cpu(hdr->inst_const_bytes);

	/* Copy firmware and bios info into FB memory. */
	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
	 * amdgpu_ucode_init_single_fw will load dmub firmware
	 * fw_inst_const part to cw0; otherwise, the firmware back door load
	 * will be done by dm_dmub_hw_init
	 */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
		       fw_inst_const_size);
	}

	if (fw_bss_data_size)
		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
		       fw_bss_data, fw_bss_data_size);

	/* Copy firmware bios info into FB memory. */
	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
	       adev->bios_size);

	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

	/* Initialize hardware. */
	memset(&hw_params, 0, sizeof(hw_params));
	hw_params.fb_base = adev->gmc.fb_start;
	hw_params.fb_offset = adev->gmc.aper_base;

	/* backdoor load firmware and trigger dmub running */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		hw_params.load_inst_const = true;

	if (dmcu)
		hw_params.psp_version = dmcu->psp_version;

	for (i = 0; i < fb_info->num_fb; ++i)
		hw_params.fb[i] = &fb_info->fb[i];

	status = dmub_srv_hw_init(dmub_srv, &hw_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
		return -EINVAL;
	}

	/* Wait for firmware load to finish. */
	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

	/* Init DMCU and ABM if available. */
	if (dmcu && abm) {
		dmcu->funcs->dmcu_init(dmcu);
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
	}

	adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
	if (!adev->dm.dc->ctx->dmub_srv) {
		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
		return -ENOMEM;
	}

	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
		 adev->dm.dmcub_fw_version);

	return 0;
}

static int amdgpu_dm_init(struct amdgpu_device *adev)
{
	struct dc_init_data init_data;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct dc_callback_init init_params;
#endif
	int r;

	adev->dm.ddev = adev->ddev;
	adev->dm.adev = adev;

	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));
#ifdef CONFIG_DRM_AMD_DC_HDCP
	memset(&init_params, 0, sizeof(init_params));
#endif

	mutex_init(&adev->dm.dc_lock);
	mutex_init(&adev->dm.audio_lock);

	if (amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
		goto error;
	}

	init_data.asic_id.chip_family = adev->family;

	init_data.asic_id.pci_revision_id = adev->pdev->revision;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;

	init_data.asic_id.vram_width = adev->gmc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;

	init_data.driver = adev;

	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		DRM_ERROR("amdgpu: failed to create cgs device.\n");
		goto error;
	}

	init_data.cgs_device = adev->dm.cgs_device;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

	switch (adev->asic_type) {
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_RAVEN:
	case CHIP_RENOIR:
		init_data.flags.gpu_vm_support = true;
		break;
	default:
		break;
	}

	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
		init_data.flags.fbc_support = true;

	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
		init_data.flags.multi_mon_pp_mclk_switch = true;

	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
		init_data.flags.disable_fractional_pwm = true;

	init_data.flags.power_down_display_on_boot = true;

	init_data.soc_bounding_box = adev->dm.soc_bounding_box;

	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

	if (adev->dm.dc) {
		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
	} else {
		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
		goto error;
	}

	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
		adev->dm.dc->debug.force_single_disp_pipe_split = false;
		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
	}

	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
		adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
		adev->dm.dc->debug.disable_stutter = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
		adev->dm.dc->debug.disable_dsc = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
		adev->dm.dc->debug.disable_clock_gate = true;

	r = dm_dmub_hw_init(adev);
	if (r) {
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
		goto error;
	}

	dc_hardware_init(adev->dm.dc);

	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module) {
		DRM_ERROR(
		"amdgpu: failed to initialize freesync_module.\n");
	} else
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				 adev->dm.freesync_module);

	amdgpu_dm_init_color_mod();

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->asic_type >= CHIP_RAVEN) {
		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);

		if (!adev->dm.hdcp_workqueue)
			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
		else
			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);

		dc_init_callbacks(adev->dm.dc, &init_params);
	}
#endif
	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	/* Update the actual used number of crtc */
	adev->mode_info.num_crtc = adev->dm.display_indexes_num;

	/* create fake encoders for MST */
	dm_dp_create_fake_mst_encoders(adev);

	/* TODO: Add_display_info? */

	/* TODO use dynamic cursor width */
	adev->ddev->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
	adev->ddev->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

	if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	DRM_DEBUG_DRIVER("KMS initialized.\n");

	return 0;
error:
	amdgpu_dm_fini(adev);

	return -EINVAL;
}

static void amdgpu_dm_fini(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->dm.display_indexes_num; i++) {
		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
	}

	amdgpu_dm_audio_fini(adev);

	amdgpu_dm_destroy_drm_device(&adev->dm);

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.hdcp_workqueue) {
		hdcp_destroy(adev->dm.hdcp_workqueue);
		adev->dm.hdcp_workqueue = NULL;
	}

	if (adev->dm.dc)
		dc_deinit_callbacks(adev->dm.dc);
#endif
	if (adev->dm.dc->ctx->dmub_srv) {
		dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
		adev->dm.dc->ctx->dmub_srv = NULL;
	}

	if (adev->dm.dmub_bo)
		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
				      &adev->dm.dmub_bo_gpu_addr,
				      &adev->dm.dmub_bo_cpu_addr);

	/* DC Destroy TODO: Replace destroy DAL */
	if (adev->dm.dc)
		dc_destroy(&adev->dm.dc);
	/*
	 * TODO: pageflip, vblank interrupt
	 *
	 * amdgpu_dm_irq_fini(adev);
	 */

	if (adev->dm.cgs_device) {
		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
		adev->dm.cgs_device = NULL;
	}
	if (adev->dm.freesync_module) {
		mod_freesync_destroy(adev->dm.freesync_module);
		adev->dm.freesync_module = NULL;
	}

	mutex_destroy(&adev->dm.audio_lock);
	mutex_destroy(&adev->dm.dc_lock);

	return;
}

static int load_dmcu_fw(struct amdgpu_device *adev)
{
	const char *fw_name_dmcu = NULL;
	int r;
	const struct dmcu_firmware_header_v1_0 *hdr;

	switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_OLAND:
#endif
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_RENOIR:
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
#endif
		return 0;
	case CHIP_NAVI12:
		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
		break;
	case CHIP_RAVEN:
		if (ASICREV_IS_PICASSO(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else
			return 0;
		break;
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
		return 0;
	}

	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
	if (r == -ENOENT) {
		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
		adev->dm.fw_dmcu = NULL;
		return 0;
	}
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
			fw_name_dmcu);
		return r;
	}

	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
			fw_name_dmcu);
		release_firmware(adev->dm.fw_dmcu);
		adev->dm.fw_dmcu = NULL;
		return r;
	}

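	/*
	 * Illustrative note (an assumption based on the ucode IDs used below):
	 * the DMCU image appears to carry two pieces, the ERAM program and the
	 * interrupt vector table (INTV). Each is registered with the PSP
	 * loader separately, and fw_size accounts for both page-aligned
	 * regions.
	 */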
	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);

	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");

	return 0;
}

static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
{
	struct amdgpu_device *adev = ctx;

	return dm_read_reg(adev->dm.dc->ctx, address);
}

static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
				     uint32_t value)
{
	struct amdgpu_device *adev = ctx;

	return dm_write_reg(adev->dm.dc->ctx, address, value);
}

static int dm_dmub_sw_init(struct amdgpu_device *adev)
{
	struct dmub_srv_create_params create_params;
	struct dmub_srv_region_params region_params;
	struct dmub_srv_region_info region_info;
	struct dmub_srv_fb_params fb_params;
	struct dmub_srv_fb_info *fb_info;
	struct dmub_srv *dmub_srv;
	const struct dmcub_firmware_header_v1_0 *hdr;
	const char *fw_name_dmub;
	enum dmub_asic dmub_asic;
	enum dmub_status status;
	int r;

	switch (adev->asic_type) {
	case CHIP_RENOIR:
		dmub_asic = DMUB_ASIC_DCN21;
		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
		break;
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
	case CHIP_SIENNA_CICHLID:
		dmub_asic = DMUB_ASIC_DCN30;
		fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
		break;
	case CHIP_NAVY_FLOUNDER:
		dmub_asic = DMUB_ASIC_DCN30;
		fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
		break;
#endif

	default:
		/* ASIC doesn't support DMUB. */
		return 0;
	}

	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
	if (r) {
		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
		return 0;
	}

	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
	if (r) {
		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
			AMDGPU_UCODE_ID_DMCUB;
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
			adev->dm.dmub_fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);

		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
			 adev->dm.dmcub_fw_version);
	}

	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
	dmub_srv = adev->dm.dmub_srv;

	if (!dmub_srv) {
		DRM_ERROR("Failed to allocate DMUB service!\n");
		return -ENOMEM;
	}

	memset(&create_params, 0, sizeof(create_params));
	create_params.user_ctx = adev;
	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
	create_params.asic = dmub_asic;

	/* Create the DMUB service. */
	status = dmub_srv_create(dmub_srv, &create_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error creating DMUB service: %d\n", status);
		return -EINVAL;
	}

	/* Calculate the size of all the regions for the DMUB service. */
	memset(&region_params, 0, sizeof(region_params));

	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
	region_params.vbios_size = adev->bios_size;
	region_params.fw_bss_data = region_params.bss_data_size ?
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		le32_to_cpu(hdr->inst_const_bytes) : NULL;
	region_params.fw_inst_const =
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		PSP_HEADER_BYTES;

	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
					   &region_info);

	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
		return -EINVAL;
	}

	/*
	 * Allocate a framebuffer based on the total size of all the regions.
	 * TODO: Move this into GART.
	 */
	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
				    &adev->dm.dmub_bo_gpu_addr,
				    &adev->dm.dmub_bo_cpu_addr);
	if (r)
		return r;

	/* Rebase the regions on the framebuffer address. */
	memset(&fb_params, 0, sizeof(fb_params));
	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
	fb_params.region_info = &region_info;

	adev->dm.dmub_fb_info =
		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
	fb_info = adev->dm.dmub_fb_info;

	if (!fb_info) {
		DRM_ERROR(
			"Failed to allocate framebuffer info for DMUB service!\n");
		return -ENOMEM;
	}

	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
		return -EINVAL;
	}

	return 0;
}

static int dm_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = dm_dmub_sw_init(adev);
	if (r)
		return r;

	return load_dmcu_fw(adev);
}

static int dm_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	kfree(adev->dm.dmub_fb_info);
	adev->dm.dmub_fb_info = NULL;

	if (adev->dm.dmub_srv) {
		dmub_srv_destroy(adev->dm.dmub_srv);
		adev->dm.dmub_srv = NULL;
	}

	release_firmware(adev->dm.dmub_fw);
	adev->dm.dmub_fw = NULL;

	release_firmware(adev->dm.fw_dmcu);
	adev->dm.fw_dmcu = NULL;

	return 0;
}

static int detect_mst_link_for_all_connectors(struct drm_device *dev)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	int ret = 0;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type == dc_connection_mst_branch &&
		    aconnector->mst_mgr.aux) {
			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
					 aconnector,
					 aconnector->base.base.id);

			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
			if (ret < 0) {
				DRM_ERROR("DM_MST: Failed to start MST\n");
				aconnector->dc_link->type =
					dc_connection_single;
				break;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	return ret;
}

static int dm_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	struct dmcu_iram_parameters params;
	unsigned int linear_lut[16];
	int i;
	struct dmcu *dmcu = NULL;
	bool ret = true;

	if (!adev->dm.fw_dmcu)
		return detect_mst_link_for_all_connectors(adev->ddev);

	dmcu = adev->dm.dc->res_pool->dmcu;

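	/* Build a 16-entry linear (identity) backlight transfer LUT spanning
	 * the full 0..0xFFFF range. */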
	for (i = 0; i < 16; i++)
		linear_lut[i] = 0xFFFF * i / 15;

	params.set = 0;
	params.backlight_ramping_start = 0xCCCC;
	params.backlight_ramping_reduction = 0xCCCCCCCC;
	params.backlight_lut_array_size = 16;
	params.backlight_lut_array = linear_lut;

	/* Min backlight level after ABM reduction; don't allow below 1%:
	 * 0xFFFF x 0.01 = 0x28F
	 */
	params.min_abm_backlight = 0x28F;

	/* In the case where ABM is implemented on DMCUB, the DMCU object
	 * will be NULL. ABM 2.4 and up are implemented on DMCUB.
	 */
	if (dmcu)
		ret = dmcu_load_iram(dmcu, params);
	else if (adev->dm.dc->ctx->dmub_srv)
		ret = dmub_init_abm_config(adev->dm.dc->res_pool->abm, params);

	if (!ret)
		return -EINVAL;

	return detect_mst_link_for_all_connectors(adev->ddev);
}

static void s3_handle_mst(struct drm_device *dev, bool suspend)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct drm_dp_mst_topology_mgr *mgr;
	int ret;
	bool need_hotplug = false;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type != dc_connection_mst_branch ||
		    aconnector->mst_port)
			continue;

		mgr = &aconnector->mst_mgr;

		if (suspend) {
			drm_dp_mst_topology_mgr_suspend(mgr);
		} else {
			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
			if (ret < 0) {
				drm_dp_mst_topology_mgr_set_mst(mgr, false);
				need_hotplug = true;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	if (need_hotplug)
		drm_kms_helper_hotplug_event(dev);
}

static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
{
	struct smu_context *smu = &adev->smu;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return 0;

	/* This interface is for dGPU Navi1x. The Linux dc-pplib interface
	 * depends on the Windows driver dc implementation.
	 * For Navi1x, clock settings of dcn watermarks are fixed. The settings
	 * should be passed to smu during boot up and resume from s3.
	 * boot up: dc calculates dcn watermark clock settings within dc_create,
	 * dcn20_resource_construct,
	 * then calls pplib functions below to pass the settings to smu:
	 * smu_set_watermarks_for_clock_ranges
	 * smu_set_watermarks_table
	 * navi10_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Renoir, clock settings of dcn watermarks are also fixed values.
	 * dc has implemented a different flow for the Windows driver:
	 * dc_hardware_init / dc_set_power_state
	 * dcn10_init_hw
	 * notify_wm_ranges
	 * set_wm_ranges
	 * -- Linux
	 * smu_set_watermarks_for_clock_ranges
	 * renoir_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Linux,
	 * dc_hardware_init -> amdgpu_dm_init
	 * dc_set_power_state --> dm_resume
	 *
	 * therefore, this function applies to navi10/12/14 but not Renoir
	 */
	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
		break;
	default:
		return 0;
	}

	ret = smu_write_watermarks_table(smu);
	if (ret) {
		DRM_ERROR("Failed to update WMTABLE!\n");
		return ret;
	}

	return 0;
}

/**
 * dm_hw_init() - Initialize DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Initialize the &struct amdgpu_display_manager device. This involves calling
 * the initializers of each DM component, then populating the struct with them.
 *
 * Although the function implies hardware initialization, both hardware and
 * software are initialized here. Splitting them out to their relevant init
 * hooks is a future TODO item.
 *
 * Some notable things that are initialized here:
 *
 * - Display Core, both software and hardware
 * - DC modules that we need (freesync and color management)
 * - DRM software states
 * - Interrupt sources and handlers
 * - Vblank support
 * - Debug FS entries, if enabled
 */
static int dm_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	/* Create DAL display manager */
	amdgpu_dm_init(adev);
	amdgpu_dm_hpd_init(adev);

	return 0;
}

/**
 * dm_hw_fini() - Teardown DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Teardown components within &struct amdgpu_display_manager that require
 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
 * were loaded. Also flush IRQ workqueues and disable them.
 */
static int dm_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dm_hpd_fini(adev);

	amdgpu_dm_irq_fini(adev);
	amdgpu_dm_fini(adev);
	return 0;
}


static int dm_enable_vblank(struct drm_crtc *crtc);
static void dm_disable_vblank(struct drm_crtc *crtc);

static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
					  struct dc_state *state, bool enable)
{
	enum dc_irq_source irq_source;
	struct amdgpu_crtc *acrtc;
	int rc = -EBUSY;
	int i = 0;

	for (i = 0; i < state->stream_count; i++) {
		acrtc = get_crtc_by_otg_inst(
				adev, state->stream_status[i].primary_otg_inst);

		if (acrtc && state->stream_status[i].plane_count != 0) {
			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
			DRM_DEBUG("crtc %d - vupdate irq %sabling: r=%d\n",
				  acrtc->crtc_id, enable ? "en" : "dis", rc);
			if (rc)
				DRM_WARN("Failed to %s pflip interrupts\n",
					 enable ? "enable" : "disable");

			if (enable) {
				rc = dm_enable_vblank(&acrtc->base);
				if (rc)
					DRM_WARN("Failed to enable vblank interrupts\n");
			} else {
				dm_disable_vblank(&acrtc->base);
			}

		}
	}

}

static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
{
	struct dc_state *context = NULL;
	enum dc_status res = DC_ERROR_UNEXPECTED;
	int i;
	struct dc_stream_state *del_streams[MAX_PIPES];
	int del_streams_count = 0;

	memset(del_streams, 0, sizeof(del_streams));

	context = dc_create_state(dc);
	if (context == NULL)
		goto context_alloc_fail;

	dc_resource_state_copy_construct_current(dc, context);

	/* First remove from context all streams */
	for (i = 0; i < context->stream_count; i++) {
		struct dc_stream_state *stream = context->streams[i];

		del_streams[del_streams_count++] = stream;
	}

	/* Remove all planes for removed streams and then remove the streams */
	for (i = 0; i < del_streams_count; i++) {
		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
			res = DC_FAIL_DETACH_SURFACES;
			goto fail;
		}

		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
		if (res != DC_OK)
			goto fail;
	}


	res = dc_validate_global_state(dc, context, false);

	if (res != DC_OK) {
		DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
		goto fail;
	}

	res = dc_commit_state(dc, context);

fail:
	dc_release_state(context);

context_alloc_fail:
	return res;
}

static int dm_suspend(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct amdgpu_display_manager *dm = &adev->dm;
	int ret = 0;

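	/*
	 * During GPU reset we cache the current DC state, tear the streams
	 * down, and hold dc_lock across the reset; dm_resume() replays the
	 * cached state and releases the lock once the ASIC is back up.
	 */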
1661 if (amdgpu_in_reset(adev)) {
1662 mutex_lock(&dm->dc_lock);
1663 dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
1664
1665 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
1666
1667 amdgpu_dm_commit_zero_streams(dm->dc);
1668
1669 amdgpu_dm_irq_suspend(adev);
1670
1671 return ret;
1672 }
1673
1674 WARN_ON(adev->dm.cached_state);
1675 adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);
1676
1677 s3_handle_mst(adev->ddev, true);
1678
1679 amdgpu_dm_irq_suspend(adev);
1680
1681
1682 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
1683
1684 return 0;
1685 }
1686
1687 static struct amdgpu_dm_connector *
1688 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
1689 struct drm_crtc *crtc)
1690 {
1691 uint32_t i;
1692 struct drm_connector_state *new_con_state;
1693 struct drm_connector *connector;
1694 struct drm_crtc *crtc_from_state;
1695
1696 for_each_new_connector_in_state(state, connector, new_con_state, i) {
1697 crtc_from_state = new_con_state->crtc;
1698
1699 if (crtc_from_state == crtc)
1700 return to_amdgpu_dm_connector(connector);
1701 }
1702
1703 return NULL;
1704 }
1705
1706 static void emulated_link_detect(struct dc_link *link)
1707 {
1708 struct dc_sink_init_data sink_init_data = { 0 };
1709 struct display_sink_capability sink_caps = { 0 };
1710 enum dc_edid_status edid_status;
1711 struct dc_context *dc_ctx = link->ctx;
1712 struct dc_sink *sink = NULL;
1713 struct dc_sink *prev_sink = NULL;
1714
1715 link->type = dc_connection_none;
1716 prev_sink = link->local_sink;
1717
1718 if (prev_sink != NULL)
1719 dc_sink_retain(prev_sink);
1720
1721 switch (link->connector_signal) {
1722 case SIGNAL_TYPE_HDMI_TYPE_A: {
1723 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1724 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
1725 break;
1726 }
1727
1728 case SIGNAL_TYPE_DVI_SINGLE_LINK: {
1729 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1730 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
1731 break;
1732 }
1733
1734 case SIGNAL_TYPE_DVI_DUAL_LINK: {
1735 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1736 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
1737 break;
1738 }
1739
1740 case SIGNAL_TYPE_LVDS: {
1741 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1742 sink_caps.signal = SIGNAL_TYPE_LVDS;
1743 break;
1744 }
1745
1746 case SIGNAL_TYPE_EDP: {
1747 sink_caps.transaction_type =
1748 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1749 sink_caps.signal = SIGNAL_TYPE_EDP;
1750 break;
1751 }
1752
1753 case SIGNAL_TYPE_DISPLAY_PORT: {
1754 sink_caps.transaction_type =
1755 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1756 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
1757 break;
1758 }
1759
1760 default:
1761 DC_ERROR("Invalid connector type! signal:%d\n",
1762 link->connector_signal);
1763 return;
1764 }
1765
1766 sink_init_data.link = link;
1767 sink_init_data.sink_signal = sink_caps.signal;
1768
1769 sink = dc_sink_create(&sink_init_data);
1770 if (!sink) {
1771 DC_ERROR("Failed to create sink!\n");
1772 return;
1773 }
1774
1775 /* dc_sink_create returns a new reference */
1776 link->local_sink = sink;
1777
1778 edid_status = dm_helpers_read_local_edid(
1779 link->ctx,
1780 link,
1781 sink);
1782
1783 if (edid_status != EDID_OK)
1784 DC_ERROR("Failed to read EDID");
1785
1786 }
1787
1788 static void dm_gpureset_commit_state(struct dc_state *dc_state,
1789 struct amdgpu_display_manager *dm)
1790 {
1791 struct {
1792 struct dc_surface_update surface_updates[MAX_SURFACES];
1793 struct dc_plane_info plane_infos[MAX_SURFACES];
1794 struct dc_scaling_info scaling_infos[MAX_SURFACES];
1795 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
1796 struct dc_stream_update stream_update;
1797 } * bundle;
1798 int k, m;
1799
1800 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
1801
1802 if (!bundle) {
1803 dm_error("Failed to allocate update bundle\n");
1804 goto cleanup;
1805 }
1806
1807 for (k = 0; k < dc_state->stream_count; k++) {
1808 bundle->stream_update.stream = dc_state->streams[k];
1809
1810 for (m = 0; m < dc_state->stream_status->plane_count; m++) {
1811 bundle->surface_updates[m].surface =
1812 dc_state->stream_status->plane_states[m];
1813 bundle->surface_updates[m].surface->force_full_update =
1814 true;
1815 }
1816 dc_commit_updates_for_stream(
1817 dm->dc, bundle->surface_updates,
1818 dc_state->stream_status->plane_count,
1819 dc_state->streams[k], &bundle->stream_update, dc_state);
1820 }
1821
1822 cleanup:
1823 kfree(bundle);
1824
1825 return;
1826 }
1827
1828 static int dm_resume(void *handle)
1829 {
1830 struct amdgpu_device *adev = handle;
1831 struct drm_device *ddev = adev->ddev;
1832 struct amdgpu_display_manager *dm = &adev->dm;
1833 struct amdgpu_dm_connector *aconnector;
1834 struct drm_connector *connector;
1835 struct drm_connector_list_iter iter;
1836 struct drm_crtc *crtc;
1837 struct drm_crtc_state *new_crtc_state;
1838 struct dm_crtc_state *dm_new_crtc_state;
1839 struct drm_plane *plane;
1840 struct drm_plane_state *new_plane_state;
1841 struct dm_plane_state *dm_new_plane_state;
1842 struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
1843 enum dc_connection_type new_connection_type = dc_connection_none;
1844 struct dc_state *dc_state;
1845 int i, r, j;
1846
1847 if (amdgpu_in_reset(adev)) {
1848 dc_state = dm->cached_dc_state;
1849
1850 r = dm_dmub_hw_init(adev);
1851 if (r)
1852 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1853
1854 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
1855 dc_resume(dm->dc);
1856
1857 amdgpu_dm_irq_resume_early(adev);
1858
1859 for (i = 0; i < dc_state->stream_count; i++) {
1860 dc_state->streams[i]->mode_changed = true;
1861 for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
1862 dc_state->stream_status[i].plane_states[j]->update_flags.raw
1863 = 0xffffffff;
1864 }
1865 }
1866
1867 WARN_ON(!dc_commit_state(dm->dc, dc_state));
1868
1869 dm_gpureset_commit_state(dm->cached_dc_state, dm);
1870
1871 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
1872
1873 dc_release_state(dm->cached_dc_state);
1874 dm->cached_dc_state = NULL;
1875
1876 amdgpu_dm_irq_resume_late(adev);
1877
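/* dc_lock was acquired in dm_suspend() when entering GPU reset */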
1878 mutex_unlock(&dm->dc_lock);
1879
1880 return 0;
1881 }
1882 /* Recreate dc_state - DC invalidates it when setting power state to S3. */
1883 dc_release_state(dm_state->context);
1884 dm_state->context = dc_create_state(dm->dc);
1885 /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
1886 dc_resource_state_construct(dm->dc, dm_state->context);
1887
1888 /* Before powering on DC we need to re-initialize DMUB. */
1889 r = dm_dmub_hw_init(adev);
1890 if (r)
1891 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1892
1893 /* power on hardware */
1894 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
1895
1896 /* program HPD filter */
1897 dc_resume(dm->dc);
1898
1899 /*
1900 * early enable HPD Rx IRQ, should be done before set mode as short
1901 * pulse interrupts are used for MST
1902 */
1903 amdgpu_dm_irq_resume_early(adev);
1904
1905 /* On resume we need to rewrite the MSTM control bits to enable MST */
1906 s3_handle_mst(ddev, false);
1907
1908 /* Do detection */
1909 drm_connector_list_iter_begin(ddev, &iter);
1910 drm_for_each_connector_iter(connector, &iter) {
1911 aconnector = to_amdgpu_dm_connector(connector);
1912
1913 /*
1914 * This is the case when traversing through already created
1915 * MST connectors; these should be skipped.
1916 */
1917 if (aconnector->mst_port)
1918 continue;
1919
1920 mutex_lock(&aconnector->hpd_lock);
1921 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
1922 DRM_ERROR("KMS: Failed to detect connector\n");
1923
1924 if (aconnector->base.force && new_connection_type == dc_connection_none)
1925 emulated_link_detect(aconnector->dc_link);
1926 else
1927 dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
1928
1929 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
1930 aconnector->fake_enable = false;
1931
1932 if (aconnector->dc_sink)
1933 dc_sink_release(aconnector->dc_sink);
1934 aconnector->dc_sink = NULL;
1935 amdgpu_dm_update_connector_after_detect(aconnector);
1936 mutex_unlock(&aconnector->hpd_lock);
1937 }
1938 drm_connector_list_iter_end(&iter);
1939
1940 /* Force mode set in atomic commit */
1941 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
1942 new_crtc_state->active_changed = true;
1943
1944 /*
1945 * atomic_check is expected to create the dc states. We need to release
1946 * them here, since they were duplicated as part of the suspend
1947 * procedure.
1948 */
1949 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
1950 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
1951 if (dm_new_crtc_state->stream) {
1952 WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
1953 dc_stream_release(dm_new_crtc_state->stream);
1954 dm_new_crtc_state->stream = NULL;
1955 }
1956 }
1957
1958 for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
1959 dm_new_plane_state = to_dm_plane_state(new_plane_state);
1960 if (dm_new_plane_state->dc_state) {
1961 WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
1962 dc_plane_state_release(dm_new_plane_state->dc_state);
1963 dm_new_plane_state->dc_state = NULL;
1964 }
1965 }
1966
1967 drm_atomic_helper_resume(ddev, dm->cached_state);
1968
1969 dm->cached_state = NULL;
1970
1971 amdgpu_dm_irq_resume_late(adev);
1972
1973 amdgpu_dm_smu_write_watermarks_table(adev);
1974
1975 return 0;
1976 }
1977
1978 /**
1979 * DOC: DM Lifecycle
1980 *
1981 * DM (and consequently DC) is registered in the amdgpu base driver as a IP
1982 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
1983 * the base driver's device list to be initialized and torn down accordingly.
1984 *
1985 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
1986 */
1987
1988 static const struct amd_ip_funcs amdgpu_dm_funcs = {
1989 .name = "dm",
1990 .early_init = dm_early_init,
1991 .late_init = dm_late_init,
1992 .sw_init = dm_sw_init,
1993 .sw_fini = dm_sw_fini,
1994 .hw_init = dm_hw_init,
1995 .hw_fini = dm_hw_fini,
1996 .suspend = dm_suspend,
1997 .resume = dm_resume,
1998 .is_idle = dm_is_idle,
1999 .wait_for_idle = dm_wait_for_idle,
2000 .check_soft_reset = dm_check_soft_reset,
2001 .soft_reset = dm_soft_reset,
2002 .set_clockgating_state = dm_set_clockgating_state,
2003 .set_powergating_state = dm_set_powergating_state,
2004 };
2005
2006 const struct amdgpu_ip_block_version dm_ip_block = {
2008 .type = AMD_IP_BLOCK_TYPE_DCE,
2009 .major = 1,
2010 .minor = 0,
2011 .rev = 0,
2012 .funcs = &amdgpu_dm_funcs,
2013 };
2014
2015
2016 /**
2017 * DOC: atomic
2018 *
2019 * *WIP*
2020 */
2021
2022 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2023 .fb_create = amdgpu_display_user_framebuffer_create,
2024 .output_poll_changed = drm_fb_helper_output_poll_changed,
2025 .atomic_check = amdgpu_dm_atomic_check,
2026 .atomic_commit = amdgpu_dm_atomic_commit,
2027 };
2028
2029 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2030 .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2031 };
2032
2033 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2034 {
2035 u32 max_cll, min_cll, max, min, q, r;
2036 struct amdgpu_dm_backlight_caps *caps;
2037 struct amdgpu_display_manager *dm;
2038 struct drm_connector *conn_base;
2039 struct amdgpu_device *adev;
2040 struct dc_link *link = NULL;
2041 static const u8 pre_computed_values[] = {
2042 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2043 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2044
2045 if (!aconnector || !aconnector->dc_link)
2046 return;
2047
2048 link = aconnector->dc_link;
2049 if (link->connector_signal != SIGNAL_TYPE_EDP)
2050 return;
2051
2052 conn_base = &aconnector->base;
2053 adev = conn_base->dev->dev_private;
2054 dm = &adev->dm;
2055 caps = &dm->backlight_caps;
2056 caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2057 caps->aux_support = false;
2058 max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2059 min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2060
2061 if (caps->ext_caps->bits.oled == 1 ||
2062 caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2063 caps->ext_caps->bits.hdr_aux_backlight_control == 1)
2064 caps->aux_support = true;
2065
2066 /* Per the specification (CTA-861-G), the maximum luminance is
2067 * calculated as:
2068 * Luminance = 50*2**(CV/32)
2069 * where CV is a one-byte value.
2070 * Evaluating this expression directly would require floating-point
2071 * precision; to avoid that complexity, we exploit the fact that CV
2072 * is divided by a constant. By Euclid's division algorithm, CV can
2073 * be written as CV = 32*q + r. Substituting into the luminance
2074 * expression gives 50*(2**q)*(2**(r/32)), so we only need to
2075 * pre-compute the 32 values of 50*2**(r/32). They were generated
2076 * with the following Ruby line:
2077 * (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2078 * The rounded results are stored in pre_computed_values, where
2079 * they can be verified.
2080 */
2081 q = max_cll >> 5;
2082 r = max_cll % 32;
2083 max = (1 << q) * pre_computed_values[r];
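/*
 * Illustrative check (example values, not from the spec): max_cll = 65
 * gives q = 2 and r = 1, so max = (1 << 2) * pre_computed_values[1]
 * = 4 * 51 = 204, matching 50*2**(65/32) ~= 204.4 from the formula.
 */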
2084
2085 /* min luminance: maxLum * (CV/255)^2 / 100 */
2086 min = DIV_ROUND_CLOSEST(max * min_cll * min_cll,
2087 255 * 255 * 100);
2088
2089 caps->aux_max_input_signal = max;
2090 caps->aux_min_input_signal = min;
2091 }
2092
2093 void amdgpu_dm_update_connector_after_detect(
2094 struct amdgpu_dm_connector *aconnector)
2095 {
2096 struct drm_connector *connector = &aconnector->base;
2097 struct drm_device *dev = connector->dev;
2098 struct dc_sink *sink;
2099
2100 /* MST handled by drm_mst framework */
2101 if (aconnector->mst_mgr.mst_state)
2102 return;
2103
2104
2105 sink = aconnector->dc_link->local_sink;
2106 if (sink)
2107 dc_sink_retain(sink);
2108
2109 /*
2110 * An EDID-managed connector gets its first update only in the mode_valid
2111 * hook; the connector sink is then set to either the fake or the physical
2112 * sink, depending on link status. Skip if already done during boot.
2113 */
2114 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2115 && aconnector->dc_em_sink) {
2116
2117 /*
2118 * For headless S3 resume, use dc_em_sink to fake a stream,
2119 * because connector->sink is set to NULL on resume.
2120 */
2121 mutex_lock(&dev->mode_config.mutex);
2122
2123 if (sink) {
2124 if (aconnector->dc_sink) {
2125 amdgpu_dm_update_freesync_caps(connector, NULL);
2126 /*
2127 * The retain and release below bump the sink's refcount,
2128 * because the link no longer points to it after disconnect;
2129 * otherwise the next crtc-to-connector reshuffle by the UMD
2130 * would trigger an unwanted dc_sink release.
2131 */
2132 dc_sink_release(aconnector->dc_sink);
2133 }
2134 aconnector->dc_sink = sink;
2135 dc_sink_retain(aconnector->dc_sink);
2136 amdgpu_dm_update_freesync_caps(connector,
2137 aconnector->edid);
2138 } else {
2139 amdgpu_dm_update_freesync_caps(connector, NULL);
2140 if (!aconnector->dc_sink) {
2141 aconnector->dc_sink = aconnector->dc_em_sink;
2142 dc_sink_retain(aconnector->dc_sink);
2143 }
2144 }
2145
2146 mutex_unlock(&dev->mode_config.mutex);
2147
2148 if (sink)
2149 dc_sink_release(sink);
2150 return;
2151 }
2152
2153 /*
2154 * TODO: temporary guard until a proper fix is found;
2155 * if this sink is an MST sink, we should not do anything.
2156 */
2157 if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2158 dc_sink_release(sink);
2159 return;
2160 }
2161
2162 if (aconnector->dc_sink == sink) {
2163 /*
2164 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2165 * Do nothing!!
2166 */
2167 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2168 aconnector->connector_id);
2169 if (sink)
2170 dc_sink_release(sink);
2171 return;
2172 }
2173
2174 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2175 aconnector->connector_id, aconnector->dc_sink, sink);
2176
2177 mutex_lock(&dev->mode_config.mutex);
2178
2179 /*
2180 * 1. Update status of the drm connector
2181 * 2. Send an event and let userspace tell us what to do
2182 */
2183 if (sink) {
2184 /*
2185 * TODO: check if we still need the S3 mode update workaround.
2186 * If yes, put it here.
2187 */
2188 if (aconnector->dc_sink)
2189 amdgpu_dm_update_freesync_caps(connector, NULL);
2190
2191 aconnector->dc_sink = sink;
2192 dc_sink_retain(aconnector->dc_sink);
2193 if (sink->dc_edid.length == 0) {
2194 aconnector->edid = NULL;
2195 if (aconnector->dc_link->aux_mode) {
2196 drm_dp_cec_unset_edid(
2197 &aconnector->dm_dp_aux.aux);
2198 }
2199 } else {
2200 aconnector->edid =
2201 (struct edid *)sink->dc_edid.raw_edid;
2202
2203 drm_connector_update_edid_property(connector,
2204 aconnector->edid);
2205
2206 if (aconnector->dc_link->aux_mode)
2207 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2208 aconnector->edid);
2209 }
2210
2211 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2212 update_connector_ext_caps(aconnector);
2213 } else {
2214 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2215 amdgpu_dm_update_freesync_caps(connector, NULL);
2216 drm_connector_update_edid_property(connector, NULL);
2217 aconnector->num_modes = 0;
2218 dc_sink_release(aconnector->dc_sink);
2219 aconnector->dc_sink = NULL;
2220 aconnector->edid = NULL;
2221 #ifdef CONFIG_DRM_AMD_DC_HDCP
2222 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2223 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2224 connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2225 #endif
2226 }
2227
2228 mutex_unlock(&dev->mode_config.mutex);
2229
2230 if (sink)
2231 dc_sink_release(sink);
2232 }
2233
2234 static void handle_hpd_irq(void *param)
2235 {
2236 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2237 struct drm_connector *connector = &aconnector->base;
2238 struct drm_device *dev = connector->dev;
2239 enum dc_connection_type new_connection_type = dc_connection_none;
2240 #ifdef CONFIG_DRM_AMD_DC_HDCP
2241 struct amdgpu_device *adev = dev->dev_private;
2242 #endif
2243
2244 /*
2245 * In case of failure, or for MST, there is no need to update the connector
2246 * status or notify the OS, since MST handles this in its own context.
2247 */
2248 mutex_lock(&aconnector->hpd_lock);
2249
2250 #ifdef CONFIG_DRM_AMD_DC_HDCP
2251 if (adev->dm.hdcp_workqueue)
2252 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2253 #endif
2254 if (aconnector->fake_enable)
2255 aconnector->fake_enable = false;
2256
2257 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2258 DRM_ERROR("KMS: Failed to detect connector\n");
2259
2260 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2261 emulated_link_detect(aconnector->dc_link);
2262
2263
2264 drm_modeset_lock_all(dev);
2265 dm_restore_drm_connector_state(dev, connector);
2266 drm_modeset_unlock_all(dev);
2267
2268 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2269 drm_kms_helper_hotplug_event(dev);
2270
2271 } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2272 amdgpu_dm_update_connector_after_detect(aconnector);
2273
2274
2275 drm_modeset_lock_all(dev);
2276 dm_restore_drm_connector_state(dev, connector);
2277 drm_modeset_unlock_all(dev);
2278
2279 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2280 drm_kms_helper_hotplug_event(dev);
2281 }
2282 mutex_unlock(&aconnector->hpd_lock);
2283
2284 }
2285
2286 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2287 {
2288 uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2289 uint8_t dret;
2290 bool new_irq_handled = false;
2291 int dpcd_addr;
2292 int dpcd_bytes_to_read;
2293
2294 const int max_process_count = 30;
2295 int process_count = 0;
2296
2297 const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2298
2299 if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2300 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2301 /* DPCD 0x200 - 0x201 for downstream IRQ */
2302 dpcd_addr = DP_SINK_COUNT;
2303 } else {
2304 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2305 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
2306 dpcd_addr = DP_SINK_COUNT_ESI;
2307 }
2308
2309 dret = drm_dp_dpcd_read(
2310 &aconnector->dm_dp_aux.aux,
2311 dpcd_addr,
2312 esi,
2313 dpcd_bytes_to_read);
2314
2315 while (dret == dpcd_bytes_to_read &&
2316 process_count < max_process_count) {
2317 uint8_t retry;
2318 dret = 0;
2319
2320 process_count++;
2321
2322 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2323 /* handle HPD short pulse irq */
2324 if (aconnector->mst_mgr.mst_state)
2325 drm_dp_mst_hpd_irq(
2326 &aconnector->mst_mgr,
2327 esi,
2328 &new_irq_handled);
2329
2330 if (new_irq_handled) {
2331 /* ACK at DPCD to notify downstream */
2332 const int ack_dpcd_bytes_to_write =
2333 dpcd_bytes_to_read - 1;
2334
2335 for (retry = 0; retry < 3; retry++) {
2336 uint8_t wret;
2337
2338 wret = drm_dp_dpcd_write(
2339 &aconnector->dm_dp_aux.aux,
2340 dpcd_addr + 1,
2341 &esi[1],
2342 ack_dpcd_bytes_to_write);
2343 if (wret == ack_dpcd_bytes_to_write)
2344 break;
2345 }
2346
2347 /* check if there is new irq to be handled */
2348 dret = drm_dp_dpcd_read(
2349 &aconnector->dm_dp_aux.aux,
2350 dpcd_addr,
2351 esi,
2352 dpcd_bytes_to_read);
2353
2354 new_irq_handled = false;
2355 } else {
2356 break;
2357 }
2358 }
2359
2360 if (process_count == max_process_count)
2361 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2362 }
2363
2364 static void handle_hpd_rx_irq(void *param)
2365 {
2366 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2367 struct drm_connector *connector = &aconnector->base;
2368 struct drm_device *dev = connector->dev;
2369 struct dc_link *dc_link = aconnector->dc_link;
2370 bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2371 enum dc_connection_type new_connection_type = dc_connection_none;
2372 #ifdef CONFIG_DRM_AMD_DC_HDCP
2373 union hpd_irq_data hpd_irq_data;
2374 struct amdgpu_device *adev = dev->dev_private;
2375
2376 memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2377 #endif
2378
2379 /*
2380 * TODO: Temporarily take a mutex so the HPD interrupt does not run
2381 * into a GPIO conflict; once the i2c helper is implemented, this
2382 * mutex should be retired.
2383 */
2384 if (dc_link->type != dc_connection_mst_branch)
2385 mutex_lock(&aconnector->hpd_lock);
2386
2387
2388 #ifdef CONFIG_DRM_AMD_DC_HDCP
2389 if (dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL) &&
2390 #else
2391 if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
2392 #endif
2393 !is_mst_root_connector) {
2394 /* Downstream Port status changed. */
2395 if (!dc_link_detect_sink(dc_link, &new_connection_type))
2396 DRM_ERROR("KMS: Failed to detect connector\n");
2397
2398 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2399 emulated_link_detect(dc_link);
2400
2401 if (aconnector->fake_enable)
2402 aconnector->fake_enable = false;
2403
2404 amdgpu_dm_update_connector_after_detect(aconnector);
2405
2406
2407 drm_modeset_lock_all(dev);
2408 dm_restore_drm_connector_state(dev, connector);
2409 drm_modeset_unlock_all(dev);
2410
2411 drm_kms_helper_hotplug_event(dev);
2412 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2413
2414 if (aconnector->fake_enable)
2415 aconnector->fake_enable = false;
2416
2417 amdgpu_dm_update_connector_after_detect(aconnector);
2418
2419
2420 drm_modeset_lock_all(dev);
2421 dm_restore_drm_connector_state(dev, connector);
2422 drm_modeset_unlock_all(dev);
2423
2424 drm_kms_helper_hotplug_event(dev);
2425 }
2426 }
2427 #ifdef CONFIG_DRM_AMD_DC_HDCP
2428 if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2429 if (adev->dm.hdcp_workqueue)
2430 hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
2431 }
2432 #endif
2433 if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2434 (dc_link->type == dc_connection_mst_branch))
2435 dm_handle_hpd_rx_irq(aconnector);
2436
2437 if (dc_link->type != dc_connection_mst_branch) {
2438 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2439 mutex_unlock(&aconnector->hpd_lock);
2440 }
2441 }
2442
2443 static void register_hpd_handlers(struct amdgpu_device *adev)
2444 {
2445 struct drm_device *dev = adev->ddev;
2446 struct drm_connector *connector;
2447 struct amdgpu_dm_connector *aconnector;
2448 const struct dc_link *dc_link;
2449 struct dc_interrupt_params int_params = {0};
2450
2451 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2452 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2453
2454 list_for_each_entry(connector,
2455 &dev->mode_config.connector_list, head) {
2456
2457 aconnector = to_amdgpu_dm_connector(connector);
2458 dc_link = aconnector->dc_link;
2459
2460 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2461 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2462 int_params.irq_source = dc_link->irq_source_hpd;
2463
2464 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2465 handle_hpd_irq,
2466 (void *) aconnector);
2467 }
2468
2469 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2470
2471 /* Also register for DP short pulse (hpd_rx). */
2472 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2473 int_params.irq_source = dc_link->irq_source_hpd_rx;
2474
2475 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2476 handle_hpd_rx_irq,
2477 (void *) aconnector);
2478 }
2479 }
2480 }
2481
2482 #if defined(CONFIG_DRM_AMD_DC_SI)
2483 /* Register IRQ sources and initialize IRQ callbacks */
2484 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
2485 {
2486 struct dc *dc = adev->dm.dc;
2487 struct common_irq_params *c_irq_params;
2488 struct dc_interrupt_params int_params = {0};
2489 int r;
2490 int i;
2491 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2492
2493 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2494 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2495
2496 /*
2497 * Actions of amdgpu_irq_add_id():
2498 * 1. Register a set() function with base driver.
2499 * Base driver will call set() function to enable/disable an
2500 * interrupt in DC hardware.
2501 * 2. Register amdgpu_dm_irq_handler().
2502 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2503 * coming from DC hardware.
2504 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2505 * for acknowledging and handling. */
2506
2507 /* Use VBLANK interrupt */
2508 for (i = 0; i < adev->mode_info.num_crtc; i++) {
2509 r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
2510 if (r) {
2511 DRM_ERROR("Failed to add crtc irq id!\n");
2512 return r;
2513 }
2514
2515 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2516 int_params.irq_source =
2517 dc_interrupt_to_irq_source(dc, i + 1, 0);
2518
2519 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2520
2521 c_irq_params->adev = adev;
2522 c_irq_params->irq_src = int_params.irq_source;
2523
2524 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2525 dm_crtc_high_irq, c_irq_params);
2526 }
2527
2528 /* Use GRPH_PFLIP interrupt */
2529 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2530 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2531 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2532 if (r) {
2533 DRM_ERROR("Failed to add page flip irq id!\n");
2534 return r;
2535 }
2536
2537 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2538 int_params.irq_source =
2539 dc_interrupt_to_irq_source(dc, i, 0);
2540
2541 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2542
2543 c_irq_params->adev = adev;
2544 c_irq_params->irq_src = int_params.irq_source;
2545
2546 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2547 dm_pflip_high_irq, c_irq_params);
2548
2549 }
2550
2551 /* HPD */
2552 r = amdgpu_irq_add_id(adev, client_id,
2553 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2554 if (r) {
2555 DRM_ERROR("Failed to add hpd irq id!\n");
2556 return r;
2557 }
2558
2559 register_hpd_handlers(adev);
2560
2561 return 0;
2562 }
2563 #endif
2564
2565 /* Register IRQ sources and initialize IRQ callbacks */
2566 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2567 {
2568 struct dc *dc = adev->dm.dc;
2569 struct common_irq_params *c_irq_params;
2570 struct dc_interrupt_params int_params = {0};
2571 int r;
2572 int i;
2573 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2574
2575 if (adev->asic_type >= CHIP_VEGA10)
2576 client_id = SOC15_IH_CLIENTID_DCE;
2577
2578 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2579 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2580
2581 /*
2582 * Actions of amdgpu_irq_add_id():
2583 * 1. Register a set() function with base driver.
2584 * Base driver will call set() function to enable/disable an
2585 * interrupt in DC hardware.
2586 * 2. Register amdgpu_dm_irq_handler().
2587 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2588 * coming from DC hardware.
2589 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2590 * for acknowledging and handling. */
2591
2592 /* Use VBLANK interrupt */
2593 for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2594 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2595 if (r) {
2596 DRM_ERROR("Failed to add crtc irq id!\n");
2597 return r;
2598 }
2599
2600 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2601 int_params.irq_source =
2602 dc_interrupt_to_irq_source(dc, i, 0);
2603
2604 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2605
2606 c_irq_params->adev = adev;
2607 c_irq_params->irq_src = int_params.irq_source;
2608
2609 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2610 dm_crtc_high_irq, c_irq_params);
2611 }
2612
2613 /* Use VUPDATE interrupt */
2614 for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2615 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2616 if (r) {
2617 DRM_ERROR("Failed to add vupdate irq id!\n");
2618 return r;
2619 }
2620
2621 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2622 int_params.irq_source =
2623 dc_interrupt_to_irq_source(dc, i, 0);
2624
2625 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2626
2627 c_irq_params->adev = adev;
2628 c_irq_params->irq_src = int_params.irq_source;
2629
2630 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2631 dm_vupdate_high_irq, c_irq_params);
2632 }
2633
2634 /* Use GRPH_PFLIP interrupt */
2635 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2636 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2637 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2638 if (r) {
2639 DRM_ERROR("Failed to add page flip irq id!\n");
2640 return r;
2641 }
2642
2643 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2644 int_params.irq_source =
2645 dc_interrupt_to_irq_source(dc, i, 0);
2646
2647 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2648
2649 c_irq_params->adev = adev;
2650 c_irq_params->irq_src = int_params.irq_source;
2651
2652 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2653 dm_pflip_high_irq, c_irq_params);
2654
2655 }
2656
2657 /* HPD */
2658 r = amdgpu_irq_add_id(adev, client_id,
2659 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2660 if (r) {
2661 DRM_ERROR("Failed to add hpd irq id!\n");
2662 return r;
2663 }
2664
2665 register_hpd_handlers(adev);
2666
2667 return 0;
2668 }
2669
2670 #if defined(CONFIG_DRM_AMD_DC_DCN)
2671 /* Register IRQ sources and initialize IRQ callbacks */
2672 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
2673 {
2674 struct dc *dc = adev->dm.dc;
2675 struct common_irq_params *c_irq_params;
2676 struct dc_interrupt_params int_params = {0};
2677 int r;
2678 int i;
2679
2680 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2681 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2682
2683 /*
2684 * Actions of amdgpu_irq_add_id():
2685 * 1. Register a set() function with base driver.
2686 * Base driver will call set() function to enable/disable an
2687 * interrupt in DC hardware.
2688 * 2. Register amdgpu_dm_irq_handler().
2689 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2690 * coming from DC hardware.
2691 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2692 * for acknowledging and handling.
2693 */
2694
2695 /* Use VSTARTUP interrupt */
2696 for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
2697 i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
2698 i++) {
2699 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
2700
2701 if (r) {
2702 DRM_ERROR("Failed to add crtc irq id!\n");
2703 return r;
2704 }
2705
2706 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2707 int_params.irq_source =
2708 dc_interrupt_to_irq_source(dc, i, 0);
2709
2710 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2711
2712 c_irq_params->adev = adev;
2713 c_irq_params->irq_src = int_params.irq_source;
2714
2715 amdgpu_dm_irq_register_interrupt(
2716 adev, &int_params, dm_crtc_high_irq, c_irq_params);
2717 }
2718
2719 /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
2720 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
2721 * to trigger at end of each vblank, regardless of state of the lock,
2722 * matching DCE behaviour.
2723 */
2724 for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
2725 i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
2726 i++) {
2727 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
2728
2729 if (r) {
2730 DRM_ERROR("Failed to add vupdate irq id!\n");
2731 return r;
2732 }
2733
2734 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2735 int_params.irq_source =
2736 dc_interrupt_to_irq_source(dc, i, 0);
2737
2738 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2739
2740 c_irq_params->adev = adev;
2741 c_irq_params->irq_src = int_params.irq_source;
2742
2743 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2744 dm_vupdate_high_irq, c_irq_params);
2745 }
2746
2747 /* Use GRPH_PFLIP interrupt */
2748 for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
2749 i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
2750 i++) {
2751 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
2752 if (r) {
2753 DRM_ERROR("Failed to add page flip irq id!\n");
2754 return r;
2755 }
2756
2757 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2758 int_params.irq_source =
2759 dc_interrupt_to_irq_source(dc, i, 0);
2760
2761 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2762
2763 c_irq_params->adev = adev;
2764 c_irq_params->irq_src = int_params.irq_source;
2765
2766 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2767 dm_pflip_high_irq, c_irq_params);
2768
2769 }
2770
2771 /* HPD */
2772 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
2773 &adev->hpd_irq);
2774 if (r) {
2775 DRM_ERROR("Failed to add hpd irq id!\n");
2776 return r;
2777 }
2778
2779 register_hpd_handlers(adev);
2780
2781 return 0;
2782 }
2783 #endif
2784
2785 /*
2786 * Acquires the lock for the atomic state object and returns
2787 * the new atomic state.
2788 *
2789 * This should only be called during atomic check.
2790 */
2791 static int dm_atomic_get_state(struct drm_atomic_state *state,
2792 struct dm_atomic_state **dm_state)
2793 {
2794 struct drm_device *dev = state->dev;
2795 struct amdgpu_device *adev = dev->dev_private;
2796 struct amdgpu_display_manager *dm = &adev->dm;
2797 struct drm_private_state *priv_state;
2798
2799 if (*dm_state)
2800 return 0;
2801
2802 priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
2803 if (IS_ERR(priv_state))
2804 return PTR_ERR(priv_state);
2805
2806 *dm_state = to_dm_atomic_state(priv_state);
2807
2808 return 0;
2809 }
2810
2811 static struct dm_atomic_state *
2812 dm_atomic_get_new_state(struct drm_atomic_state *state)
2813 {
2814 struct drm_device *dev = state->dev;
2815 struct amdgpu_device *adev = dev->dev_private;
2816 struct amdgpu_display_manager *dm = &adev->dm;
2817 struct drm_private_obj *obj;
2818 struct drm_private_state *new_obj_state;
2819 int i;
2820
2821 for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
2822 if (obj->funcs == dm->atomic_obj.funcs)
2823 return to_dm_atomic_state(new_obj_state);
2824 }
2825
2826 return NULL;
2827 }
2828
2829 static struct dm_atomic_state *
2830 dm_atomic_get_old_state(struct drm_atomic_state *state)
2831 {
2832 struct drm_device *dev = state->dev;
2833 struct amdgpu_device *adev = dev->dev_private;
2834 struct amdgpu_display_manager *dm = &adev->dm;
2835 struct drm_private_obj *obj;
2836 struct drm_private_state *old_obj_state;
2837 int i;
2838
2839 for_each_old_private_obj_in_state(state, obj, old_obj_state, i) {
2840 if (obj->funcs == dm->atomic_obj.funcs)
2841 return to_dm_atomic_state(old_obj_state);
2842 }
2843
2844 return NULL;
2845 }
2846
2847 static struct drm_private_state *
2848 dm_atomic_duplicate_state(struct drm_private_obj *obj)
2849 {
2850 struct dm_atomic_state *old_state, *new_state;
2851
2852 new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
2853 if (!new_state)
2854 return NULL;
2855
2856 __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
2857
2858 old_state = to_dm_atomic_state(obj->state);
2859
2860 if (old_state && old_state->context)
2861 new_state->context = dc_copy_state(old_state->context);
2862
2863 if (!new_state->context) {
2864 kfree(new_state);
2865 return NULL;
2866 }
2867
2868 return &new_state->base;
2869 }
2870
2871 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
2872 struct drm_private_state *state)
2873 {
2874 struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
2875
2876 if (dm_state && dm_state->context)
2877 dc_release_state(dm_state->context);
2878
2879 kfree(dm_state);
2880 }
2881
2882 static struct drm_private_state_funcs dm_atomic_state_funcs = {
2883 .atomic_duplicate_state = dm_atomic_duplicate_state,
2884 .atomic_destroy_state = dm_atomic_destroy_state,
2885 };
2886
2887 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
2888 {
2889 struct dm_atomic_state *state;
2890 int r;
2891
2892 adev->mode_info.mode_config_initialized = true;
2893
2894 adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
2895 adev->ddev->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
2896
2897 adev->ddev->mode_config.max_width = 16384;
2898 adev->ddev->mode_config.max_height = 16384;
2899
2900 adev->ddev->mode_config.preferred_depth = 24;
2901 adev->ddev->mode_config.prefer_shadow = 1;
2902 /* indicates support for immediate flip */
2903 adev->ddev->mode_config.async_page_flip = true;
2904
2905 adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
2906
2907 state = kzalloc(sizeof(*state), GFP_KERNEL);
2908 if (!state)
2909 return -ENOMEM;
2910
2911 state->context = dc_create_state(adev->dm.dc);
2912 if (!state->context) {
2913 kfree(state);
2914 return -ENOMEM;
2915 }
2916
2917 dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
2918
2919 drm_atomic_private_obj_init(adev->ddev,
2920 &adev->dm.atomic_obj,
2921 &state->base,
2922 &dm_atomic_state_funcs);
2923
2924 r = amdgpu_display_modeset_create_props(adev);
2925 if (r)
2926 return r;
2927
2928 r = amdgpu_dm_audio_init(adev);
2929 if (r)
2930 return r;
2931
2932 return 0;
2933 }
2934
2935 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
2936 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
2937 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
2938
2939 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
2940 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
2941
2942 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
2943 {
2944 #if defined(CONFIG_ACPI)
2945 struct amdgpu_dm_backlight_caps caps;
2946
2947 if (dm->backlight_caps.caps_valid)
2948 return;
2949
2950 amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
2951 if (caps.caps_valid) {
2952 dm->backlight_caps.caps_valid = true;
2953 if (caps.aux_support)
2954 return;
2955 dm->backlight_caps.min_input_signal = caps.min_input_signal;
2956 dm->backlight_caps.max_input_signal = caps.max_input_signal;
2957 } else {
2958 dm->backlight_caps.min_input_signal =
2959 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
2960 dm->backlight_caps.max_input_signal =
2961 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
2962 }
2963 #else
2964 if (dm->backlight_caps.aux_support)
2965 return;
2966
2967 dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
2968 dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
2969 #endif
2970 }
2971
2972 static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness)
2973 {
2974 bool rc;
2975
2976 if (!link)
2977 return 1;
2978
2979 rc = dc_link_set_backlight_level_nits(link, true, brightness,
2980 AUX_BL_DEFAULT_TRANSITION_TIME_MS);
2981
2982 return rc ? 0 : 1;
2983 }
2984
2985 static u32 convert_brightness(const struct amdgpu_dm_backlight_caps *caps,
2986 const uint32_t user_brightness)
2987 {
2988 u32 min, max, conversion_pace;
2989 u32 brightness = user_brightness;
2990
2991 if (!caps)
2992 goto out;
2993
2994 if (!caps->aux_support) {
2995 max = caps->max_input_signal;
2996 min = caps->min_input_signal;
2997 /*
2998 * The brightness input is in the range 0-255.
2999 * It needs to be rescaled to lie between the
3000 * requested min and max input signal.
3001 * It also needs to be scaled up by 0x101 to
3002 * match the DC interface, which has a range of
3003 * 0 to 0xffff.
3004 */
3005 conversion_pace = 0x101;
3006 brightness =
3007 user_brightness
3008 * conversion_pace
3009 * (max - min)
3010 / AMDGPU_MAX_BL_LEVEL
3011 + min * conversion_pace;
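/*
 * Sanity check with illustrative numbers, assuming AMDGPU_MAX_BL_LEVEL
 * is 255: for min = 12, max = 255 and user_brightness = 255 this yields
 * 255 * 0x101 * 243 / 255 + 12 * 0x101 = 62451 + 3084 = 0xffff,
 * i.e. exactly full scale on the 16-bit DC interface.
 */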
3012 } else {
3013 /* TODO
3014 * We are doing a linear interpolation here, which is OK but
3015 * does not provide the optimal result. We probably want
3016 * something close to the Perceptual Quantizer (PQ) curve.
3017 */
3018 max = caps->aux_max_input_signal;
3019 min = caps->aux_min_input_signal;
3020
3021 brightness = (AMDGPU_MAX_BL_LEVEL - user_brightness) * min
3022 + user_brightness * max;
3023 // Multiply the value by 1000 since we use millinits
3024 brightness *= 1000;
3025 brightness = DIV_ROUND_CLOSEST(brightness, AMDGPU_MAX_BL_LEVEL);
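/*
 * Illustrative numbers: with aux_min_input_signal = 50,
 * aux_max_input_signal = 204 and user_brightness = 128, this computes
 * (127 * 50 + 128 * 204) * 1000 / 255 ~= 127302 millinits (~127 nits),
 * roughly halfway between the two endpoints, as expected for a
 * linear interpolation.
 */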
3026 }
3027
3028 out:
3029 return brightness;
3030 }
3031
3032 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3033 {
3034 struct amdgpu_display_manager *dm = bl_get_data(bd);
3035 struct amdgpu_dm_backlight_caps caps;
3036 struct dc_link *link = NULL;
3037 u32 brightness;
3038 bool rc;
3039
3040 amdgpu_dm_update_backlight_caps(dm);
3041 caps = dm->backlight_caps;
3042
3043 link = (struct dc_link *)dm->backlight_link;
3044
3045 brightness = convert_brightness(&caps, bd->props.brightness);
3046 // Change brightness based on AUX property
3047 if (caps.aux_support)
3048 return set_backlight_via_aux(link, brightness);
3049
3050 rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
3051
3052 return rc ? 0 : 1;
3053 }
3054
3055 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3056 {
3057 struct amdgpu_display_manager *dm = bl_get_data(bd);
3058 int ret = dc_link_get_backlight_level(dm->backlight_link);
3059
3060 if (ret == DC_ERROR_UNEXPECTED)
3061 return bd->props.brightness;
3062 return ret;
3063 }
3064
3065 static const struct backlight_ops amdgpu_dm_backlight_ops = {
3066 .options = BL_CORE_SUSPENDRESUME,
3067 .get_brightness = amdgpu_dm_backlight_get_brightness,
3068 .update_status = amdgpu_dm_backlight_update_status,
3069 };
3070
3071 static void
3072 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3073 {
3074 char bl_name[16];
3075 struct backlight_properties props = { 0 };
3076
3077 amdgpu_dm_update_backlight_caps(dm);
3078
3079 props.max_brightness = AMDGPU_MAX_BL_LEVEL;
3080 props.brightness = AMDGPU_MAX_BL_LEVEL;
3081 props.type = BACKLIGHT_RAW;
3082
3083 snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
3084 dm->adev->ddev->primary->index);
3085
3086 dm->backlight_dev = backlight_device_register(bl_name,
3087 dm->adev->ddev->dev,
3088 dm,
3089 &amdgpu_dm_backlight_ops,
3090 &props);
3091
3092 if (IS_ERR(dm->backlight_dev))
3093 DRM_ERROR("DM: Backlight registration failed!\n");
3094 else
3095 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3096 }
3097
3098 #endif
3099
3100 static int initialize_plane(struct amdgpu_display_manager *dm,
3101 struct amdgpu_mode_info *mode_info, int plane_id,
3102 enum drm_plane_type plane_type,
3103 const struct dc_plane_cap *plane_cap)
3104 {
3105 struct drm_plane *plane;
3106 unsigned long possible_crtcs;
3107 int ret = 0;
3108
3109 plane = kzalloc(sizeof(*plane), GFP_KERNEL);
3110 if (!plane) {
3111 DRM_ERROR("KMS: Failed to allocate plane\n");
3112 return -ENOMEM;
3113 }
3114 plane->type = plane_type;
3115
3116 /*
3117 * HACK: IGT tests expect that the primary plane for a CRTC
3118 * can only have one possible CRTC. Only expose support for
3119 * any CRTC on planes that are not going to be used as a
3120 * primary plane for a CRTC, such as overlay or underlay planes.
3121 */
3122 possible_crtcs = 1 << plane_id;
3123 if (plane_id >= dm->dc->caps.max_streams)
3124 possible_crtcs = 0xff;
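/*
 * For example: primary plane 0 gets possible_crtcs = 0x1 (CRTC 0 only),
 * while a plane with an id at or above max_streams (overlay/underlay)
 * advertises 0xff, i.e. any CRTC.
 */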
3125
3126 ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3127
3128 if (ret) {
3129 DRM_ERROR("KMS: Failed to initialize plane\n");
3130 kfree(plane);
3131 return ret;
3132 }
3133
3134 if (mode_info)
3135 mode_info->planes[plane_id] = plane;
3136
3137 return ret;
3138 }
3139
3140
3141 static void register_backlight_device(struct amdgpu_display_manager *dm,
3142 struct dc_link *link)
3143 {
3144 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3145 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3146
3147 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3148 link->type != dc_connection_none) {
3149 /*
3150 * Even if registration fails, we should continue with
3151 * DM initialization, because having no backlight control
3152 * is better than a black screen.
3153 */
3154 amdgpu_dm_register_backlight_device(dm);
3155
3156 if (dm->backlight_dev)
3157 dm->backlight_link = link;
3158 }
3159 #endif
3160 }
3161
3162
3163 /*
3164 * In this architecture, the association
3165 * connector -> encoder -> crtc
3166 * is not really required. The crtc and connector will hold the
3167 * display_index as an abstraction to use with the DAL component.
3168 *
3169 * Returns 0 on success
3170 */
3171 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3172 {
3173 struct amdgpu_display_manager *dm = &adev->dm;
3174 int32_t i;
3175 struct amdgpu_dm_connector *aconnector = NULL;
3176 struct amdgpu_encoder *aencoder = NULL;
3177 struct amdgpu_mode_info *mode_info = &adev->mode_info;
3178 uint32_t link_cnt;
3179 int32_t primary_planes;
3180 enum dc_connection_type new_connection_type = dc_connection_none;
3181 const struct dc_plane_cap *plane;
3182
3183 link_cnt = dm->dc->caps.max_links;
3184 if (amdgpu_dm_mode_config_init(dm->adev)) {
3185 DRM_ERROR("DM: Failed to initialize mode config\n");
3186 return -EINVAL;
3187 }
3188
3189 /* There is one primary plane per CRTC */
3190 primary_planes = dm->dc->caps.max_streams;
3191 ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3192
3193 /*
3194 * Initialize primary planes, implicit planes for legacy IOCTLs.
3195 * Order is reversed to match iteration order in atomic check.
3196 */
3197 for (i = (primary_planes - 1); i >= 0; i--) {
3198 plane = &dm->dc->caps.planes[i];
3199
3200 if (initialize_plane(dm, mode_info, i,
3201 DRM_PLANE_TYPE_PRIMARY, plane)) {
3202 DRM_ERROR("KMS: Failed to initialize primary plane\n");
3203 goto fail;
3204 }
3205 }
3206
3207 /*
3208 * Initialize overlay planes, index starting after primary planes.
3209 * These planes have a higher DRM index than the primary planes since
3210 * they should be considered as having a higher z-order.
3211 * Order is reversed to match iteration order in atomic check.
3212 *
3213 * Only support DCN for now, and only expose one so we don't encourage
3214 * userspace to use up all the pipes.
3215 */
3216 for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3217 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3218
3219 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3220 continue;
3221
3222 if (!plane->blends_with_above || !plane->blends_with_below)
3223 continue;
3224
3225 if (!plane->pixel_format_support.argb8888)
3226 continue;
3227
3228 if (initialize_plane(dm, NULL, primary_planes + i,
3229 DRM_PLANE_TYPE_OVERLAY, plane)) {
3230 DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3231 goto fail;
3232 }
3233
3234 /* Only create one overlay plane. */
3235 break;
3236 }
3237
3238 for (i = 0; i < dm->dc->caps.max_streams; i++)
3239 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3240 DRM_ERROR("KMS: Failed to initialize crtc\n");
3241 goto fail;
3242 }
3243
3244 dm->display_indexes_num = dm->dc->caps.max_streams;
3245
3246 /* loops over all connectors on the board */
3247 for (i = 0; i < link_cnt; i++) {
3248 struct dc_link *link = NULL;
3249
3250 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3251 DRM_ERROR(
3252 "KMS: Cannot support more than %d display indexes\n",
3253 AMDGPU_DM_MAX_DISPLAY_INDEX);
3254 continue;
3255 }
3256
3257 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3258 if (!aconnector)
3259 goto fail;
3260
3261 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3262 if (!aencoder)
3263 goto fail;
3264
3265 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3266 DRM_ERROR("KMS: Failed to initialize encoder\n");
3267 goto fail;
3268 }
3269
3270 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3271 DRM_ERROR("KMS: Failed to initialize connector\n");
3272 goto fail;
3273 }
3274
3275 link = dc_get_link_at_index(dm->dc, i);
3276
3277 if (!dc_link_detect_sink(link, &new_connection_type))
3278 DRM_ERROR("KMS: Failed to detect connector\n");
3279
3280 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3281 emulated_link_detect(link);
3282 amdgpu_dm_update_connector_after_detect(aconnector);
3283
3284 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3285 amdgpu_dm_update_connector_after_detect(aconnector);
3286 register_backlight_device(dm, link);
3287 if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3288 amdgpu_dm_set_psr_caps(link);
3289 }
3290
3291
3292 }
3293
3294 /* Software is initialized. Now we can register interrupt handlers. */
3295 switch (adev->asic_type) {
3296 #if defined(CONFIG_DRM_AMD_DC_SI)
3297 case CHIP_TAHITI:
3298 case CHIP_PITCAIRN:
3299 case CHIP_VERDE:
3300 case CHIP_OLAND:
3301 if (dce60_register_irq_handlers(dm->adev)) {
3302 DRM_ERROR("DM: Failed to initialize IRQ\n");
3303 goto fail;
3304 }
3305 break;
3306 #endif
3307 case CHIP_BONAIRE:
3308 case CHIP_HAWAII:
3309 case CHIP_KAVERI:
3310 case CHIP_KABINI:
3311 case CHIP_MULLINS:
3312 case CHIP_TONGA:
3313 case CHIP_FIJI:
3314 case CHIP_CARRIZO:
3315 case CHIP_STONEY:
3316 case CHIP_POLARIS11:
3317 case CHIP_POLARIS10:
3318 case CHIP_POLARIS12:
3319 case CHIP_VEGAM:
3320 case CHIP_VEGA10:
3321 case CHIP_VEGA12:
3322 case CHIP_VEGA20:
3323 if (dce110_register_irq_handlers(dm->adev)) {
3324 DRM_ERROR("DM: Failed to initialize IRQ\n");
3325 goto fail;
3326 }
3327 break;
3328 #if defined(CONFIG_DRM_AMD_DC_DCN)
3329 case CHIP_RAVEN:
3330 case CHIP_NAVI12:
3331 case CHIP_NAVI10:
3332 case CHIP_NAVI14:
3333 case CHIP_RENOIR:
3334 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3335 case CHIP_SIENNA_CICHLID:
3336 case CHIP_NAVY_FLOUNDER:
3337 #endif
3338 if (dcn10_register_irq_handlers(dm->adev)) {
3339 DRM_ERROR("DM: Failed to initialize IRQ\n");
3340 goto fail;
3341 }
3342 break;
3343 #endif
3344 default:
3345 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3346 goto fail;
3347 }
3348
3349 /* No userspace support. */
3350 dm->dc->debug.disable_tri_buf = true;
3351
3352 return 0;
3353 fail:
3354 kfree(aencoder);
3355 kfree(aconnector);
3356
3357 return -EINVAL;
3358 }
3359
3360 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3361 {
3362 drm_mode_config_cleanup(dm->ddev);
3363 drm_atomic_private_obj_fini(&dm->atomic_obj);
3364 return;
3365 }
3366
3367 /******************************************************************************
3368 * amdgpu_display_funcs functions
3369 *****************************************************************************/
3370
3371 /*
3372 * dm_bandwidth_update - program display watermarks
3373 *
3374 * @adev: amdgpu_device pointer
3375 *
3376 * Calculate and program the display watermarks and line buffer allocation.
3377 */
3378 static void dm_bandwidth_update(struct amdgpu_device *adev)
3379 {
3380 /* TODO: implement later */
3381 }
3382
3383 static const struct amdgpu_display_funcs dm_display_funcs = {
3384 .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3385 .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3386 .backlight_set_level = NULL, /* never called for DC */
3387 .backlight_get_level = NULL, /* never called for DC */
3388 .hpd_sense = NULL,/* called unconditionally */
3389 .hpd_set_polarity = NULL, /* called unconditionally */
3390 .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3391 .page_flip_get_scanoutpos =
3392 dm_crtc_get_scanoutpos,/* called unconditionally */
3393 .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3394 .add_connector = NULL, /* VBIOS parsing. DAL does it. */
3395 };
3396
3397 #if defined(CONFIG_DEBUG_KERNEL_DC)
3398
3399 static ssize_t s3_debug_store(struct device *device,
3400 struct device_attribute *attr,
3401 const char *buf,
3402 size_t count)
3403 {
3404 int ret;
3405 int s3_state;
3406 struct drm_device *drm_dev = dev_get_drvdata(device);
3407 struct amdgpu_device *adev = drm_dev->dev_private;
3408
3409 ret = kstrtoint(buf, 0, &s3_state);
3410
3411 if (ret == 0) {
3412 if (s3_state) {
3413 dm_resume(adev);
3414 drm_kms_helper_hotplug_event(adev->ddev);
3415 } else
3416 dm_suspend(adev);
3417 }
3418
3419 return ret == 0 ? count : 0;
3420 }
3421
3422 DEVICE_ATTR_WO(s3_debug);
3423
3424 #endif
3425
3426 static int dm_early_init(void *handle)
3427 {
3428 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3429
3430 switch (adev->asic_type) {
3431 #if defined(CONFIG_DRM_AMD_DC_SI)
3432 case CHIP_TAHITI:
3433 case CHIP_PITCAIRN:
3434 case CHIP_VERDE:
3435 adev->mode_info.num_crtc = 6;
3436 adev->mode_info.num_hpd = 6;
3437 adev->mode_info.num_dig = 6;
3438 break;
3439 case CHIP_OLAND:
3440 adev->mode_info.num_crtc = 2;
3441 adev->mode_info.num_hpd = 2;
3442 adev->mode_info.num_dig = 2;
3443 break;
3444 #endif
3445 case CHIP_BONAIRE:
3446 case CHIP_HAWAII:
3447 adev->mode_info.num_crtc = 6;
3448 adev->mode_info.num_hpd = 6;
3449 adev->mode_info.num_dig = 6;
3450 break;
3451 case CHIP_KAVERI:
3452 adev->mode_info.num_crtc = 4;
3453 adev->mode_info.num_hpd = 6;
3454 adev->mode_info.num_dig = 7;
3455 break;
3456 case CHIP_KABINI:
3457 case CHIP_MULLINS:
3458 adev->mode_info.num_crtc = 2;
3459 adev->mode_info.num_hpd = 6;
3460 adev->mode_info.num_dig = 6;
3461 break;
3462 case CHIP_FIJI:
3463 case CHIP_TONGA:
3464 adev->mode_info.num_crtc = 6;
3465 adev->mode_info.num_hpd = 6;
3466 adev->mode_info.num_dig = 7;
3467 break;
3468 case CHIP_CARRIZO:
3469 adev->mode_info.num_crtc = 3;
3470 adev->mode_info.num_hpd = 6;
3471 adev->mode_info.num_dig = 9;
3472 break;
3473 case CHIP_STONEY:
3474 adev->mode_info.num_crtc = 2;
3475 adev->mode_info.num_hpd = 6;
3476 adev->mode_info.num_dig = 9;
3477 break;
3478 case CHIP_POLARIS11:
3479 case CHIP_POLARIS12:
3480 adev->mode_info.num_crtc = 5;
3481 adev->mode_info.num_hpd = 5;
3482 adev->mode_info.num_dig = 5;
3483 break;
3484 case CHIP_POLARIS10:
3485 case CHIP_VEGAM:
3486 adev->mode_info.num_crtc = 6;
3487 adev->mode_info.num_hpd = 6;
3488 adev->mode_info.num_dig = 6;
3489 break;
3490 case CHIP_VEGA10:
3491 case CHIP_VEGA12:
3492 case CHIP_VEGA20:
3493 adev->mode_info.num_crtc = 6;
3494 adev->mode_info.num_hpd = 6;
3495 adev->mode_info.num_dig = 6;
3496 break;
3497 #if defined(CONFIG_DRM_AMD_DC_DCN)
3498 case CHIP_RAVEN:
3499 adev->mode_info.num_crtc = 4;
3500 adev->mode_info.num_hpd = 4;
3501 adev->mode_info.num_dig = 4;
3502 break;
3503 #endif
3504 case CHIP_NAVI10:
3505 case CHIP_NAVI12:
3506 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3507 case CHIP_SIENNA_CICHLID:
3508 case CHIP_NAVY_FLOUNDER:
3509 #endif
3510 adev->mode_info.num_crtc = 6;
3511 adev->mode_info.num_hpd = 6;
3512 adev->mode_info.num_dig = 6;
3513 break;
3514 case CHIP_NAVI14:
3515 adev->mode_info.num_crtc = 5;
3516 adev->mode_info.num_hpd = 5;
3517 adev->mode_info.num_dig = 5;
3518 break;
3519 case CHIP_RENOIR:
3520 adev->mode_info.num_crtc = 4;
3521 adev->mode_info.num_hpd = 4;
3522 adev->mode_info.num_dig = 4;
3523 break;
3524 default:
3525 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3526 return -EINVAL;
3527 }
3528
3529 amdgpu_dm_set_irq_funcs(adev);
3530
3531 if (adev->mode_info.funcs == NULL)
3532 adev->mode_info.funcs = &dm_display_funcs;
3533
3534 /*
3535 * Note: Do NOT change adev->audio_endpt_rreg and
3536 * adev->audio_endpt_wreg because they are initialised in
3537 * amdgpu_device_init()
3538 */
3539 #if defined(CONFIG_DEBUG_KERNEL_DC)
3540 device_create_file(
3541 adev->ddev->dev,
3542 &dev_attr_s3_debug);
3543 #endif
3544
3545 return 0;
3546 }
3547
3548 static bool modeset_required(struct drm_crtc_state *crtc_state,
3549 struct dc_stream_state *new_stream,
3550 struct dc_stream_state *old_stream)
3551 {
3552 return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3553 }
3554
3555 static bool modereset_required(struct drm_crtc_state *crtc_state)
3556 {
3557 return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3558 }
3559
3560 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
3561 {
3562 drm_encoder_cleanup(encoder);
3563 kfree(encoder);
3564 }
3565
3566 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3567 .destroy = amdgpu_dm_encoder_destroy,
3568 };
3569
3570
3571 static int fill_dc_scaling_info(const struct drm_plane_state *state,
3572 struct dc_scaling_info *scaling_info)
3573 {
3574 int scale_w, scale_h;
3575
3576 memset(scaling_info, 0, sizeof(*scaling_info));
3577
3578 /* Source is fixed 16.16, but we ignore the fractional part for now... */
3579 scaling_info->src_rect.x = state->src_x >> 16;
3580 scaling_info->src_rect.y = state->src_y >> 16;
3581
3582 scaling_info->src_rect.width = state->src_w >> 16;
3583 if (scaling_info->src_rect.width == 0)
3584 return -EINVAL;
3585
3586 scaling_info->src_rect.height = state->src_h >> 16;
3587 if (scaling_info->src_rect.height == 0)
3588 return -EINVAL;
3589
3590 scaling_info->dst_rect.x = state->crtc_x;
3591 scaling_info->dst_rect.y = state->crtc_y;
3592
3593 if (state->crtc_w == 0)
3594 return -EINVAL;
3595
3596 scaling_info->dst_rect.width = state->crtc_w;
3597
3598 if (state->crtc_h == 0)
3599 return -EINVAL;
3600
3601 scaling_info->dst_rect.height = state->crtc_h;
3602
3603 /* DRM doesn't specify clipping on destination output. */
3604 scaling_info->clip_rect = scaling_info->dst_rect;
3605
3606 /* TODO: Validate scaling per-format with DC plane caps */
3607 scale_w = scaling_info->dst_rect.width * 1000 /
3608 scaling_info->src_rect.width;
3609
3610 if (scale_w < 250 || scale_w > 16000)
3611 return -EINVAL;
3612
3613 scale_h = scaling_info->dst_rect.height * 1000 /
3614 scaling_info->src_rect.height;
3615
3616 if (scale_h < 250 || scale_h > 16000)
3617 return -EINVAL;
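/*
 * The scale factors above are in units of 1/1000th, so the accepted
 * range 250..16000 corresponds to 0.25x..16x. For example, scanning out
 * a 1920-pixel-wide source at 960 pixels gives scale_w = 960 * 1000 /
 * 1920 = 500, i.e. a 0.5x downscale, which passes the check.
 */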
3618
3619 /*
3620 * The "scaling_quality" can be ignored for now, quality = 0 has DC
3621 * assume reasonable defaults based on the format.
3622 */
3623
3624 return 0;
3625 }
3626
3627 static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
3628 uint64_t *tiling_flags, bool *tmz_surface)
3629 {
3630 struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
3631 int r = amdgpu_bo_reserve(rbo, false);
3632
3633 if (unlikely(r)) {
3634 /* Don't show error message when returning -ERESTARTSYS */
3635 if (r != -ERESTARTSYS)
3636 DRM_ERROR("Unable to reserve buffer: %d\n", r);
3637 return r;
3638 }
3639
3640 if (tiling_flags)
3641 amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
3642
3643 if (tmz_surface)
3644 *tmz_surface = amdgpu_bo_encrypted(rbo);
3645
3646 amdgpu_bo_unreserve(rbo);
3647
3648 return r;
3649 }
3650
3651 static inline uint64_t get_dcc_address(uint64_t address, uint64_t tiling_flags)
3652 {
3653 uint32_t offset = AMDGPU_TILING_GET(tiling_flags, DCC_OFFSET_256B);
3654
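/*
 * Example: a DCC_OFFSET_256B value of 0x40 places the DCC metadata
 * 0x40 * 256 = 16 KiB past the surface base address.
 */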
3655 return offset ? (address + offset * 256) : 0;
3656 }
3657
3658 static int
3659 fill_plane_dcc_attributes(struct amdgpu_device *adev,
3660 const struct amdgpu_framebuffer *afb,
3661 const enum surface_pixel_format format,
3662 const enum dc_rotation_angle rotation,
3663 const struct plane_size *plane_size,
3664 const union dc_tiling_info *tiling_info,
3665 const uint64_t info,
3666 struct dc_plane_dcc_param *dcc,
3667 struct dc_plane_address *address,
3668 bool force_disable_dcc)
3669 {
3670 struct dc *dc = adev->dm.dc;
3671 struct dc_dcc_surface_param input;
3672 struct dc_surface_dcc_cap output;
3673 uint32_t offset = AMDGPU_TILING_GET(info, DCC_OFFSET_256B);
3674 uint32_t i64b = AMDGPU_TILING_GET(info, DCC_INDEPENDENT_64B) != 0;
3675 uint64_t dcc_address;
3676
3677 memset(&input, 0, sizeof(input));
3678 memset(&output, 0, sizeof(output));
3679
3680 if (force_disable_dcc)
3681 return 0;
3682
3683 if (!offset)
3684 return 0;
3685
3686 if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3687 return 0;
3688
3689 if (!dc->cap_funcs.get_dcc_compression_cap)
3690 return -EINVAL;
3691
3692 input.format = format;
3693 input.surface_size.width = plane_size->surface_size.width;
3694 input.surface_size.height = plane_size->surface_size.height;
3695 input.swizzle_mode = tiling_info->gfx9.swizzle;
3696
3697 if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
3698 input.scan = SCAN_DIRECTION_HORIZONTAL;
3699 else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
3700 input.scan = SCAN_DIRECTION_VERTICAL;
3701
3702 if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
3703 return -EINVAL;
3704
3705 if (!output.capable)
3706 return -EINVAL;
3707
3708 if (i64b == 0 && output.grph.rgb.independent_64b_blks != 0)
3709 return -EINVAL;
3710
3711 dcc->enable = 1;
3712 dcc->meta_pitch =
3713 AMDGPU_TILING_GET(info, DCC_PITCH_MAX) + 1;
3714 dcc->independent_64b_blks = i64b;
3715
3716 dcc_address = get_dcc_address(afb->address, info);
3717 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
3718 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
3719
3720 return 0;
3721 }
3722
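/*
 * fill_plane_buffer_attributes() - translate the framebuffer layout
 * (pitches, offsets, tiling flags) into DC's tiling_info, plane_size,
 * dcc and address structures. Graphics formats describe a single
 * plane; video formats additionally describe the chroma plane from
 * fb->offsets[1] and fb->pitches[1].
 */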
3723 static int
3724 fill_plane_buffer_attributes(struct amdgpu_device *adev,
3725 const struct amdgpu_framebuffer *afb,
3726 const enum surface_pixel_format format,
3727 const enum dc_rotation_angle rotation,
3728 const uint64_t tiling_flags,
3729 union dc_tiling_info *tiling_info,
3730 struct plane_size *plane_size,
3731 struct dc_plane_dcc_param *dcc,
3732 struct dc_plane_address *address,
3733 bool tmz_surface,
3734 bool force_disable_dcc)
3735 {
3736 const struct drm_framebuffer *fb = &afb->base;
3737 int ret;
3738
3739 memset(tiling_info, 0, sizeof(*tiling_info));
3740 memset(plane_size, 0, sizeof(*plane_size));
3741 memset(dcc, 0, sizeof(*dcc));
3742 memset(address, 0, sizeof(*address));
3743
3744 address->tmz_surface = tmz_surface;
3745
3746 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
3747 plane_size->surface_size.x = 0;
3748 plane_size->surface_size.y = 0;
3749 plane_size->surface_size.width = fb->width;
3750 plane_size->surface_size.height = fb->height;
3751 plane_size->surface_pitch =
3752 fb->pitches[0] / fb->format->cpp[0];
3753
3754 address->type = PLN_ADDR_TYPE_GRAPHICS;
3755 address->grph.addr.low_part = lower_32_bits(afb->address);
3756 address->grph.addr.high_part = upper_32_bits(afb->address);
3757 } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
3758 uint64_t chroma_addr = afb->address + fb->offsets[1];
3759
3760 plane_size->surface_size.x = 0;
3761 plane_size->surface_size.y = 0;
3762 plane_size->surface_size.width = fb->width;
3763 plane_size->surface_size.height = fb->height;
3764 plane_size->surface_pitch =
3765 fb->pitches[0] / fb->format->cpp[0];
3766
3767 plane_size->chroma_size.x = 0;
3768 plane_size->chroma_size.y = 0;
3769 /* TODO: set these based on surface format */
3770 plane_size->chroma_size.width = fb->width / 2;
3771 plane_size->chroma_size.height = fb->height / 2;
3772
3773 plane_size->chroma_pitch =
3774 fb->pitches[1] / fb->format->cpp[1];
3775
3776 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
3777 address->video_progressive.luma_addr.low_part =
3778 lower_32_bits(afb->address);
3779 address->video_progressive.luma_addr.high_part =
3780 upper_32_bits(afb->address);
3781 address->video_progressive.chroma_addr.low_part =
3782 lower_32_bits(chroma_addr);
3783 address->video_progressive.chroma_addr.high_part =
3784 upper_32_bits(chroma_addr);
3785 }
3786
3787 /* Fill GFX8 params */
3788 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
3789 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
3790
3791 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
3792 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
3793 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
3794 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
3795 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
3796
3797 /* XXX fix me for VI */
3798 tiling_info->gfx8.num_banks = num_banks;
3799 tiling_info->gfx8.array_mode =
3800 DC_ARRAY_2D_TILED_THIN1;
3801 tiling_info->gfx8.tile_split = tile_split;
3802 tiling_info->gfx8.bank_width = bankw;
3803 tiling_info->gfx8.bank_height = bankh;
3804 tiling_info->gfx8.tile_aspect = mtaspect;
3805 tiling_info->gfx8.tile_mode =
3806 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
3807 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
3808 == DC_ARRAY_1D_TILED_THIN1) {
3809 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
3810 }
3811
3812 tiling_info->gfx8.pipe_config =
3813 AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
3814
3815 if (adev->asic_type == CHIP_VEGA10 ||
3816 adev->asic_type == CHIP_VEGA12 ||
3817 adev->asic_type == CHIP_VEGA20 ||
3818 adev->asic_type == CHIP_NAVI10 ||
3819 adev->asic_type == CHIP_NAVI14 ||
3820 adev->asic_type == CHIP_NAVI12 ||
3821 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3822 adev->asic_type == CHIP_SIENNA_CICHLID ||
3823 adev->asic_type == CHIP_NAVY_FLOUNDER ||
3824 #endif
3825 adev->asic_type == CHIP_RENOIR ||
3826 adev->asic_type == CHIP_RAVEN) {
3827 /* Fill GFX9 params */
3828 tiling_info->gfx9.num_pipes =
3829 adev->gfx.config.gb_addr_config_fields.num_pipes;
3830 tiling_info->gfx9.num_banks =
3831 adev->gfx.config.gb_addr_config_fields.num_banks;
3832 tiling_info->gfx9.pipe_interleave =
3833 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
3834 tiling_info->gfx9.num_shader_engines =
3835 adev->gfx.config.gb_addr_config_fields.num_se;
3836 tiling_info->gfx9.max_compressed_frags =
3837 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
3838 tiling_info->gfx9.num_rb_per_se =
3839 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
3840 tiling_info->gfx9.swizzle =
3841 AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
3842 tiling_info->gfx9.shaderEnable = 1;
3843
3844 #ifdef CONFIG_DRM_AMD_DC_DCN3_0
3845 if (adev->asic_type == CHIP_SIENNA_CICHLID ||
3846 adev->asic_type == CHIP_NAVY_FLOUNDER)
3847 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
3848 #endif
3849 ret = fill_plane_dcc_attributes(adev, afb, format, rotation,
3850 plane_size, tiling_info,
3851 tiling_flags, dcc, address,
3852 force_disable_dcc);
3853 if (ret)
3854 return ret;
3855 }
3856
3857 return 0;
3858 }
3859
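/*
 * fill_blending_from_plane_state() - derive DC blending settings from
 * the DRM plane state. Per-pixel alpha is only honored for overlay
 * planes with premultiplied blending and an alpha-capable format; a
 * plane alpha below 0xffff additionally enables global alpha, scaled
 * from DRM's 16-bit range down to DC's 8-bit range.
 */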
3860 static void
3861 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
3862 bool *per_pixel_alpha, bool *global_alpha,
3863 int *global_alpha_value)
3864 {
3865 *per_pixel_alpha = false;
3866 *global_alpha = false;
3867 *global_alpha_value = 0xff;
3868
3869 if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
3870 return;
3871
3872 if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
3873 static const uint32_t alpha_formats[] = {
3874 DRM_FORMAT_ARGB8888,
3875 DRM_FORMAT_RGBA8888,
3876 DRM_FORMAT_ABGR8888,
3877 };
3878 uint32_t format = plane_state->fb->format->format;
3879 unsigned int i;
3880
3881 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
3882 if (format == alpha_formats[i]) {
3883 *per_pixel_alpha = true;
3884 break;
3885 }
3886 }
3887 }
3888
3889 if (plane_state->alpha < 0xffff) {
3890 *global_alpha = true;
3891 *global_alpha_value = plane_state->alpha >> 8;
3892 }
3893 }
3894
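/*
 * fill_plane_color_attributes() - map the DRM color encoding/range
 * properties onto a DC color space. RGB formats always use sRGB;
 * YCbCr formats select between the 601/709/2020 variants, with
 * limited range where DRM requests it. BT.2020 limited range is
 * currently rejected.
 */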
3895 static int
3896 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
3897 const enum surface_pixel_format format,
3898 enum dc_color_space *color_space)
3899 {
3900 bool full_range;
3901
3902 *color_space = COLOR_SPACE_SRGB;
3903
3904 /* DRM color properties only affect non-RGB formats. */
3905 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3906 return 0;
3907
3908 full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
3909
3910 switch (plane_state->color_encoding) {
3911 case DRM_COLOR_YCBCR_BT601:
3912 if (full_range)
3913 *color_space = COLOR_SPACE_YCBCR601;
3914 else
3915 *color_space = COLOR_SPACE_YCBCR601_LIMITED;
3916 break;
3917
3918 case DRM_COLOR_YCBCR_BT709:
3919 if (full_range)
3920 *color_space = COLOR_SPACE_YCBCR709;
3921 else
3922 *color_space = COLOR_SPACE_YCBCR709_LIMITED;
3923 break;
3924
3925 case DRM_COLOR_YCBCR_BT2020:
3926 if (full_range)
3927 *color_space = COLOR_SPACE_2020_YCBCR;
3928 else
3929 return -EINVAL;
3930 break;
3931
3932 default:
3933 return -EINVAL;
3934 }
3935
3936 return 0;
3937 }
3938
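/*
 * fill_dc_plane_info_and_addr() - build a complete dc_plane_info from
 * the DRM plane state: pixel format, rotation, color space, buffer
 * layout and blending. Unsupported DRM formats are rejected with
 * -EINVAL.
 */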
3939 static int
3940 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
3941 const struct drm_plane_state *plane_state,
3942 const uint64_t tiling_flags,
3943 struct dc_plane_info *plane_info,
3944 struct dc_plane_address *address,
3945 bool tmz_surface,
3946 bool force_disable_dcc)
3947 {
3948 const struct drm_framebuffer *fb = plane_state->fb;
3949 const struct amdgpu_framebuffer *afb =
3950 to_amdgpu_framebuffer(plane_state->fb);
3951 struct drm_format_name_buf format_name;
3952 int ret;
3953
3954 memset(plane_info, 0, sizeof(*plane_info));
3955
3956 switch (fb->format->format) {
3957 case DRM_FORMAT_C8:
3958 plane_info->format =
3959 SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
3960 break;
3961 case DRM_FORMAT_RGB565:
3962 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
3963 break;
3964 case DRM_FORMAT_XRGB8888:
3965 case DRM_FORMAT_ARGB8888:
3966 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
3967 break;
3968 case DRM_FORMAT_XRGB2101010:
3969 case DRM_FORMAT_ARGB2101010:
3970 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
3971 break;
3972 case DRM_FORMAT_XBGR2101010:
3973 case DRM_FORMAT_ABGR2101010:
3974 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
3975 break;
3976 case DRM_FORMAT_XBGR8888:
3977 case DRM_FORMAT_ABGR8888:
3978 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
3979 break;
3980 case DRM_FORMAT_NV21:
3981 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
3982 break;
3983 case DRM_FORMAT_NV12:
3984 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
3985 break;
3986 case DRM_FORMAT_P010:
3987 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
3988 break;
3989 case DRM_FORMAT_XRGB16161616F:
3990 case DRM_FORMAT_ARGB16161616F:
3991 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
3992 break;
3993 case DRM_FORMAT_XBGR16161616F:
3994 case DRM_FORMAT_ABGR16161616F:
3995 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
3996 break;
3997 default:
3998 DRM_ERROR(
3999 "Unsupported screen format %s\n",
4000 drm_get_format_name(fb->format->format, &format_name));
4001 return -EINVAL;
4002 }
4003
4004 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
4005 case DRM_MODE_ROTATE_0:
4006 plane_info->rotation = ROTATION_ANGLE_0;
4007 break;
4008 case DRM_MODE_ROTATE_90:
4009 plane_info->rotation = ROTATION_ANGLE_90;
4010 break;
4011 case DRM_MODE_ROTATE_180:
4012 plane_info->rotation = ROTATION_ANGLE_180;
4013 break;
4014 case DRM_MODE_ROTATE_270:
4015 plane_info->rotation = ROTATION_ANGLE_270;
4016 break;
4017 default:
4018 plane_info->rotation = ROTATION_ANGLE_0;
4019 break;
4020 }
4021
4022 plane_info->visible = true;
4023 plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
4024
4025 plane_info->layer_index = 0;
4026
4027 ret = fill_plane_color_attributes(plane_state, plane_info->format,
4028 &plane_info->color_space);
4029 if (ret)
4030 return ret;
4031
4032 ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
4033 plane_info->rotation, tiling_flags,
4034 &plane_info->tiling_info,
4035 &plane_info->plane_size,
4036 &plane_info->dcc, address, tmz_surface,
4037 force_disable_dcc);
4038 if (ret)
4039 return ret;
4040
4041 fill_blending_from_plane_state(
4042 plane_state, &plane_info->per_pixel_alpha,
4043 &plane_info->global_alpha, &plane_info->global_alpha_value);
4044
4045 return 0;
4046 }
4047
4048 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
4049 struct dc_plane_state *dc_plane_state,
4050 struct drm_plane_state *plane_state,
4051 struct drm_crtc_state *crtc_state)
4052 {
4053 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
4054 const struct amdgpu_framebuffer *amdgpu_fb =
4055 to_amdgpu_framebuffer(plane_state->fb);
4056 struct dc_scaling_info scaling_info;
4057 struct dc_plane_info plane_info;
4058 uint64_t tiling_flags;
4059 int ret;
4060 bool tmz_surface = false;
4061 bool force_disable_dcc = false;
4062
4063 ret = fill_dc_scaling_info(plane_state, &scaling_info);
4064 if (ret)
4065 return ret;
4066
4067 dc_plane_state->src_rect = scaling_info.src_rect;
4068 dc_plane_state->dst_rect = scaling_info.dst_rect;
4069 dc_plane_state->clip_rect = scaling_info.clip_rect;
4070 dc_plane_state->scaling_quality = scaling_info.scaling_quality;
4071
4072 ret = get_fb_info(amdgpu_fb, &tiling_flags, &tmz_surface);
4073 if (ret)
4074 return ret;
4075
4076 force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
4077 ret = fill_dc_plane_info_and_addr(adev, plane_state, tiling_flags,
4078 &plane_info,
4079 &dc_plane_state->address,
4080 tmz_surface,
4081 force_disable_dcc);
4082 if (ret)
4083 return ret;
4084
4085 dc_plane_state->format = plane_info.format;
4086 dc_plane_state->color_space = plane_info.color_space;
4088 dc_plane_state->plane_size = plane_info.plane_size;
4089 dc_plane_state->rotation = plane_info.rotation;
4090 dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
4091 dc_plane_state->stereo_format = plane_info.stereo_format;
4092 dc_plane_state->tiling_info = plane_info.tiling_info;
4093 dc_plane_state->visible = plane_info.visible;
4094 dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
4095 dc_plane_state->global_alpha = plane_info.global_alpha;
4096 dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
4097 dc_plane_state->dcc = plane_info.dcc;
4098 dc_plane_state->layer_index = plane_info.layer_index; /* always 0 for now */
4099
4100 /*
4101 * Always set input transfer function, since plane state is refreshed
4102 * every time.
4103 */
4104 ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
4105 if (ret)
4106 return ret;
4107
4108 return 0;
4109 }
4110
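/*
 * update_stream_scaling_settings() - compute the stream src/dst
 * rectangles from the requested scaling mode (RMX_OFF/RMX_ASPECT do an
 * aspect-preserving fit, RMX_CENTER pins the mode 1:1), center the
 * result in the addressable area, and apply any underscan borders.
 */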
4111 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
4112 const struct dm_connector_state *dm_state,
4113 struct dc_stream_state *stream)
4114 {
4115 enum amdgpu_rmx_type rmx_type;
4116
4117 struct rect src = { 0 }; /* viewport in composition space */
4118 struct rect dst = { 0 }; /* stream addressable area */
4119
4120 /* No mode, nothing to be done */
4121 if (!mode)
4122 return;
4123
4124 /* Full screen scaling by default */
4125 src.width = mode->hdisplay;
4126 src.height = mode->vdisplay;
4127 dst.width = stream->timing.h_addressable;
4128 dst.height = stream->timing.v_addressable;
4129
4130 if (dm_state) {
4131 rmx_type = dm_state->scaling;
4132 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
4133 if (src.width * dst.height <
4134 src.height * dst.width) {
4135 /* height needs less upscaling/more downscaling */
4136 dst.width = src.width *
4137 dst.height / src.height;
4138 } else {
4139 /* width needs less upscaling/more downscaling */
4140 dst.height = src.height *
4141 dst.width / src.width;
4142 }
4143 } else if (rmx_type == RMX_CENTER) {
4144 dst = src;
4145 }
4146
4147 dst.x = (stream->timing.h_addressable - dst.width) / 2;
4148 dst.y = (stream->timing.v_addressable - dst.height) / 2;
4149
4150 if (dm_state->underscan_enable) {
4151 dst.x += dm_state->underscan_hborder / 2;
4152 dst.y += dm_state->underscan_vborder / 2;
4153 dst.width -= dm_state->underscan_hborder;
4154 dst.height -= dm_state->underscan_vborder;
4155 }
4156 }
4157
4158 stream->src = src;
4159 stream->dst = dst;
4160
4161 DRM_DEBUG_DRIVER("Destination Rectangle x:%d y:%d width:%d height:%d\n",
4162 dst.x, dst.y, dst.width, dst.height);
4163
4164 }
4165
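/*
 * convert_color_depth_from_display_info() - pick a color depth from
 * the sink's EDID-reported bpc (or the YCbCr 4:2:0 deep-color modes
 * when is_y420), optionally capped by the user-requested maximum and
 * rounded down to an even value.
 */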
4166 static enum dc_color_depth
4167 convert_color_depth_from_display_info(const struct drm_connector *connector,
4168 bool is_y420, int requested_bpc)
4169 {
4170 uint8_t bpc;
4171
4172 if (is_y420) {
4173 bpc = 8;
4174
4175 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
4176 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
4177 bpc = 16;
4178 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
4179 bpc = 12;
4180 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
4181 bpc = 10;
4182 } else {
4183 bpc = (uint8_t)connector->display_info.bpc;
4184 /* Assume 8 bpc by default if no bpc is specified. */
4185 bpc = bpc ? bpc : 8;
4186 }
4187
4188 if (requested_bpc > 0) {
4189 /*
4190 * Cap display bpc based on the user requested value.
4191 *
4192 * The value for state->max_bpc may not be correctly updated
4193 * depending on when the connector gets added to the state
4194 * or if this was called outside of atomic check, so it
4195 * can't be used directly.
4196 */
4197 bpc = min_t(u8, bpc, requested_bpc);
4198
4199 /* Round down to the nearest even number. */
4200 bpc = bpc - (bpc & 1);
4201 }
4202
4203 switch (bpc) {
4204 case 0:
4205 /*
4206 * Temporary Work around, DRM doesn't parse color depth for
4207 * EDID revision before 1.4
4208 * TODO: Fix edid parsing
4209 */
4210 return COLOR_DEPTH_888;
4211 case 6:
4212 return COLOR_DEPTH_666;
4213 case 8:
4214 return COLOR_DEPTH_888;
4215 case 10:
4216 return COLOR_DEPTH_101010;
4217 case 12:
4218 return COLOR_DEPTH_121212;
4219 case 14:
4220 return COLOR_DEPTH_141414;
4221 case 16:
4222 return COLOR_DEPTH_161616;
4223 default:
4224 return COLOR_DEPTH_UNDEFINED;
4225 }
4226 }
4227
4228 static enum dc_aspect_ratio
4229 get_aspect_ratio(const struct drm_display_mode *mode_in)
4230 {
4231 /* 1-1 mapping, since both enums follow the HDMI spec. */
4232 return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
4233 }
4234
4235 static enum dc_color_space
4236 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
4237 {
4238 enum dc_color_space color_space = COLOR_SPACE_SRGB;
4239
4240 switch (dc_crtc_timing->pixel_encoding) {
4241 case PIXEL_ENCODING_YCBCR422:
4242 case PIXEL_ENCODING_YCBCR444:
4243 case PIXEL_ENCODING_YCBCR420:
4244 {
4245 /*
4246 * 27.03 MHz is the separation point between HDTV and SDTV
4247 * according to the HDMI spec; we use YCbCr709 and YCbCr601
4248 * respectively.
4249 */
4250 if (dc_crtc_timing->pix_clk_100hz > 270300) {
4251 if (dc_crtc_timing->flags.Y_ONLY)
4252 color_space =
4253 COLOR_SPACE_YCBCR709_LIMITED;
4254 else
4255 color_space = COLOR_SPACE_YCBCR709;
4256 } else {
4257 if (dc_crtc_timing->flags.Y_ONLY)
4258 color_space =
4259 COLOR_SPACE_YCBCR601_LIMITED;
4260 else
4261 color_space = COLOR_SPACE_YCBCR601;
4262 }
4263
4264 }
4265 break;
4266 case PIXEL_ENCODING_RGB:
4267 color_space = COLOR_SPACE_SRGB;
4268 break;
4269
4270 default:
4271 WARN_ON(1);
4272 break;
4273 }
4274
4275 return color_space;
4276 }
4277
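/*
 * adjust_colour_depth_from_display_info() - walk down from the current
 * color depth until the normalized pixel clock fits the sink's
 * max_tmds_clock. The 30/24, 36/24 and 48/24 multipliers scale the
 * clock by the bits-per-pixel ratio relative to 24 bpp; YCbCr 4:2:0
 * halves the clock first. Returns false when no valid HDMI depth fits.
 */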
4278 static bool adjust_colour_depth_from_display_info(
4279 struct dc_crtc_timing *timing_out,
4280 const struct drm_display_info *info)
4281 {
4282 enum dc_color_depth depth = timing_out->display_color_depth;
4283 int normalized_clk;
4284 do {
4285 normalized_clk = timing_out->pix_clk_100hz / 10;
4286 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
4287 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
4288 normalized_clk /= 2;
4289 /* Adjust the pixel clock per the HDMI spec, based on colour depth */
4290 switch (depth) {
4291 case COLOR_DEPTH_888:
4292 break;
4293 case COLOR_DEPTH_101010:
4294 normalized_clk = (normalized_clk * 30) / 24;
4295 break;
4296 case COLOR_DEPTH_121212:
4297 normalized_clk = (normalized_clk * 36) / 24;
4298 break;
4299 case COLOR_DEPTH_161616:
4300 normalized_clk = (normalized_clk * 48) / 24;
4301 break;
4302 default:
4303 /* The above depths are the only ones valid for HDMI. */
4304 return false;
4305 }
4306 if (normalized_clk <= info->max_tmds_clock) {
4307 timing_out->display_color_depth = depth;
4308 return true;
4309 }
4310 } while (--depth > COLOR_DEPTH_666);
4311 return false;
4312 }
4313
4314 static void fill_stream_properties_from_drm_display_mode(
4315 struct dc_stream_state *stream,
4316 const struct drm_display_mode *mode_in,
4317 const struct drm_connector *connector,
4318 const struct drm_connector_state *connector_state,
4319 const struct dc_stream_state *old_stream,
4320 int requested_bpc)
4321 {
4322 struct dc_crtc_timing *timing_out = &stream->timing;
4323 const struct drm_display_info *info = &connector->display_info;
4324 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4325 struct hdmi_vendor_infoframe hv_frame;
4326 struct hdmi_avi_infoframe avi_frame;
4327
4328 memset(&hv_frame, 0, sizeof(hv_frame));
4329 memset(&avi_frame, 0, sizeof(avi_frame));
4330
4331 timing_out->h_border_left = 0;
4332 timing_out->h_border_right = 0;
4333 timing_out->v_border_top = 0;
4334 timing_out->v_border_bottom = 0;
4335 /* TODO: un-hardcode */
4336 if (drm_mode_is_420_only(info, mode_in)
4337 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4338 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4339 else if (drm_mode_is_420_also(info, mode_in)
4340 && aconnector->force_yuv420_output)
4341 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4342 else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
4343 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4344 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
4345 else
4346 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
4347
4348 timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
4349 timing_out->display_color_depth = convert_color_depth_from_display_info(
4350 connector,
4351 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
4352 requested_bpc);
4353 timing_out->scan_type = SCANNING_TYPE_NODATA;
4354 timing_out->hdmi_vic = 0;
4355
4356 if (old_stream) {
4357 timing_out->vic = old_stream->timing.vic;
4358 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
4359 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
4360 } else {
4361 timing_out->vic = drm_match_cea_mode(mode_in);
4362 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
4363 timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
4364 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
4365 timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
4366 }
4367
4368 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4369 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
4370 timing_out->vic = avi_frame.video_code;
4371 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
4372 timing_out->hdmi_vic = hv_frame.vic;
4373 }
4374
4375 timing_out->h_addressable = mode_in->crtc_hdisplay;
4376 timing_out->h_total = mode_in->crtc_htotal;
4377 timing_out->h_sync_width =
4378 mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
4379 timing_out->h_front_porch =
4380 mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
4381 timing_out->v_total = mode_in->crtc_vtotal;
4382 timing_out->v_addressable = mode_in->crtc_vdisplay;
4383 timing_out->v_front_porch =
4384 mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
4385 timing_out->v_sync_width =
4386 mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
4387 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
4388 timing_out->aspect_ratio = get_aspect_ratio(mode_in);
4389
4390 stream->output_color_space = get_output_color_space(timing_out);
4391
4392 stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
4393 stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
4394 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4395 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
4396 drm_mode_is_420_also(info, mode_in) &&
4397 timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
4398 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4399 adjust_colour_depth_from_display_info(timing_out, info);
4400 }
4401 }
4402 }
4403
4404 static void fill_audio_info(struct audio_info *audio_info,
4405 const struct drm_connector *drm_connector,
4406 const struct dc_sink *dc_sink)
4407 {
4408 int i = 0;
4409 int cea_revision = 0;
4410 const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
4411
4412 audio_info->manufacture_id = edid_caps->manufacturer_id;
4413 audio_info->product_id = edid_caps->product_id;
4414
4415 cea_revision = drm_connector->display_info.cea_rev;
4416
4417 strscpy(audio_info->display_name,
4418 edid_caps->display_name,
4419 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
4420
4421 if (cea_revision >= 3) {
4422 audio_info->mode_count = edid_caps->audio_mode_count;
4423
4424 for (i = 0; i < audio_info->mode_count; ++i) {
4425 audio_info->modes[i].format_code =
4426 (enum audio_format_code)
4427 (edid_caps->audio_modes[i].format_code);
4428 audio_info->modes[i].channel_count =
4429 edid_caps->audio_modes[i].channel_count;
4430 audio_info->modes[i].sample_rates.all =
4431 edid_caps->audio_modes[i].sample_rate;
4432 audio_info->modes[i].sample_size =
4433 edid_caps->audio_modes[i].sample_size;
4434 }
4435 }
4436
4437 audio_info->flags.all = edid_caps->speaker_flags;
4438
4439 /* TODO: We only check the progressive mode; check the interlaced mode too */
4440 if (drm_connector->latency_present[0]) {
4441 audio_info->video_latency = drm_connector->video_latency[0];
4442 audio_info->audio_latency = drm_connector->audio_latency[0];
4443 }
4444
4445 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
4446
4447 }
4448
4449 static void
4450 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
4451 struct drm_display_mode *dst_mode)
4452 {
4453 dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
4454 dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
4455 dst_mode->crtc_clock = src_mode->crtc_clock;
4456 dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
4457 dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
4458 dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
4459 dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
4460 dst_mode->crtc_htotal = src_mode->crtc_htotal;
4461 dst_mode->crtc_hskew = src_mode->crtc_hskew;
4462 dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
4463 dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
4464 dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
4465 dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
4466 dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
4467 }
4468
4469 static void
4470 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
4471 const struct drm_display_mode *native_mode,
4472 bool scale_enabled)
4473 {
4474 if (scale_enabled) {
4475 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4476 } else if (native_mode->clock == drm_mode->clock &&
4477 native_mode->htotal == drm_mode->htotal &&
4478 native_mode->vtotal == drm_mode->vtotal) {
4479 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4480 } else {
4481 /* no scaling nor amdgpu inserted, no need to patch */
4482 }
4483 }
4484
4485 static struct dc_sink *
4486 create_fake_sink(struct amdgpu_dm_connector *aconnector)
4487 {
4488 struct dc_sink_init_data sink_init_data = { 0 };
4489 struct dc_sink *sink = NULL;
4490 sink_init_data.link = aconnector->dc_link;
4491 sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
4492
4493 sink = dc_sink_create(&sink_init_data);
4494 if (!sink) {
4495 DRM_ERROR("Failed to create sink!\n");
4496 return NULL;
4497 }
4498 sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
4499
4500 return sink;
4501 }
4502
4503 static void set_multisync_trigger_params(
4504 struct dc_stream_state *stream)
4505 {
4506 if (stream->triggered_crtc_reset.enabled) {
4507 stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
4508 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
4509 }
4510 }
4511
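/*
 * set_master_stream() - pick the multisync master: the trigger-enabled
 * stream with the highest refresh rate (pix_clk_100hz * 100 /
 * (h_total * v_total), i.e. Hz) becomes the reset event source for
 * every stream in the set.
 */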
4512 static void set_master_stream(struct dc_stream_state *stream_set[],
4513 int stream_count)
4514 {
4515 int j, highest_rfr = 0, master_stream = 0;
4516
4517 for (j = 0; j < stream_count; j++) {
4518 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
4519 int refresh_rate = 0;
4520
4521 refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
4522 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
4523 if (refresh_rate > highest_rfr) {
4524 highest_rfr = refresh_rate;
4525 master_stream = j;
4526 }
4527 }
4528 }
4529 for (j = 0; j < stream_count; j++) {
4530 if (stream_set[j])
4531 stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
4532 }
4533 }
4534
4535 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
4536 {
4537 int i = 0;
4538
4539 if (context->stream_count < 2)
4540 return;
4541 for (i = 0; i < context->stream_count ; i++) {
4542 if (!context->streams[i])
4543 continue;
4544 /*
4545 * TODO: add a function to read AMD VSDB bits and set
4546 * crtc_sync_master.multi_sync_enabled flag.
4547 * For now it's set to false.
4548 */
4549 set_multisync_trigger_params(context->streams[i]);
4550 }
4551 set_master_stream(context->streams, context->stream_count);
4552 }
4553
4554 static struct dc_stream_state *
4555 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
4556 const struct drm_display_mode *drm_mode,
4557 const struct dm_connector_state *dm_state,
4558 const struct dc_stream_state *old_stream,
4559 int requested_bpc)
4560 {
4561 struct drm_display_mode *preferred_mode = NULL;
4562 struct drm_connector *drm_connector;
4563 const struct drm_connector_state *con_state =
4564 dm_state ? &dm_state->base : NULL;
4565 struct dc_stream_state *stream = NULL;
4566 struct drm_display_mode mode = *drm_mode;
4567 bool native_mode_found = false;
4568 bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
4569 int mode_refresh;
4570 int preferred_refresh = 0;
4571 #if defined(CONFIG_DRM_AMD_DC_DCN)
4572 struct dsc_dec_dpcd_caps dsc_caps;
4573 #endif
4574 uint32_t link_bandwidth_kbps;
4575
4576 struct dc_sink *sink = NULL;
4577 if (aconnector == NULL) {
4578 DRM_ERROR("aconnector is NULL!\n");
4579 return stream;
4580 }
4581
4582 drm_connector = &aconnector->base;
4583
4584 if (!aconnector->dc_sink) {
4585 sink = create_fake_sink(aconnector);
4586 if (!sink)
4587 return stream;
4588 } else {
4589 sink = aconnector->dc_sink;
4590 dc_sink_retain(sink);
4591 }
4592
4593 stream = dc_create_stream_for_sink(sink);
4594
4595 if (stream == NULL) {
4596 DRM_ERROR("Failed to create stream for sink!\n");
4597 goto finish;
4598 }
4599
4600 stream->dm_stream_context = aconnector;
4601
4602 stream->timing.flags.LTE_340MCSC_SCRAMBLE =
4603 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
4604
4605 list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
4606 /* Search for preferred mode */
4607 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
4608 native_mode_found = true;
4609 break;
4610 }
4611 }
4612 if (!native_mode_found)
4613 preferred_mode = list_first_entry_or_null(
4614 &aconnector->base.modes,
4615 struct drm_display_mode,
4616 head);
4617
4618 mode_refresh = drm_mode_vrefresh(&mode);
4619
4620 if (preferred_mode == NULL) {
4621 /*
4622 * This may not be an error: the use case is when we have no
4623 * usermode calls to reset and set the mode upon hotplug. In this
4624 * case, we call set mode ourselves to restore the previous mode,
4625 * and the mode list may not be filled in yet.
4626 */
4627 DRM_DEBUG_DRIVER("No preferred mode found\n");
4628 } else {
4629 decide_crtc_timing_for_drm_display_mode(
4630 &mode, preferred_mode,
4631 dm_state ? (dm_state->scaling != RMX_OFF) : false);
4632 preferred_refresh = drm_mode_vrefresh(preferred_mode);
4633 }
4634
4635 if (!dm_state)
4636 drm_mode_set_crtcinfo(&mode, 0);
4637
4638 /*
4639 * If scaling is enabled and the refresh rate didn't change,
4640 * we copy the VIC and polarities of the old timings.
4641 */
4642 if (!scale || mode_refresh != preferred_refresh)
4643 fill_stream_properties_from_drm_display_mode(stream,
4644 &mode, &aconnector->base, con_state, NULL, requested_bpc);
4645 else
4646 fill_stream_properties_from_drm_display_mode(stream,
4647 &mode, &aconnector->base, con_state, old_stream, requested_bpc);
4648
4649 stream->timing.flags.DSC = 0;
4650
4651 if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
4652 #if defined(CONFIG_DRM_AMD_DC_DCN)
4653 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
4654 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
4655 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
4656 &dsc_caps);
4657 #endif
4658 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
4659 dc_link_get_link_cap(aconnector->dc_link));
4660
4661 #if defined(CONFIG_DRM_AMD_DC_DCN)
4662 if (dsc_caps.is_dsc_supported) {
4663 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
4664 &dsc_caps,
4665 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
4666 link_bandwidth_kbps,
4667 &stream->timing,
4668 &stream->timing.dsc_cfg))
4669 stream->timing.flags.DSC = 1;
4670 /* Overwrite the stream flag if DSC is enabled through debugfs */
4671 if (aconnector->dsc_settings.dsc_clock_en)
4672 stream->timing.flags.DSC = 1;
4673
4674 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_slice_width)
4675 stream->timing.dsc_cfg.num_slices_h = DIV_ROUND_UP(stream->timing.h_addressable,
4676 aconnector->dsc_settings.dsc_slice_width);
4677
4678 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_slice_height)
4679 stream->timing.dsc_cfg.num_slices_v = DIV_ROUND_UP(stream->timing.v_addressable,
4680 aconnector->dsc_settings.dsc_slice_height);
4681 }
4682 #endif
4683 }
4684
4685 update_stream_scaling_settings(&mode, dm_state, stream);
4686
4687 fill_audio_info(
4688 &stream->audio_info,
4689 drm_connector,
4690 sink);
4691
4692 update_stream_signal(stream, sink);
4693
4694 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4695 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket, false, false);
4696 if (stream->link->psr_settings.psr_feature_enabled) {
4697 /*
4698 * Should decide whether the stream supports VSC SDP colorimetry
4699 * before building the VSC info packet.
4700 */
4701 stream->use_vsc_sdp_for_colorimetry = false;
4702 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
4703 stream->use_vsc_sdp_for_colorimetry =
4704 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
4705 } else {
4706 if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
4707 stream->use_vsc_sdp_for_colorimetry = true;
4708 }
4709 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
4710 }
4711 finish:
4712 dc_sink_release(sink);
4713
4714 return stream;
4715 }
4716
4717 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
4718 {
4719 drm_crtc_cleanup(crtc);
4720 kfree(crtc);
4721 }
4722
4723 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
4724 struct drm_crtc_state *state)
4725 {
4726 struct dm_crtc_state *cur = to_dm_crtc_state(state);
4727
4728 /* TODO: Destroy dc_stream objects once the stream object is flattened */
4729 if (cur->stream)
4730 dc_stream_release(cur->stream);
4731
4732
4733 __drm_atomic_helper_crtc_destroy_state(state);
4734
4735
4736 kfree(state);
4737 }
4738
4739 static void dm_crtc_reset_state(struct drm_crtc *crtc)
4740 {
4741 struct dm_crtc_state *state;
4742
4743 if (crtc->state)
4744 dm_crtc_destroy_state(crtc, crtc->state);
4745
4746 state = kzalloc(sizeof(*state), GFP_KERNEL);
4747 if (WARN_ON(!state))
4748 return;
4749
4750 crtc->state = &state->base;
4751 crtc->state->crtc = crtc;
4752
4753 }
4754
4755 static struct drm_crtc_state *
4756 dm_crtc_duplicate_state(struct drm_crtc *crtc)
4757 {
4758 struct dm_crtc_state *state, *cur;
4759
4760 if (WARN_ON(!crtc->state))
4761 return NULL;
4762
4763 cur = to_dm_crtc_state(crtc->state);
4764
4765 state = kzalloc(sizeof(*state), GFP_KERNEL);
4766 if (!state)
4767 return NULL;
4768
4769 __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
4770
4771 if (cur->stream) {
4772 state->stream = cur->stream;
4773 dc_stream_retain(state->stream);
4774 }
4775
4776 state->active_planes = cur->active_planes;
4777 state->vrr_params = cur->vrr_params;
4778 state->vrr_infopacket = cur->vrr_infopacket;
4779 state->abm_level = cur->abm_level;
4780 state->vrr_supported = cur->vrr_supported;
4781 state->freesync_config = cur->freesync_config;
4782 state->crc_src = cur->crc_src;
4783 state->cm_has_degamma = cur->cm_has_degamma;
4784 state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
4785
4786 /* TODO: Duplicate dc_stream once the stream object is flattened */
4787
4788 return &state->base;
4789 }
4790
4791 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
4792 {
4793 enum dc_irq_source irq_source;
4794 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4795 struct amdgpu_device *adev = crtc->dev->dev_private;
4796 int rc;
4797
4798 irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
4799
4800 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4801
4802 DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
4803 acrtc->crtc_id, enable ? "en" : "dis", rc);
4804 return rc;
4805 }
4806
4807 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
4808 {
4809 enum dc_irq_source irq_source;
4810 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4811 struct amdgpu_device *adev = crtc->dev->dev_private;
4812 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
4813 int rc = 0;
4814
4815 if (enable) {
4816 /* vblank irq on -> Only need vupdate irq in vrr mode */
4817 if (amdgpu_dm_vrr_active(acrtc_state))
4818 rc = dm_set_vupdate_irq(crtc, true);
4819 } else {
4820 /* vblank irq off -> vupdate irq off */
4821 rc = dm_set_vupdate_irq(crtc, false);
4822 }
4823
4824 if (rc)
4825 return rc;
4826
4827 irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
4828 return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4829 }
4830
4831 static int dm_enable_vblank(struct drm_crtc *crtc)
4832 {
4833 return dm_set_vblank(crtc, true);
4834 }
4835
4836 static void dm_disable_vblank(struct drm_crtc *crtc)
4837 {
4838 dm_set_vblank(crtc, false);
4839 }
4840
4841 /* Implement only the options currently available for the driver */
4842 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
4843 .reset = dm_crtc_reset_state,
4844 .destroy = amdgpu_dm_crtc_destroy,
4845 .gamma_set = drm_atomic_helper_legacy_gamma_set,
4846 .set_config = drm_atomic_helper_set_config,
4847 .page_flip = drm_atomic_helper_page_flip,
4848 .atomic_duplicate_state = dm_crtc_duplicate_state,
4849 .atomic_destroy_state = dm_crtc_destroy_state,
4850 .set_crc_source = amdgpu_dm_crtc_set_crc_source,
4851 .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
4852 .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
4853 .get_vblank_counter = amdgpu_get_vblank_counter_kms,
4854 .enable_vblank = dm_enable_vblank,
4855 .disable_vblank = dm_disable_vblank,
4856 .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
4857 };
4858
4859 static enum drm_connector_status
4860 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
4861 {
4862 bool connected;
4863 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4864
4865 /*
4866 * Notes:
4867 * 1. This interface is NOT called in context of HPD irq.
4868 * 2. This interface *is called* in the context of a user-mode ioctl,
4869 * which makes it a bad place for *any* MST-related activity.
4870 */
4871
4872 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
4873 !aconnector->fake_enable)
4874 connected = (aconnector->dc_sink != NULL);
4875 else
4876 connected = (aconnector->base.force == DRM_FORCE_ON);
4877
4878 return (connected ? connector_status_connected :
4879 connector_status_disconnected);
4880 }
4881
4882 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
4883 struct drm_connector_state *connector_state,
4884 struct drm_property *property,
4885 uint64_t val)
4886 {
4887 struct drm_device *dev = connector->dev;
4888 struct amdgpu_device *adev = dev->dev_private;
4889 struct dm_connector_state *dm_old_state =
4890 to_dm_connector_state(connector->state);
4891 struct dm_connector_state *dm_new_state =
4892 to_dm_connector_state(connector_state);
4893
4894 int ret = -EINVAL;
4895
4896 if (property == dev->mode_config.scaling_mode_property) {
4897 enum amdgpu_rmx_type rmx_type;
4898
4899 switch (val) {
4900 case DRM_MODE_SCALE_CENTER:
4901 rmx_type = RMX_CENTER;
4902 break;
4903 case DRM_MODE_SCALE_ASPECT:
4904 rmx_type = RMX_ASPECT;
4905 break;
4906 case DRM_MODE_SCALE_FULLSCREEN:
4907 rmx_type = RMX_FULL;
4908 break;
4909 case DRM_MODE_SCALE_NONE:
4910 default:
4911 rmx_type = RMX_OFF;
4912 break;
4913 }
4914
4915 if (dm_old_state->scaling == rmx_type)
4916 return 0;
4917
4918 dm_new_state->scaling = rmx_type;
4919 ret = 0;
4920 } else if (property == adev->mode_info.underscan_hborder_property) {
4921 dm_new_state->underscan_hborder = val;
4922 ret = 0;
4923 } else if (property == adev->mode_info.underscan_vborder_property) {
4924 dm_new_state->underscan_vborder = val;
4925 ret = 0;
4926 } else if (property == adev->mode_info.underscan_property) {
4927 dm_new_state->underscan_enable = val;
4928 ret = 0;
4929 } else if (property == adev->mode_info.abm_level_property) {
4930 dm_new_state->abm_level = val;
4931 ret = 0;
4932 }
4933
4934 return ret;
4935 }
4936
4937 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
4938 const struct drm_connector_state *state,
4939 struct drm_property *property,
4940 uint64_t *val)
4941 {
4942 struct drm_device *dev = connector->dev;
4943 struct amdgpu_device *adev = dev->dev_private;
4944 struct dm_connector_state *dm_state =
4945 to_dm_connector_state(state);
4946 int ret = -EINVAL;
4947
4948 if (property == dev->mode_config.scaling_mode_property) {
4949 switch (dm_state->scaling) {
4950 case RMX_CENTER:
4951 *val = DRM_MODE_SCALE_CENTER;
4952 break;
4953 case RMX_ASPECT:
4954 *val = DRM_MODE_SCALE_ASPECT;
4955 break;
4956 case RMX_FULL:
4957 *val = DRM_MODE_SCALE_FULLSCREEN;
4958 break;
4959 case RMX_OFF:
4960 default:
4961 *val = DRM_MODE_SCALE_NONE;
4962 break;
4963 }
4964 ret = 0;
4965 } else if (property == adev->mode_info.underscan_hborder_property) {
4966 *val = dm_state->underscan_hborder;
4967 ret = 0;
4968 } else if (property == adev->mode_info.underscan_vborder_property) {
4969 *val = dm_state->underscan_vborder;
4970 ret = 0;
4971 } else if (property == adev->mode_info.underscan_property) {
4972 *val = dm_state->underscan_enable;
4973 ret = 0;
4974 } else if (property == adev->mode_info.abm_level_property) {
4975 *val = dm_state->abm_level;
4976 ret = 0;
4977 }
4978
4979 return ret;
4980 }
4981
4982 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
4983 {
4984 struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
4985
4986 drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
4987 }
4988
4989 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
4990 {
4991 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4992 const struct dc_link *link = aconnector->dc_link;
4993 struct amdgpu_device *adev = connector->dev->dev_private;
4994 struct amdgpu_display_manager *dm = &adev->dm;
4995
4996 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
4997 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
4998
4999 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
5000 link->type != dc_connection_none &&
5001 dm->backlight_dev) {
5002 backlight_device_unregister(dm->backlight_dev);
5003 dm->backlight_dev = NULL;
5004 }
5005 #endif
5006
5007 if (aconnector->dc_em_sink)
5008 dc_sink_release(aconnector->dc_em_sink);
5009 aconnector->dc_em_sink = NULL;
5010 if (aconnector->dc_sink)
5011 dc_sink_release(aconnector->dc_sink);
5012 aconnector->dc_sink = NULL;
5013
5014 drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
5015 drm_connector_unregister(connector);
5016 drm_connector_cleanup(connector);
5017 if (aconnector->i2c) {
5018 i2c_del_adapter(&aconnector->i2c->base);
5019 kfree(aconnector->i2c);
5020 }
5021 kfree(aconnector->dm_dp_aux.aux.name);
5022
5023 kfree(connector);
5024 }
5025
5026 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
5027 {
5028 struct dm_connector_state *state =
5029 to_dm_connector_state(connector->state);
5030
5031 if (connector->state)
5032 __drm_atomic_helper_connector_destroy_state(connector->state);
5033
5034 kfree(state);
5035
5036 state = kzalloc(sizeof(*state), GFP_KERNEL);
5037
5038 if (state) {
5039 state->scaling = RMX_OFF;
5040 state->underscan_enable = false;
5041 state->underscan_hborder = 0;
5042 state->underscan_vborder = 0;
5043 state->base.max_requested_bpc = 8;
5044 state->vcpi_slots = 0;
5045 state->pbn = 0;
5046 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
5047 state->abm_level = amdgpu_dm_abm_level;
5048
5049 __drm_atomic_helper_connector_reset(connector, &state->base);
5050 }
5051 }
5052
5053 struct drm_connector_state *
5054 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
5055 {
5056 struct dm_connector_state *state =
5057 to_dm_connector_state(connector->state);
5058
5059 struct dm_connector_state *new_state =
5060 kmemdup(state, sizeof(*state), GFP_KERNEL);
5061
5062 if (!new_state)
5063 return NULL;
5064
5065 __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
5066
5067 new_state->freesync_capable = state->freesync_capable;
5068 new_state->abm_level = state->abm_level;
5069 new_state->scaling = state->scaling;
5070 new_state->underscan_enable = state->underscan_enable;
5071 new_state->underscan_hborder = state->underscan_hborder;
5072 new_state->underscan_vborder = state->underscan_vborder;
5073 new_state->vcpi_slots = state->vcpi_slots;
5074 new_state->pbn = state->pbn;
5075 return &new_state->base;
5076 }
5077
5078 static int
5079 amdgpu_dm_connector_late_register(struct drm_connector *connector)
5080 {
5081 struct amdgpu_dm_connector *amdgpu_dm_connector =
5082 to_amdgpu_dm_connector(connector);
5083 int r;
5084
5085 if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
5086 (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
5087 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
5088 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
5089 if (r)
5090 return r;
5091 }
5092
5093 #if defined(CONFIG_DEBUG_FS)
5094 connector_debugfs_init(amdgpu_dm_connector);
5095 #endif
5096
5097 return 0;
5098 }
5099
5100 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
5101 .reset = amdgpu_dm_connector_funcs_reset,
5102 .detect = amdgpu_dm_connector_detect,
5103 .fill_modes = drm_helper_probe_single_connector_modes,
5104 .destroy = amdgpu_dm_connector_destroy,
5105 .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
5106 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
5107 .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
5108 .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
5109 .late_register = amdgpu_dm_connector_late_register,
5110 .early_unregister = amdgpu_dm_connector_unregister
5111 };
5112
5113 static int get_modes(struct drm_connector *connector)
5114 {
5115 return amdgpu_dm_connector_get_modes(connector);
5116 }
5117
5118 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
5119 {
5120 struct dc_sink_init_data init_params = {
5121 .link = aconnector->dc_link,
5122 .sink_signal = SIGNAL_TYPE_VIRTUAL
5123 };
5124 struct edid *edid;
5125
5126 if (!aconnector->base.edid_blob_ptr) {
5127 DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
5128 aconnector->base.name);
5129
5130 aconnector->base.force = DRM_FORCE_OFF;
5131 aconnector->base.override_edid = false;
5132 return;
5133 }
5134
5135 edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
5136
5137 aconnector->edid = edid;
5138
5139 aconnector->dc_em_sink = dc_link_add_remote_sink(
5140 aconnector->dc_link,
5141 (uint8_t *)edid,
5142 (edid->extensions + 1) * EDID_LENGTH,
5143 &init_params);
5144
5145 if (aconnector->base.force == DRM_FORCE_ON) {
5146 aconnector->dc_sink = aconnector->dc_link->local_sink ?
5147 aconnector->dc_link->local_sink :
5148 aconnector->dc_em_sink;
5149 dc_sink_retain(aconnector->dc_sink);
5150 }
5151 }
5152
5153 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
5154 {
5155 struct dc_link *link = aconnector->dc_link;
5156
5157 /*
5158 * In case of a headless boot with force-on for a DP managed connector,
5159 * these settings have to be != 0 to get an initial modeset.
5160 */
5161 if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5162 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
5163 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
5164 }
5165
5166
5167 aconnector->base.override_edid = true;
5168 create_eml_sink(aconnector);
5169 }
5170
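/*
 * create_validate_stream_for_sink() - create a stream and run it
 * through DC validation, retrying with the requested bpc lowered by 2
 * each time (down to a floor of 6) until validation passes. Returns
 * NULL if no bpc in that range produces a valid stream.
 */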
5171 static struct dc_stream_state *
5172 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5173 const struct drm_display_mode *drm_mode,
5174 const struct dm_connector_state *dm_state,
5175 const struct dc_stream_state *old_stream)
5176 {
5177 struct drm_connector *connector = &aconnector->base;
5178 struct amdgpu_device *adev = connector->dev->dev_private;
5179 struct dc_stream_state *stream;
5180 const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
5181 int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
5182 enum dc_status dc_result = DC_OK;
5183
5184 do {
5185 stream = create_stream_for_sink(aconnector, drm_mode,
5186 dm_state, old_stream,
5187 requested_bpc);
5188 if (stream == NULL) {
5189 DRM_ERROR("Failed to create stream for sink!\n");
5190 break;
5191 }
5192
5193 dc_result = dc_validate_stream(adev->dm.dc, stream);
5194
5195 if (dc_result != DC_OK) {
5196 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
5197 drm_mode->hdisplay,
5198 drm_mode->vdisplay,
5199 drm_mode->clock,
5200 dc_result,
5201 dc_status_to_str(dc_result));
5202
5203 dc_stream_release(stream);
5204 stream = NULL;
5205 requested_bpc -= 2; /* lower bpc to retry validation */
5206 }
5207
5208 } while (stream == NULL && requested_bpc >= 6);
5209
5210 return stream;
5211 }
5212
5213 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
5214 struct drm_display_mode *mode)
5215 {
5216 int result = MODE_ERROR;
5217 struct dc_sink *dc_sink;
5218 /* TODO: Unhardcode stream count */
5219 struct dc_stream_state *stream;
5220 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5221
5222 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
5223 (mode->flags & DRM_MODE_FLAG_DBLSCAN))
5224 return result;
5225
5226 /*
5227 * Only run this the first time mode_valid is called, to initialize
5228 * EDID mgmt
5229 */
5230 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
5231 !aconnector->dc_em_sink)
5232 handle_edid_mgmt(aconnector);
5233
5234 dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
5235
5236 if (dc_sink == NULL) {
5237 DRM_ERROR("dc_sink is NULL!\n");
5238 goto fail;
5239 }
5240
5241 stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
5242 if (stream) {
5243 dc_stream_release(stream);
5244 result = MODE_OK;
5245 }
5246
5247 fail:
5248 /* TODO: error handling */
5249 return result;
5250 }
5251
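/*
 * fill_hdr_info_packet() - pack the connector's HDR output metadata
 * into a DC info packet: an HDMI DRM (Dynamic Range and Mastering)
 * infoframe for HDMI sinks, or the equivalent SDP for DP/eDP. The
 * packed static metadata is a fixed 26 bytes plus a 4 byte header.
 */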
5252 static int fill_hdr_info_packet(const struct drm_connector_state *state,
5253 struct dc_info_packet *out)
5254 {
5255 struct hdmi_drm_infoframe frame;
5256 unsigned char buf[30]; /* 26 + 4 */
5257 ssize_t len;
5258 int ret, i;
5259
5260 memset(out, 0, sizeof(*out));
5261
5262 if (!state->hdr_output_metadata)
5263 return 0;
5264
5265 ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
5266 if (ret)
5267 return ret;
5268
5269 len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
5270 if (len < 0)
5271 return (int)len;
5272
5273 /* Static metadata is a fixed 26 bytes + 4 byte header. */
5274 if (len != 30)
5275 return -EINVAL;
5276
5277 /* Prepare the infopacket for DC. */
5278 switch (state->connector->connector_type) {
5279 case DRM_MODE_CONNECTOR_HDMIA:
5280 out->hb0 = 0x87; /* type */
5281 out->hb1 = 0x01; /* version */
5282 out->hb2 = 0x1A; /* length */
5283 out->sb[0] = buf[3]; /* checksum */
5284 i = 1;
5285 break;
5286
5287 case DRM_MODE_CONNECTOR_DisplayPort:
5288 case DRM_MODE_CONNECTOR_eDP:
5289 out->hb0 = 0x00; /* sdp id, zero */
5290 out->hb1 = 0x87; /* type */
5291 out->hb2 = 0x1D; /* payload len - 1 */
5292 out->hb3 = (0x13 << 2); /* sdp version */
5293 out->sb[0] = 0x01; /* version */
5294 out->sb[1] = 0x1A; /* length */
5295 i = 2;
5296 break;
5297
5298 default:
5299 return -EINVAL;
5300 }
5301
5302 memcpy(&out->sb[i], &buf[4], 26);
5303 out->valid = true;
5304
5305 print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
5306 sizeof(out->sb), false);
5307
5308 return 0;
5309 }
5310
5311 static bool
5312 is_hdr_metadata_different(const struct drm_connector_state *old_state,
5313 const struct drm_connector_state *new_state)
5314 {
5315 struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
5316 struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
5317
5318 if (old_blob != new_blob) {
5319 if (old_blob && new_blob &&
5320 old_blob->length == new_blob->length)
5321 return memcmp(old_blob->data, new_blob->data,
5322 old_blob->length) != 0;
5323
5324 return true;
5325 }
5326
5327 return false;
5328 }
5329
5330 static int
5331 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
5332 struct drm_atomic_state *state)
5333 {
5334 struct drm_connector_state *new_con_state =
5335 drm_atomic_get_new_connector_state(state, conn);
5336 struct drm_connector_state *old_con_state =
5337 drm_atomic_get_old_connector_state(state, conn);
5338 struct drm_crtc *crtc = new_con_state->crtc;
5339 struct drm_crtc_state *new_crtc_state;
5340 int ret;
5341
5342 if (!crtc)
5343 return 0;
5344
5345 if (is_hdr_metadata_different(old_con_state, new_con_state)) {
5346 struct dc_info_packet hdr_infopacket;
5347
5348 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
5349 if (ret)
5350 return ret;
5351
5352 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
5353 if (IS_ERR(new_crtc_state))
5354 return PTR_ERR(new_crtc_state);
5355
5356 /*
5357 * DC considers the stream backends changed if the
5358 * static metadata changes. Forcing the modeset also
5359 * gives a simple way for userspace to switch from
5360 * 8bpc to 10bpc when setting the metadata to enter
5361 * or exit HDR.
5362 *
5363 * Changing the static metadata after it's been
5364 * set is permissible, however. So only force a
5365 * modeset if we're entering or exiting HDR.
5366 */
5367 new_crtc_state->mode_changed =
5368 !old_con_state->hdr_output_metadata ||
5369 !new_con_state->hdr_output_metadata;
5370 }
5371
5372 return 0;
5373 }
5374
5375 static const struct drm_connector_helper_funcs
5376 amdgpu_dm_connector_helper_funcs = {
5377 /*
5378 * If hotplugging a second, bigger display in FB console mode, bigger
5379 * resolution modes will be filtered by drm_mode_validate_size(), and
5380 * those modes are missing after the user starts lightdm. So we need to
5381 * renew the mode list in the get_modes callback, not just return the mode count.
5382 */
5383 .get_modes = get_modes,
5384 .mode_valid = amdgpu_dm_connector_mode_valid,
5385 .atomic_check = amdgpu_dm_connector_atomic_check,
5386 };
5387
5388 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
5389 {
5390 }
5391
5392 static bool does_crtc_have_active_cursor(struct drm_crtc_state *new_crtc_state)
5393 {
5394 struct drm_device *dev = new_crtc_state->crtc->dev;
5395 struct drm_plane *plane;
5396
5397 drm_for_each_plane_mask(plane, dev, new_crtc_state->plane_mask) {
5398 if (plane->type == DRM_PLANE_TYPE_CURSOR)
5399 return true;
5400 }
5401
5402 return false;
5403 }
5404
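/*
 * count_crtc_active_planes() - count the non-cursor planes on the CRTC
 * that will be enabled after this commit. Planes absent from the
 * atomic state kept their previously validated (enabled) configuration
 * and are counted as active.
 */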
5405 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
5406 {
5407 struct drm_atomic_state *state = new_crtc_state->state;
5408 struct drm_plane *plane;
5409 int num_active = 0;
5410
5411 drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
5412 struct drm_plane_state *new_plane_state;
5413
5414 /* Cursor planes are "fake". */
5415 if (plane->type == DRM_PLANE_TYPE_CURSOR)
5416 continue;
5417
5418 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
5419
5420 if (!new_plane_state) {
5421 /*
5422 * The plane is enabled on the CRTC and hasn't changed
5423 * state. This means that it previously passed
5424 * validation and is therefore enabled.
5425 */
5426 num_active += 1;
5427 continue;
5428 }
5429
5430 /* We need a framebuffer to be considered enabled. */
5431 num_active += (new_plane_state->fb != NULL);
5432 }
5433
5434 return num_active;
5435 }
5436
5437 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
5438 struct drm_crtc_state *new_crtc_state)
5439 {
5440 struct dm_crtc_state *dm_new_crtc_state =
5441 to_dm_crtc_state(new_crtc_state);
5442
5443 dm_new_crtc_state->active_planes = 0;
5444
5445 if (!dm_new_crtc_state->stream)
5446 return;
5447
5448 dm_new_crtc_state->active_planes =
5449 count_crtc_active_planes(new_crtc_state);
5450 }
5451
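/*
 * Validate the new CRTC state against DC. As noted below, a stream with
 * an active cursor must also have at least one active hardware plane.
 */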
5452 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
5453 struct drm_crtc_state *state)
5454 {
5455 struct amdgpu_device *adev = crtc->dev->dev_private;
5456 struct dc *dc = adev->dm.dc;
5457 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
5458 int ret = -EINVAL;
5459
5460 dm_update_crtc_active_planes(crtc, state);
5461
5462 if (unlikely(!dm_crtc_state->stream &&
5463 modeset_required(state, NULL, dm_crtc_state->stream))) {
5464 WARN_ON(1);
5465 return ret;
5466 }
5467
5468 /* In some use cases, like reset, no stream is attached */
5469 if (!dm_crtc_state->stream)
5470 return 0;
5471
5472 /*
5473 * We want at least one hardware plane enabled to use
5474 * the stream with a cursor enabled.
5475 */
5476 if (state->enable && state->active &&
5477 does_crtc_have_active_cursor(state) &&
5478 dm_crtc_state->active_planes == 0)
5479 return -EINVAL;
5480
5481 if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
5482 return 0;
5483
5484 return ret;
5485 }
5486
5487 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
5488 const struct drm_display_mode *mode,
5489 struct drm_display_mode *adjusted_mode)
5490 {
5491 return true;
5492 }
5493
5494 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
5495 .disable = dm_crtc_helper_disable,
5496 .atomic_check = dm_crtc_helper_atomic_check,
5497 .mode_fixup = dm_crtc_helper_mode_fixup,
5498 .get_scanout_position = amdgpu_crtc_get_scanout_position,
5499 };
5500
5501 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
5502 {
5503
5504 }
5505
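/*
 * Map a DC color depth to bits per component, returning 0 for unknown
 * values. Callers multiply by 3 to get bits per pixel for RGB output,
 * as dm_encoder_helper_atomic_check() does below.
 */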
5506 static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
5507 {
5508 switch (display_color_depth) {
5509 case COLOR_DEPTH_666:
5510 return 6;
5511 case COLOR_DEPTH_888:
5512 return 8;
5513 case COLOR_DEPTH_101010:
5514 return 10;
5515 case COLOR_DEPTH_121212:
5516 return 12;
5517 case COLOR_DEPTH_141414:
5518 return 14;
5519 case COLOR_DEPTH_161616:
5520 return 16;
5521 default:
5522 break;
5523 }
5524 return 0;
5525 }
5526
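/*
 * For MST connectors, compute the PBN required by the adjusted mode and
 * atomically reserve VCPI slots in the topology manager. Non-MST
 * connectors (no port) are skipped entirely.
 */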
5527 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
5528 struct drm_crtc_state *crtc_state,
5529 struct drm_connector_state *conn_state)
5530 {
5531 struct drm_atomic_state *state = crtc_state->state;
5532 struct drm_connector *connector = conn_state->connector;
5533 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5534 struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
5535 const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
5536 struct drm_dp_mst_topology_mgr *mst_mgr;
5537 struct drm_dp_mst_port *mst_port;
5538 enum dc_color_depth color_depth;
5539 int clock, bpp = 0;
5540 bool is_y420 = false;
5541
5542 if (!aconnector->port || !aconnector->dc_sink)
5543 return 0;
5544
5545 mst_port = aconnector->port;
5546 mst_mgr = &aconnector->mst_port->mst_mgr;
5547
5548 if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
5549 return 0;
5550
5551 if (!state->duplicated) {
5552 int max_bpc = conn_state->max_requested_bpc;
5553 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
5554 aconnector->force_yuv420_output;
5555 color_depth = convert_color_depth_from_display_info(connector,
5556 is_y420,
5557 max_bpc);
5558 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
5559 clock = adjusted_mode->clock;
5560 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
5561 }
5562 dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
5563 mst_mgr,
5564 mst_port,
5565 dm_new_connector_state->pbn,
5566 dm_mst_get_pbn_divider(aconnector->dc_link));
5567 if (dm_new_connector_state->vcpi_slots < 0) {
5568 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
5569 return dm_new_connector_state->vcpi_slots;
5570 }
5571 return 0;
5572 }
5573
5574 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
5575 .disable = dm_encoder_helper_disable,
5576 .atomic_check = dm_encoder_helper_atomic_check
5577 };
5578
5579 #if defined(CONFIG_DRM_AMD_DC_DCN)
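/*
 * Walk every connector in the atomic state, match it to its DC stream,
 * and redo the PBN/VCPI allocation for streams that have DSC enabled;
 * streams without DSC keep the PBN computed in
 * dm_encoder_helper_atomic_check().
 */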
5580 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
5581 struct dc_state *dc_state)
5582 {
5583 struct dc_stream_state *stream = NULL;
5584 struct drm_connector *connector;
5585 struct drm_connector_state *new_con_state, *old_con_state;
5586 struct amdgpu_dm_connector *aconnector;
5587 struct dm_connector_state *dm_conn_state;
5588 int i, j, clock, bpp;
5589 int vcpi, pbn_div, pbn = 0;
5590
5591 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
5592
5593 aconnector = to_amdgpu_dm_connector(connector);
5594
5595 if (!aconnector->port)
5596 continue;
5597
5598 if (!new_con_state || !new_con_state->crtc)
5599 continue;
5600
5601 dm_conn_state = to_dm_connector_state(new_con_state);
5602
5603 for (j = 0; j < dc_state->stream_count; j++) {
5604 stream = dc_state->streams[j];
5605 if (!stream)
5606 continue;
5607
5608 if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
5609 break;
5610
5611 stream = NULL;
5612 }
5613
5614 if (!stream)
5615 continue;
5616
5617 if (stream->timing.flags.DSC != 1) {
5618 drm_dp_mst_atomic_enable_dsc(state,
5619 aconnector->port,
5620 dm_conn_state->pbn,
5621 0,
5622 false);
5623 continue;
5624 }
5625
5626 pbn_div = dm_mst_get_pbn_divider(stream->link);
5627 bpp = stream->timing.dsc_cfg.bits_per_pixel;
5628 clock = stream->timing.pix_clk_100hz / 10;
5629 pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
5630 vcpi = drm_dp_mst_atomic_enable_dsc(state,
5631 aconnector->port,
5632 pbn, pbn_div,
5633 true);
5634 if (vcpi < 0)
5635 return vcpi;
5636
5637 dm_conn_state->pbn = pbn;
5638 dm_conn_state->vcpi_slots = vcpi;
5639 }
5640 return 0;
5641 }
5642 #endif
5643
5644 static void dm_drm_plane_reset(struct drm_plane *plane)
5645 {
5646 struct dm_plane_state *amdgpu_state = NULL;
5647
5648 if (plane->state)
5649 plane->funcs->atomic_destroy_state(plane, plane->state);
5650
5651 amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
5652 WARN_ON(amdgpu_state == NULL);
5653
5654 if (amdgpu_state)
5655 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
5656 }
5657
5658 static struct drm_plane_state *
5659 dm_drm_plane_duplicate_state(struct drm_plane *plane)
5660 {
5661 struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
5662
5663 old_dm_plane_state = to_dm_plane_state(plane->state);
5664 dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
5665 if (!dm_plane_state)
5666 return NULL;
5667
5668 __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
5669
5670 if (old_dm_plane_state->dc_state) {
5671 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
5672 dc_plane_state_retain(dm_plane_state->dc_state);
5673 }
5674
5675 return &dm_plane_state->base;
5676 }
5677
5678 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
5679 struct drm_plane_state *state)
5680 {
5681 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
5682
5683 if (dm_plane_state->dc_state)
5684 dc_plane_state_release(dm_plane_state->dc_state);
5685
5686 drm_atomic_helper_plane_destroy_state(plane, state);
5687 }
5688
5689 static const struct drm_plane_funcs dm_plane_funcs = {
5690 .update_plane = drm_atomic_helper_update_plane,
5691 .disable_plane = drm_atomic_helper_disable_plane,
5692 .destroy = drm_primary_helper_destroy,
5693 .reset = dm_drm_plane_reset,
5694 .atomic_duplicate_state = dm_drm_plane_duplicate_state,
5695 .atomic_destroy_state = dm_drm_plane_destroy_state,
5696 };
5697
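/*
 * Pin the new framebuffer's BO into a displayable domain, bind it in the
 * GART, and record its GPU address; then fill the DC plane buffer
 * attributes if the plane's dc_state changed.
 */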
5698 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
5699 struct drm_plane_state *new_state)
5700 {
5701 struct amdgpu_framebuffer *afb;
5702 struct drm_gem_object *obj;
5703 struct amdgpu_device *adev;
5704 struct amdgpu_bo *rbo;
5705 struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
5706 struct list_head list;
5707 struct ttm_validate_buffer tv;
5708 struct ww_acquire_ctx ticket;
5709 uint64_t tiling_flags;
5710 uint32_t domain;
5711 int r;
5712 bool tmz_surface = false;
5713 bool force_disable_dcc = false;
5714
5715 dm_plane_state_old = to_dm_plane_state(plane->state);
5716 dm_plane_state_new = to_dm_plane_state(new_state);
5717
5718 if (!new_state->fb) {
5719 DRM_DEBUG_DRIVER("No FB bound\n");
5720 return 0;
5721 }
5722
5723 afb = to_amdgpu_framebuffer(new_state->fb);
5724 obj = new_state->fb->obj[0];
5725 rbo = gem_to_amdgpu_bo(obj);
5726 adev = amdgpu_ttm_adev(rbo->tbo.bdev);
5727 INIT_LIST_HEAD(&list);
5728
5729 tv.bo = &rbo->tbo;
5730 tv.num_shared = 1;
5731 list_add(&tv.head, &list);
5732
5733 r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
5734 if (r) {
5735 dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
5736 return r;
5737 }
5738
5739 if (plane->type != DRM_PLANE_TYPE_CURSOR)
5740 domain = amdgpu_display_supported_domains(adev, rbo->flags);
5741 else
5742 domain = AMDGPU_GEM_DOMAIN_VRAM;
5743
5744 r = amdgpu_bo_pin(rbo, domain);
5745 if (unlikely(r != 0)) {
5746 if (r != -ERESTARTSYS)
5747 DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
5748 ttm_eu_backoff_reservation(&ticket, &list);
5749 return r;
5750 }
5751
5752 r = amdgpu_ttm_alloc_gart(&rbo->tbo);
5753 if (unlikely(r != 0)) {
5754 amdgpu_bo_unpin(rbo);
5755 ttm_eu_backoff_reservation(&ticket, &list);
5756 DRM_ERROR("%p bind failed\n", rbo);
5757 return r;
5758 }
5759
5760 amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);
5761
5762 tmz_surface = amdgpu_bo_encrypted(rbo);
5763
5764 ttm_eu_backoff_reservation(&ticket, &list);
5765
5766 afb->address = amdgpu_bo_gpu_offset(rbo);
5767
5768 amdgpu_bo_ref(rbo);
5769
5770 if (dm_plane_state_new->dc_state &&
5771 dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
5772 struct dc_plane_state *plane_state = dm_plane_state_new->dc_state;
5773
5774 force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5775 fill_plane_buffer_attributes(
5776 adev, afb, plane_state->format, plane_state->rotation,
5777 tiling_flags, &plane_state->tiling_info,
5778 &plane_state->plane_size, &plane_state->dcc,
5779 &plane_state->address, tmz_surface,
5780 force_disable_dcc);
5781 }
5782
5783 return 0;
5784 }
5785
5786 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
5787 struct drm_plane_state *old_state)
5788 {
5789 struct amdgpu_bo *rbo;
5790 int r;
5791
5792 if (!old_state->fb)
5793 return;
5794
5795 rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
5796 r = amdgpu_bo_reserve(rbo, false);
5797 if (unlikely(r)) {
5798 DRM_ERROR("failed to reserve rbo before unpin\n");
5799 return;
5800 }
5801
5802 amdgpu_bo_unpin(rbo);
5803 amdgpu_bo_unreserve(rbo);
5804 amdgpu_bo_unref(&rbo);
5805 }
5806
5807 static int dm_plane_helper_check_state(struct drm_plane_state *state,
5808 struct drm_crtc_state *new_crtc_state)
5809 {
5810 int max_downscale = 0;
5811 int max_upscale = INT_MAX;
5812
5813 /* TODO: These should be checked against DC plane caps */
5814 return drm_atomic_helper_check_plane_state(
5815 state, new_crtc_state, max_downscale, max_upscale, true, true);
5816 }
5817
5818 static int dm_plane_atomic_check(struct drm_plane *plane,
5819 struct drm_plane_state *state)
5820 {
5821 struct amdgpu_device *adev = plane->dev->dev_private;
5822 struct dc *dc = adev->dm.dc;
5823 struct dm_plane_state *dm_plane_state;
5824 struct dc_scaling_info scaling_info;
5825 struct drm_crtc_state *new_crtc_state;
5826 int ret;
5827
5828 dm_plane_state = to_dm_plane_state(state);
5829
5830 if (!dm_plane_state->dc_state)
5831 return 0;
5832
5833 new_crtc_state =
5834 drm_atomic_get_new_crtc_state(state->state, state->crtc);
5835 if (!new_crtc_state)
5836 return -EINVAL;
5837
5838 ret = dm_plane_helper_check_state(state, new_crtc_state);
5839 if (ret)
5840 return ret;
5841
5842 ret = fill_dc_scaling_info(state, &scaling_info);
5843 if (ret)
5844 return ret;
5845
5846 if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
5847 return 0;
5848
5849 return -EINVAL;
5850 }
5851
5852 static int dm_plane_atomic_async_check(struct drm_plane *plane,
5853 struct drm_plane_state *new_plane_state)
5854 {
5855 /* Only support async updates on cursor planes. */
5856 if (plane->type != DRM_PLANE_TYPE_CURSOR)
5857 return -EINVAL;
5858
5859 return 0;
5860 }
5861
5862 static void dm_plane_atomic_async_update(struct drm_plane *plane,
5863 struct drm_plane_state *new_state)
5864 {
5865 struct drm_plane_state *old_state =
5866 drm_atomic_get_old_plane_state(new_state->state, plane);
5867
5868 swap(plane->state->fb, new_state->fb);
5869
5870 plane->state->src_x = new_state->src_x;
5871 plane->state->src_y = new_state->src_y;
5872 plane->state->src_w = new_state->src_w;
5873 plane->state->src_h = new_state->src_h;
5874 plane->state->crtc_x = new_state->crtc_x;
5875 plane->state->crtc_y = new_state->crtc_y;
5876 plane->state->crtc_w = new_state->crtc_w;
5877 plane->state->crtc_h = new_state->crtc_h;
5878
5879 handle_cursor_update(plane, old_state);
5880 }
5881
5882 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
5883 .prepare_fb = dm_plane_helper_prepare_fb,
5884 .cleanup_fb = dm_plane_helper_cleanup_fb,
5885 .atomic_check = dm_plane_atomic_check,
5886 .atomic_async_check = dm_plane_atomic_async_check,
5887 .atomic_async_update = dm_plane_atomic_async_update
5888 };
5889
5890 /*
5891 * TODO: these are currently initialized to RGB formats only.
5892 * For future use cases we should either initialize them dynamically based on
5893 * plane capabilities, or initialize this array to all formats so the internal
5894 * drm check will succeed, and let DC implement the proper check.
5895 */
5896 static const uint32_t rgb_formats[] = {
5897 DRM_FORMAT_XRGB8888,
5898 DRM_FORMAT_ARGB8888,
5899 DRM_FORMAT_RGBA8888,
5900 DRM_FORMAT_XRGB2101010,
5901 DRM_FORMAT_XBGR2101010,
5902 DRM_FORMAT_ARGB2101010,
5903 DRM_FORMAT_ABGR2101010,
5904 DRM_FORMAT_XBGR8888,
5905 DRM_FORMAT_ABGR8888,
5906 DRM_FORMAT_RGB565,
5907 };
5908
5909 static const uint32_t overlay_formats[] = {
5910 DRM_FORMAT_XRGB8888,
5911 DRM_FORMAT_ARGB8888,
5912 DRM_FORMAT_RGBA8888,
5913 DRM_FORMAT_XBGR8888,
5914 DRM_FORMAT_ABGR8888,
5915 DRM_FORMAT_RGB565
5916 };
5917
5918 static const u32 cursor_formats[] = {
5919 DRM_FORMAT_ARGB8888
5920 };
5921
5922 static int get_plane_formats(const struct drm_plane *plane,
5923 const struct dc_plane_cap *plane_cap,
5924 uint32_t *formats, int max_formats)
5925 {
5926 int i, num_formats = 0;
5927
5928 /*
5929 * TODO: Query support for each group of formats directly from
5930 * DC plane caps. This will require adding more formats to the
5931 * caps list.
5932 */
5933
5934 switch (plane->type) {
5935 case DRM_PLANE_TYPE_PRIMARY:
5936 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
5937 if (num_formats >= max_formats)
5938 break;
5939
5940 formats[num_formats++] = rgb_formats[i];
5941 }
5942
5943 if (plane_cap && plane_cap->pixel_format_support.nv12)
5944 formats[num_formats++] = DRM_FORMAT_NV12;
5945 if (plane_cap && plane_cap->pixel_format_support.p010)
5946 formats[num_formats++] = DRM_FORMAT_P010;
5947 if (plane_cap && plane_cap->pixel_format_support.fp16) {
5948 formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
5949 formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
5950 formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
5951 formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
5952 }
5953 break;
5954
5955 case DRM_PLANE_TYPE_OVERLAY:
5956 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
5957 if (num_formats >= max_formats)
5958 break;
5959
5960 formats[num_formats++] = overlay_formats[i];
5961 }
5962 break;
5963
5964 case DRM_PLANE_TYPE_CURSOR:
5965 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
5966 if (num_formats >= max_formats)
5967 break;
5968
5969 formats[num_formats++] = cursor_formats[i];
5970 }
5971 break;
5972 }
5973
5974 return num_formats;
5975 }
5976
5977 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
5978 struct drm_plane *plane,
5979 unsigned long possible_crtcs,
5980 const struct dc_plane_cap *plane_cap)
5981 {
5982 uint32_t formats[32];
5983 int num_formats;
5984 int res = -EPERM;
5985 unsigned int supported_rotations;
5986
5987 num_formats = get_plane_formats(plane, plane_cap, formats,
5988 ARRAY_SIZE(formats));
5989
5990 res = drm_universal_plane_init(dm->adev->ddev, plane, possible_crtcs,
5991 &dm_plane_funcs, formats, num_formats,
5992 NULL, plane->type, NULL);
5993 if (res)
5994 return res;
5995
5996 if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
5997 plane_cap && plane_cap->per_pixel_alpha) {
5998 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
5999 BIT(DRM_MODE_BLEND_PREMULTI);
6000
6001 drm_plane_create_alpha_property(plane);
6002 drm_plane_create_blend_mode_property(plane, blend_caps);
6003 }
6004
6005 if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
6006 plane_cap &&
6007 (plane_cap->pixel_format_support.nv12 ||
6008 plane_cap->pixel_format_support.p010)) {
6009 /* This only affects YUV formats. */
6010 drm_plane_create_color_properties(
6011 plane,
6012 BIT(DRM_COLOR_YCBCR_BT601) |
6013 BIT(DRM_COLOR_YCBCR_BT709) |
6014 BIT(DRM_COLOR_YCBCR_BT2020),
6015 BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
6016 BIT(DRM_COLOR_YCBCR_FULL_RANGE),
6017 DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
6018 }
6019
6020 supported_rotations =
6021 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
6022 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
6023
6024 if (dm->adev->asic_type >= CHIP_BONAIRE)
6025 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
6026 supported_rotations);
6027
6028 drm_plane_helper_add(plane, &dm_plane_helper_funcs);
6029
6030 /* Create (reset) the plane state */
6031 if (plane->funcs->reset)
6032 plane->funcs->reset(plane);
6033
6034 return 0;
6035 }
6036
6037 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
6038 struct drm_plane *plane,
6039 uint32_t crtc_index)
6040 {
6041 struct amdgpu_crtc *acrtc = NULL;
6042 struct drm_plane *cursor_plane;
6043
6044 int res = -ENOMEM;
6045
6046 cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
6047 if (!cursor_plane)
6048 goto fail;
6049
6050 cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
6051 res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
6052
6053 acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
6054 if (!acrtc)
6055 goto fail;
6056
6057 res = drm_crtc_init_with_planes(
6058 dm->ddev,
6059 &acrtc->base,
6060 plane,
6061 cursor_plane,
6062 &amdgpu_dm_crtc_funcs, NULL);
6063
6064 if (res)
6065 goto fail;
6066
6067 drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
6068
6069 /* Create (reset) the plane state */
6070 if (acrtc->base.funcs->reset)
6071 acrtc->base.funcs->reset(&acrtc->base);
6072
6073 acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
6074 acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
6075
6076 acrtc->crtc_id = crtc_index;
6077 acrtc->base.enabled = false;
6078 acrtc->otg_inst = -1;
6079
6080 dm->adev->mode_info.crtcs[crtc_index] = acrtc;
6081 drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
6082 true, MAX_COLOR_LUT_ENTRIES);
6083 drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
6084
6085 return 0;
6086
6087 fail:
6088 kfree(acrtc);
6089 kfree(cursor_plane);
6090 return res;
6091 }
6092
6093
6094 static int to_drm_connector_type(enum signal_type st)
6095 {
6096 switch (st) {
6097 case SIGNAL_TYPE_HDMI_TYPE_A:
6098 return DRM_MODE_CONNECTOR_HDMIA;
6099 case SIGNAL_TYPE_EDP:
6100 return DRM_MODE_CONNECTOR_eDP;
6101 case SIGNAL_TYPE_LVDS:
6102 return DRM_MODE_CONNECTOR_LVDS;
6103 case SIGNAL_TYPE_RGB:
6104 return DRM_MODE_CONNECTOR_VGA;
6105 case SIGNAL_TYPE_DISPLAY_PORT:
6106 case SIGNAL_TYPE_DISPLAY_PORT_MST:
6107 return DRM_MODE_CONNECTOR_DisplayPort;
6108 case SIGNAL_TYPE_DVI_DUAL_LINK:
6109 case SIGNAL_TYPE_DVI_SINGLE_LINK:
6110 return DRM_MODE_CONNECTOR_DVID;
6111 case SIGNAL_TYPE_VIRTUAL:
6112 return DRM_MODE_CONNECTOR_VIRTUAL;
6113
6114 default:
6115 return DRM_MODE_CONNECTOR_Unknown;
6116 }
6117 }
6118
6119 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
6120 {
6121 struct drm_encoder *encoder;
6122
6123 /* There is only one encoder per connector */
6124 drm_connector_for_each_possible_encoder(connector, encoder)
6125 return encoder;
6126
6127 return NULL;
6128 }
6129
6130 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
6131 {
6132 struct drm_encoder *encoder;
6133 struct amdgpu_encoder *amdgpu_encoder;
6134
6135 encoder = amdgpu_dm_connector_to_encoder(connector);
6136
6137 if (encoder == NULL)
6138 return;
6139
6140 amdgpu_encoder = to_amdgpu_encoder(encoder);
6141
6142 amdgpu_encoder->native_mode.clock = 0;
6143
6144 if (!list_empty(&connector->probed_modes)) {
6145 struct drm_display_mode *preferred_mode = NULL;
6146
6147 list_for_each_entry(preferred_mode,
6148 &connector->probed_modes,
6149 head) {
6150 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
6151 amdgpu_encoder->native_mode = *preferred_mode;
6152
6153 break;
6154 }
6155
6156 }
6157 }
6158
6159 static struct drm_display_mode *
6160 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
6161 char *name,
6162 int hdisplay, int vdisplay)
6163 {
6164 struct drm_device *dev = encoder->dev;
6165 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6166 struct drm_display_mode *mode = NULL;
6167 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6168
6169 mode = drm_mode_duplicate(dev, native_mode);
6170
6171 if (mode == NULL)
6172 return NULL;
6173
6174 mode->hdisplay = hdisplay;
6175 mode->vdisplay = vdisplay;
6176 mode->type &= ~DRM_MODE_TYPE_PREFERRED;
6177 strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
6178
6179 return mode;
6180
6181 }
6182
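/*
 * Add "common" modes smaller than the native mode (derived from it by
 * patching hdisplay/vdisplay) so userspace can pick standard resolutions
 * the display doesn't advertise directly.
 */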
6183 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
6184 struct drm_connector *connector)
6185 {
6186 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6187 struct drm_display_mode *mode = NULL;
6188 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6189 struct amdgpu_dm_connector *amdgpu_dm_connector =
6190 to_amdgpu_dm_connector(connector);
6191 int i;
6192 int n;
6193 struct mode_size {
6194 char name[DRM_DISPLAY_MODE_LEN];
6195 int w;
6196 int h;
6197 } common_modes[] = {
6198 { "640x480", 640, 480},
6199 { "800x600", 800, 600},
6200 { "1024x768", 1024, 768},
6201 { "1280x720", 1280, 720},
6202 { "1280x800", 1280, 800},
6203 {"1280x1024", 1280, 1024},
6204 { "1440x900", 1440, 900},
6205 {"1680x1050", 1680, 1050},
6206 {"1600x1200", 1600, 1200},
6207 {"1920x1080", 1920, 1080},
6208 {"1920x1200", 1920, 1200}
6209 };
6210
6211 n = ARRAY_SIZE(common_modes);
6212
6213 for (i = 0; i < n; i++) {
6214 struct drm_display_mode *curmode = NULL;
6215 bool mode_existed = false;
6216
6217 if (common_modes[i].w > native_mode->hdisplay ||
6218 common_modes[i].h > native_mode->vdisplay ||
6219 (common_modes[i].w == native_mode->hdisplay &&
6220 common_modes[i].h == native_mode->vdisplay))
6221 continue;
6222
6223 list_for_each_entry(curmode, &connector->probed_modes, head) {
6224 if (common_modes[i].w == curmode->hdisplay &&
6225 common_modes[i].h == curmode->vdisplay) {
6226 mode_existed = true;
6227 break;
6228 }
6229 }
6230
6231 if (mode_existed)
6232 continue;
6233
6234 mode = amdgpu_dm_create_common_mode(encoder,
6235 common_modes[i].name, common_modes[i].w,
6236 common_modes[i].h);
6237 drm_mode_probed_add(connector, mode);
6238 amdgpu_dm_connector->num_modes++;
6239 }
6240 }
6241
6242 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
6243 struct edid *edid)
6244 {
6245 struct amdgpu_dm_connector *amdgpu_dm_connector =
6246 to_amdgpu_dm_connector(connector);
6247
6248 if (edid) {
6249 /* empty probed_modes */
6250 INIT_LIST_HEAD(&connector->probed_modes);
6251 amdgpu_dm_connector->num_modes =
6252 drm_add_edid_modes(connector, edid);
6253
6254 /* Sort the probed modes before calling
6255 * amdgpu_dm_get_native_mode(), since an EDID can have
6256 * more than one preferred mode. Modes later in the
6257 * probed mode list could be of a higher, preferred
6258 * resolution: for example, a 3840x2160 preferred timing
6259 * in the base EDID and a 4096x2160 preferred resolution
6260 * in a DID extension block later.
6261 */
6262 drm_mode_sort(&connector->probed_modes);
6263 amdgpu_dm_get_native_mode(connector);
6264 } else {
6265 amdgpu_dm_connector->num_modes = 0;
6266 }
6267 }
6268
6269 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
6270 {
6271 struct amdgpu_dm_connector *amdgpu_dm_connector =
6272 to_amdgpu_dm_connector(connector);
6273 struct drm_encoder *encoder;
6274 struct edid *edid = amdgpu_dm_connector->edid;
6275
6276 encoder = amdgpu_dm_connector_to_encoder(connector);
6277
6278 if (!edid || !drm_edid_is_valid(edid)) {
6279 amdgpu_dm_connector->num_modes =
6280 drm_add_modes_noedid(connector, 640, 480);
6281 } else {
6282 amdgpu_dm_connector_ddc_get_modes(connector, edid);
6283 amdgpu_dm_connector_add_common_modes(encoder, connector);
6284 }
6285 amdgpu_dm_fbc_init(connector);
6286
6287 return amdgpu_dm_connector->num_modes;
6288 }
6289
6290 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
6291 struct amdgpu_dm_connector *aconnector,
6292 int connector_type,
6293 struct dc_link *link,
6294 int link_index)
6295 {
6296 struct amdgpu_device *adev = dm->ddev->dev_private;
6297
6298 /*
6299 * Some of the properties below require access to state, like bpc.
6300 * Allocate some default initial connector state with our reset helper.
6301 */
6302 if (aconnector->base.funcs->reset)
6303 aconnector->base.funcs->reset(&aconnector->base);
6304
6305 aconnector->connector_id = link_index;
6306 aconnector->dc_link = link;
6307 aconnector->base.interlace_allowed = false;
6308 aconnector->base.doublescan_allowed = false;
6309 aconnector->base.stereo_allowed = false;
6310 aconnector->base.dpms = DRM_MODE_DPMS_OFF;
6311 aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
6312 aconnector->audio_inst = -1;
6313 mutex_init(&aconnector->hpd_lock);
6314
6315 /*
6316 * Configure HPD hot-plug support. The connector->polled default value
6317 * is 0, which means HPD hot plug is not supported.
6318 */
6319 switch (connector_type) {
6320 case DRM_MODE_CONNECTOR_HDMIA:
6321 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6322 aconnector->base.ycbcr_420_allowed =
6323 link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
6324 break;
6325 case DRM_MODE_CONNECTOR_DisplayPort:
6326 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6327 aconnector->base.ycbcr_420_allowed =
6328 link->link_enc->features.dp_ycbcr420_supported ? true : false;
6329 break;
6330 case DRM_MODE_CONNECTOR_DVID:
6331 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6332 break;
6333 default:
6334 break;
6335 }
6336
6337 drm_object_attach_property(&aconnector->base.base,
6338 dm->ddev->mode_config.scaling_mode_property,
6339 DRM_MODE_SCALE_NONE);
6340
6341 drm_object_attach_property(&aconnector->base.base,
6342 adev->mode_info.underscan_property,
6343 UNDERSCAN_OFF);
6344 drm_object_attach_property(&aconnector->base.base,
6345 adev->mode_info.underscan_hborder_property,
6346 0);
6347 drm_object_attach_property(&aconnector->base.base,
6348 adev->mode_info.underscan_vborder_property,
6349 0);
6350
6351 if (!aconnector->mst_port)
6352 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
6353
6354 /* This defaults to the max in the range, but we want 8bpc for non-eDP. */
6355 aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
6356 aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
6357
6358 if (connector_type == DRM_MODE_CONNECTOR_eDP &&
6359 (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
6360 drm_object_attach_property(&aconnector->base.base,
6361 adev->mode_info.abm_level_property, 0);
6362 }
6363
6364 if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
6365 connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
6366 connector_type == DRM_MODE_CONNECTOR_eDP) {
6367 drm_object_attach_property(
6368 &aconnector->base.base,
6369 dm->ddev->mode_config.hdr_output_metadata_property, 0);
6370
6371 if (!aconnector->mst_port)
6372 drm_connector_attach_vrr_capable_property(&aconnector->base);
6373
6374 #ifdef CONFIG_DRM_AMD_DC_HDCP
6375 if (adev->dm.hdcp_workqueue)
6376 drm_connector_attach_content_protection_property(&aconnector->base, true);
6377 #endif
6378 }
6379 }
6380
6381 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
6382 struct i2c_msg *msgs, int num)
6383 {
6384 struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
6385 struct ddc_service *ddc_service = i2c->ddc_service;
6386 struct i2c_command cmd;
6387 int i;
6388 int result = -EIO;
6389
6390 cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
6391
6392 if (!cmd.payloads)
6393 return result;
6394
6395 cmd.number_of_payloads = num;
6396 cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
6397 cmd.speed = 100;
6398
6399 for (i = 0; i < num; i++) {
6400 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
6401 cmd.payloads[i].address = msgs[i].addr;
6402 cmd.payloads[i].length = msgs[i].len;
6403 cmd.payloads[i].data = msgs[i].buf;
6404 }
6405
6406 if (dc_submit_i2c(
6407 ddc_service->ctx->dc,
6408 ddc_service->ddc_pin->hw_info.ddc_channel,
6409 &cmd))
6410 result = num;
6411
6412 kfree(cmd.payloads);
6413 return result;
6414 }
6415
6416 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
6417 {
6418 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
6419 }
6420
6421 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
6422 .master_xfer = amdgpu_dm_i2c_xfer,
6423 .functionality = amdgpu_dm_i2c_func,
6424 };
6425
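/* Allocate an i2c adapter backed by the given DDC service and link index. */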
6426 static struct amdgpu_i2c_adapter *
6427 create_i2c(struct ddc_service *ddc_service,
6428 int link_index,
6429 int *res)
6430 {
6431 struct amdgpu_device *adev = ddc_service->ctx->driver_context;
6432 struct amdgpu_i2c_adapter *i2c;
6433
6434 i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
6435 if (!i2c)
6436 return NULL;
6437 i2c->base.owner = THIS_MODULE;
6438 i2c->base.class = I2C_CLASS_DDC;
6439 i2c->base.dev.parent = &adev->pdev->dev;
6440 i2c->base.algo = &amdgpu_dm_i2c_algo;
6441 snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
6442 i2c_set_adapdata(&i2c->base, i2c);
6443 i2c->ddc_service = ddc_service;
6444 i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
6445
6446 return i2c;
6447 }
6448
6449
6450 /*
6451 * Note: this function assumes that dc_link_detect() was called for the
6452 * dc_link which will be represented by this aconnector.
6453 */
6454 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
6455 struct amdgpu_dm_connector *aconnector,
6456 uint32_t link_index,
6457 struct amdgpu_encoder *aencoder)
6458 {
6459 int res = 0;
6460 int connector_type;
6461 struct dc *dc = dm->dc;
6462 struct dc_link *link = dc_get_link_at_index(dc, link_index);
6463 struct amdgpu_i2c_adapter *i2c;
6464
6465 link->priv = aconnector;
6466
6467 DRM_DEBUG_DRIVER("%s()\n", __func__);
6468
6469 i2c = create_i2c(link->ddc, link->link_index, &res);
6470 if (!i2c) {
6471 DRM_ERROR("Failed to create i2c adapter data\n");
6472 return -ENOMEM;
6473 }
6474
6475 aconnector->i2c = i2c;
6476 res = i2c_add_adapter(&i2c->base);
6477
6478 if (res) {
6479 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
6480 goto out_free;
6481 }
6482
6483 connector_type = to_drm_connector_type(link->connector_signal);
6484
6485 res = drm_connector_init_with_ddc(
6486 dm->ddev,
6487 &aconnector->base,
6488 &amdgpu_dm_connector_funcs,
6489 connector_type,
6490 &i2c->base);
6491
6492 if (res) {
6493 DRM_ERROR("connector_init failed\n");
6494 aconnector->connector_id = -1;
6495 goto out_free;
6496 }
6497
6498 drm_connector_helper_add(
6499 &aconnector->base,
6500 &amdgpu_dm_connector_helper_funcs);
6501
6502 amdgpu_dm_connector_init_helper(
6503 dm,
6504 aconnector,
6505 connector_type,
6506 link,
6507 link_index);
6508
6509 drm_connector_attach_encoder(
6510 &aconnector->base, &aencoder->base);
6511
6512 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
6513 || connector_type == DRM_MODE_CONNECTOR_eDP)
6514 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
6515
6516 out_free:
6517 if (res) {
6518 kfree(i2c);
6519 aconnector->i2c = NULL;
6520 }
6521 return res;
6522 }
6523
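/* Build a possible_crtcs mask with one bit set per instantiated CRTC. */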
6524 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
6525 {
6526 switch (adev->mode_info.num_crtc) {
6527 case 1:
6528 return 0x1;
6529 case 2:
6530 return 0x3;
6531 case 3:
6532 return 0x7;
6533 case 4:
6534 return 0xf;
6535 case 5:
6536 return 0x1f;
6537 case 6:
6538 default:
6539 return 0x3f;
6540 }
6541 }
6542
6543 static int amdgpu_dm_encoder_init(struct drm_device *dev,
6544 struct amdgpu_encoder *aencoder,
6545 uint32_t link_index)
6546 {
6547 struct amdgpu_device *adev = dev->dev_private;
6548
6549 int res = drm_encoder_init(dev,
6550 &aencoder->base,
6551 &amdgpu_dm_encoder_funcs,
6552 DRM_MODE_ENCODER_TMDS,
6553 NULL);
6554
6555 aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
6556
6557 if (!res)
6558 aencoder->encoder_id = link_index;
6559 else
6560 aencoder->encoder_id = -1;
6561
6562 drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
6563
6564 return res;
6565 }
6566
6567 static void manage_dm_interrupts(struct amdgpu_device *adev,
6568 struct amdgpu_crtc *acrtc,
6569 bool enable)
6570 {
6571 /*
6572 * We have no guarantee that the frontend index maps to the same
6573 * backend index - some even map to more than one.
6574 *
6575 * TODO: Use a different interrupt or check DC itself for the mapping.
6576 */
6577 int irq_type =
6578 amdgpu_display_crtc_idx_to_irq_type(
6579 adev,
6580 acrtc->crtc_id);
6581
6582 if (enable) {
6583 drm_crtc_vblank_on(&acrtc->base);
6584 amdgpu_irq_get(
6585 adev,
6586 &adev->pageflip_irq,
6587 irq_type);
6588 } else {
6589
6590 amdgpu_irq_put(
6591 adev,
6592 &adev->pageflip_irq,
6593 irq_type);
6594 drm_crtc_vblank_off(&acrtc->base);
6595 }
6596 }
6597
6598 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
6599 struct amdgpu_crtc *acrtc)
6600 {
6601 int irq_type =
6602 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
6603
6604 /**
6605 * This reads the current IRQ state and forces the setting to be
6606 * reapplied to hardware.
6607 */
6608 amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
6609 }
6610
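/*
 * Detect changes to the scaling mode or the underscan enable/borders
 * between the old and new connector states.
 */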
6611 static bool
6612 is_scaling_state_different(const struct dm_connector_state *dm_state,
6613 const struct dm_connector_state *old_dm_state)
6614 {
6615 if (dm_state->scaling != old_dm_state->scaling)
6616 return true;
6617 if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
6618 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
6619 return true;
6620 } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
6621 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
6622 return true;
6623 } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
6624 dm_state->underscan_vborder != old_dm_state->underscan_vborder)
6625 return true;
6626 return false;
6627 }
6628
6629 #ifdef CONFIG_DRM_AMD_DC_HDCP
6630 static bool is_content_protection_different(struct drm_connector_state *state,
6631 const struct drm_connector_state *old_state,
6632 const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
6633 {
6634 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6635
6636 if (old_state->hdcp_content_type != state->hdcp_content_type &&
6637 state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
6638 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6639 return true;
6640 }
6641
6642 /* CP is being re-enabled, ignore this. */
6643 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
6644 state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
6645 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
6646 return false;
6647 }
6648
6649 /* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED */
6650 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
6651 state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
6652 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6653
6654 /* Check if something is connected/enabled; otherwise we would start HDCP
6655 * while nothing is connected/enabled: hot-plug, headless S3, DPMS.
6656 */
6657 if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && connector->dpms == DRM_MODE_DPMS_ON &&
6658 aconnector->dc_sink != NULL)
6659 return true;
6660
6661 if (old_state->content_protection == state->content_protection)
6662 return false;
6663
6664 if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
6665 return true;
6666
6667 return false;
6668 }
6669
6670 #endif
6671 static void remove_stream(struct amdgpu_device *adev,
6672 struct amdgpu_crtc *acrtc,
6673 struct dc_stream_state *stream)
6674 {
6675 /* this is the update mode case */
6676
6677 acrtc->otg_inst = -1;
6678 acrtc->enabled = false;
6679 }
6680
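/*
 * Translate the cursor plane's CRTC coordinates into a dc_cursor_position.
 * Negative coordinates are clamped to zero and folded into the hotspot so
 * the cursor can hang off the top/left edge of the screen.
 */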
6681 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
6682 struct dc_cursor_position *position)
6683 {
6684 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6685 int x, y;
6686 int xorigin = 0, yorigin = 0;
6687
6688 position->enable = false;
6689 position->x = 0;
6690 position->y = 0;
6691
6692 if (!crtc || !plane->state->fb)
6693 return 0;
6694
6695 if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
6696 (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
6697 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
6698 __func__,
6699 plane->state->crtc_w,
6700 plane->state->crtc_h);
6701 return -EINVAL;
6702 }
6703
6704 x = plane->state->crtc_x;
6705 y = plane->state->crtc_y;
6706
6707 if (x <= -amdgpu_crtc->max_cursor_width ||
6708 y <= -amdgpu_crtc->max_cursor_height)
6709 return 0;
6710
6711 if (x < 0) {
6712 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
6713 x = 0;
6714 }
6715 if (y < 0) {
6716 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
6717 y = 0;
6718 }
6719 position->enable = true;
6720 position->translate_by_source = true;
6721 position->x = x;
6722 position->y = y;
6723 position->x_hotspot = xorigin;
6724 position->y_hotspot = yorigin;
6725
6726 return 0;
6727 }
6728
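/*
 * Program cursor attributes and position on the stream belonging to the
 * CRTC the cursor plane is (or was) attached to; a disabled position
 * turns the cursor off.
 */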
6729 static void handle_cursor_update(struct drm_plane *plane,
6730 struct drm_plane_state *old_plane_state)
6731 {
6732 struct amdgpu_device *adev = plane->dev->dev_private;
6733 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
6734 struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
6735 struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
6736 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6737 uint64_t address = afb ? afb->address : 0;
6738 struct dc_cursor_position position;
6739 struct dc_cursor_attributes attributes;
6740 int ret;
6741
6742 if (!plane->state->fb && !old_plane_state->fb)
6743 return;
6744
6745 DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
6746 __func__,
6747 amdgpu_crtc->crtc_id,
6748 plane->state->crtc_w,
6749 plane->state->crtc_h);
6750
6751 ret = get_cursor_position(plane, crtc, &position);
6752 if (ret)
6753 return;
6754
6755 if (!position.enable) {
6756 /* turn off cursor */
6757 if (crtc_state && crtc_state->stream) {
6758 mutex_lock(&adev->dm.dc_lock);
6759 dc_stream_set_cursor_position(crtc_state->stream,
6760 &position);
6761 mutex_unlock(&adev->dm.dc_lock);
6762 }
6763 return;
6764 }
6765
6766 amdgpu_crtc->cursor_width = plane->state->crtc_w;
6767 amdgpu_crtc->cursor_height = plane->state->crtc_h;
6768
6769 memset(&attributes, 0, sizeof(attributes));
6770 attributes.address.high_part = upper_32_bits(address);
6771 attributes.address.low_part = lower_32_bits(address);
6772 attributes.width = plane->state->crtc_w;
6773 attributes.height = plane->state->crtc_h;
6774 attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
6775 attributes.rotation_angle = 0;
6776 attributes.attribute_flags.value = 0;
6777
6778 attributes.pitch = attributes.width;
6779
6780 if (crtc_state->stream) {
6781 mutex_lock(&adev->dm.dc_lock);
6782 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
6783 &attributes))
6784 DRM_ERROR("DC failed to set cursor attributes\n");
6785
6786 if (!dc_stream_set_cursor_position(crtc_state->stream,
6787 &position))
6788 DRM_ERROR("DC failed to set cursor position\n");
6789 mutex_unlock(&adev->dm.dc_lock);
6790 }
6791 }
6792
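/*
 * Hand the pending pageflip event over to the pageflip interrupt handler.
 * Must be called with the event_lock held, as asserted below.
 */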
6793 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
6794 {
6795
6796 assert_spin_locked(&acrtc->base.dev->event_lock);
6797 WARN_ON(acrtc->event);
6798
6799 acrtc->event = acrtc->base.state->event;
6800
6801 /* Set the flip status */
6802 acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
6803
6804 /* Mark this event as consumed */
6805 acrtc->base.state->event = NULL;
6806
6807 DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
6808 acrtc->crtc_id);
6809 }
6810
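/*
 * Recompute the VRR parameters and infopacket for a stream around a flip,
 * and track whether the timing or the infopacket actually changed so the
 * commit path can push the update to DC.
 */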
6811 static void update_freesync_state_on_stream(
6812 struct amdgpu_display_manager *dm,
6813 struct dm_crtc_state *new_crtc_state,
6814 struct dc_stream_state *new_stream,
6815 struct dc_plane_state *surface,
6816 u32 flip_timestamp_in_us)
6817 {
6818 struct mod_vrr_params vrr_params;
6819 struct dc_info_packet vrr_infopacket = {0};
6820 struct amdgpu_device *adev = dm->adev;
6821 unsigned long flags;
6822
6823 if (!new_stream)
6824 return;
6825
6826 /*
6827 * TODO: Determine why min/max totals and vrefresh can be 0 here.
6828 * For now it's sufficient to just guard against these conditions.
6829 */
6830
6831 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
6832 return;
6833
6834 spin_lock_irqsave(&adev->ddev->event_lock, flags);
6835 vrr_params = new_crtc_state->vrr_params;
6836
6837 if (surface) {
6838 mod_freesync_handle_preflip(
6839 dm->freesync_module,
6840 surface,
6841 new_stream,
6842 flip_timestamp_in_us,
6843 &vrr_params);
6844
6845 if (adev->family < AMDGPU_FAMILY_AI &&
6846 amdgpu_dm_vrr_active(new_crtc_state)) {
6847 mod_freesync_handle_v_update(dm->freesync_module,
6848 new_stream, &vrr_params);
6849
6850 /* Need to call this before the frame ends. */
6851 dc_stream_adjust_vmin_vmax(dm->dc,
6852 new_crtc_state->stream,
6853 &vrr_params.adjust);
6854 }
6855 }
6856
6857 mod_freesync_build_vrr_infopacket(
6858 dm->freesync_module,
6859 new_stream,
6860 &vrr_params,
6861 PACKET_TYPE_VRR,
6862 TRANSFER_FUNC_UNKNOWN,
6863 &vrr_infopacket);
6864
6865 new_crtc_state->freesync_timing_changed |=
6866 (memcmp(&new_crtc_state->vrr_params.adjust,
6867 &vrr_params.adjust,
6868 sizeof(vrr_params.adjust)) != 0);
6869
6870 new_crtc_state->freesync_vrr_info_changed |=
6871 (memcmp(&new_crtc_state->vrr_infopacket,
6872 &vrr_infopacket,
6873 sizeof(vrr_infopacket)) != 0);
6874
6875 new_crtc_state->vrr_params = vrr_params;
6876 new_crtc_state->vrr_infopacket = vrr_infopacket;
6877
6878 new_stream->adjust = new_crtc_state->vrr_params.adjust;
6879 new_stream->vrr_infopacket = vrr_infopacket;
6880
6881 if (new_crtc_state->freesync_vrr_info_changed)
6882 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
6883 new_crtc_state->base.crtc->base.id,
6884 (int)new_crtc_state->base.vrr_enabled,
6885 (int)vrr_params.state);
6886
6887 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
6888 }
6889
6890 static void pre_update_freesync_state_on_stream(
6891 struct amdgpu_display_manager *dm,
6892 struct dm_crtc_state *new_crtc_state)
6893 {
6894 struct dc_stream_state *new_stream = new_crtc_state->stream;
6895 struct mod_vrr_params vrr_params;
6896 struct mod_freesync_config config = new_crtc_state->freesync_config;
6897 struct amdgpu_device *adev = dm->adev;
6898 unsigned long flags;
6899
6900 if (!new_stream)
6901 return;
6902
6903 /*
6904 * TODO: Determine why min/max totals and vrefresh can be 0 here.
6905 * For now it's sufficient to just guard against these conditions.
6906 */
6907 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
6908 return;
6909
6910 spin_lock_irqsave(&adev->ddev->event_lock, flags);
6911 vrr_params = new_crtc_state->vrr_params;
6912
6913 if (new_crtc_state->vrr_supported &&
6914 config.min_refresh_in_uhz &&
6915 config.max_refresh_in_uhz) {
6916 config.state = new_crtc_state->base.vrr_enabled ?
6917 VRR_STATE_ACTIVE_VARIABLE :
6918 VRR_STATE_INACTIVE;
6919 } else {
6920 config.state = VRR_STATE_UNSUPPORTED;
6921 }
6922
6923 mod_freesync_build_vrr_params(dm->freesync_module,
6924 new_stream,
6925 &config, &vrr_params);
6926
6927 new_crtc_state->freesync_timing_changed |=
6928 (memcmp(&new_crtc_state->vrr_params.adjust,
6929 &vrr_params.adjust,
6930 sizeof(vrr_params.adjust)) != 0);
6931
6932 new_crtc_state->vrr_params = vrr_params;
6933 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
6934 }
6935
6936 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
6937 struct dm_crtc_state *new_state)
6938 {
6939 bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
6940 bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
6941
6942 if (!old_vrr_active && new_vrr_active) {
6943 /* Transition VRR inactive -> active:
6944 * While VRR is active, we must not disable vblank irq, as a
6945 * reenable after a disable would compute bogus vblank/pflip
6946 * timestamps if the reenable happened inside the display front porch.
6947 *
6948 * We also need vupdate irq for the actual core vblank handling
6949 * at end of vblank.
6950 */
6951 dm_set_vupdate_irq(new_state->base.crtc, true);
6952 drm_crtc_vblank_get(new_state->base.crtc);
6953 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
6954 __func__, new_state->base.crtc->base.id);
6955 } else if (old_vrr_active && !new_vrr_active) {
6956 /* Transition VRR active -> inactive:
6957 * Allow vblank irq disable again for fixed refresh rate.
6958 */
6959 dm_set_vupdate_irq(new_state->base.crtc, false);
6960 drm_crtc_vblank_put(new_state->base.crtc);
6961 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
6962 __func__, new_state->base.crtc->base.id);
6963 }
6964 }
6965
6966 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
6967 {
6968 struct drm_plane *plane;
6969 struct drm_plane_state *old_plane_state, *new_plane_state;
6970 int i;
6971
6972 /*
6973 * TODO: Make this per-stream so we don't issue redundant updates for
6974 * commits with multiple streams.
6975 */
6976 for_each_oldnew_plane_in_state(state, plane, old_plane_state,
6977 new_plane_state, i)
6978 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6979 handle_cursor_update(plane, old_plane_state);
6980 }
6981
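/*
 * Program all plane updates for one CRTC as a single DC update bundle,
 * handling flip throttling against the target vblank, FreeSync state and
 * PSR enable/disable along the way.
 */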
6982 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
6983 struct dc_state *dc_state,
6984 struct drm_device *dev,
6985 struct amdgpu_display_manager *dm,
6986 struct drm_crtc *pcrtc,
6987 bool wait_for_vblank)
6988 {
6989 uint32_t i;
6990 uint64_t timestamp_ns;
6991 struct drm_plane *plane;
6992 struct drm_plane_state *old_plane_state, *new_plane_state;
6993 struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
6994 struct drm_crtc_state *new_pcrtc_state =
6995 drm_atomic_get_new_crtc_state(state, pcrtc);
6996 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
6997 struct dm_crtc_state *dm_old_crtc_state =
6998 to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
6999 int planes_count = 0, vpos, hpos;
7000 long r;
7001 unsigned long flags;
7002 struct amdgpu_bo *abo;
7003 uint64_t tiling_flags;
7004 bool tmz_surface = false;
7005 uint32_t target_vblank, last_flip_vblank;
7006 bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
7007 bool pflip_present = false;
7008 struct {
7009 struct dc_surface_update surface_updates[MAX_SURFACES];
7010 struct dc_plane_info plane_infos[MAX_SURFACES];
7011 struct dc_scaling_info scaling_infos[MAX_SURFACES];
7012 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
7013 struct dc_stream_update stream_update;
7014 } *bundle;
7015
7016 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
7017
7018 if (!bundle) {
7019 dm_error("Failed to allocate update bundle\n");
7020 goto cleanup;
7021 }
7022
7023 /*
7024 * Disable the cursor first if we're disabling all the planes.
7025 * It'll remain on the screen after the planes are re-enabled
7026 * if we don't.
7027 */
7028 if (acrtc_state->active_planes == 0)
7029 amdgpu_dm_commit_cursors(state);
7030
7031 /* update planes when needed */
7032 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
7033 struct drm_crtc *crtc = new_plane_state->crtc;
7034 struct drm_crtc_state *new_crtc_state;
7035 struct drm_framebuffer *fb = new_plane_state->fb;
7036 bool plane_needs_flip;
7037 struct dc_plane_state *dc_plane;
7038 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
7039
7040 /* Cursor plane is handled after stream updates */
7041 if (plane->type == DRM_PLANE_TYPE_CURSOR)
7042 continue;
7043
7044 if (!fb || !crtc || pcrtc != crtc)
7045 continue;
7046
7047 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
7048 if (!new_crtc_state->active)
7049 continue;
7050
7051 dc_plane = dm_new_plane_state->dc_state;
7052
7053 bundle->surface_updates[planes_count].surface = dc_plane;
7054 if (new_pcrtc_state->color_mgmt_changed) {
7055 bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
7056 bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
7057 bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
7058 }
7059
7060 fill_dc_scaling_info(new_plane_state,
7061 &bundle->scaling_infos[planes_count]);
7062
7063 bundle->surface_updates[planes_count].scaling_info =
7064 &bundle->scaling_infos[planes_count];
7065
7066 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
7067
7068 pflip_present = pflip_present || plane_needs_flip;
7069
7070 if (!plane_needs_flip) {
7071 planes_count += 1;
7072 continue;
7073 }
7074
7075 abo = gem_to_amdgpu_bo(fb->obj[0]);
7076
7077 /*
7078 * Wait for all fences on this FB. Do a limited wait to avoid a
7079 * deadlock during GPU reset, when this fence will not signal
7080 * but we hold the reservation lock for the BO.
7081 */
7082 r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
7083 false,
7084 msecs_to_jiffies(5000));
7085 if (unlikely(r <= 0))
7086 DRM_ERROR("Waiting for fences timed out!");
7087
7088 /*
7089 * We cannot reserve buffers here, which means the normal flag
7090 * access functions don't work. Paper over this with READ_ONCE,
7091 * but maybe the flags are invariant enough that not even that
7092 * would be needed.
7093 */
7094 tiling_flags = READ_ONCE(abo->tiling_flags);
7095 tmz_surface = READ_ONCE(abo->flags) & AMDGPU_GEM_CREATE_ENCRYPTED;
7096
7097 fill_dc_plane_info_and_addr(
7098 dm->adev, new_plane_state, tiling_flags,
7099 &bundle->plane_infos[planes_count],
7100 &bundle->flip_addrs[planes_count].address,
7101 tmz_surface,
7102 false);
7103
7104 DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
7105 new_plane_state->plane->index,
7106 bundle->plane_infos[planes_count].dcc.enable);
7107
7108 bundle->surface_updates[planes_count].plane_info =
7109 &bundle->plane_infos[planes_count];
7110
7111 /*
7112 * Only allow immediate flips for fast updates that don't
7113 * change the FB pitch, DCC state, rotation or mirroring.
7114 */
7115 bundle->flip_addrs[planes_count].flip_immediate =
7116 crtc->state->async_flip &&
7117 acrtc_state->update_type == UPDATE_TYPE_FAST;
7118
7119 timestamp_ns = ktime_get_ns();
7120 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
7121 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
7122 bundle->surface_updates[planes_count].surface = dc_plane;
7123
7124 if (!bundle->surface_updates[planes_count].surface) {
7125 DRM_ERROR("No surface for CRTC: id=%d\n",
7126 acrtc_attach->crtc_id);
7127 continue;
7128 }
7129
7130 if (plane == pcrtc->primary)
7131 update_freesync_state_on_stream(
7132 dm,
7133 acrtc_state,
7134 acrtc_state->stream,
7135 dc_plane,
7136 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
7137
7138 DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
7139 __func__,
7140 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
7141 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
7142
7143 planes_count += 1;
7144
7145 }
7146
7147 if (pflip_present) {
7148 if (!vrr_active) {
7149 /* Use old throttling in non-vrr fixed refresh rate mode
7150 * to keep flip scheduling based on target vblank counts
7151 * working in a backwards compatible way, e.g., for
7152 * clients using the GLX_OML_sync_control extension or
7153 * DRI3/Present extension with defined target_msc.
7154 */
7155 last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
7156 }
7157 else {
7158 /* For variable refresh rate mode only:
7159 * Get vblank of last completed flip to avoid > 1 vrr
7160 * flips per video frame by use of throttling, but allow
7161 * flip programming anywhere in the possibly large
7162 * variable vrr vblank interval for fine-grained flip
7163 * timing control and more opportunity to avoid stutter
7164 * on late submission of flips.
7165 */
7166 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7167 last_flip_vblank = acrtc_attach->last_flip_vblank;
7168 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7169 }
7170
7171 target_vblank = last_flip_vblank + wait_for_vblank;
7172
7173 /*
7174 * Wait until we're out of the vertical blank period before the one
7175 * targeted by the flip
7176 */
7177 while ((acrtc_attach->enabled &&
7178 (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
7179 0, &vpos, &hpos, NULL,
7180 NULL, &pcrtc->hwmode)
7181 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
7182 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
7183 (int)(target_vblank -
7184 amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
7185 usleep_range(1000, 1100);
7186 }
7187
7188 /**
7189 * Prepare the flip event for the pageflip interrupt to handle.
7190 *
7191 * This only works in the case where we've already turned on the
7192 * appropriate hardware blocks (e.g. HUBP), so in the transition case
7193 * from 0 -> n planes we have to skip a hardware generated event
7194 * and rely on sending it from software.
7195 */
7196 if (acrtc_attach->base.state->event &&
7197 acrtc_state->active_planes > 0) {
7198 drm_crtc_vblank_get(pcrtc);
7199
7200 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7201
7202 WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
7203 prepare_flip_isr(acrtc_attach);
7204
7205 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7206 }
7207
7208 if (acrtc_state->stream) {
7209 if (acrtc_state->freesync_vrr_info_changed)
7210 bundle->stream_update.vrr_infopacket =
7211 &acrtc_state->stream->vrr_infopacket;
7212 }
7213 }
7214
7215 /* Update the planes if changed or disable if we don't have any. */
7216 if ((planes_count || acrtc_state->active_planes == 0) &&
7217 acrtc_state->stream) {
7218 bundle->stream_update.stream = acrtc_state->stream;
7219 if (new_pcrtc_state->mode_changed) {
7220 bundle->stream_update.src = acrtc_state->stream->src;
7221 bundle->stream_update.dst = acrtc_state->stream->dst;
7222 }
7223
7224 if (new_pcrtc_state->color_mgmt_changed) {
7225 /*
7226 * TODO: This isn't fully correct since we've actually
7227 * already modified the stream in place.
7228 */
7229 bundle->stream_update.gamut_remap =
7230 &acrtc_state->stream->gamut_remap_matrix;
7231 bundle->stream_update.output_csc_transform =
7232 &acrtc_state->stream->csc_color_matrix;
7233 bundle->stream_update.out_transfer_func =
7234 acrtc_state->stream->out_transfer_func;
7235 }
7236
7237 acrtc_state->stream->abm_level = acrtc_state->abm_level;
7238 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
7239 bundle->stream_update.abm_level = &acrtc_state->abm_level;
7240
7241 /*
7242 * If FreeSync state on the stream has changed then we need to
7243 * re-adjust the min/max bounds now that DC doesn't handle this
7244 * as part of commit.
7245 */
7246 if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
7247 amdgpu_dm_vrr_active(acrtc_state)) {
7248 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7249 dc_stream_adjust_vmin_vmax(
7250 dm->dc, acrtc_state->stream,
7251 &acrtc_state->vrr_params.adjust);
7252 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7253 }
7254 mutex_lock(&dm->dc_lock);
7255 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7256 acrtc_state->stream->link->psr_settings.psr_allow_active)
7257 amdgpu_dm_psr_disable(acrtc_state->stream);
7258
7259 dc_commit_updates_for_stream(dm->dc,
7260 bundle->surface_updates,
7261 planes_count,
7262 acrtc_state->stream,
7263 &bundle->stream_update,
7264 dc_state);
7265
7266 /*
7267 * Enable or disable the interrupts on the backend.
7268 *
7269 * Most pipes are put into power gating when unused.
7270 *
7271 * When power gating is enabled on a pipe we lose the
7272 * interrupt enablement state when power gating is disabled.
7273 *
7274 * So we need to update the IRQ control state in hardware
7275 * whenever the pipe turns on (since it could be previously
7276 * power gated) or off (since some pipes can't be power gated
7277 * on some ASICs).
7278 */
7279 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
7280 dm_update_pflip_irq_state(
7281 (struct amdgpu_device *)dev->dev_private,
7282 acrtc_attach);
7283
7284 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7285 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
7286 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
7287 amdgpu_dm_link_setup_psr(acrtc_state->stream);
7288 else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
7289 acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
7290 !acrtc_state->stream->link->psr_settings.psr_allow_active) {
7291 amdgpu_dm_psr_enable(acrtc_state->stream);
7292 }
7293
7294 mutex_unlock(&dm->dc_lock);
7295 }
7296
7297 /*
7298 * Update cursor state *after* programming all the planes.
7299 * This avoids redundant programming in the case where we're going
7300 * to be disabling a single plane, since those pipes are being disabled anyway.
7301 */
7302 if (acrtc_state->active_planes)
7303 amdgpu_dm_commit_cursors(state);
7304
7305 cleanup:
7306 kfree(bundle);
7307 }
7308
7309 static void amdgpu_dm_commit_audio(struct drm_device *dev,
7310 struct drm_atomic_state *state)
7311 {
7312 struct amdgpu_device *adev = dev->dev_private;
7313 struct amdgpu_dm_connector *aconnector;
7314 struct drm_connector *connector;
7315 struct drm_connector_state *old_con_state, *new_con_state;
7316 struct drm_crtc_state *new_crtc_state;
7317 struct dm_crtc_state *new_dm_crtc_state;
7318 const struct dc_stream_status *status;
7319 int i, inst;
7320
7321 /* Notify audio device removals. */
7322 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7323 if (old_con_state->crtc != new_con_state->crtc) {
7324 /* CRTC changes require notification. */
7325 goto notify;
7326 }
7327
7328 if (!new_con_state->crtc)
7329 continue;
7330
7331 new_crtc_state = drm_atomic_get_new_crtc_state(
7332 state, new_con_state->crtc);
7333
7334 if (!new_crtc_state)
7335 continue;
7336
7337 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7338 continue;
7339
7340 notify:
7341 aconnector = to_amdgpu_dm_connector(connector);
7342
7343 mutex_lock(&adev->dm.audio_lock);
7344 inst = aconnector->audio_inst;
7345 aconnector->audio_inst = -1;
7346 mutex_unlock(&adev->dm.audio_lock);
7347
7348 amdgpu_dm_audio_eld_notify(adev, inst);
7349 }
7350
7351 /* Notify audio device additions. */
7352 for_each_new_connector_in_state(state, connector, new_con_state, i) {
7353 if (!new_con_state->crtc)
7354 continue;
7355
7356 new_crtc_state = drm_atomic_get_new_crtc_state(
7357 state, new_con_state->crtc);
7358
7359 if (!new_crtc_state)
7360 continue;
7361
7362 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7363 continue;
7364
7365 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
7366 if (!new_dm_crtc_state->stream)
7367 continue;
7368
7369 status = dc_stream_get_status(new_dm_crtc_state->stream);
7370 if (!status)
7371 continue;
7372
7373 aconnector = to_amdgpu_dm_connector(connector);
7374
7375 mutex_lock(&adev->dm.audio_lock);
7376 inst = status->audio_inst;
7377 aconnector->audio_inst = inst;
7378 mutex_unlock(&adev->dm.audio_lock);
7379
7380 amdgpu_dm_audio_eld_notify(adev, inst);
7381 }
7382 }
7383
7384 /**
7385 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
7386 * @crtc_state: the DRM CRTC state
7387 * @stream_state: the DC stream state.
7388 *
7389 * Copy the mirrored transient state flags from DRM to DC. It is used to bring
7390 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
7391 */
7392 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
7393 struct dc_stream_state *stream_state)
7394 {
7395 stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
7396 }
7397
7398 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
7399 struct drm_atomic_state *state,
7400 bool nonblock)
7401 {
7402 struct drm_crtc *crtc;
7403 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7404 struct amdgpu_device *adev = dev->dev_private;
7405 int i;
7406
7407 /*
7408 * We evade vblank and pflip interrupts on CRTCs that are undergoing
7409 * a modeset, being disabled, or have no active planes.
7410 *
7411 * It's done in atomic commit rather than commit tail for now since
7412 * some of these interrupt handlers access the current CRTC state and
7413 * potentially the stream pointer itself.
7414 *
7415 * Since the atomic state is swapped within atomic commit and not within
7416 * commit tail, this would lead to the new state (that hasn't been committed yet)
7417 * being accessed from within the handlers.
7418 *
7419 * TODO: Fix this so we can do this in commit tail and not have to block
7420 * in atomic check.
7421 */
7422 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7423 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7424
7425 if (old_crtc_state->active &&
7426 (!new_crtc_state->active ||
7427 drm_atomic_crtc_needs_modeset(new_crtc_state)))
7428 manage_dm_interrupts(adev, acrtc, false);
7429 }
7430 /*
7431 * Add check here for SoC's that support hardware cursor plane, to
7432 * unset legacy_cursor_update
7433 */
7434
7435 return drm_atomic_helper_commit(dev, state, nonblock);
7436
7437 /* TODO: Handle EINTR, re-enable IRQ */
7438 }
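/*
 * Hedged illustration (not driver code): the interrupt-evasion rule in the
 * loop above, rewritten as a predicate over the old/new CRTC states (the
 * helper name is hypothetical):
 *
 *   static bool should_disable_irqs(const struct drm_crtc_state *old_s,
 *                                   const struct drm_crtc_state *new_s)
 *   {
 *           // Only a previously active CRTC has interrupts to evade; they
 *           // are disabled when it turns off or goes through a modeset.
 *           return old_s->active &&
 *                  (!new_s->active ||
 *                   drm_atomic_crtc_needs_modeset(new_s));
 *   }
 */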
7439
7440 /**
7441 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
7442 * @state: The atomic state to commit
7443 *
7444 * This will tell DC to commit the constructed DC state from atomic_check,
7445 * programming the hardware. Any failures here imply a hardware failure, since
7446 * atomic check should have filtered anything non-kosher.
7447 */
7448 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
7449 {
7450 struct drm_device *dev = state->dev;
7451 struct amdgpu_device *adev = dev->dev_private;
7452 struct amdgpu_display_manager *dm = &adev->dm;
7453 struct dm_atomic_state *dm_state;
7454 struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
7455 uint32_t i, j;
7456 struct drm_crtc *crtc;
7457 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7458 unsigned long flags;
7459 bool wait_for_vblank = true;
7460 struct drm_connector *connector;
7461 struct drm_connector_state *old_con_state, *new_con_state;
7462 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
7463 int crtc_disable_count = 0;
7464
7465 drm_atomic_helper_update_legacy_modeset_state(dev, state);
7466
7467 dm_state = dm_atomic_get_new_state(state);
7468 if (dm_state && dm_state->context) {
7469 dc_state = dm_state->context;
7470 } else {
7471 /* No state changes, retain current state. */
7472 dc_state_temp = dc_create_state(dm->dc);
7473 ASSERT(dc_state_temp);
7474 dc_state = dc_state_temp;
7475 dc_resource_state_copy_construct_current(dm->dc, dc_state);
7476 }
7477
7478 /* update changed items */
7479 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7480 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7481
7482 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7483 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7484
7485 DRM_DEBUG_DRIVER(
7486 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
7487 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
7488 "connectors_changed:%d\n",
7489 acrtc->crtc_id,
7490 new_crtc_state->enable,
7491 new_crtc_state->active,
7492 new_crtc_state->planes_changed,
7493 new_crtc_state->mode_changed,
7494 new_crtc_state->active_changed,
7495 new_crtc_state->connectors_changed);
7496
7497 /* Copy all transient state flags into dc state */
7498 if (dm_new_crtc_state->stream) {
7499 amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
7500 dm_new_crtc_state->stream);
7501 }
7502
7503 /* handles headless hotplug case, updating new_state and
7504 * aconnector as needed
7505 */
7506
7507 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
7508
7509 DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
7510
7511 if (!dm_new_crtc_state->stream) {
7512 /*
7513 * This could happen because of issues with the
7514 * delivery of userspace notifications: userspace
7515 * tries to set a mode on a display which is in
7516 * fact disconnected, so dc_sink is NULL on the
7517 * aconnector. We expect a reset-mode request to
7518 * come soon.
7519 *
7520 * This can also happen when an unplug is done
7521 * during the resume sequence.
7522 *
7523 * In both cases we want to pretend we still have
7524 * a sink to keep the pipe running, so that the hw
7525 * state is consistent with the sw state.
7526 */
7527 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
7528 __func__, acrtc->base.base.id);
7529 continue;
7530 }
7531
7532 if (dm_old_crtc_state->stream)
7533 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7534
7535 pm_runtime_get_noresume(dev->dev);
7536
7537 acrtc->enabled = true;
7538 acrtc->hw_mode = new_crtc_state->mode;
7539 crtc->hwmode = new_crtc_state->mode;
7540 } else if (modereset_required(new_crtc_state)) {
7541 DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
7542 /* i.e. reset mode */
7543 if (dm_old_crtc_state->stream) {
7544 if (dm_old_crtc_state->stream->link->psr_settings.psr_allow_active)
7545 amdgpu_dm_psr_disable(dm_old_crtc_state->stream);
7546
7547 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7548 }
7549 }
7550 } /* for_each_crtc_in_state() */
7551
7552 if (dc_state) {
7553 dm_enable_per_frame_crtc_master_sync(dc_state);
7554 mutex_lock(&dm->dc_lock);
7555 WARN_ON(!dc_commit_state(dm->dc, dc_state));
7556 mutex_unlock(&dm->dc_lock);
7557 }
7558
7559 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7560 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7561
7562 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7563
7564 if (dm_new_crtc_state->stream != NULL) {
7565 const struct dc_stream_status *status =
7566 dc_stream_get_status(dm_new_crtc_state->stream);
7567
7568 if (!status)
7569 status = dc_stream_get_status_from_state(dc_state,
7570 dm_new_crtc_state->stream);
7571
7572 if (!status)
7573 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
7574 else
7575 acrtc->otg_inst = status->primary_otg_inst;
7576 }
7577 }
7578 #ifdef CONFIG_DRM_AMD_DC_HDCP
7579 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7580 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7581 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7582 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7583
7584 new_crtc_state = NULL;
7585
7586 if (acrtc)
7587 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7588
7589 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7590
7591 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
7592 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
7593 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
7594 new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7595 continue;
7596 }
7597
7598 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
7599 hdcp_update_display(
7600 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
7601 new_con_state->hdcp_content_type,
7602 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
7604 }
7605 #endif
7606
7607 /* Handle connector state changes */
7608 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7609 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7610 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
7611 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7612 struct dc_surface_update dummy_updates[MAX_SURFACES];
7613 struct dc_stream_update stream_update;
7614 struct dc_info_packet hdr_packet;
7615 struct dc_stream_status *status = NULL;
7616 bool abm_changed, hdr_changed, scaling_changed;
7617
7618 memset(&dummy_updates, 0, sizeof(dummy_updates));
7619 memset(&stream_update, 0, sizeof(stream_update));
7620
7621 if (acrtc) {
7622 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7623 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
7624 }
7625
7626 /* Skip any modesets/resets */
7627 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
7628 continue;
7629
7630 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7631 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7632
7633 scaling_changed = is_scaling_state_different(dm_new_con_state,
7634 dm_old_con_state);
7635
7636 abm_changed = dm_new_crtc_state->abm_level !=
7637 dm_old_crtc_state->abm_level;
7638
7639 hdr_changed =
7640 is_hdr_metadata_different(old_con_state, new_con_state);
7641
7642 if (!scaling_changed && !abm_changed && !hdr_changed)
7643 continue;
7644
7645 stream_update.stream = dm_new_crtc_state->stream;
7646 if (scaling_changed) {
7647 update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
7648 dm_new_con_state, dm_new_crtc_state->stream);
7649
7650 stream_update.src = dm_new_crtc_state->stream->src;
7651 stream_update.dst = dm_new_crtc_state->stream->dst;
7652 }
7653
7654 if (abm_changed) {
7655 dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
7656
7657 stream_update.abm_level = &dm_new_crtc_state->abm_level;
7658 }
7659
7660 if (hdr_changed) {
7661 fill_hdr_info_packet(new_con_state, &hdr_packet);
7662 stream_update.hdr_static_metadata = &hdr_packet;
7663 }
7664
7665 status = dc_stream_get_status(dm_new_crtc_state->stream);
7666 WARN_ON(!status);
7667 WARN_ON(!status->plane_count);
7668
7669 /*
7670 * TODO: DC refuses to perform stream updates without a dc_surface_update.
7671 * Here we create an empty update on each plane.
7672 * To fix this, DC should permit updating only stream properties.
7673 */
7674 for (j = 0; j < status->plane_count; j++)
7675 dummy_updates[j].surface = status->plane_states[0];
7676
7677
7678 mutex_lock(&dm->dc_lock);
7679 dc_commit_updates_for_stream(dm->dc,
7680 dummy_updates,
7681 status->plane_count,
7682 dm_new_crtc_state->stream,
7683 &stream_update,
7684 dc_state);
7685 mutex_unlock(&dm->dc_lock);
7686 }
7687
7688 /* Count number of newly disabled CRTCs for dropping PM refs later. */
7689 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
7690 new_crtc_state, i) {
7691 if (old_crtc_state->active && !new_crtc_state->active)
7692 crtc_disable_count++;
7693
7694 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7695 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7696
7697 /* Update freesync active state. */
7698 pre_update_freesync_state_on_stream(dm, dm_new_crtc_state);
7699
7700 /* Handle vrr on->off / off->on transitions */
7701 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
7702 dm_new_crtc_state);
7703 }
7704
7705 /*
7706 * Enable interrupts for CRTCs that are newly enabled or went through
7707 * a modeset. This is intentionally deferred until after the front-end
7708 * state has been modified, so that the OTG is on by the time the IRQ
7709 * handlers run and they never access stale or invalid state.
7710 */
7711 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7712 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7713
7714 if (new_crtc_state->active &&
7715 (!old_crtc_state->active ||
7716 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
7717 manage_dm_interrupts(adev, acrtc, true);
7718 #ifdef CONFIG_DEBUG_FS
7719 /*
7720 * Frontend may have changed so reapply the CRC capture
7721 * settings for the stream.
7722 */
7723 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7724
7725 if (amdgpu_dm_is_valid_crc_source(dm_new_crtc_state->crc_src)) {
7726 amdgpu_dm_crtc_configure_crc_source(
7727 crtc, dm_new_crtc_state,
7728 dm_new_crtc_state->crc_src);
7729 }
7730 #endif
7731 }
7732 }
7733
7734 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
7735 if (new_crtc_state->async_flip)
7736 wait_for_vblank = false;
7737
7738 /* update planes when needed per crtc*/
7739 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
7740 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7741
7742 if (dm_new_crtc_state->stream)
7743 amdgpu_dm_commit_planes(state, dc_state, dev,
7744 dm, crtc, wait_for_vblank);
7745 }
7746
7747 /* Update audio instances for each connector. */
7748 amdgpu_dm_commit_audio(dev, state);
7749
7750 /*
7751 * Send a vblank event for all events not handled in flip, and
7752 * mark the event as consumed for drm_atomic_helper_commit_hw_done.
7753 */
7754 spin_lock_irqsave(&adev->ddev->event_lock, flags);
7755 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7756
7757 if (new_crtc_state->event)
7758 drm_send_event_locked(dev, &new_crtc_state->event->base);
7759
7760 new_crtc_state->event = NULL;
7761 }
7762 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
7763
7764 /* Signal HW programming completion */
7765 drm_atomic_helper_commit_hw_done(state);
7766
7767 if (wait_for_vblank)
7768 drm_atomic_helper_wait_for_flip_done(dev, state);
7769
7770 drm_atomic_helper_cleanup_planes(dev, state);
7771
7772 /*
7773 * Finally, drop a runtime PM reference for each newly disabled CRTC,
7774 * so we can put the GPU into runtime suspend if we're not driving any
7775 * displays anymore
7776 */
7777 for (i = 0; i < crtc_disable_count; i++)
7778 pm_runtime_put_autosuspend(dev->dev);
7779 pm_runtime_mark_last_busy(dev->dev);
7780
7781 if (dc_state_temp)
7782 dc_release_state(dc_state_temp);
7783 }
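/*
 * Hedged summary (not driver code) of the runtime PM accounting in
 * amdgpu_dm_atomic_commit_tail(): each CRTC brought up with a new stream
 * takes a reference, and each CRTC that went from active to inactive drops
 * one, so the device usage count tracks the number of live displays:
 *
 *   enable path:   pm_runtime_get_noresume(dev->dev);    // keep GPU awake
 *   disable path:  pm_runtime_put_autosuspend(dev->dev); // may allow runtime
 *                                                        // suspend at zero
 */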
7784
7785
7786 static int dm_force_atomic_commit(struct drm_connector *connector)
7787 {
7788 int ret = 0;
7789 struct drm_device *ddev = connector->dev;
7790 struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
7791 struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7792 struct drm_plane *plane = disconnected_acrtc->base.primary;
7793 struct drm_connector_state *conn_state;
7794 struct drm_crtc_state *crtc_state;
7795 struct drm_plane_state *plane_state;
7796
7797 if (!state)
7798 return -ENOMEM;
7799
7800 state->acquire_ctx = ddev->mode_config.acquire_ctx;
7801
7802 /* Construct an atomic state to restore previous display setting */
7803
7804 /*
7805 * Attach connectors to drm_atomic_state
7806 */
7807 conn_state = drm_atomic_get_connector_state(state, connector);
7808
7809 ret = PTR_ERR_OR_ZERO(conn_state);
7810 if (ret)
7811 goto err;
7812
7813 /* Attach crtc to drm_atomic_state*/
7814 crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
7815
7816 ret = PTR_ERR_OR_ZERO(crtc_state);
7817 if (ret)
7818 goto err;
7819
7820 /* force a restore */
7821 crtc_state->mode_changed = true;
7822
7823 /* Attach plane to drm_atomic_state */
7824 plane_state = drm_atomic_get_plane_state(state, plane);
7825
7826 ret = PTR_ERR_OR_ZERO(plane_state);
7827 if (ret)
7828 goto err;
7829
7830
7831 /* Call commit internally with the state we just constructed */
7832 ret = drm_atomic_commit(state);
7833 if (!ret)
7834 return 0;
7835
7836 err:
7837 DRM_ERROR("Restoring old state failed with %i\n", ret);
7838 drm_atomic_state_put(state);
7839
7840 return ret;
7841 }
7842
7843 /*
7844 * This function handles all cases when a set-mode request does not come upon
7845 * hotplug. This includes when a display is unplugged then plugged back into
7846 * the same port, and when running without user-mode desktop manager support.
7847 */
7848 void dm_restore_drm_connector_state(struct drm_device *dev,
7849 struct drm_connector *connector)
7850 {
7851 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7852 struct amdgpu_crtc *disconnected_acrtc;
7853 struct dm_crtc_state *acrtc_state;
7854
7855 if (!aconnector->dc_sink || !connector->state || !connector->encoder)
7856 return;
7857
7858 disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7859 if (!disconnected_acrtc)
7860 return;
7861
7862 acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
7863 if (!acrtc_state->stream)
7864 return;
7865
7866 /*
7867 * If the previous sink is not released and different from the current,
7868 * we deduce we are in a state where we can not rely on usermode call
7869 * to turn on the display, so we do it here
7870 */
7871 if (acrtc_state->stream->sink != aconnector->dc_sink)
7872 dm_force_atomic_commit(&aconnector->base);
7873 }
7874
7875 /*
7876 * Grabs all modesetting locks to serialize against any blocking commits,
7877 * and waits for completion of all non-blocking commits.
7878 */
7879 static int do_aquire_global_lock(struct drm_device *dev,
7880 struct drm_atomic_state *state)
7881 {
7882 struct drm_crtc *crtc;
7883 struct drm_crtc_commit *commit;
7884 long ret;
7885
7886 /*
7887 * Adding all modeset locks to acquire_ctx will
7888 * ensure that when the framework releases it, the
7889 * extra locks we are locking here will get released too
7890 */
7891 ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
7892 if (ret)
7893 return ret;
7894
7895 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
7896 spin_lock(&crtc->commit_lock);
7897 commit = list_first_entry_or_null(&crtc->commit_list,
7898 struct drm_crtc_commit, commit_entry);
7899 if (commit)
7900 drm_crtc_commit_get(commit);
7901 spin_unlock(&crtc->commit_lock);
7902
7903 if (!commit)
7904 continue;
7905
7906 /*
7907 * Make sure all pending HW programming completed and
7908 * page flips done
7909 */
7910 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
7911
7912 if (ret > 0)
7913 ret = wait_for_completion_interruptible_timeout(
7914 &commit->flip_done, 10*HZ);
7915
7916 if (ret == 0)
7917 DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
7918 "timed out\n", crtc->base.id, crtc->name);
7919
7920 drm_crtc_commit_put(commit);
7921 }
7922
7923 return ret < 0 ? ret : 0;
7924 }
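/*
 * Hedged illustration (not driver code): the return-value convention of
 * wait_for_completion_interruptible_timeout() that the waits above rely on:
 *
 *   long rem = wait_for_completion_interruptible_timeout(&done, 10 * HZ);
 *   if (rem > 0)        // completed; rem is the number of jiffies left
 *   else if (rem == 0)  // the 10 second timeout expired
 *   else                // interrupted by a signal; rem == -ERESTARTSYS
 *
 * Hence the "ret > 0" chaining and the "ret == 0" timeout message above,
 * with negative values propagated to the caller.
 */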
7925
7926 static void get_freesync_config_for_crtc(
7927 struct dm_crtc_state *new_crtc_state,
7928 struct dm_connector_state *new_con_state)
7929 {
7930 struct mod_freesync_config config = {0};
7931 struct amdgpu_dm_connector *aconnector =
7932 to_amdgpu_dm_connector(new_con_state->base.connector);
7933 struct drm_display_mode *mode = &new_crtc_state->base.mode;
7934 int vrefresh = drm_mode_vrefresh(mode);
7935
7936 new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
7937 vrefresh >= aconnector->min_vfreq &&
7938 vrefresh <= aconnector->max_vfreq;
7939
7940 if (new_crtc_state->vrr_supported) {
7941 new_crtc_state->stream->ignore_msa_timing_param = true;
7942 config.state = new_crtc_state->base.vrr_enabled ?
7943 VRR_STATE_ACTIVE_VARIABLE :
7944 VRR_STATE_INACTIVE;
7945 config.min_refresh_in_uhz =
7946 aconnector->min_vfreq * 1000000;
7947 config.max_refresh_in_uhz =
7948 aconnector->max_vfreq * 1000000;
7949 config.vsif_supported = true;
7950 config.btr = true;
7951 }
7952
7953 new_crtc_state->freesync_config = config;
7954 }
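/*
 * Hedged worked example (not driver code): for a FreeSync panel advertising
 * a 48-144 Hz range, with VRR enabled in the CRTC state and a mode whose
 * drm_mode_vrefresh() falls inside that range, the function above yields:
 *
 *   config.state              = VRR_STATE_ACTIVE_VARIABLE;
 *   config.min_refresh_in_uhz = 48 * 1000000;   //  48 Hz in micro-hertz
 *   config.max_refresh_in_uhz = 144 * 1000000;  // 144 Hz in micro-hertz
 *
 * A mode outside the [48, 144] window leaves vrr_supported false and the
 * config zero-initialized.
 */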
7955
7956 static void reset_freesync_config_for_crtc(
7957 struct dm_crtc_state *new_crtc_state)
7958 {
7959 new_crtc_state->vrr_supported = false;
7960
7961 memset(&new_crtc_state->vrr_params, 0,
7962 sizeof(new_crtc_state->vrr_params));
7963 memset(&new_crtc_state->vrr_infopacket, 0,
7964 sizeof(new_crtc_state->vrr_infopacket));
7965 }
7966
7967 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
7968 struct drm_atomic_state *state,
7969 struct drm_crtc *crtc,
7970 struct drm_crtc_state *old_crtc_state,
7971 struct drm_crtc_state *new_crtc_state,
7972 bool enable,
7973 bool *lock_and_validation_needed)
7974 {
7975 struct dm_atomic_state *dm_state = NULL;
7976 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
7977 struct dc_stream_state *new_stream;
7978 int ret = 0;
7979
7980 /*
7981 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
7982 * update changed items
7983 */
7984 struct amdgpu_crtc *acrtc = NULL;
7985 struct amdgpu_dm_connector *aconnector = NULL;
7986 struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
7987 struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
7988
7989 new_stream = NULL;
7990
7991 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7992 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7993 acrtc = to_amdgpu_crtc(crtc);
7994 aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
7995
7996 /* TODO This hack should go away */
7997 if (aconnector && enable) {
7998 /* Make sure fake sink is created in plug-in scenario */
7999 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
8000 &aconnector->base);
8001 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
8002 &aconnector->base);
8003
8004 if (IS_ERR(drm_new_conn_state)) {
8005 ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
8006 goto fail;
8007 }
8008
8009 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
8010 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
8011
8012 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8013 goto skip_modeset;
8014
8015 new_stream = create_validate_stream_for_sink(aconnector,
8016 &new_crtc_state->mode,
8017 dm_new_conn_state,
8018 dm_old_crtc_state->stream);
8019
8020 /*
8021 * We can have no stream on ACTION_SET if a display
8022 * was disconnected during S3; in this case it is not an
8023 * error: the OS will be updated after detection and
8024 * will do the right thing on the next atomic commit.
8025 */
8026
8027 if (!new_stream) {
8028 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8029 __func__, acrtc->base.base.id);
8030 ret = -ENOMEM;
8031 goto fail;
8032 }
8033
8034 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8035
8036 ret = fill_hdr_info_packet(drm_new_conn_state,
8037 &new_stream->hdr_static_metadata);
8038 if (ret)
8039 goto fail;
8040
8041 /*
8042 * If we already removed the old stream from the context
8043 * (and set the new stream to NULL) then we can't reuse
8044 * the old stream even if the stream and scaling are unchanged.
8045 * We'll hit the BUG_ON and black screen.
8046 *
8047 * TODO: Refactor this function to allow this check to work
8048 * in all conditions.
8049 */
8050 if (dm_new_crtc_state->stream &&
8051 dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
8052 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
8053 new_crtc_state->mode_changed = false;
8054 DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
8055 new_crtc_state->mode_changed);
8056 }
8057 }
8058
8059 /* mode_changed flag may get updated above, need to check again */
8060 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8061 goto skip_modeset;
8062
8063 DRM_DEBUG_DRIVER(
8064 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8065 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
8066 "connectors_changed:%d\n",
8067 acrtc->crtc_id,
8068 new_crtc_state->enable,
8069 new_crtc_state->active,
8070 new_crtc_state->planes_changed,
8071 new_crtc_state->mode_changed,
8072 new_crtc_state->active_changed,
8073 new_crtc_state->connectors_changed);
8074
8075 /* Remove stream for any changed/disabled CRTC */
8076 if (!enable) {
8077
8078 if (!dm_old_crtc_state->stream)
8079 goto skip_modeset;
8080
8081 ret = dm_atomic_get_state(state, &dm_state);
8082 if (ret)
8083 goto fail;
8084
8085 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
8086 crtc->base.id);
8087
8088 /* i.e. reset mode */
8089 if (dc_remove_stream_from_ctx(
8090 dm->dc,
8091 dm_state->context,
8092 dm_old_crtc_state->stream) != DC_OK) {
8093 ret = -EINVAL;
8094 goto fail;
8095 }
8096
8097 dc_stream_release(dm_old_crtc_state->stream);
8098 dm_new_crtc_state->stream = NULL;
8099
8100 reset_freesync_config_for_crtc(dm_new_crtc_state);
8101
8102 *lock_and_validation_needed = true;
8103
8104 } else {/* Add stream for any updated/enabled CRTC */
8105 /*
8106 * Quick fix to prevent a NULL pointer dereference on new_stream when
8107 * MST connectors added in chained mode are not found in the existing crtc_state.
8108 * TODO: need to dig out the root cause of this.
8109 */
8110 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
8111 goto skip_modeset;
8112
8113 if (modereset_required(new_crtc_state))
8114 goto skip_modeset;
8115
8116 if (modeset_required(new_crtc_state, new_stream,
8117 dm_old_crtc_state->stream)) {
8118
8119 WARN_ON(dm_new_crtc_state->stream);
8120
8121 ret = dm_atomic_get_state(state, &dm_state);
8122 if (ret)
8123 goto fail;
8124
8125 dm_new_crtc_state->stream = new_stream;
8126
8127 dc_stream_retain(new_stream);
8128
8129 DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
8130 crtc->base.id);
8131
8132 if (dc_add_stream_to_ctx(
8133 dm->dc,
8134 dm_state->context,
8135 dm_new_crtc_state->stream) != DC_OK) {
8136 ret = -EINVAL;
8137 goto fail;
8138 }
8139
8140 *lock_and_validation_needed = true;
8141 }
8142 }
8143
8144 skip_modeset:
8145 /* Release extra reference */
8146 if (new_stream)
8147 dc_stream_release(new_stream);
8148
8149 /*
8150 * We want to do dc stream updates that do not require a
8151 * full modeset below.
8152 */
8153 if (!(enable && aconnector && new_crtc_state->active))
8154 return 0;
8155 /*
8156 * Given the above conditions, the dc stream state cannot be NULL because:
8157 * 1. We're in the process of enabling the CRTC (it has just been added
8158 * to the dc context, or is already in the context),
8159 * 2. Has a valid connector attached, and
8160 * 3. Is currently active and enabled.
8161 * => The dc stream state currently exists.
8162 */
8163 BUG_ON(dm_new_crtc_state->stream == NULL);
8164
8165 /* Scaling or underscan settings */
8166 if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
8167 update_stream_scaling_settings(
8168 &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
8169
8170 /* ABM settings */
8171 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8172
8173 /*
8174 * Color management settings. We also update color properties
8175 * when a modeset is needed, to ensure it gets reprogrammed.
8176 */
8177 if (dm_new_crtc_state->base.color_mgmt_changed ||
8178 drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8179 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
8180 if (ret)
8181 goto fail;
8182 }
8183
8184 /* Update Freesync settings. */
8185 get_freesync_config_for_crtc(dm_new_crtc_state,
8186 dm_new_conn_state);
8187
8188 return ret;
8189
8190 fail:
8191 if (new_stream)
8192 dc_stream_release(new_stream);
8193 return ret;
8194 }
8195
8196 static bool should_reset_plane(struct drm_atomic_state *state,
8197 struct drm_plane *plane,
8198 struct drm_plane_state *old_plane_state,
8199 struct drm_plane_state *new_plane_state)
8200 {
8201 struct drm_plane *other;
8202 struct drm_plane_state *old_other_state, *new_other_state;
8203 struct drm_crtc_state *new_crtc_state;
8204 int i;
8205
8206 /*
8207 * TODO: Remove this hack once the checks below are sufficient
8208 * to determine when we need to reset all the planes on
8209 * the stream.
8210 */
8211 if (state->allow_modeset)
8212 return true;
8213
8214 /* Exit early if we know that we're adding or removing the plane. */
8215 if (old_plane_state->crtc != new_plane_state->crtc)
8216 return true;
8217
8218 /* old crtc == new_crtc == NULL, plane not in context. */
8219 if (!new_plane_state->crtc)
8220 return false;
8221
8222 new_crtc_state =
8223 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
8224
8225 if (!new_crtc_state)
8226 return true;
8227
8228 /* CRTC Degamma changes currently require us to recreate planes. */
8229 if (new_crtc_state->color_mgmt_changed)
8230 return true;
8231
8232 if (drm_atomic_crtc_needs_modeset(new_crtc_state))
8233 return true;
8234
8235 /*
8236 * If there are any new primary or overlay planes being added or
8237 * removed then the z-order can potentially change. To ensure
8238 * correct z-order and pipe acquisition the current DC architecture
8239 * requires us to remove and recreate all existing planes.
8240 *
8241 * TODO: Come up with a more elegant solution for this.
8242 */
8243 for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
8244 if (other->type == DRM_PLANE_TYPE_CURSOR)
8245 continue;
8246
8247 if (old_other_state->crtc != new_plane_state->crtc &&
8248 new_other_state->crtc != new_plane_state->crtc)
8249 continue;
8250
8251 if (old_other_state->crtc != new_other_state->crtc)
8252 return true;
8253
8254 /* TODO: Remove this once we can handle fast format changes. */
8255 if (old_other_state->fb && new_other_state->fb &&
8256 old_other_state->fb->format != new_other_state->fb->format)
8257 return true;
8258 }
8259
8260 return false;
8261 }
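/*
 * Hedged worked example (not driver code): take primary plane P0 and overlay
 * plane P1, both currently on CRTC A. An atomic update that moves P1 to
 * CRTC B makes should_reset_plane() return true for P0 as well: in the loop
 * above, P1's old CRTC (A) matches P0's CRTC and P1's CRTC changed, so every
 * plane on A is removed and recreated to keep DC's z-order and pipe
 * acquisition consistent.
 */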
8262
8263 static int dm_update_plane_state(struct dc *dc,
8264 struct drm_atomic_state *state,
8265 struct drm_plane *plane,
8266 struct drm_plane_state *old_plane_state,
8267 struct drm_plane_state *new_plane_state,
8268 bool enable,
8269 bool *lock_and_validation_needed)
8270 {
8271
8272 struct dm_atomic_state *dm_state = NULL;
8273 struct drm_crtc *new_plane_crtc, *old_plane_crtc;
8274 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8275 struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
8276 struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
8277 struct amdgpu_crtc *new_acrtc;
8278 bool needs_reset;
8279 int ret = 0;
8280
8281
8282 new_plane_crtc = new_plane_state->crtc;
8283 old_plane_crtc = old_plane_state->crtc;
8284 dm_new_plane_state = to_dm_plane_state(new_plane_state);
8285 dm_old_plane_state = to_dm_plane_state(old_plane_state);
8286
8287 /*TODO Implement better atomic check for cursor plane */
8288 if (plane->type == DRM_PLANE_TYPE_CURSOR) {
8289 if (!enable || !new_plane_crtc ||
8290 drm_atomic_plane_disabling(plane->state, new_plane_state))
8291 return 0;
8292
8293 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
8294
8295 if ((new_plane_state->crtc_w > new_acrtc->max_cursor_width) ||
8296 (new_plane_state->crtc_h > new_acrtc->max_cursor_height)) {
8297 DRM_DEBUG_ATOMIC("Bad cursor size %d x %d\n",
8298 new_plane_state->crtc_w, new_plane_state->crtc_h);
8299 return -EINVAL;
8300 }
8301
8302 return 0;
8303 }
8304
8305 needs_reset = should_reset_plane(state, plane, old_plane_state,
8306 new_plane_state);
8307
8308 /* Remove any changed/removed planes */
8309 if (!enable) {
8310 if (!needs_reset)
8311 return 0;
8312
8313 if (!old_plane_crtc)
8314 return 0;
8315
8316 old_crtc_state = drm_atomic_get_old_crtc_state(
8317 state, old_plane_crtc);
8318 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8319
8320 if (!dm_old_crtc_state->stream)
8321 return 0;
8322
8323 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
8324 plane->base.id, old_plane_crtc->base.id);
8325
8326 ret = dm_atomic_get_state(state, &dm_state);
8327 if (ret)
8328 return ret;
8329
8330 if (!dc_remove_plane_from_context(
8331 dc,
8332 dm_old_crtc_state->stream,
8333 dm_old_plane_state->dc_state,
8334 dm_state->context)) {
8335
8336 ret = -EINVAL;
8337 return ret;
8338 }
8339
8340
8341 dc_plane_state_release(dm_old_plane_state->dc_state);
8342 dm_new_plane_state->dc_state = NULL;
8343
8344 *lock_and_validation_needed = true;
8345
8346 } else { /* Add new planes */
8347 struct dc_plane_state *dc_new_plane_state;
8348
8349 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
8350 return 0;
8351
8352 if (!new_plane_crtc)
8353 return 0;
8354
8355 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
8356 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8357
8358 if (!dm_new_crtc_state->stream)
8359 return 0;
8360
8361 if (!needs_reset)
8362 return 0;
8363
8364 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
8365 if (ret)
8366 return ret;
8367
8368 WARN_ON(dm_new_plane_state->dc_state);
8369
8370 dc_new_plane_state = dc_create_plane_state(dc);
8371 if (!dc_new_plane_state)
8372 return -ENOMEM;
8373
8374 DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
8375 plane->base.id, new_plane_crtc->base.id);
8376
8377 ret = fill_dc_plane_attributes(
8378 new_plane_crtc->dev->dev_private,
8379 dc_new_plane_state,
8380 new_plane_state,
8381 new_crtc_state);
8382 if (ret) {
8383 dc_plane_state_release(dc_new_plane_state);
8384 return ret;
8385 }
8386
8387 ret = dm_atomic_get_state(state, &dm_state);
8388 if (ret) {
8389 dc_plane_state_release(dc_new_plane_state);
8390 return ret;
8391 }
8392
8393 /*
8394 * Any atomic check errors that occur after this will
8395 * not need a release. The plane state will be attached
8396 * to the stream, and therefore part of the atomic
8397 * state. It'll be released when the atomic state is
8398 * cleaned.
8399 */
8400 if (!dc_add_plane_to_context(
8401 dc,
8402 dm_new_crtc_state->stream,
8403 dc_new_plane_state,
8404 dm_state->context)) {
8405
8406 dc_plane_state_release(dc_new_plane_state);
8407 return -EINVAL;
8408 }
8409
8410 dm_new_plane_state->dc_state = dc_new_plane_state;
8411
8412 /* Tell DC to do a full surface update every time there
8413 * is a plane change. Inefficient, but works for now.
8414 */
8415 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
8416
8417 *lock_and_validation_needed = true;
8418 }
8419
8420
8421 return ret;
8422 }
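/*
 * Hedged sketch (not driver code) of the dc_plane_state ownership discipline
 * followed above, under the assumption stated in the comment that a plane
 * attached to the stream is released together with the atomic state:
 *
 *   plane = dc_create_plane_state(dc);        // we hold the only reference
 *   if (fill/get_state/add_to_context fails)
 *           dc_plane_state_release(plane);    // error paths drop our ref
 *   else
 *           dm_new_plane_state->dc_state = plane;  // atomic state owns it now
 */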
8423
8424 static int
8425 dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
8426 struct drm_atomic_state *state,
8427 enum surface_update_type *out_type)
8428 {
8429 struct dc *dc = dm->dc;
8430 struct dm_atomic_state *dm_state = NULL, *old_dm_state = NULL;
8431 int i, j, num_plane, ret = 0;
8432 struct drm_plane_state *old_plane_state, *new_plane_state;
8433 struct dm_plane_state *new_dm_plane_state, *old_dm_plane_state;
8434 struct drm_crtc *new_plane_crtc;
8435 struct drm_plane *plane;
8436
8437 struct drm_crtc *crtc;
8438 struct drm_crtc_state *new_crtc_state, *old_crtc_state;
8439 struct dm_crtc_state *new_dm_crtc_state, *old_dm_crtc_state;
8440 struct dc_stream_status *status = NULL;
8441 enum surface_update_type update_type = UPDATE_TYPE_FAST;
8442 struct surface_info_bundle {
8443 struct dc_surface_update surface_updates[MAX_SURFACES];
8444 struct dc_plane_info plane_infos[MAX_SURFACES];
8445 struct dc_scaling_info scaling_infos[MAX_SURFACES];
8446 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
8447 struct dc_stream_update stream_update;
8448 } *bundle;
8449
8450 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8451
8452 if (!bundle) {
8453 DRM_ERROR("Failed to allocate update bundle\n");
8454 /* Set type to FULL to avoid crashing in DC*/
8455 update_type = UPDATE_TYPE_FULL;
8456 goto cleanup;
8457 }
8458
8459 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8460
8461 memset(bundle, 0, sizeof(struct surface_info_bundle));
8462
8463 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8464 old_dm_crtc_state = to_dm_crtc_state(old_crtc_state);
8465 num_plane = 0;
8466
8467 if (new_dm_crtc_state->stream != old_dm_crtc_state->stream) {
8468 update_type = UPDATE_TYPE_FULL;
8469 goto cleanup;
8470 }
8471
8472 if (!new_dm_crtc_state->stream)
8473 continue;
8474
8475 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, j) {
8476 const struct amdgpu_framebuffer *amdgpu_fb =
8477 to_amdgpu_framebuffer(new_plane_state->fb);
8478 struct dc_plane_info *plane_info = &bundle->plane_infos[num_plane];
8479 struct dc_flip_addrs *flip_addr = &bundle->flip_addrs[num_plane];
8480 struct dc_scaling_info *scaling_info = &bundle->scaling_infos[num_plane];
8481 uint64_t tiling_flags;
8482 bool tmz_surface = false;
8483
8484 new_plane_crtc = new_plane_state->crtc;
8485 new_dm_plane_state = to_dm_plane_state(new_plane_state);
8486 old_dm_plane_state = to_dm_plane_state(old_plane_state);
8487
8488 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8489 continue;
8490
8491 if (new_dm_plane_state->dc_state != old_dm_plane_state->dc_state) {
8492 update_type = UPDATE_TYPE_FULL;
8493 goto cleanup;
8494 }
8495
8496 if (crtc != new_plane_crtc)
8497 continue;
8498
8499 bundle->surface_updates[num_plane].surface =
8500 new_dm_plane_state->dc_state;
8501
8502 if (new_crtc_state->mode_changed) {
8503 bundle->stream_update.dst = new_dm_crtc_state->stream->dst;
8504 bundle->stream_update.src = new_dm_crtc_state->stream->src;
8505 }
8506
8507 if (new_crtc_state->color_mgmt_changed) {
8508 bundle->surface_updates[num_plane].gamma =
8509 new_dm_plane_state->dc_state->gamma_correction;
8510 bundle->surface_updates[num_plane].in_transfer_func =
8511 new_dm_plane_state->dc_state->in_transfer_func;
8512 bundle->surface_updates[num_plane].gamut_remap_matrix =
8513 &new_dm_plane_state->dc_state->gamut_remap_matrix;
8514 bundle->stream_update.gamut_remap =
8515 &new_dm_crtc_state->stream->gamut_remap_matrix;
8516 bundle->stream_update.output_csc_transform =
8517 &new_dm_crtc_state->stream->csc_color_matrix;
8518 bundle->stream_update.out_transfer_func =
8519 new_dm_crtc_state->stream->out_transfer_func;
8520 }
8521
8522 ret = fill_dc_scaling_info(new_plane_state,
8523 scaling_info);
8524 if (ret)
8525 goto cleanup;
8526
8527 bundle->surface_updates[num_plane].scaling_info = scaling_info;
8528
8529 if (amdgpu_fb) {
8530 ret = get_fb_info(amdgpu_fb, &tiling_flags, &tmz_surface);
8531 if (ret)
8532 goto cleanup;
8533
8534 ret = fill_dc_plane_info_and_addr(
8535 dm->adev, new_plane_state, tiling_flags,
8536 plane_info,
8537 &flip_addr->address, tmz_surface,
8538 false);
8539 if (ret)
8540 goto cleanup;
8541
8542 bundle->surface_updates[num_plane].plane_info = plane_info;
8543 bundle->surface_updates[num_plane].flip_addr = flip_addr;
8544 }
8545
8546 num_plane++;
8547 }
8548
8549 if (num_plane == 0)
8550 continue;
8551
8552 ret = dm_atomic_get_state(state, &dm_state);
8553 if (ret)
8554 goto cleanup;
8555
8556 old_dm_state = dm_atomic_get_old_state(state);
8557 if (!old_dm_state) {
8558 ret = -EINVAL;
8559 goto cleanup;
8560 }
8561
8562 status = dc_stream_get_status_from_state(old_dm_state->context,
8563 new_dm_crtc_state->stream);
8564 bundle->stream_update.stream = new_dm_crtc_state->stream;
8565 /*
8566 * TODO: DC modifies the surface during this call so we need
8567 * to lock here - find a way to do this without locking.
8568 */
8569 mutex_lock(&dm->dc_lock);
8570 update_type = dc_check_update_surfaces_for_stream(
8571 dc, bundle->surface_updates, num_plane,
8572 &bundle->stream_update, status);
8573 mutex_unlock(&dm->dc_lock);
8574
8575 if (update_type > UPDATE_TYPE_MED) {
8576 update_type = UPDATE_TYPE_FULL;
8577 goto cleanup;
8578 }
8579 }
8580
8581 cleanup:
8582 kfree(bundle);
8583
8584 *out_type = update_type;
8585 return ret;
8586 }
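/*
 * Hedged note (not driver code): this relies on enum surface_update_type
 * being ordered from least to most invasive,
 *
 *   UPDATE_TYPE_FAST < UPDATE_TYPE_MED < UPDATE_TYPE_FULL
 *
 * so a plain integer comparison in the caller, e.g.
 *
 *   if (overall_update_type < update_type)
 *           overall_update_type = update_type;
 *
 * amounts to taking the maximum, i.e. the strictest requirement wins.
 */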
8587 #if defined(CONFIG_DRM_AMD_DC_DCN)
8588 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
8589 {
8590 struct drm_connector *connector;
8591 struct drm_connector_state *conn_state;
8592 struct amdgpu_dm_connector *aconnector = NULL;
8593 int i;
8594 for_each_new_connector_in_state(state, connector, conn_state, i) {
8595 if (conn_state->crtc != crtc)
8596 continue;
8597
8598 aconnector = to_amdgpu_dm_connector(connector);
8599 if (!aconnector->port || !aconnector->mst_port)
8600 aconnector = NULL;
8601 else
8602 break;
8603 }
8604
8605 if (!aconnector)
8606 return 0;
8607
8608 return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
8609 }
8610 #endif
8611
8612 /**
8613 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
8614 * @dev: The DRM device
8615 * @state: The atomic state to commit
8616 *
8617 * Validate that the given atomic state is programmable by DC into hardware.
8618 * This involves constructing a &struct dc_state reflecting the new hardware
8619 * state we wish to commit, then querying DC to see if it is programmable. It's
8620 * important not to modify the existing DC state. Otherwise, atomic_check
8621 * may unexpectedly commit hardware changes.
8622 *
8623 * When validating the DC state, it's important that the right locks are
8624 * acquired. For full updates case which removes/adds/updates streams on one
8625 * CRTC while flipping on another CRTC, acquiring global lock will guarantee
8626 * that any such full update commit will wait for completion of any outstanding
8627 * flip using DRMs synchronization events. See
8628 * dm_determine_update_type_for_commit()
8629 *
8630 * Note that DM adds the affected connectors for all CRTCs in state, even when that
8631 * might not seem necessary. This is because DC stream creation requires the
8632 * DC sink, which is tied to the DRM connector state. Cleaning this up should
8633 * be possible but non-trivial - a possible TODO item.
8634 *
8635 * Return: 0 on success, negative error code on validation failure.
8636 */
8637 static int amdgpu_dm_atomic_check(struct drm_device *dev,
8638 struct drm_atomic_state *state)
8639 {
8640 struct amdgpu_device *adev = dev->dev_private;
8641 struct dm_atomic_state *dm_state = NULL;
8642 struct dc *dc = adev->dm.dc;
8643 struct drm_connector *connector;
8644 struct drm_connector_state *old_con_state, *new_con_state;
8645 struct drm_crtc *crtc;
8646 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8647 struct drm_plane *plane;
8648 struct drm_plane_state *old_plane_state, *new_plane_state;
8649 enum surface_update_type update_type = UPDATE_TYPE_FAST;
8650 enum surface_update_type overall_update_type = UPDATE_TYPE_FAST;
8651 enum dc_status status;
8652 int ret, i;
8653
8654 /*
8655 * This bool will be set to true for any modeset/reset
8656 * or plane update which implies a non-fast surface update.
8657 */
8658 bool lock_and_validation_needed = false;
8659
8660 ret = drm_atomic_helper_check_modeset(dev, state);
8661 if (ret)
8662 goto fail;
8663
8664 /* Check connector changes */
8665 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8666 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8667 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8668
8669 /* Skip connectors that are disabled or part of modeset already. */
8670 if (!old_con_state->crtc && !new_con_state->crtc)
8671 continue;
8672
8673 if (!new_con_state->crtc)
8674 continue;
8675
8676 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
8677 if (IS_ERR(new_crtc_state)) {
8678 ret = PTR_ERR(new_crtc_state);
8679 goto fail;
8680 }
8681
8682 if (dm_old_con_state->abm_level !=
8683 dm_new_con_state->abm_level)
8684 new_crtc_state->connectors_changed = true;
8685 }
8686
8687 #if defined(CONFIG_DRM_AMD_DC_DCN)
8688 if (adev->asic_type >= CHIP_NAVI10) {
8689 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8690 if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8691 ret = add_affected_mst_dsc_crtcs(state, crtc);
8692 if (ret)
8693 goto fail;
8694 }
8695 }
8696 }
8697 #endif
8698 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8699 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
8700 !new_crtc_state->color_mgmt_changed &&
8701 old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled)
8702 continue;
8703
8704 if (!new_crtc_state->enable)
8705 continue;
8706
8707 ret = drm_atomic_add_affected_connectors(state, crtc);
8708 if (ret)
8709 return ret;
8710
8711 ret = drm_atomic_add_affected_planes(state, crtc);
8712 if (ret)
8713 goto fail;
8714 }
8715
8716 /*
8717 * Add all primary and overlay planes on the CRTC to the state
8718 * whenever a plane is enabled to maintain correct z-ordering
8719 * and to enable fast surface updates.
8720 */
8721 drm_for_each_crtc(crtc, dev) {
8722 bool modified = false;
8723
8724 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8725 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8726 continue;
8727
8728 if (new_plane_state->crtc == crtc ||
8729 old_plane_state->crtc == crtc) {
8730 modified = true;
8731 break;
8732 }
8733 }
8734
8735 if (!modified)
8736 continue;
8737
8738 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
8739 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8740 continue;
8741
8742 new_plane_state =
8743 drm_atomic_get_plane_state(state, plane);
8744
8745 if (IS_ERR(new_plane_state)) {
8746 ret = PTR_ERR(new_plane_state);
8747 goto fail;
8748 }
8749 }
8750 }
8751
8752 /* Remove existing planes if they are modified */
8753 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8754 ret = dm_update_plane_state(dc, state, plane,
8755 old_plane_state,
8756 new_plane_state,
8757 false,
8758 &lock_and_validation_needed);
8759 if (ret)
8760 goto fail;
8761 }
8762
8763 /* Disable all crtcs which require disable */
8764 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8765 ret = dm_update_crtc_state(&adev->dm, state, crtc,
8766 old_crtc_state,
8767 new_crtc_state,
8768 false,
8769 &lock_and_validation_needed);
8770 if (ret)
8771 goto fail;
8772 }
8773
8774 /* Enable all crtcs which require enable */
8775 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8776 ret = dm_update_crtc_state(&adev->dm, state, crtc,
8777 old_crtc_state,
8778 new_crtc_state,
8779 true,
8780 &lock_and_validation_needed);
8781 if (ret)
8782 goto fail;
8783 }
8784
8785 /* Add new/modified planes */
8786 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8787 ret = dm_update_plane_state(dc, state, plane,
8788 old_plane_state,
8789 new_plane_state,
8790 true,
8791 &lock_and_validation_needed);
8792 if (ret)
8793 goto fail;
8794 }
8795
8796 /* Run this here since we want to validate the streams we created */
8797 ret = drm_atomic_helper_check_planes(dev, state);
8798 if (ret)
8799 goto fail;
8800
8801 if (state->legacy_cursor_update) {
8802 /*
8803 * This is a fast cursor update coming from the plane update
8804 * helper, check if it can be done asynchronously for better
8805 * performance.
8806 */
8807 state->async_update =
8808 !drm_atomic_helper_async_check(dev, state);
8809
8810 /*
8811 * Skip the remaining global validation if this is an async
8812 * update. Cursor updates can be done without affecting
8813 * state or bandwidth calcs and this avoids the performance
8814 * penalty of locking the private state object and
8815 * allocating a new dc_state.
8816 */
8817 if (state->async_update)
8818 return 0;
8819 }
8820
8821 /* Check scaling and underscan changes */
8822 /* TODO: Removed scaling-changes validation due to inability to commit a
8823 * new stream into context without causing a full reset. Need to
8824 * decide how to handle.
8825 */
8826 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8827 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8828 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8829 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8830
8831 /* Skip any modesets/resets */
8832 if (!acrtc || drm_atomic_crtc_needs_modeset(
8833 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
8834 continue;
8835
8836 /* Skip anything that is not a scaling or underscan change */
8837 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
8838 continue;
8839
8840 overall_update_type = UPDATE_TYPE_FULL;
8841 lock_and_validation_needed = true;
8842 }
8843
8844 ret = dm_determine_update_type_for_commit(&adev->dm, state, &update_type);
8845 if (ret)
8846 goto fail;
8847
8848 if (overall_update_type < update_type)
8849 overall_update_type = update_type;
8850
8851 /*
8852 * lock_and_validation_needed was an old way to determine if we need to set
8853 * the global lock. Leaving it in to check if we broke any corner cases
8854 * lock_and_validation_needed true = UPDATE_TYPE_FULL or UPDATE_TYPE_MED
8855 * lock_and_validation_needed false = UPDATE_TYPE_FAST
8856 */
8857 if (lock_and_validation_needed && overall_update_type <= UPDATE_TYPE_FAST)
8858 WARN(1, "Global lock should be Set, overall_update_type should be UPDATE_TYPE_MED or UPDATE_TYPE_FULL");
8859
8860 if (overall_update_type > UPDATE_TYPE_FAST) {
8861 ret = dm_atomic_get_state(state, &dm_state);
8862 if (ret)
8863 goto fail;
8864
8865 ret = do_aquire_global_lock(dev, state);
8866 if (ret)
8867 goto fail;
8868
8869 #if defined(CONFIG_DRM_AMD_DC_DCN)
8870 if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
8871 goto fail;
8872
8873 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
8874 if (ret)
8875 goto fail;
8876 #endif
8877
8878 /*
8879 * Perform validation of MST topology in the state:
8880 * We need to perform MST atomic check before calling
8881 * dc_validate_global_state(), or there is a chance
8882 * to get stuck in an infinite loop and hang eventually.
8883 */
8884 ret = drm_dp_mst_atomic_check(state);
8885 if (ret)
8886 goto fail;
8887 status = dc_validate_global_state(dc, dm_state->context, false);
8888 if (status != DC_OK) {
8889 DC_LOG_WARNING("DC global validation failure: %s (%d)",
8890 dc_status_to_str(status), status);
8891 ret = -EINVAL;
8892 goto fail;
8893 }
8894 } else {
8895 /*
8896 * The commit is a fast update. Fast updates shouldn't change
8897 * the DC context or affect global validation, and can have their
8898 * commit work done in parallel with other commits not touching
8899 * the same resource. If we have a new DC context as part of
8900 * the DM atomic state from validation we need to free it and
8901 * retain the existing one instead.
8902 *
8903 * Furthermore, since the DM atomic state only contains the DC
8904 * context and can safely be annulled, we can free the state
8905 * and clear the associated private object now to free
8906 * some memory and avoid a possible use-after-free later.
8907 */
8908
8909 for (i = 0; i < state->num_private_objs; i++) {
8910 struct drm_private_obj *obj = state->private_objs[i].ptr;
8911
8912 if (obj->funcs == adev->dm.atomic_obj.funcs) {
8913 int j = state->num_private_objs-1;
8914
8915 dm_atomic_destroy_state(obj,
8916 state->private_objs[i].state);
8917
8918 /* If i is not at the end of the array then the
8919 * last element needs to be moved to where i was
8920 * before the array can safely be truncated.
8921 */
8922 if (i != j)
8923 state->private_objs[i] =
8924 state->private_objs[j];
8925
8926 state->private_objs[j].ptr = NULL;
8927 state->private_objs[j].state = NULL;
8928 state->private_objs[j].old_state = NULL;
8929 state->private_objs[j].new_state = NULL;
8930
8931 state->num_private_objs = j;
8932 break;
8933 }
8934 }
8935 }
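/*
 * Hedged illustration (not driver code): the private-object removal above is
 * the classic "swap with last, then truncate" deletion. In generic form, for
 * some hypothetical element type:
 *
 *   int j = count - 1;
 *   if (i != j)
 *           array[i] = array[j];  // move the tail element into the hole
 *   memset(&array[j], 0, sizeof(array[j]));
 *   count = j;                    // the array is one element shorter
 *
 * Order is not preserved, but nothing shifts, so removal is O(1).
 */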
8936
8937 /* Store the overall update type for use later in atomic check. */
8938 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8939 struct dm_crtc_state *dm_new_crtc_state =
8940 to_dm_crtc_state(new_crtc_state);
8941
8942 dm_new_crtc_state->update_type = (int)overall_update_type;
8943 }
8944
8945 /* Must be success */
8946 WARN_ON(ret);
8947 return ret;
8948
8949 fail:
8950 if (ret == -EDEADLK)
8951 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
8952 else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
8953 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
8954 else
8955 DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
8956
8957 return ret;
8958 }
8959
8960 static bool is_dp_capable_without_timing_msa(struct dc *dc,
8961 struct amdgpu_dm_connector *amdgpu_dm_connector)
8962 {
8963 uint8_t dpcd_data;
8964 bool capable = false;
8965
8966 if (amdgpu_dm_connector->dc_link &&
8967 dm_helpers_dp_read_dpcd(
8968 NULL,
8969 amdgpu_dm_connector->dc_link,
8970 DP_DOWN_STREAM_PORT_COUNT,
8971 &dpcd_data,
8972 sizeof(dpcd_data))) {
8973 capable = !!(dpcd_data & DP_MSA_TIMING_PAR_IGNORED);
8974 }
8975
8976 return capable;
8977 }
8978 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
8979 struct edid *edid)
8980 {
8981 int i;
8982 bool edid_check_required;
8983 struct detailed_timing *timing;
8984 struct detailed_non_pixel *data;
8985 struct detailed_data_monitor_range *range;
8986 struct amdgpu_dm_connector *amdgpu_dm_connector =
8987 to_amdgpu_dm_connector(connector);
8988 struct dm_connector_state *dm_con_state = NULL;
8989
8990 struct drm_device *dev = connector->dev;
8991 struct amdgpu_device *adev = dev->dev_private;
8992 bool freesync_capable = false;
8993
8994 if (!connector->state) {
8995 DRM_ERROR("%s - Connector has no state\n", __func__);
8996 goto update;
8997 }
8998
8999 if (!edid) {
9000 dm_con_state = to_dm_connector_state(connector->state);
9001
9002 amdgpu_dm_connector->min_vfreq = 0;
9003 amdgpu_dm_connector->max_vfreq = 0;
9004 amdgpu_dm_connector->pixel_clock_mhz = 0;
9005
9006 goto update;
9007 }
9008
9009 dm_con_state = to_dm_connector_state(connector->state);
9010
9011 edid_check_required = false;
9012 if (!amdgpu_dm_connector->dc_sink) {
9013 DRM_ERROR("dc_sink is NULL; cannot determine FreeSync capability.\n");
9014 goto update;
9015 }
9016 if (!adev->dm.freesync_module)
9017 goto update;
9018 /*
9019 * The EDID is known to be non-NULL here, so restrict FreeSync
9020 * support to DP and eDP sinks.
9021 */
9022 if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
9023 amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
9024 edid_check_required = is_dp_capable_without_timing_msa(
9025 adev->dm.dc,
9026 amdgpu_dm_connector);
9027 }
9029 if (edid_check_required && (edid->version > 1 ||
9030 (edid->version == 1 && edid->revision > 1))) {
9031 for (i = 0; i < 4; i++) {
9033 timing = &edid->detailed_timings[i];
9034 data = &timing->data.other_data;
9035 range = &data->data.range;
9036 /*
9037 * Only monitor range descriptors indicate continuous-frequency support.
9038 */
9039 if (data->type != EDID_DETAIL_MONITOR_RANGE)
9040 continue;
9041 /*
9042 * Accept only "range limits only" descriptors (flags == 1), i.e.
9043 * those that provide no additional timing information.
9044 * Default GTF, the secondary GTF curve and CVT timing formulas
9045 * are not supported.
9046 */
9047 if (range->flags != 1)
9048 continue;
9049
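/*
 * Cache the refresh range; the descriptor stores the maximum pixel
 * clock in units of 10 MHz, so convert it to MHz here.
 */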
9050 amdgpu_dm_connector->min_vfreq = range->min_vfreq;
9051 amdgpu_dm_connector->max_vfreq = range->max_vfreq;
9052 amdgpu_dm_connector->pixel_clock_mhz =
9053 range->pixel_clock_mhz * 10;
9054 break;
9055 }
9056
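/*
 * Only advertise FreeSync when the usable refresh range is wider
 * than 10 Hz; a narrower window is of little practical use.
 */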
9057 if (amdgpu_dm_connector->max_vfreq -
9058 amdgpu_dm_connector->min_vfreq > 10) {
9060 freesync_capable = true;
9061 }
9062 }
9063
9064 update:
9065 if (dm_con_state)
9066 dm_con_state->freesync_capable = freesync_capable;
9067
9068 if (connector->vrr_capable_property)
9069 drm_connector_set_vrr_capable_property(connector,
9070 freesync_capable);
9071 }
9072
9073 static void amdgpu_dm_set_psr_caps(struct dc_link *link)
9074 {
9075 uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
9076
9077 if (!(link->connector_signal & SIGNAL_TYPE_EDP))
9078 return;
9079 if (link->type == dc_connection_none)
9080 return;
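/*
 * DP_PSR_SUPPORT (DPCD 0x070) reports the PSR version supported by
 * the sink; zero means PSR is not supported at all.
 */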
9081 if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
9082 dpcd_data, sizeof(dpcd_data))) {
9083 link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
9084
9085 if (dpcd_data[0] == 0) {
9086 link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
9087 link->psr_settings.psr_feature_enabled = false;
9088 } else {
9089 link->psr_settings.psr_version = DC_PSR_VERSION_1;
9090 link->psr_settings.psr_feature_enabled = true;
9091 }
9092
9093 DRM_INFO("PSR support: %d\n", link->psr_settings.psr_feature_enabled);
9094 }
9095 }
9096
9097 /**
9098 * amdgpu_dm_link_setup_psr() - configure the PSR link
9099 * @stream: stream state
9100 *
9101 * Return: true on success
9102 */
9103 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
9104 {
9105 struct dc_link *link = NULL;
9106 struct psr_config psr_config = {0};
9107 struct psr_context psr_context = {0};
9108 bool ret = false;
9109
9110 if (!stream)
9111 return false;
9112
9113 link = stream->link;
9114
9115 psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
9116
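/*
 * Populate the PSR configuration with the driver's fixed defaults
 * before handing it to DC.
 */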
9117 if (psr_config.psr_version > 0) {
9118 psr_config.psr_exit_link_training_required = 0x1;
9119 psr_config.psr_frame_capture_indication_req = 0;
9120 psr_config.psr_rfb_setup_time = 0x37;
9121 psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
9122 psr_config.allow_smu_optimizations = 0x0;
9123
9124 ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
9126 }
9127 DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);
9128
9129 return ret;
9130 }
9131
9132 /**
9133 * amdgpu_dm_psr_enable() - enable the PSR firmware
9134 * @stream: stream state
9135 *
9136 * Return: true on success
9137 */
9138 bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
9139 {
9140 struct dc_link *link = stream->link;
9141 unsigned int vsync_rate_hz = 0;
9142 struct dc_static_screen_params params = {0};
9143 /*
9144 * Calculate the number of static frames before generating an
9145 * interrupt to enter PSR; fail safe to 2 static frames.
9146 */
9147 unsigned int num_frames_static = 2;
9148
9149 DRM_DEBUG_DRIVER("Enabling psr...\n");
9150
9151 vsync_rate_hz = div64_u64(div64_u64(
9152 stream->timing.pix_clk_100hz * 100,
9153 stream->timing.v_total),
9154 stream->timing.h_total);
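/*
 * The above is pix_clk_hz / (v_total * h_total), i.e. the refresh
 * rate: for CEA 1080p60 timing, 148500000 / 1125 / 2200 = 60 Hz.
 */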
9155
9156 /*
9157 * Round up: compute the number of frames such that at least
9158 * 30 ms of time has passed.
9159 */
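/*
 * Example: at 60 Hz the frame time is 16666 us, giving
 * 30000 / 16666 + 1 = 2 static frames.
 */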
9160 if (vsync_rate_hz != 0) {
9161 unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
9162 num_frames_static = (30000 / frame_time_microsec) + 1;
9163 }
9164
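/*
 * Cursor, overlay and surface updates count as activity that resets
 * the static-screen frame counter.
 */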
9165 params.triggers.cursor_update = true;
9166 params.triggers.overlay_update = true;
9167 params.triggers.surface_update = true;
9168 params.num_frames = num_frames_static;
9169
9170 dc_stream_set_static_screen_params(link->ctx->dc,
9171 &stream, 1,
9172 &params);
9173
9174 return dc_link_set_psr_allow_active(link, true, false);
9175 }
9176
9177 /**
9178 * amdgpu_dm_psr_disable() - disable the PSR firmware
9179 * @stream: stream state
9180 *
9181 * Return: true on success
9182 */
9183 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
9184 {
9186 DRM_DEBUG_DRIVER("Disabling psr...\n");
9187
9188 return dc_link_set_psr_allow_active(stream->link, false, true);
9189 }