/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */
/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/inc/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/drm_hdcp.h>
#endif
#include "amdgpu_pm.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif

#include "ivsrcid/ivsrcid_vislands30.h"

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/version.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>
#include <drm/drm_hdcp.h>

#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"
#endif

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"
#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);

#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100
/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */
/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);

/*
 * initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder, drm_mode_config
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct drm_plane *plane,
				unsigned long possible_crtcs,
				const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			       struct drm_plane *plane,
			       uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *amdgpu_dm_connector,
				    uint32_t link_index,
				    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static int amdgpu_dm_atomic_commit(struct drm_device *dev,
				   struct drm_atomic_state *state,
				   bool nonblock);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state);

static void amdgpu_dm_set_psr_caps(struct dc_link *link);
static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
/**
 * dm_vblank_get_counter
 *
 * @brief
 * Get counter for number of vertical blanks
 *
 * @param
 * struct amdgpu_device *adev - [in] desired amdgpu device
 * int disp_idx - [in] which CRTC to get the counter from
 *
 * @return
 * Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
		struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
				acrtc->base.state);

		if (acrtc_state->stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		return dc_stream_get_vblank_counter(acrtc_state->stream);
	}
}
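
/*
 * dm_crtc_get_scanoutpos() below hands the scanout position back in the
 * legacy packed register format the base driver still expects: *position
 * holds v_position in the low word and h_position in the high word, and
 * *vbl packs v_blank_start/v_blank_end the same way. For example,
 * h_position = 0x50 and v_position = 0x1c8 yield
 * *position = (0x50 << 16) | 0x1c8 = 0x005001c8.
 */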
static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	uint32_t v_blank_start, v_blank_end, h_position, v_position;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
		struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
						acrtc->base.state);

		if (acrtc_state->stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		/*
		 * TODO rework base driver to use values directly.
		 * for now parse it back into reg-format
		 */
		dc_stream_get_scanoutpos(acrtc_state->stream,
					 &v_blank_start,
					 &v_blank_end,
					 &h_position,
					 &v_position);

		*position = v_position | (h_position << 16);
		*vbl = v_blank_start | (v_blank_end << 16);
	}

	return 0;
}
static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}

static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}

static bool dm_check_soft_reset(void *handle)
{
	return false;
}

static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}
static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
		     int otg_inst)
{
	struct drm_device *dev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	if (otg_inst == -1) {
		WARN_ON(1);
		return adev->mode_info.crtcs[0];
	}

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}
static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}
/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: ignored
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;
	struct drm_pending_vblank_event *e;
	struct dm_crtc_state *acrtc_state;
	uint32_t vpos, hpos, v_blank_start, v_blank_end;
	bool vrr_active;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev->ddev->event_lock, flags);

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
				 amdgpu_crtc->pflip_status,
				 AMDGPU_FLIP_SUBMITTED,
				 amdgpu_crtc->crtc_id,
				 amdgpu_crtc);
		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
		return;
	}

	/* page flip completed. */
	e = amdgpu_crtc->event;
	amdgpu_crtc->event = NULL;

	if (!e)
		WARN_ON(1);

	acrtc_state = to_dm_crtc_state(amdgpu_crtc->base.state);
	vrr_active = amdgpu_dm_vrr_active(acrtc_state);

	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
	if (!vrr_active ||
	    !dc_stream_get_scanoutpos(acrtc_state->stream, &v_blank_start,
				      &v_blank_end, &hpos, &vpos) ||
	    (vpos < v_blank_start)) {
		/* Update to correct count and vblank timestamp if racing with
		 * vblank irq. This also updates to the correct vblank timestamp
		 * even in VRR mode, as scanout is past the front-porch atm.
		 */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		/* Wake up userspace by sending the pageflip event with proper
		 * count and timestamp of vblank of flip completion.
		 */
		if (e) {
			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

			/* Event sent, so done with vblank for this flip */
			drm_crtc_vblank_put(&amdgpu_crtc->base);
		}
	} else if (e) {
		/* VRR active and inside front-porch: vblank count and
		 * timestamp for pageflip event will only be up to date after
		 * drm_crtc_handle_vblank() has been executed from late vblank
		 * irq handler after start of back-porch (vline 0). We queue the
		 * pageflip event for send-out by drm_crtc_handle_vblank() with
		 * updated timestamp and count, once it runs after us.
		 *
		 * We need to open-code this instead of using the helper
		 * drm_crtc_arm_vblank_event(), as that helper would
		 * call drm_crtc_accurate_vblank_count(), which we must
		 * not call in VRR mode while we are in front-porch!
		 */

		/* sequence will be replaced by real count during send-out. */
		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
		e->pipe = amdgpu_crtc->crtc_id;

		list_add_tail(&e->base.link, &adev->ddev->vblank_event_list);
		e = NULL;
	}

	/* Keep track of vblank of this flip for flip throttling. We use the
	 * cooked hw counter, as that one incremented at start of this vblank
	 * of pageflip completion, so last_flip_vblank is the forbidden count
	 * for queueing new pageflips if vsync + VRR is enabled.
	 */
	amdgpu_crtc->last_flip_vblank =
		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);

	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
			 amdgpu_crtc->crtc_id, amdgpu_crtc,
			 vrr_active, (int) !e);
}
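
/*
 * dm_vupdate_high_irq() - Handle the VUPDATE interrupt.
 *
 * In VRR mode, core vblank handling is deferred to this point, after the end
 * of the variable front-porch, so that vblank timestamping gives valid
 * results. Below-the-range (BTR) processing is also driven from here on
 * pre-DCE12 (family < AMDGPU_FAMILY_AI) ASICs.
 */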
static void dm_vupdate_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct dm_crtc_state *acrtc_state;
	unsigned long flags;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

	if (acrtc) {
		acrtc_state = to_dm_crtc_state(acrtc->base.state);

		DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
			      acrtc->crtc_id,
			      amdgpu_dm_vrr_active(acrtc_state));

		/* Core vblank handling is done here after end of front-porch in
		 * vrr mode, as vblank timestamping will give valid results
		 * while now done after front-porch. This will also deliver
		 * page-flip completion events that have been queued to us
		 * if a pageflip happened inside front-porch.
		 */
		if (amdgpu_dm_vrr_active(acrtc_state)) {
			drm_crtc_handle_vblank(&acrtc->base);

			/* BTR processing for pre-DCE12 ASICs */
			if (acrtc_state->stream &&
			    adev->family < AMDGPU_FAMILY_AI) {
				spin_lock_irqsave(&adev->ddev->event_lock, flags);
				mod_freesync_handle_v_update(
				    adev->dm.freesync_module,
				    acrtc_state->stream,
				    &acrtc_state->vrr_params);

				dc_stream_adjust_vmin_vmax(
				    adev->dm.dc,
				    acrtc_state->stream,
				    &acrtc_state->vrr_params.adjust);
				spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
			}
		}
	}
}
/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: ignored
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct dm_crtc_state *acrtc_state;
	unsigned long flags;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);

	if (acrtc) {
		acrtc_state = to_dm_crtc_state(acrtc->base.state);

		DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
			      acrtc->crtc_id,
			      amdgpu_dm_vrr_active(acrtc_state));

		/* Core vblank handling at start of front-porch is only possible
		 * in non-vrr mode, as only there vblank timestamping will give
		 * valid results while done in front-porch. Otherwise defer it
		 * to dm_vupdate_high_irq after end of front-porch.
		 */
		if (!amdgpu_dm_vrr_active(acrtc_state))
			drm_crtc_handle_vblank(&acrtc->base);

		/* Following stuff must happen at start of vblank, for crc
		 * computation and below-the-range btr support in vrr mode.
		 */
		amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

		if (acrtc_state->stream && adev->family >= AMDGPU_FAMILY_AI &&
		    acrtc_state->vrr_params.supported &&
		    acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
			spin_lock_irqsave(&adev->ddev->event_lock, flags);
			mod_freesync_handle_v_update(
				adev->dm.freesync_module,
				acrtc_state->stream,
				&acrtc_state->vrr_params);

			dc_stream_adjust_vmin_vmax(
				adev->dm.dc,
				acrtc_state->stream,
				&acrtc_state->vrr_params.adjust);
			spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
		}
	}
}
#if defined(CONFIG_DRM_AMD_DC_DCN)
/**
 * dm_dcn_crtc_high_irq() - Handles VStartup interrupt for DCN generation ASICs
 * @interrupt_params: interrupt parameters
 *
 * Notify DRM's vblank event handler at VSTARTUP
 *
 * Unlike DCE hardware, we trigger the handler at VSTARTUP, at which:
 * * We are close enough to VUPDATE - the point of no return for hw
 * * We are in the fixed portion of variable front porch when vrr is enabled
 * * We are before VUPDATE, where double-buffered vrr registers are swapped
 *
 * It is therefore the correct place to signal vblank, send user flip events,
 * and pick up vrr.
 */
static void dm_dcn_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct dm_crtc_state *acrtc_state;
	unsigned long flags;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);

	if (!acrtc)
		return;

	acrtc_state = to_dm_crtc_state(acrtc->base.state);

	DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
		      amdgpu_dm_vrr_active(acrtc_state),
		      acrtc_state->active_planes);

	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
	drm_crtc_handle_vblank(&acrtc->base);

	spin_lock_irqsave(&adev->ddev->event_lock, flags);

	if (acrtc_state->vrr_params.supported &&
	    acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
		mod_freesync_handle_v_update(
			adev->dm.freesync_module,
			acrtc_state->stream,
			&acrtc_state->vrr_params);

		dc_stream_adjust_vmin_vmax(
			adev->dm.dc,
			acrtc_state->stream,
			&acrtc_state->vrr_params.adjust);
	}

	/*
	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
	 * In that case, pageflip completion interrupts won't fire and pageflip
	 * completion events won't get delivered. Prevent this by sending
	 * pending pageflip events from here if a flip is still pending.
	 *
	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
	 * avoid race conditions between flip programming and completion,
	 * which could cause too early flip completion events.
	 */
	if (acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
	    acrtc_state->active_planes == 0) {
		if (acrtc->event) {
			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
			acrtc->event = NULL;
			drm_crtc_vblank_put(&acrtc->base);
		}
		acrtc->pflip_status = AMDGPU_FLIP_NONE;
	}

	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
}
#endif
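
/*
 * The clock- and power-gating hooks below are deliberately no-ops: gating
 * for the display block is managed by DC internally, so these amd_ip_funcs
 * callbacks simply report success.
 */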
static int dm_set_clockgating_state(void *handle,
				    enum amd_clockgating_state state)
{
	return 0;
}

static int dm_set_powergating_state(void *handle,
				    enum amd_powergating_state state)
{
	return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);
/* Allocate memory for FBC compressed data  */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct dm_comressor_info *compressor = &adev->dm.compressor;
	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
	struct drm_display_mode *mode;
	unsigned long max_size = 0;

	if (adev->dm.dc->fbc_compressor == NULL)
		return;

	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	if (compressor->bo_ptr)
		return;

	list_for_each_entry(mode, &connector->modes, head) {
		if (max_size < mode->htotal * mode->vtotal)
			max_size = mode->htotal * mode->vtotal;
	}

	if (max_size) {
		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
			    &compressor->gpu_addr, &compressor->cpu_addr);

		if (r)
			DRM_ERROR("DM: Failed to initialize FBC\n");
		else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size * 4);
		}
	}
}
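
/*
 * amdgpu_dm_audio_component_get_eld() - Copy the ELD of the connector whose
 * audio instance matches @port into @buf for the HDA driver, and report
 * whether that pin is currently enabled. Runs under dm.audio_lock.
 */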
static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
					     int pipe, bool *enabled,
					     unsigned char *buf, int max_bytes)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct amdgpu_dm_connector *aconnector;
	int ret = 0;

	*enabled = false;

	mutex_lock(&adev->dm.audio_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->audio_inst != port)
			continue;

		*enabled = true;
		ret = drm_eld_size(connector->eld);
		memcpy(buf, connector->eld, min(max_bytes, ret));

		break;
	}
	drm_connector_list_iter_end(&conn_iter);

	mutex_unlock(&adev->dm.audio_lock);

	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

	return ret;
}
static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
	.get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
					  struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_audio_component *acomp = data;

	acomp->ops = &amdgpu_dm_audio_component_ops;
	acomp->dev = kdev;
	adev->dm.audio_component = acomp;

	return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
					     struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_audio_component *acomp = data;

	acomp->ops = NULL;
	acomp->dev = NULL;
	adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
	.bind	= amdgpu_dm_audio_component_bind,
	.unbind	= amdgpu_dm_audio_component_unbind,
};
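
/*
 * amdgpu_dm_audio_init() - Mark every audio pin exposed by the DC resource
 * pool as disconnected/unknown and register the audio component so the HDA
 * driver can bind to it; amdgpu_dm_audio_fini() undoes the registration.
 */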
static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
	int i, ret;

	if (!amdgpu_audio)
		return 0;

	adev->mode_info.audio.enabled = true;

	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].id =
			adev->dm.dc->res_pool->audios[i]->inst;
		adev->mode_info.audio.pin[i].offset = 0;
	}

	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
	if (ret < 0)
		return ret;

	adev->dm.audio_registered = true;

	return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
	if (!amdgpu_audio)
		return;

	if (!adev->mode_info.audio.enabled)
		return;

	if (adev->dm.audio_registered) {
		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
		adev->dm.audio_registered = false;
	}

	/* TODO: Disable audio? */

	adev->mode_info.audio.enabled = false;
}

void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
	struct drm_audio_component *acomp = adev->dm.audio_component;

	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
						 pin, -1);
	}
}
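
/*
 * dm_dmub_hw_init() - Bring up the DMUB microcontroller.
 *
 * In order: verify the ASIC actually has DMUB hardware, copy the firmware
 * instruction/data sections and the VBIOS into the reserved framebuffer
 * windows (unless the PSP already front-door loaded the instruction RAM),
 * zero the mailbox/tracebuffer/fw-state windows, program the hardware, wait
 * for the firmware auto-load to finish, and finally create the DC-side DMUB
 * server object.
 */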
static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
	const struct dmcub_firmware_header_v1_0 *hdr;
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
	const struct firmware *dmub_fw = adev->dm.dmub_fw;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	struct abm *abm = adev->dm.dc->res_pool->abm;
	struct dmub_srv_hw_params hw_params;
	enum dmub_status status;
	const unsigned char *fw_inst_const, *fw_bss_data;
	uint32_t i, fw_inst_const_size, fw_bss_data_size;
	bool has_hw_support;

	if (!dmub_srv)
		/* DMUB isn't supported on the ASIC. */
		return 0;

	if (!fb_info) {
		DRM_ERROR("No framebuffer info for DMUB service.\n");
		return -EINVAL;
	}

	if (!dmub_fw) {
		/* Firmware required for DMUB support. */
		DRM_ERROR("No firmware provided for DMUB.\n");
		return -EINVAL;
	}

	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
		return -EINVAL;
	}

	if (!has_hw_support) {
		DRM_INFO("DMUB unsupported on ASIC\n");
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

	fw_inst_const = dmub_fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
			PSP_HEADER_BYTES;

	fw_bss_data = dmub_fw->data +
		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		      le32_to_cpu(hdr->inst_const_bytes);

	/* Copy firmware and bios info into FB memory. */
	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
	 * amdgpu_ucode_init_single_fw will load dmub firmware
	 * fw_inst_const part to cw0; otherwise, the firmware back door load
	 * will be done by dm_dmub_hw_init
	 */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
		       fw_inst_const_size);
	}

	if (fw_bss_data_size)
		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
		       fw_bss_data, fw_bss_data_size);

	/* Copy firmware bios info into FB memory. */
	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
	       adev->bios_size);

	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

	/* Initialize hardware. */
	memset(&hw_params, 0, sizeof(hw_params));
	hw_params.fb_base = adev->gmc.fb_start;
	hw_params.fb_offset = adev->gmc.aper_base;

	/* backdoor load firmware and trigger dmub running */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		hw_params.load_inst_const = true;

	if (dmcu)
		hw_params.psp_version = dmcu->psp_version;

	for (i = 0; i < fb_info->num_fb; ++i)
		hw_params.fb[i] = &fb_info->fb[i];

	status = dmub_srv_hw_init(dmub_srv, &hw_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
		return -EINVAL;
	}

	/* Wait for firmware load to finish. */
	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

	/* Init DMCU and ABM if available. */
	if (dmcu && abm) {
		dmcu->funcs->dmcu_init(dmcu);
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
	}

	adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
	if (!adev->dm.dc->ctx->dmub_srv) {
		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
		return -ENOMEM;
	}

	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
		 adev->dm.dmcub_fw_version);

	return 0;
}
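
/*
 * amdgpu_dm_init() - Create and wire up the display manager: IRQ support,
 * the cgs device, the DC core itself, DMUB, the freesync and color modules,
 * optional HDCP support, and the DRM-facing device state. Any failure
 * unwinds through amdgpu_dm_fini().
 */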
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
	struct dc_init_data init_data;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct dc_callback_init init_params;
#endif
	int r;

	adev->dm.ddev = adev->ddev;
	adev->dm.adev = adev;

	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));
#ifdef CONFIG_DRM_AMD_DC_HDCP
	memset(&init_params, 0, sizeof(init_params));
#endif

	mutex_init(&adev->dm.dc_lock);
	mutex_init(&adev->dm.audio_lock);

	if (amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
		goto error;
	}

	init_data.asic_id.chip_family = adev->family;

	init_data.asic_id.pci_revision_id = adev->pdev->revision;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;

	init_data.asic_id.vram_width = adev->gmc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;

	init_data.driver = adev;

	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		DRM_ERROR("amdgpu: failed to create cgs device.\n");
		goto error;
	}

	init_data.cgs_device = adev->dm.cgs_device;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

	switch (adev->asic_type) {
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_RAVEN:
	case CHIP_RENOIR:
		init_data.flags.gpu_vm_support = true;
		break;
	default:
		break;
	}

	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
		init_data.flags.fbc_support = true;

	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
		init_data.flags.multi_mon_pp_mclk_switch = true;

	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
		init_data.flags.disable_fractional_pwm = true;

	init_data.flags.power_down_display_on_boot = true;

	init_data.soc_bounding_box = adev->dm.soc_bounding_box;

	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

	if (adev->dm.dc) {
		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
	} else {
		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
		goto error;
	}

	r = dm_dmub_hw_init(adev);
	if (r) {
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
		goto error;
	}

	dc_hardware_init(adev->dm.dc);

	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module) {
		DRM_ERROR(
		"amdgpu: failed to initialize freesync_module.\n");
	} else
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				adev->dm.freesync_module);

	amdgpu_dm_init_color_mod();

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->asic_type >= CHIP_RAVEN) {
		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);

		if (!adev->dm.hdcp_workqueue)
			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
		else
			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);

		dc_init_callbacks(adev->dm.dc, &init_params);
	}
#endif
	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	/* Update the actual used number of crtc */
	adev->mode_info.num_crtc = adev->dm.display_indexes_num;

	/* TODO: Add_display_info? */

	/* TODO use dynamic cursor width */
	adev->ddev->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
	adev->ddev->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

	if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	DRM_DEBUG_DRIVER("KMS initialized.\n");

	return 0;
error:
	amdgpu_dm_fini(adev);

	return -EINVAL;
}
static void amdgpu_dm_fini(struct amdgpu_device *adev)
{
	amdgpu_dm_audio_fini(adev);

	amdgpu_dm_destroy_drm_device(&adev->dm);

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.hdcp_workqueue) {
		hdcp_destroy(adev->dm.hdcp_workqueue);
		adev->dm.hdcp_workqueue = NULL;
	}

	if (adev->dm.dc)
		dc_deinit_callbacks(adev->dm.dc);
#endif
	if (adev->dm.dc->ctx->dmub_srv) {
		dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
		adev->dm.dc->ctx->dmub_srv = NULL;
	}

	if (adev->dm.dmub_bo)
		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
				      &adev->dm.dmub_bo_gpu_addr,
				      &adev->dm.dmub_bo_cpu_addr);

	/* DC Destroy TODO: Replace destroy DAL */
	if (adev->dm.dc)
		dc_destroy(&adev->dm.dc);
	/*
	 * TODO: pageflip, vblank interrupt
	 *
	 * amdgpu_dm_irq_fini(adev);
	 */

	if (adev->dm.cgs_device) {
		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
		adev->dm.cgs_device = NULL;
	}
	if (adev->dm.freesync_module) {
		mod_freesync_destroy(adev->dm.freesync_module);
		adev->dm.freesync_module = NULL;
	}

	mutex_destroy(&adev->dm.audio_lock);
	mutex_destroy(&adev->dm.dc_lock);

	return;
}
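
/*
 * load_dmcu_fw() - Request the optional DMCU firmware for ASICs that need it.
 * DMCU firmware is only consumed via PSP loading; its absence is not an
 * error, so a missing file merely leaves adev->dm.fw_dmcu NULL.
 */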
static int load_dmcu_fw(struct amdgpu_device *adev)
{
	const char *fw_name_dmcu = NULL;
	int r;
	const struct dmcu_firmware_header_v1_0 *hdr;

	switch (adev->asic_type) {
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_RENOIR:
		return 0;
	case CHIP_NAVI12:
		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
		break;
	case CHIP_RAVEN:
		if (ASICREV_IS_PICASSO(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else
			return 0;
		break;
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
		return 0;
	}

	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
	if (r == -ENOENT) {
		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
		adev->dm.fw_dmcu = NULL;
		return 0;
	}
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
			fw_name_dmcu);
		return r;
	}

	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
			fw_name_dmcu);
		release_firmware(adev->dm.fw_dmcu);
		adev->dm.fw_dmcu = NULL;
		return r;
	}

	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);

	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");

	return 0;
}
static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
{
	struct amdgpu_device *adev = ctx;

	return dm_read_reg(adev->dm.dc->ctx, address);
}

static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
				     uint32_t value)
{
	struct amdgpu_device *adev = ctx;

	return dm_write_reg(adev->dm.dc->ctx, address, value);
}
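
/*
 * dm_dmub_sw_init() - Software-side DMUB setup: fetch and validate the DMUB
 * firmware, create the DMUB service with register accessors backed by DC,
 * size all firmware regions, and carve them out of a single VRAM allocation.
 */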
static int dm_dmub_sw_init(struct amdgpu_device *adev)
{
	struct dmub_srv_create_params create_params;
	struct dmub_srv_region_params region_params;
	struct dmub_srv_region_info region_info;
	struct dmub_srv_fb_params fb_params;
	struct dmub_srv_fb_info *fb_info;
	struct dmub_srv *dmub_srv;
	const struct dmcub_firmware_header_v1_0 *hdr;
	const char *fw_name_dmub;
	enum dmub_asic dmub_asic;
	enum dmub_status status;
	int r;

	switch (adev->asic_type) {
	case CHIP_RENOIR:
		dmub_asic = DMUB_ASIC_DCN21;
		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
		break;

	default:
		/* ASIC doesn't support DMUB. */
		return 0;
	}

	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
	if (r) {
		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
		return 0;
	}

	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
	if (r) {
		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
			AMDGPU_UCODE_ID_DMCUB;
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
			adev->dm.dmub_fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);

		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
			 adev->dm.dmcub_fw_version);
	}

	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);

	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
	dmub_srv = adev->dm.dmub_srv;

	if (!dmub_srv) {
		DRM_ERROR("Failed to allocate DMUB service!\n");
		return -ENOMEM;
	}

	memset(&create_params, 0, sizeof(create_params));
	create_params.user_ctx = adev;
	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
	create_params.asic = dmub_asic;

	/* Create the DMUB service. */
	status = dmub_srv_create(dmub_srv, &create_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error creating DMUB service: %d\n", status);
		return -EINVAL;
	}

	/* Calculate the size of all the regions for the DMUB service. */
	memset(&region_params, 0, sizeof(region_params));

	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
	region_params.vbios_size = adev->bios_size;
	region_params.fw_bss_data =
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		le32_to_cpu(hdr->inst_const_bytes);
	region_params.fw_inst_const =
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		PSP_HEADER_BYTES;

	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
					   &region_info);

	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
		return -EINVAL;
	}

	/*
	 * Allocate a framebuffer based on the total size of all the regions.
	 * TODO: Move this into GART.
	 */
	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
				    &adev->dm.dmub_bo_gpu_addr,
				    &adev->dm.dmub_bo_cpu_addr);
	if (r)
		return r;

	/* Rebase the regions on the framebuffer address. */
	memset(&fb_params, 0, sizeof(fb_params));
	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
	fb_params.region_info = &region_info;

	adev->dm.dmub_fb_info =
		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
	fb_info = adev->dm.dmub_fb_info;

	if (!fb_info) {
		DRM_ERROR(
			"Failed to allocate framebuffer info for DMUB service!\n");
		return -ENOMEM;
	}

	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
		return -EINVAL;
	}

	return 0;
}
static int dm_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = dm_dmub_sw_init(adev);
	if (r)
		return r;

	return load_dmcu_fw(adev);
}

static int dm_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	kfree(adev->dm.dmub_fb_info);
	adev->dm.dmub_fb_info = NULL;

	if (adev->dm.dmub_srv) {
		dmub_srv_destroy(adev->dm.dmub_srv);
		adev->dm.dmub_srv = NULL;
	}

	if (adev->dm.dmub_fw) {
		release_firmware(adev->dm.dmub_fw);
		adev->dm.dmub_fw = NULL;
	}

	if (adev->dm.fw_dmcu) {
		release_firmware(adev->dm.fw_dmcu);
		adev->dm.fw_dmcu = NULL;
	}

	return 0;
}
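
/*
 * detect_mst_link_for_all_connectors() - Kick off MST topology management on
 * every connector that reports an MST branch link; a connector whose topology
 * fails to start is downgraded to a single (SST) link.
 */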
static int detect_mst_link_for_all_connectors(struct drm_device *dev)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	int ret = 0;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type == dc_connection_mst_branch &&
		    aconnector->mst_mgr.aux) {
			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
					 aconnector,
					 aconnector->base.base.id);

			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
			if (ret < 0) {
				DRM_ERROR("DM_MST: Failed to start MST\n");
				aconnector->dc_link->type =
					dc_connection_single;
				break;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	return ret;
}
static int dm_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	struct dmcu_iram_parameters params;
	unsigned int linear_lut[16];
	int i;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	bool ret = false;

	for (i = 0; i < 16; i++)
		linear_lut[i] = 0xFFFF * i / 15;

	params.set = 0;
	params.backlight_ramping_start = 0xCCCC;
	params.backlight_ramping_reduction = 0xCCCCCCCC;
	params.backlight_lut_array_size = 16;
	params.backlight_lut_array = linear_lut;

	/* Min backlight level after ABM reduction,  Don't allow below 1%
	 * 0xFFFF x 0.01 = 0x28F
	 */
	params.min_abm_backlight = 0x28F;

	/* todo will enable for navi10 */
	if (adev->asic_type <= CHIP_RAVEN) {
		ret = dmcu_load_iram(dmcu, params);

		if (!ret)
			return -EINVAL;
	}

	return detect_mst_link_for_all_connectors(adev->ddev);
}
static void s3_handle_mst(struct drm_device *dev, bool suspend)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct drm_dp_mst_topology_mgr *mgr;
	int ret;
	bool need_hotplug = false;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type != dc_connection_mst_branch ||
		    aconnector->mst_port)
			continue;

		mgr = &aconnector->mst_mgr;

		if (suspend) {
			drm_dp_mst_topology_mgr_suspend(mgr);
		} else {
			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
			if (ret < 0) {
				drm_dp_mst_topology_mgr_set_mst(mgr, false);
				need_hotplug = true;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	if (need_hotplug)
		drm_kms_helper_hotplug_event(dev);
}
static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
{
	struct smu_context *smu = &adev->smu;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return 0;

	/* This interface is for dGPU Navi1x. Linux dc-pplib interface depends
	 * on window driver dc implementation.
	 * For Navi1x, clock settings of dcn watermarks are fixed. The settings
	 * should be passed to smu during boot up and resume from s3.
	 * boot up: dc calculates dcn watermark clock settings within dc_create,
	 * dcn20_resource_construct
	 * then calls pplib functions below to pass the settings to smu:
	 * smu_set_watermarks_for_clock_ranges
	 * smu_set_watermarks_table
	 * navi10_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Renoir, clock settings of dcn watermark are also fixed values.
	 * dc has implemented different flow for window driver:
	 * dc_hardware_init / dc_set_power_state
	 * dcn10_init_hw
	 * notify_wm_ranges
	 * set_wm_ranges
	 * -- Linux
	 * smu_set_watermarks_for_clock_ranges
	 * renoir_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Linux,
	 * dc_hardware_init -> amdgpu_dm_init
	 * dc_set_power_state --> dm_resume
	 *
	 * therefore, this function applies to navi10/12/14 but not Renoir
	 */
	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
		break;
	default:
		return 0;
	}

	mutex_lock(&smu->mutex);

	/* pass data to smu controller */
	if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
	    !(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
		ret = smu_write_watermarks_table(smu);

		if (ret) {
			mutex_unlock(&smu->mutex);
			DRM_ERROR("Failed to update WMTABLE!\n");
			return ret;
		}
		smu->watermarks_bitmap |= WATERMARKS_LOADED;
	}

	mutex_unlock(&smu->mutex);

	return 0;
}
/**
 * dm_hw_init() - Initialize DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Initialize the &struct amdgpu_display_manager device. This involves calling
 * the initializers of each DM component, then populating the struct with them.
 *
 * Although the function implies hardware initialization, both hardware and
 * software are initialized here. Splitting them out to their relevant init
 * hooks is a future TODO item.
 *
 * Some notable things that are initialized here:
 *
 * - Display Core, both software and hardware
 * - DC modules that we need (freesync and color management)
 * - DRM software states
 * - Interrupt sources and handlers
 * - Vblank support
 * - Debug FS entries, if enabled
 */
static int dm_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	/* Create DAL display manager */
	amdgpu_dm_init(adev);
	amdgpu_dm_hpd_init(adev);

	return 0;
}
/**
 * dm_hw_fini() - Teardown DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Teardown components within &struct amdgpu_display_manager that require
 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
 * were loaded. Also flush IRQ workqueues and disable them.
 */
static int dm_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dm_hpd_fini(adev);

	amdgpu_dm_irq_fini(adev);
	amdgpu_dm_fini(adev);
	return 0;
}
static int dm_suspend(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct amdgpu_display_manager *dm = &adev->dm;
	int ret = 0;

	WARN_ON(adev->dm.cached_state);
	adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);

	s3_handle_mst(adev->ddev, true);

	amdgpu_dm_irq_suspend(adev);

	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);

	return ret;
}
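
/*
 * Find the first connector in @state whose new state points at @crtc, or
 * NULL if the CRTC has no connector in this atomic state.
 */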
static struct amdgpu_dm_connector *
amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
					     struct drm_crtc *crtc)
{
	uint32_t i;
	struct drm_connector_state *new_con_state;
	struct drm_connector *connector;
	struct drm_crtc *crtc_from_state;

	for_each_new_connector_in_state(state, connector, new_con_state, i) {
		crtc_from_state = new_con_state->crtc;

		if (crtc_from_state == crtc)
			return to_amdgpu_dm_connector(connector);
	}

	return NULL;
}
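
/*
 * emulated_link_detect() - Fake a link detection for forced connectors with
 * no physical sink: build sink capabilities matching the connector signal
 * type, create an emulated sink, and read the local EDID into it.
 */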
static void emulated_link_detect(struct dc_link *link)
{
	struct dc_sink_init_data sink_init_data = { 0 };
	struct display_sink_capability sink_caps = { 0 };
	enum dc_edid_status edid_status;
	struct dc_context *dc_ctx = link->ctx;
	struct dc_sink *sink = NULL;
	struct dc_sink *prev_sink = NULL;

	link->type = dc_connection_none;
	prev_sink = link->local_sink;

	if (prev_sink != NULL)
		dc_sink_retain(prev_sink);

	switch (link->connector_signal) {
	case SIGNAL_TYPE_HDMI_TYPE_A: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
		break;
	}

	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
		break;
	}

	case SIGNAL_TYPE_DVI_DUAL_LINK: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
		break;
	}

	case SIGNAL_TYPE_LVDS: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_LVDS;
		break;
	}

	case SIGNAL_TYPE_EDP: {
		sink_caps.transaction_type =
			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
		sink_caps.signal = SIGNAL_TYPE_EDP;
		break;
	}

	case SIGNAL_TYPE_DISPLAY_PORT: {
		sink_caps.transaction_type =
			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
		break;
	}

	default:
		DC_ERROR("Invalid connector type! signal:%d\n",
			 link->connector_signal);
		return;
	}

	sink_init_data.link = link;
	sink_init_data.sink_signal = sink_caps.signal;

	sink = dc_sink_create(&sink_init_data);
	if (!sink) {
		DC_ERROR("Failed to create sink!\n");
		return;
	}

	/* dc_sink_create returns a new reference */
	link->local_sink = sink;

	edid_status = dm_helpers_read_local_edid(
			link->ctx,
			link,
			sink);

	if (edid_status != EDID_OK)
		DC_ERROR("Failed to read EDID");

}
static int dm_resume(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct drm_device *ddev = adev->ddev;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_state;
	struct dm_crtc_state *dm_new_crtc_state;
	struct drm_plane *plane;
	struct drm_plane_state *new_plane_state;
	struct dm_plane_state *dm_new_plane_state;
	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
	enum dc_connection_type new_connection_type = dc_connection_none;
	int i, r;

	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
	dc_release_state(dm_state->context);
	dm_state->context = dc_create_state(dm->dc);
	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
	dc_resource_state_construct(dm->dc, dm_state->context);

	/* Before powering on DC we need to re-initialize DMUB. */
	r = dm_dmub_hw_init(adev);
	if (r)
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);

	/* power on hardware */
	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);

	/* program HPD filter */
	dc_resume(dm->dc);

	/*
	 * early enable HPD Rx IRQ, should be done before set mode as short
	 * pulse interrupts are used for MST
	 */
	amdgpu_dm_irq_resume_early(adev);

	/* On resume we need to rewrite the MSTM control bits to enable MST*/
	s3_handle_mst(ddev, false);

	drm_connector_list_iter_begin(ddev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);

		/*
		 * this is the case when traversing through already created
		 * MST connectors, should be skipped
		 */
		if (aconnector->mst_port)
			continue;

		mutex_lock(&aconnector->hpd_lock);
		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
			DRM_ERROR("KMS: Failed to detect connector\n");

		if (aconnector->base.force && new_connection_type == dc_connection_none)
			emulated_link_detect(aconnector->dc_link);
		else
			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);

		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
			aconnector->fake_enable = false;

		if (aconnector->dc_sink)
			dc_sink_release(aconnector->dc_sink);
		aconnector->dc_sink = NULL;
		amdgpu_dm_update_connector_after_detect(aconnector);
		mutex_unlock(&aconnector->hpd_lock);
	}
	drm_connector_list_iter_end(&iter);

	/* Force mode set in atomic commit */
	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
		new_crtc_state->active_changed = true;

	/*
	 * atomic_check is expected to create the dc states. We need to release
	 * them here, since they were duplicated as part of the suspend
	 * procedure.
	 */
	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		if (dm_new_crtc_state->stream) {
			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
			dc_stream_release(dm_new_crtc_state->stream);
			dm_new_crtc_state->stream = NULL;
		}
	}

	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
		dm_new_plane_state = to_dm_plane_state(new_plane_state);
		if (dm_new_plane_state->dc_state) {
			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
			dc_plane_state_release(dm_new_plane_state->dc_state);
			dm_new_plane_state->dc_state = NULL;
		}
	}

	drm_atomic_helper_resume(ddev, dm->cached_state);

	dm->cached_state = NULL;

	amdgpu_dm_irq_resume_late(adev);

	amdgpu_dm_smu_write_watermarks_table(adev);

	return 0;
}
/**
 * DOC: DM Lifecycle
 *
 * DM (and consequently DC) is registered in the amdgpu base driver as an IP
 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
 * the base driver's device list to be initialized and torn down accordingly.
 *
 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
 */

static const struct amd_ip_funcs amdgpu_dm_funcs = {
	.name = "dm",
	.early_init = dm_early_init,
	.late_init = dm_late_init,
	.sw_init = dm_sw_init,
	.sw_fini = dm_sw_fini,
	.hw_init = dm_hw_init,
	.hw_fini = dm_hw_fini,
	.suspend = dm_suspend,
	.resume = dm_resume,
	.is_idle = dm_is_idle,
	.wait_for_idle = dm_wait_for_idle,
	.check_soft_reset = dm_check_soft_reset,
	.soft_reset = dm_soft_reset,
	.set_clockgating_state = dm_set_clockgating_state,
	.set_powergating_state = dm_set_powergating_state,
};

const struct amdgpu_ip_block_version dm_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &amdgpu_dm_funcs,
};
static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
	.fb_create = amdgpu_display_user_framebuffer_create,
	.output_poll_changed = drm_fb_helper_output_poll_changed,
	.atomic_check = amdgpu_dm_atomic_check,
	.atomic_commit = amdgpu_dm_atomic_commit,
};

static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
};
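
/*
 * update_connector_ext_caps() - Cache a panel's DPCD extended backlight
 * capabilities and derive the AUX backlight range from the HDR metadata.
 * Worked example of the luminance formula implemented below: max_cll = 65
 * gives q = 65 >> 5 = 2 and r = 65 % 32 = 1, so
 * max = (1 << 2) * pre_computed_values[1] = 4 * 51 = 204, matching
 * 50*2**(65/32) ~= 204.
 */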
static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
{
	u32 max_cll, min_cll, max, min, q, r;
	struct amdgpu_dm_backlight_caps *caps;
	struct amdgpu_display_manager *dm;
	struct drm_connector *conn_base;
	struct amdgpu_device *adev;
	static const u8 pre_computed_values[] = {
		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};

	if (!aconnector || !aconnector->dc_link)
		return;

	conn_base = &aconnector->base;
	adev = conn_base->dev->dev_private;
	dm = &adev->dm;
	caps = &dm->backlight_caps;
	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
	caps->aux_support = false;
	max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;

	if (caps->ext_caps->bits.oled == 1 ||
	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
	    caps->ext_caps->bits.hdr_aux_backlight_control == 1)
		caps->aux_support = true;

	/* From the specification (CTA-861-G), for calculating the maximum
	 * luminance we need to use:
	 *	Luminance = 50*2**(CV/32)
	 * Where CV is a one-byte value.
	 * For calculating this expression we may need float point precision;
	 * to avoid this complexity level, we take advantage that CV is divided
	 * by a constant. From the Euclids division algorithm, we know that CV
	 * can be written as: CV = 32*q + r. Next, we replace CV in the
	 * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
	 * need to pre-compute the value of r/32. For pre-computing the values
	 * We just used the following Ruby line:
	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
	 * The results of the above expressions can be verified at
	 * pre_computed_values.
	 */
	q = max_cll >> 5;
	r = max_cll % 32;
	max = (1 << q) * pre_computed_values[r];

	// min luminance: maxLum * (CV/255)^2 / 100
	q = DIV_ROUND_CLOSEST(min_cll, 255);
	min = max * DIV_ROUND_CLOSEST((q * q), 100);

	caps->aux_max_input_signal = max;
	caps->aux_min_input_signal = min;
}
void amdgpu_dm_update_connector_after_detect(
		struct amdgpu_dm_connector *aconnector)
{
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	struct dc_sink *sink;

	/* MST handled by drm_mst framework */
	if (aconnector->mst_mgr.mst_state == true)
		return;

	sink = aconnector->dc_link->local_sink;
	if (sink)
		dc_sink_retain(sink);

	/*
	 * Edid mgmt connector gets first update only in mode_valid hook and then
	 * the connector sink is set to either fake or physical sink depends on link status.
	 * Skip if already done during boot.
	 */
	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
			&& aconnector->dc_em_sink) {

		/*
		 * For S3 resume with headless use eml_sink to fake stream
		 * because on resume connector->sink is set to NULL
		 */
		mutex_lock(&dev->mode_config.mutex);

		if (sink) {
			if (aconnector->dc_sink) {
				amdgpu_dm_update_freesync_caps(connector, NULL);
				/*
				 * retain and release below are used to
				 * bump up refcount for sink because the link doesn't point
				 * to it anymore after disconnect, so on next crtc to connector
				 * reshuffle by UMD we will get into unwanted dc_sink release
				 */
				dc_sink_release(aconnector->dc_sink);
			}
			aconnector->dc_sink = sink;
			dc_sink_retain(aconnector->dc_sink);
			amdgpu_dm_update_freesync_caps(connector,
					aconnector->edid);
		} else {
			amdgpu_dm_update_freesync_caps(connector, NULL);
			if (!aconnector->dc_sink) {
				aconnector->dc_sink = aconnector->dc_em_sink;
				dc_sink_retain(aconnector->dc_sink);
			}
		}

		mutex_unlock(&dev->mode_config.mutex);

		if (sink)
			dc_sink_release(sink);
		return;
	}

	/*
	 * TODO: temporary guard to look for proper fix
	 * if this sink is MST sink, we should not do anything
	 */
	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
		dc_sink_release(sink);
		return;
	}

	if (aconnector->dc_sink == sink) {
		/*
		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
		 * Do nothing!!
		 */
		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
				aconnector->connector_id);
		if (sink)
			dc_sink_release(sink);
		return;
	}

	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
		aconnector->connector_id, aconnector->dc_sink, sink);

	mutex_lock(&dev->mode_config.mutex);

	/*
	 * 1. Update status of the drm connector
	 * 2. Send an event and let userspace tell us what to do
	 */
	if (sink) {
		/*
		 * TODO: check if we still need the S3 mode update workaround.
		 * If yes, put it here.
		 */
		if (aconnector->dc_sink)
			amdgpu_dm_update_freesync_caps(connector, NULL);

		aconnector->dc_sink = sink;
		dc_sink_retain(aconnector->dc_sink);
		if (sink->dc_edid.length == 0) {
			aconnector->edid = NULL;
			drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
		} else {
			aconnector->edid =
				(struct edid *) sink->dc_edid.raw_edid;

			drm_connector_update_edid_property(connector,
							   aconnector->edid);
			drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
					    aconnector->edid);
		}
		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
		update_connector_ext_caps(aconnector);
	} else {
		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
		amdgpu_dm_update_freesync_caps(connector, NULL);
		drm_connector_update_edid_property(connector, NULL);
		aconnector->num_modes = 0;
		dc_sink_release(aconnector->dc_sink);
		aconnector->dc_sink = NULL;
		aconnector->edid = NULL;
#ifdef CONFIG_DRM_AMD_DC_HDCP
		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
#endif
	}

	mutex_unlock(&dev->mode_config.mutex);

	if (sink)
		dc_sink_release(sink);
}
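
/*
 * handle_hpd_irq() below services a long HPD pulse (connect/disconnect),
 * while dm_handle_hpd_rx_irq() services a DP short pulse by polling the
 * sink's IRQ vector (DPCD 0x200 range, or the ESI range for DPCD >= 1.2),
 * letting the MST manager handle it, acking the vector back to the sink,
 * and repeating until no new IRQ is signalled (bounded by
 * max_process_count).
 */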
static void handle_hpd_irq(void *param)
{
	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	enum dc_connection_type new_connection_type = dc_connection_none;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct amdgpu_device *adev = dev->dev_private;
#endif

	/*
	 * In case of failure or MST no need to update connector status or notify the OS
	 * since (for MST case) MST does this in its own context.
	 */
	mutex_lock(&aconnector->hpd_lock);

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.hdcp_workqueue)
		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
#endif
	if (aconnector->fake_enable)
		aconnector->fake_enable = false;

	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
		DRM_ERROR("KMS: Failed to detect connector\n");

	if (aconnector->base.force && new_connection_type == dc_connection_none) {
		emulated_link_detect(aconnector->dc_link);

		drm_modeset_lock_all(dev);
		dm_restore_drm_connector_state(dev, connector);
		drm_modeset_unlock_all(dev);

		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
			drm_kms_helper_hotplug_event(dev);

	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
		amdgpu_dm_update_connector_after_detect(aconnector);

		drm_modeset_lock_all(dev);
		dm_restore_drm_connector_state(dev, connector);
		drm_modeset_unlock_all(dev);

		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
			drm_kms_helper_hotplug_event(dev);
	}
	mutex_unlock(&aconnector->hpd_lock);

}
static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
{
	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
	uint8_t dret;
	bool new_irq_handled = false;
	int dpcd_addr;
	int dpcd_bytes_to_read;

	const int max_process_count = 30;
	int process_count = 0;

	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);

	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
		/* DPCD 0x200 - 0x201 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT;
	} else {
		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT_ESI;
	}

	dret = drm_dp_dpcd_read(
		&aconnector->dm_dp_aux.aux,
		dpcd_addr,
		esi,
		dpcd_bytes_to_read);

	while (dret == dpcd_bytes_to_read &&
			process_count < max_process_count) {
		uint8_t retry;

		dret = 0;
		process_count++;

		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
		/* handle HPD short pulse irq */
		if (aconnector->mst_mgr.mst_state)
			drm_dp_mst_hpd_irq(
				&aconnector->mst_mgr,
				esi,
				&new_irq_handled);

		if (new_irq_handled) {
			/* ACK at DPCD to notify down stream */
			const int ack_dpcd_bytes_to_write =
				dpcd_bytes_to_read - 1;

			for (retry = 0; retry < 3; retry++) {
				uint8_t wret;

				wret = drm_dp_dpcd_write(
					&aconnector->dm_dp_aux.aux,
					dpcd_addr + 1,
					&esi[1],
					ack_dpcd_bytes_to_write);
				if (wret == ack_dpcd_bytes_to_write)
					break;
			}

			/* check if there is new irq to be handled */
			dret = drm_dp_dpcd_read(
				&aconnector->dm_dp_aux.aux,
				dpcd_addr,
				esi,
				dpcd_bytes_to_read);

			new_irq_handled = false;
		} else {
			break;
		}
	}

	if (process_count == max_process_count)
		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
}
static void handle_hpd_rx_irq(void *param)
{
	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	struct dc_link *dc_link = aconnector->dc_link;
	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
	enum dc_connection_type new_connection_type = dc_connection_none;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	union hpd_irq_data hpd_irq_data;
	struct amdgpu_device *adev = dev->dev_private;

	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
#endif

	/*
	 * TODO:Temporary add mutex to protect hpd interrupt not have a gpio
	 * conflict, after implement i2c helper, this mutex should be
	 * retired.
	 */
	if (dc_link->type != dc_connection_mst_branch)
		mutex_lock(&aconnector->hpd_lock);

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL) &&
#else
	if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
#endif
			!is_mst_root_connector) {
		/* Downstream Port status changed. */
		if (!dc_link_detect_sink(dc_link, &new_connection_type))
			DRM_ERROR("KMS: Failed to detect connector\n");

		if (aconnector->base.force && new_connection_type == dc_connection_none) {
			emulated_link_detect(dc_link);

			if (aconnector->fake_enable)
				aconnector->fake_enable = false;

			amdgpu_dm_update_connector_after_detect(aconnector);

			drm_modeset_lock_all(dev);
			dm_restore_drm_connector_state(dev, connector);
			drm_modeset_unlock_all(dev);

			drm_kms_helper_hotplug_event(dev);
		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {

			if (aconnector->fake_enable)
				aconnector->fake_enable = false;

			amdgpu_dm_update_connector_after_detect(aconnector);

			drm_modeset_lock_all(dev);
			dm_restore_drm_connector_state(dev, connector);
			drm_modeset_unlock_all(dev);

			drm_kms_helper_hotplug_event(dev);
		}
	}
#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
		if (adev->dm.hdcp_workqueue)
			hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
	}
#endif
	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
	    (dc_link->type == dc_connection_mst_branch))
		dm_handle_hpd_rx_irq(aconnector);

	if (dc_link->type != dc_connection_mst_branch) {
		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
		mutex_unlock(&aconnector->hpd_lock);
	}
}
static void register_hpd_handlers(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_connector *connector;
	struct amdgpu_dm_connector *aconnector;
	const struct dc_link *dc_link;
	struct dc_interrupt_params int_params = {0};

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	list_for_each_entry(connector,
			&dev->mode_config.connector_list, head) {

		aconnector = to_amdgpu_dm_connector(connector);
		dc_link = aconnector->dc_link;

		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
			int_params.irq_source = dc_link->irq_source_hpd;

			amdgpu_dm_irq_register_interrupt(adev, &int_params,
					handle_hpd_irq,
					(void *) aconnector);
		}

		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {

			/* Also register for DP short pulse (hpd_rx). */
			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
			int_params.irq_source = dc_link->irq_source_hpd_rx;

			amdgpu_dm_irq_register_interrupt(adev, &int_params,
					handle_hpd_rx_irq,
					(void *) aconnector);
		}
	}
}
/* Register IRQ sources and initialize IRQ callbacks */
static int dce110_register_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r;
	int i;
	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;

	if (adev->asic_type >= CHIP_VEGA10)
		client_id = SOC15_IH_CLIENTID_DCE;

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	/*
	 * Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with base driver.
	 *    Base driver will call set() function to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 */

	/* Use VBLANK interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_crtc_high_irq, c_irq_params);
	}

	/* Use VUPDATE interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
		if (r) {
			DRM_ERROR("Failed to add vupdate irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_vupdate_high_irq, c_irq_params);
	}

	/* Use GRPH_PFLIP interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);
	}

	/* HPD */
	r = amdgpu_irq_add_id(adev, client_id,
			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	register_hpd_handlers(adev);

	return 0;
}
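/*
 * Illustrative note (not part of the driver): dc_interrupt_to_irq_source()
 * returns a DC irq_source enum, and subtracting the first source of each
 * class (DC_IRQ_SOURCE_VBLANK1, DC_IRQ_SOURCE_VUPDATE1,
 * DC_IRQ_SOURCE_PFLIP_FIRST) above turns it into a zero-based index, so the
 * second display's vblank lands in vblank_params[1], and so on.
 */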
#if defined(CONFIG_DRM_AMD_DC_DCN)
/* Register IRQ sources and initialize IRQ callbacks */
static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r;
	int i;

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	/*
	 * Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with base driver.
	 *    Base driver will call set() function to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 */

	/* Use VSTARTUP interrupt */
	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
			i++) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);

		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_dcn_crtc_high_irq, c_irq_params);
	}

	/* Use GRPH_PFLIP interrupt */
	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
			i++) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);
	}

	/* HPD */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
			&adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	register_hpd_handlers(adev);

	return 0;
}
#endif
/*
 * Acquires the lock for the atomic state object and returns
 * the new atomic state.
 *
 * This should only be called during atomic check.
 */
static int dm_atomic_get_state(struct drm_atomic_state *state,
			       struct dm_atomic_state **dm_state)
{
	struct drm_device *dev = state->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct drm_private_state *priv_state;

	if (*dm_state)
		return 0;

	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
	if (IS_ERR(priv_state))
		return PTR_ERR(priv_state);

	*dm_state = to_dm_atomic_state(priv_state);

	return 0;
}
struct dm_atomic_state *
dm_atomic_get_new_state(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct drm_private_obj *obj;
	struct drm_private_state *new_obj_state;
	int i;

	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
		if (obj->funcs == dm->atomic_obj.funcs)
			return to_dm_atomic_state(new_obj_state);
	}

	return NULL;
}

struct dm_atomic_state *
dm_atomic_get_old_state(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct drm_private_obj *obj;
	struct drm_private_state *old_obj_state;
	int i;

	for_each_old_private_obj_in_state(state, obj, old_obj_state, i) {
		if (obj->funcs == dm->atomic_obj.funcs)
			return to_dm_atomic_state(old_obj_state);
	}

	return NULL;
}
static struct drm_private_state *
dm_atomic_duplicate_state(struct drm_private_obj *obj)
{
	struct dm_atomic_state *old_state, *new_state;

	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
	if (!new_state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);

	old_state = to_dm_atomic_state(obj->state);

	if (old_state && old_state->context)
		new_state->context = dc_copy_state(old_state->context);

	if (!new_state->context) {
		kfree(new_state);
		return NULL;
	}

	return &new_state->base;
}

static void dm_atomic_destroy_state(struct drm_private_obj *obj,
				    struct drm_private_state *state)
{
	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);

	if (dm_state && dm_state->context)
		dc_release_state(dm_state->context);

	kfree(dm_state);
}

static struct drm_private_state_funcs dm_atomic_state_funcs = {
	.atomic_duplicate_state = dm_atomic_duplicate_state,
	.atomic_destroy_state = dm_atomic_destroy_state,
};
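/*
 * Illustrative usage sketch (not part of the driver): during atomic check,
 * code that needs the global DC context typically does
 *
 *	struct dm_atomic_state *dm_state = NULL;
 *	int ret = dm_atomic_get_state(state, &dm_state);
 *	if (ret)
 *		return ret;
 *
 * after which dm_state->context may be mutated for that commit;
 * dm_atomic_destroy_state() releases the duplicated context when the state
 * is swapped out or discarded.
 */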
static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
{
	struct dm_atomic_state *state;
	int r;

	adev->mode_info.mode_config_initialized = true;

	adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
	adev->ddev->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;

	adev->ddev->mode_config.max_width = 16384;
	adev->ddev->mode_config.max_height = 16384;

	adev->ddev->mode_config.preferred_depth = 24;
	adev->ddev->mode_config.prefer_shadow = 1;
	/* indicates support for immediate flip */
	adev->ddev->mode_config.async_page_flip = true;

	adev->ddev->mode_config.fb_base = adev->gmc.aper_base;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	state->context = dc_create_state(adev->dm.dc);
	if (!state->context) {
		kfree(state);
		return -ENOMEM;
	}

	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);

	drm_atomic_private_obj_init(adev->ddev,
				    &adev->dm.atomic_obj,
				    &state->base,
				    &dm_atomic_state_funcs);

	r = amdgpu_display_modeset_create_props(adev);
	if (r)
		return r;

	r = amdgpu_dm_audio_init(adev);
	if (r)
		return r;

	return 0;
}
#define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
#define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
#define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50

#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
{
#if defined(CONFIG_ACPI)
	struct amdgpu_dm_backlight_caps caps;

	if (dm->backlight_caps.caps_valid)
		return;

	amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
	if (caps.caps_valid) {
		dm->backlight_caps.caps_valid = true;
		if (caps.aux_support)
			return;
		dm->backlight_caps.min_input_signal = caps.min_input_signal;
		dm->backlight_caps.max_input_signal = caps.max_input_signal;
	} else {
		dm->backlight_caps.min_input_signal =
				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
		dm->backlight_caps.max_input_signal =
				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
	}
#else
	if (dm->backlight_caps.aux_support)
		return;

	dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
	dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
#endif
}
static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness)
{
	bool rc;

	if (!link)
		return 1;

	rc = dc_link_set_backlight_level_nits(link, true, brightness,
					      AUX_BL_DEFAULT_TRANSITION_TIME_MS);

	return rc ? 0 : 1;
}
static u32 convert_brightness(const struct amdgpu_dm_backlight_caps *caps,
			      const uint32_t user_brightness)
{
	u32 min, max, conversion_pace;
	u32 brightness = user_brightness;

	if (!caps)
		goto out;

	if (!caps->aux_support) {
		max = caps->max_input_signal;
		min = caps->min_input_signal;

		/*
		 * The brightness input is in the range 0-255
		 * It needs to be rescaled to be between the
		 * requested min and max input signal
		 * It also needs to be scaled up by 0x101 to
		 * match the DC interface which has a range of
		 * 0 to 0xffff
		 */
		conversion_pace = 0x101;
		brightness =
			user_brightness
			* conversion_pace
			* (max - min)
			/ AMDGPU_MAX_BL_LEVEL
			+ min * conversion_pace;
	} else {
		/* TODO
		 * We are doing a linear interpolation here, which is OK but
		 * does not provide the optimal result. We probably want
		 * something close to the Perceptual Quantizer (PQ) curve.
		 */
		max = caps->aux_max_input_signal;
		min = caps->aux_min_input_signal;

		brightness = (AMDGPU_MAX_BL_LEVEL - user_brightness) * min
			       + user_brightness * max;
		// Multiply the value by 1000 since we use millinits
		brightness *= 1000;
		brightness = DIV_ROUND_CLOSEST(brightness, AMDGPU_MAX_BL_LEVEL);
	}

out:
	return brightness;
}
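/*
 * Illustrative worked example (not part of the driver): the PWM branch above
 * is a plain linear rescale from [0, AMDGPU_MAX_BL_LEVEL] onto
 * [min * 0x101, max * 0x101]. With the defaults min = 12, max = 255, a user
 * brightness of 128 maps to 128 * 0x101 * 243 / 255 + 12 * 0x101 = 34431,
 * roughly 53% of the DC 0-0xffff range, offset by the minimum.
 */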
static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
{
	struct amdgpu_display_manager *dm = bl_get_data(bd);
	struct amdgpu_dm_backlight_caps caps;
	struct dc_link *link = NULL;
	u32 brightness;
	bool rc;

	amdgpu_dm_update_backlight_caps(dm);
	caps = dm->backlight_caps;

	link = (struct dc_link *)dm->backlight_link;

	brightness = convert_brightness(&caps, bd->props.brightness);
	// Change brightness based on AUX property
	if (caps.aux_support)
		return set_backlight_via_aux(link, brightness);

	rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);

	return rc ? 0 : 1;
}
static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
{
	struct amdgpu_display_manager *dm = bl_get_data(bd);
	int ret = dc_link_get_backlight_level(dm->backlight_link);

	if (ret == DC_ERROR_UNEXPECTED)
		return bd->props.brightness;
	return ret;
}

static const struct backlight_ops amdgpu_dm_backlight_ops = {
	.options = BL_CORE_SUSPENDRESUME,
	.get_brightness = amdgpu_dm_backlight_get_brightness,
	.update_status = amdgpu_dm_backlight_update_status,
};
static void
amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
{
	char bl_name[16];
	struct backlight_properties props = { 0 };

	amdgpu_dm_update_backlight_caps(dm);

	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
	props.brightness = AMDGPU_MAX_BL_LEVEL;
	props.type = BACKLIGHT_RAW;

	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
		 dm->adev->ddev->primary->index);

	dm->backlight_dev = backlight_device_register(bl_name,
			dm->adev->ddev->dev,
			dm,
			&amdgpu_dm_backlight_ops,
			&props);

	if (IS_ERR(dm->backlight_dev))
		DRM_ERROR("DM: Backlight registration failed!\n");
	else
		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
}
static int initialize_plane(struct amdgpu_display_manager *dm,
			    struct amdgpu_mode_info *mode_info, int plane_id,
			    enum drm_plane_type plane_type,
			    const struct dc_plane_cap *plane_cap)
{
	struct drm_plane *plane;
	unsigned long possible_crtcs;
	int ret = 0;

	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
	if (!plane) {
		DRM_ERROR("KMS: Failed to allocate plane\n");
		return -ENOMEM;
	}
	plane->type = plane_type;

	/*
	 * HACK: IGT tests expect that the primary plane for a CRTC
	 * can only have one possible CRTC. Only expose support for
	 * any CRTC if they're not going to be used as a primary plane
	 * for a CRTC - like overlay or underlay planes.
	 */
	possible_crtcs = 1 << plane_id;
	if (plane_id >= dm->dc->caps.max_streams)
		possible_crtcs = 0xff;

	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);

	if (ret) {
		DRM_ERROR("KMS: Failed to initialize plane\n");
		kfree(plane);
		return ret;
	}

	if (mode_info)
		mode_info->planes[plane_id] = plane;

	return ret;
}
static void register_backlight_device(struct amdgpu_display_manager *dm,
				      struct dc_link *link)
{
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
	    link->type != dc_connection_none) {
		/*
		 * Even if registration failed, we should continue with
		 * DM initialization because not having a backlight control
		 * is better than a black screen.
		 */
		amdgpu_dm_register_backlight_device(dm);

		if (dm->backlight_dev)
			dm->backlight_link = link;
	}
#endif
}
/*
 * In this architecture, the association
 * connector -> encoder -> crtc
 * is not really required. The crtc and connector will hold the
 * display_index as an abstraction to use with DAL component.
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
{
	struct amdgpu_display_manager *dm = &adev->dm;
	int32_t i;
	struct amdgpu_dm_connector *aconnector = NULL;
	struct amdgpu_encoder *aencoder = NULL;
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	uint32_t link_cnt;
	int32_t primary_planes;
	enum dc_connection_type new_connection_type = dc_connection_none;
	const struct dc_plane_cap *plane;

	link_cnt = dm->dc->caps.max_links;
	if (amdgpu_dm_mode_config_init(dm->adev)) {
		DRM_ERROR("DM: Failed to initialize mode config\n");
		return -EINVAL;
	}

	/* There is one primary plane per CRTC */
	primary_planes = dm->dc->caps.max_streams;
	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);

	/*
	 * Initialize primary planes, implicit planes for legacy IOCTLS.
	 * Order is reversed to match iteration order in atomic check.
	 */
	for (i = (primary_planes - 1); i >= 0; i--) {
		plane = &dm->dc->caps.planes[i];

		if (initialize_plane(dm, mode_info, i,
				     DRM_PLANE_TYPE_PRIMARY, plane)) {
			DRM_ERROR("KMS: Failed to initialize primary plane\n");
			goto fail;
		}
	}

	/*
	 * Initialize overlay planes, index starting after primary planes.
	 * These planes have a higher DRM index than the primary planes since
	 * they should be considered as having a higher z-order.
	 * Order is reversed to match iteration order in atomic check.
	 *
	 * Only support DCN for now, and only expose one so we don't encourage
	 * userspace to use up all the pipes.
	 */
	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];

		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
			continue;

		if (!plane->blends_with_above || !plane->blends_with_below)
			continue;

		if (!plane->pixel_format_support.argb8888)
			continue;

		if (initialize_plane(dm, NULL, primary_planes + i,
				     DRM_PLANE_TYPE_OVERLAY, plane)) {
			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
			goto fail;
		}

		/* Only create one overlay plane. */
		break;
	}

	for (i = 0; i < dm->dc->caps.max_streams; i++)
		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
			DRM_ERROR("KMS: Failed to initialize crtc\n");
			goto fail;
		}

	dm->display_indexes_num = dm->dc->caps.max_streams;

	/* loops over all connectors on the board */
	for (i = 0; i < link_cnt; i++) {
		struct dc_link *link = NULL;

		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
			DRM_ERROR(
				"KMS: Cannot support more than %d display indexes\n",
					AMDGPU_DM_MAX_DISPLAY_INDEX);
			continue;
		}

		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
		if (!aconnector)
			goto fail;

		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
		if (!aencoder)
			goto fail;

		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
			DRM_ERROR("KMS: Failed to initialize encoder\n");
			goto fail;
		}

		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
			DRM_ERROR("KMS: Failed to initialize connector\n");
			goto fail;
		}

		link = dc_get_link_at_index(dm->dc, i);

		if (!dc_link_detect_sink(link, &new_connection_type))
			DRM_ERROR("KMS: Failed to detect connector\n");

		if (aconnector->base.force && new_connection_type == dc_connection_none) {
			emulated_link_detect(link);
			amdgpu_dm_update_connector_after_detect(aconnector);

		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
			amdgpu_dm_update_connector_after_detect(aconnector);
			register_backlight_device(dm, link);
			if (amdgpu_dc_feature_mask & DC_PSR_MASK)
				amdgpu_dm_set_psr_caps(link);
		}
	}

	/* Software is initialized. Now we can register interrupt handlers. */
	switch (adev->asic_type) {
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		if (dce110_register_irq_handlers(dm->adev)) {
			DRM_ERROR("DM: Failed to initialize IRQ\n");
			goto fail;
		}
		break;
#if defined(CONFIG_DRM_AMD_DC_DCN)
	case CHIP_RAVEN:
	case CHIP_NAVI12:
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_RENOIR:
		if (dcn10_register_irq_handlers(dm->adev)) {
			DRM_ERROR("DM: Failed to initialize IRQ\n");
			goto fail;
		}
		break;
#endif
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		goto fail;
	}

	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
		dm->dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;

	/* No userspace support. */
	dm->dc->debug.disable_tri_buf = true;

	return 0;
fail:
	kfree(aencoder);
	kfree(aconnector);

	return -EINVAL;
}
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
{
	drm_mode_config_cleanup(dm->ddev);
	drm_atomic_private_obj_fini(&dm->atomic_obj);
	return;
}
/******************************************************************************
 * amdgpu_display_funcs functions
 *****************************************************************************/

/*
 * dm_bandwidth_update - program display watermarks
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate and program the display watermarks and line buffer allocation.
 */
static void dm_bandwidth_update(struct amdgpu_device *adev)
{
	/* TODO: implement later */
}

static const struct amdgpu_display_funcs dm_display_funcs = {
	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
	.backlight_set_level = NULL, /* never called for DC */
	.backlight_get_level = NULL, /* never called for DC */
	.hpd_sense = NULL,/* called unconditionally */
	.hpd_set_polarity = NULL, /* called unconditionally */
	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
	.page_flip_get_scanoutpos =
		dm_crtc_get_scanoutpos,/* called unconditionally */
	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
};
#if defined(CONFIG_DEBUG_KERNEL_DC)

static ssize_t s3_debug_store(struct device *device,
			      struct device_attribute *attr,
			      const char *buf,
			      size_t count)
{
	int ret;
	int s3_state;
	struct drm_device *drm_dev = dev_get_drvdata(device);
	struct amdgpu_device *adev = drm_dev->dev_private;

	ret = kstrtoint(buf, 0, &s3_state);

	if (ret == 0) {
		if (s3_state) {
			dm_resume(adev);
			drm_kms_helper_hotplug_event(adev->ddev);
		} else
			dm_suspend(adev);
	}

	return ret == 0 ? count : 0;
}

DEVICE_ATTR_WO(s3_debug);

#endif
static int dm_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	switch (adev->asic_type) {
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_KAVERI:
		adev->mode_info.num_crtc = 4;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 7;
		break;
	case CHIP_KABINI:
	case CHIP_MULLINS:
		adev->mode_info.num_crtc = 2;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_FIJI:
	case CHIP_TONGA:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 7;
		break;
	case CHIP_CARRIZO:
		adev->mode_info.num_crtc = 3;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 9;
		break;
	case CHIP_STONEY:
		adev->mode_info.num_crtc = 2;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 9;
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
		adev->mode_info.num_crtc = 5;
		adev->mode_info.num_hpd = 5;
		adev->mode_info.num_dig = 5;
		break;
	case CHIP_POLARIS10:
	case CHIP_VEGAM:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
#if defined(CONFIG_DRM_AMD_DC_DCN)
	case CHIP_RAVEN:
		adev->mode_info.num_crtc = 4;
		adev->mode_info.num_hpd = 4;
		adev->mode_info.num_dig = 4;
		break;
#endif
	case CHIP_NAVI10:
	case CHIP_NAVI12:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_NAVI14:
		adev->mode_info.num_crtc = 5;
		adev->mode_info.num_hpd = 5;
		adev->mode_info.num_dig = 5;
		break;
	case CHIP_RENOIR:
		adev->mode_info.num_crtc = 4;
		adev->mode_info.num_hpd = 4;
		adev->mode_info.num_dig = 4;
		break;
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	amdgpu_dm_set_irq_funcs(adev);

	if (adev->mode_info.funcs == NULL)
		adev->mode_info.funcs = &dm_display_funcs;

	/*
	 * Note: Do NOT change adev->audio_endpt_rreg and
	 * adev->audio_endpt_wreg because they are initialised in
	 * amdgpu_device_init()
	 */
#if defined(CONFIG_DEBUG_KERNEL_DC)
	device_create_file(
		adev->ddev->dev,
		&dev_attr_s3_debug);
#endif

	return 0;
}
static bool modeset_required(struct drm_crtc_state *crtc_state,
			     struct dc_stream_state *new_stream,
			     struct dc_stream_state *old_stream)
{
	if (!drm_atomic_crtc_needs_modeset(crtc_state))
		return false;

	if (!crtc_state->enable)
		return false;

	return crtc_state->active;
}

static bool modereset_required(struct drm_crtc_state *crtc_state)
{
	if (!drm_atomic_crtc_needs_modeset(crtc_state))
		return false;

	return !crtc_state->enable || !crtc_state->active;
}

static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
	kfree(encoder);
}

static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
	.destroy = amdgpu_dm_encoder_destroy,
};
static int fill_dc_scaling_info(const struct drm_plane_state *state,
				struct dc_scaling_info *scaling_info)
{
	int scale_w, scale_h;

	memset(scaling_info, 0, sizeof(*scaling_info));

	/* Source is fixed 16.16 but we ignore mantissa for now... */
	scaling_info->src_rect.x = state->src_x >> 16;
	scaling_info->src_rect.y = state->src_y >> 16;

	scaling_info->src_rect.width = state->src_w >> 16;
	if (scaling_info->src_rect.width == 0)
		return -EINVAL;

	scaling_info->src_rect.height = state->src_h >> 16;
	if (scaling_info->src_rect.height == 0)
		return -EINVAL;

	scaling_info->dst_rect.x = state->crtc_x;
	scaling_info->dst_rect.y = state->crtc_y;

	if (state->crtc_w == 0)
		return -EINVAL;

	scaling_info->dst_rect.width = state->crtc_w;

	if (state->crtc_h == 0)
		return -EINVAL;

	scaling_info->dst_rect.height = state->crtc_h;

	/* DRM doesn't specify clipping on destination output. */
	scaling_info->clip_rect = scaling_info->dst_rect;

	/* TODO: Validate scaling per-format with DC plane caps */
	scale_w = scaling_info->dst_rect.width * 1000 /
		  scaling_info->src_rect.width;

	if (scale_w < 250 || scale_w > 16000)
		return -EINVAL;

	scale_h = scaling_info->dst_rect.height * 1000 /
		  scaling_info->src_rect.height;

	if (scale_h < 250 || scale_h > 16000)
		return -EINVAL;

	/*
	 * The "scaling_quality" can be ignored for now, quality = 0 has DC
	 * assume reasonable defaults based on the format.
	 */

	return 0;
}
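/*
 * Illustrative note (not part of the driver): DRM plane source coordinates
 * are 16.16 fixed point, so a src_w of 1920 arrives as 1920 << 16 =
 * 125829120 and the shift above recovers the integer part. The 250/16000
 * bounds on the per-mille ratios translate to a supported scale range of
 * 0.25x to 16x in each direction.
 */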
static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
		       uint64_t *tiling_flags, bool *tmz_surface)
{
	struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
	int r = amdgpu_bo_reserve(rbo, false);

	if (unlikely(r)) {
		/* Don't show error message when returning -ERESTARTSYS */
		if (r != -ERESTARTSYS)
			DRM_ERROR("Unable to reserve buffer: %d\n", r);
		return r;
	}

	if (tiling_flags)
		amdgpu_bo_get_tiling_flags(rbo, tiling_flags);

	if (tmz_surface)
		*tmz_surface = amdgpu_bo_encrypted(rbo);

	amdgpu_bo_unreserve(rbo);

	return r;
}

static inline uint64_t get_dcc_address(uint64_t address, uint64_t tiling_flags)
{
	uint32_t offset = AMDGPU_TILING_GET(tiling_flags, DCC_OFFSET_256B);

	return offset ? (address + offset * 256) : 0;
}
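/*
 * Illustrative note (not part of the driver): DCC_OFFSET_256B is stored in
 * units of 256 bytes, so an encoded offset of 0x400 places the DCC metadata
 * at address + 0x400 * 256 = address + 256 KiB, while an encoded offset of
 * zero means the framebuffer carries no DCC metadata at all.
 */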
static int
fill_plane_dcc_attributes(struct amdgpu_device *adev,
			  const struct amdgpu_framebuffer *afb,
			  const enum surface_pixel_format format,
			  const enum dc_rotation_angle rotation,
			  const struct plane_size *plane_size,
			  const union dc_tiling_info *tiling_info,
			  const uint64_t info,
			  struct dc_plane_dcc_param *dcc,
			  struct dc_plane_address *address,
			  bool force_disable_dcc)
{
	struct dc *dc = adev->dm.dc;
	struct dc_dcc_surface_param input;
	struct dc_surface_dcc_cap output;
	uint32_t offset = AMDGPU_TILING_GET(info, DCC_OFFSET_256B);
	uint32_t i64b = AMDGPU_TILING_GET(info, DCC_INDEPENDENT_64B) != 0;
	uint64_t dcc_address;

	memset(&input, 0, sizeof(input));
	memset(&output, 0, sizeof(output));

	if (force_disable_dcc)
		return 0;

	if (!offset)
		return 0;

	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
		return 0;

	if (!dc->cap_funcs.get_dcc_compression_cap)
		return -EINVAL;

	input.format = format;
	input.surface_size.width = plane_size->surface_size.width;
	input.surface_size.height = plane_size->surface_size.height;
	input.swizzle_mode = tiling_info->gfx9.swizzle;

	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
		input.scan = SCAN_DIRECTION_HORIZONTAL;
	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
		input.scan = SCAN_DIRECTION_VERTICAL;

	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
		return -EINVAL;

	if (!output.capable)
		return -EINVAL;

	if (i64b == 0 && output.grph.rgb.independent_64b_blks != 0)
		return -EINVAL;

	dcc->enable = 1;
	dcc->meta_pitch =
		AMDGPU_TILING_GET(info, DCC_PITCH_MAX) + 1;
	dcc->independent_64b_blks = i64b;

	dcc_address = get_dcc_address(afb->address, info);
	address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
	address->grph.meta_addr.high_part = upper_32_bits(dcc_address);

	return 0;
}
*adev
,
3409 const struct amdgpu_framebuffer
*afb
,
3410 const enum surface_pixel_format format
,
3411 const enum dc_rotation_angle rotation
,
3412 const uint64_t tiling_flags
,
3413 union dc_tiling_info
*tiling_info
,
3414 struct plane_size
*plane_size
,
3415 struct dc_plane_dcc_param
*dcc
,
3416 struct dc_plane_address
*address
,
3418 bool force_disable_dcc
)
3420 const struct drm_framebuffer
*fb
= &afb
->base
;
3423 memset(tiling_info
, 0, sizeof(*tiling_info
));
3424 memset(plane_size
, 0, sizeof(*plane_size
));
3425 memset(dcc
, 0, sizeof(*dcc
));
3426 memset(address
, 0, sizeof(*address
));
3428 address
->tmz_surface
= tmz_surface
;
3430 if (format
< SURFACE_PIXEL_FORMAT_VIDEO_BEGIN
) {
3431 plane_size
->surface_size
.x
= 0;
3432 plane_size
->surface_size
.y
= 0;
3433 plane_size
->surface_size
.width
= fb
->width
;
3434 plane_size
->surface_size
.height
= fb
->height
;
3435 plane_size
->surface_pitch
=
3436 fb
->pitches
[0] / fb
->format
->cpp
[0];
3438 address
->type
= PLN_ADDR_TYPE_GRAPHICS
;
3439 address
->grph
.addr
.low_part
= lower_32_bits(afb
->address
);
3440 address
->grph
.addr
.high_part
= upper_32_bits(afb
->address
);
3441 } else if (format
< SURFACE_PIXEL_FORMAT_INVALID
) {
3442 uint64_t chroma_addr
= afb
->address
+ fb
->offsets
[1];
3444 plane_size
->surface_size
.x
= 0;
3445 plane_size
->surface_size
.y
= 0;
3446 plane_size
->surface_size
.width
= fb
->width
;
3447 plane_size
->surface_size
.height
= fb
->height
;
3448 plane_size
->surface_pitch
=
3449 fb
->pitches
[0] / fb
->format
->cpp
[0];
3451 plane_size
->chroma_size
.x
= 0;
3452 plane_size
->chroma_size
.y
= 0;
3453 /* TODO: set these based on surface format */
3454 plane_size
->chroma_size
.width
= fb
->width
/ 2;
3455 plane_size
->chroma_size
.height
= fb
->height
/ 2;
3457 plane_size
->chroma_pitch
=
3458 fb
->pitches
[1] / fb
->format
->cpp
[1];
3460 address
->type
= PLN_ADDR_TYPE_VIDEO_PROGRESSIVE
;
3461 address
->video_progressive
.luma_addr
.low_part
=
3462 lower_32_bits(afb
->address
);
3463 address
->video_progressive
.luma_addr
.high_part
=
3464 upper_32_bits(afb
->address
);
3465 address
->video_progressive
.chroma_addr
.low_part
=
3466 lower_32_bits(chroma_addr
);
3467 address
->video_progressive
.chroma_addr
.high_part
=
3468 upper_32_bits(chroma_addr
);
3471 /* Fill GFX8 params */
3472 if (AMDGPU_TILING_GET(tiling_flags
, ARRAY_MODE
) == DC_ARRAY_2D_TILED_THIN1
) {
3473 unsigned int bankw
, bankh
, mtaspect
, tile_split
, num_banks
;
3475 bankw
= AMDGPU_TILING_GET(tiling_flags
, BANK_WIDTH
);
3476 bankh
= AMDGPU_TILING_GET(tiling_flags
, BANK_HEIGHT
);
3477 mtaspect
= AMDGPU_TILING_GET(tiling_flags
, MACRO_TILE_ASPECT
);
3478 tile_split
= AMDGPU_TILING_GET(tiling_flags
, TILE_SPLIT
);
3479 num_banks
= AMDGPU_TILING_GET(tiling_flags
, NUM_BANKS
);
3481 /* XXX fix me for VI */
3482 tiling_info
->gfx8
.num_banks
= num_banks
;
3483 tiling_info
->gfx8
.array_mode
=
3484 DC_ARRAY_2D_TILED_THIN1
;
3485 tiling_info
->gfx8
.tile_split
= tile_split
;
3486 tiling_info
->gfx8
.bank_width
= bankw
;
3487 tiling_info
->gfx8
.bank_height
= bankh
;
3488 tiling_info
->gfx8
.tile_aspect
= mtaspect
;
3489 tiling_info
->gfx8
.tile_mode
=
3490 DC_ADDR_SURF_MICRO_TILING_DISPLAY
;
3491 } else if (AMDGPU_TILING_GET(tiling_flags
, ARRAY_MODE
)
3492 == DC_ARRAY_1D_TILED_THIN1
) {
3493 tiling_info
->gfx8
.array_mode
= DC_ARRAY_1D_TILED_THIN1
;
3496 tiling_info
->gfx8
.pipe_config
=
3497 AMDGPU_TILING_GET(tiling_flags
, PIPE_CONFIG
);
3499 if (adev
->asic_type
== CHIP_VEGA10
||
3500 adev
->asic_type
== CHIP_VEGA12
||
3501 adev
->asic_type
== CHIP_VEGA20
||
3502 adev
->asic_type
== CHIP_NAVI10
||
3503 adev
->asic_type
== CHIP_NAVI14
||
3504 adev
->asic_type
== CHIP_NAVI12
||
3505 adev
->asic_type
== CHIP_RENOIR
||
3506 adev
->asic_type
== CHIP_RAVEN
) {
3507 /* Fill GFX9 params */
3508 tiling_info
->gfx9
.num_pipes
=
3509 adev
->gfx
.config
.gb_addr_config_fields
.num_pipes
;
3510 tiling_info
->gfx9
.num_banks
=
3511 adev
->gfx
.config
.gb_addr_config_fields
.num_banks
;
3512 tiling_info
->gfx9
.pipe_interleave
=
3513 adev
->gfx
.config
.gb_addr_config_fields
.pipe_interleave_size
;
3514 tiling_info
->gfx9
.num_shader_engines
=
3515 adev
->gfx
.config
.gb_addr_config_fields
.num_se
;
3516 tiling_info
->gfx9
.max_compressed_frags
=
3517 adev
->gfx
.config
.gb_addr_config_fields
.max_compress_frags
;
3518 tiling_info
->gfx9
.num_rb_per_se
=
3519 adev
->gfx
.config
.gb_addr_config_fields
.num_rb_per_se
;
3520 tiling_info
->gfx9
.swizzle
=
3521 AMDGPU_TILING_GET(tiling_flags
, SWIZZLE_MODE
);
3522 tiling_info
->gfx9
.shaderEnable
= 1;
3524 ret
= fill_plane_dcc_attributes(adev
, afb
, format
, rotation
,
3525 plane_size
, tiling_info
,
3526 tiling_flags
, dcc
, address
,
static void
fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
			       bool *per_pixel_alpha, bool *global_alpha,
			       int *global_alpha_value)
{
	*per_pixel_alpha = false;
	*global_alpha = false;
	*global_alpha_value = 0xff;

	if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
		return;

	if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
		static const uint32_t alpha_formats[] = {
			DRM_FORMAT_ARGB8888,
			DRM_FORMAT_RGBA8888,
			DRM_FORMAT_ABGR8888,
		};
		uint32_t format = plane_state->fb->format->format;
		unsigned int i;

		for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
			if (format == alpha_formats[i]) {
				*per_pixel_alpha = true;
				break;
			}
		}
	}

	if (plane_state->alpha < 0xffff) {
		*global_alpha = true;
		*global_alpha_value = plane_state->alpha >> 8;
	}
}
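/*
 * Illustrative note (not part of the driver): DRM stores plane alpha as
 * 16-bit (0-0xffff) while DC takes 8-bit, so the ">> 8" above maps e.g.
 * 0x8080 to 0x80. Any value below the fully opaque 0xffff enables global
 * alpha blending for the plane.
 */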
static int
fill_plane_color_attributes(const struct drm_plane_state *plane_state,
			    const enum surface_pixel_format format,
			    enum dc_color_space *color_space)
{
	bool full_range;

	*color_space = COLOR_SPACE_SRGB;

	/* DRM color properties only affect non-RGB formats. */
	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
		return 0;

	full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);

	switch (plane_state->color_encoding) {
	case DRM_COLOR_YCBCR_BT601:
		if (full_range)
			*color_space = COLOR_SPACE_YCBCR601;
		else
			*color_space = COLOR_SPACE_YCBCR601_LIMITED;
		break;

	case DRM_COLOR_YCBCR_BT709:
		if (full_range)
			*color_space = COLOR_SPACE_YCBCR709;
		else
			*color_space = COLOR_SPACE_YCBCR709_LIMITED;
		break;

	case DRM_COLOR_YCBCR_BT2020:
		if (full_range)
			*color_space = COLOR_SPACE_2020_YCBCR;
		else
			return -EINVAL;
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
static int
fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
			    const struct drm_plane_state *plane_state,
			    const uint64_t tiling_flags,
			    struct dc_plane_info *plane_info,
			    struct dc_plane_address *address,
			    bool tmz_surface,
			    bool force_disable_dcc)
{
	const struct drm_framebuffer *fb = plane_state->fb;
	const struct amdgpu_framebuffer *afb =
		to_amdgpu_framebuffer(plane_state->fb);
	struct drm_format_name_buf format_name;
	int ret;

	memset(plane_info, 0, sizeof(*plane_info));

	switch (fb->format->format) {
	case DRM_FORMAT_C8:
		plane_info->format =
			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
		break;
	case DRM_FORMAT_RGB565:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
		break;
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
		break;
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_ARGB2101010:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
		break;
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_ABGR2101010:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
		break;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
		break;
	case DRM_FORMAT_NV21:
		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
		break;
	case DRM_FORMAT_NV12:
		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
		break;
	case DRM_FORMAT_P010:
		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
		break;
	default:
		DRM_ERROR(
			"Unsupported screen format %s\n",
			drm_get_format_name(fb->format->format, &format_name));
		return -EINVAL;
	}

	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
	case DRM_MODE_ROTATE_0:
		plane_info->rotation = ROTATION_ANGLE_0;
		break;
	case DRM_MODE_ROTATE_90:
		plane_info->rotation = ROTATION_ANGLE_90;
		break;
	case DRM_MODE_ROTATE_180:
		plane_info->rotation = ROTATION_ANGLE_180;
		break;
	case DRM_MODE_ROTATE_270:
		plane_info->rotation = ROTATION_ANGLE_270;
		break;
	default:
		plane_info->rotation = ROTATION_ANGLE_0;
		break;
	}

	plane_info->visible = true;
	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;

	plane_info->layer_index = 0;

	ret = fill_plane_color_attributes(plane_state, plane_info->format,
					  &plane_info->color_space);
	if (ret)
		return ret;

	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
					   plane_info->rotation, tiling_flags,
					   &plane_info->tiling_info,
					   &plane_info->plane_size,
					   &plane_info->dcc, address, tmz_surface,
					   force_disable_dcc);
	if (ret)
		return ret;

	fill_blending_from_plane_state(
		plane_state, &plane_info->per_pixel_alpha,
		&plane_info->global_alpha, &plane_info->global_alpha_value);

	return 0;
}
static int fill_dc_plane_attributes(struct amdgpu_device *adev,
				    struct dc_plane_state *dc_plane_state,
				    struct drm_plane_state *plane_state,
				    struct drm_crtc_state *crtc_state)
{
	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
	const struct amdgpu_framebuffer *amdgpu_fb =
		to_amdgpu_framebuffer(plane_state->fb);
	struct dc_scaling_info scaling_info;
	struct dc_plane_info plane_info;
	uint64_t tiling_flags;
	int ret;
	bool tmz_surface = false;
	bool force_disable_dcc = false;

	ret = fill_dc_scaling_info(plane_state, &scaling_info);
	if (ret)
		return ret;

	dc_plane_state->src_rect = scaling_info.src_rect;
	dc_plane_state->dst_rect = scaling_info.dst_rect;
	dc_plane_state->clip_rect = scaling_info.clip_rect;
	dc_plane_state->scaling_quality = scaling_info.scaling_quality;

	ret = get_fb_info(amdgpu_fb, &tiling_flags, &tmz_surface);
	if (ret)
		return ret;

	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
	ret = fill_dc_plane_info_and_addr(adev, plane_state, tiling_flags,
					  &plane_info,
					  &dc_plane_state->address,
					  tmz_surface,
					  force_disable_dcc);
	if (ret)
		return ret;

	dc_plane_state->format = plane_info.format;
	dc_plane_state->color_space = plane_info.color_space;
	dc_plane_state->plane_size = plane_info.plane_size;
	dc_plane_state->rotation = plane_info.rotation;
	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
	dc_plane_state->stereo_format = plane_info.stereo_format;
	dc_plane_state->tiling_info = plane_info.tiling_info;
	dc_plane_state->visible = plane_info.visible;
	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
	dc_plane_state->global_alpha = plane_info.global_alpha;
	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
	dc_plane_state->dcc = plane_info.dcc;
	dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0

	/*
	 * Always set input transfer function, since plane state is refreshed
	 * every time.
	 */
	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
	if (ret)
		return ret;

	return 0;
}
static void update_stream_scaling_settings(const struct drm_display_mode *mode,
					   const struct dm_connector_state *dm_state,
					   struct dc_stream_state *stream)
{
	enum amdgpu_rmx_type rmx_type;

	struct rect src = { 0 }; /* viewport in composition space*/
	struct rect dst = { 0 }; /* stream addressable area */

	/* no mode. nothing to be done */
	if (!mode)
		return;

	/* Full screen scaling by default */
	src.width = mode->hdisplay;
	src.height = mode->vdisplay;
	dst.width = stream->timing.h_addressable;
	dst.height = stream->timing.v_addressable;

	if (dm_state) {
		rmx_type = dm_state->scaling;
		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
			if (src.width * dst.height <
					src.height * dst.width) {
				/* height needs less upscaling/more downscaling */
				dst.width = src.width *
						dst.height / src.height;
			} else {
				/* width needs less upscaling/more downscaling */
				dst.height = src.height *
						dst.width / src.width;
			}
		} else if (rmx_type == RMX_CENTER) {
			dst = src;
		}

		dst.x = (stream->timing.h_addressable - dst.width) / 2;
		dst.y = (stream->timing.v_addressable - dst.height) / 2;

		if (dm_state->underscan_enable) {
			dst.x += dm_state->underscan_hborder / 2;
			dst.y += dm_state->underscan_vborder / 2;
			dst.width -= dm_state->underscan_hborder;
			dst.height -= dm_state->underscan_vborder;
		}
	}

	stream->src = src;
	stream->dst = dst;

	DRM_DEBUG_DRIVER("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
			dst.x, dst.y, dst.width, dst.height);
}
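/*
 * Illustrative worked example (not part of the driver): for a 1280x1024
 * viewport on a 1920x1080 stream with RMX_ASPECT, 1280*1080 < 1024*1920,
 * so the destination width is reduced to 1280 * 1080 / 1024 = 1350 and the
 * image is centered at x = (1920 - 1350) / 2 = 285, pillarboxing the output
 * instead of stretching it.
 */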
static enum dc_color_depth
convert_color_depth_from_display_info(const struct drm_connector *connector,
				      const struct drm_connector_state *state,
				      bool is_y420)
{
	uint8_t bpc;

	if (is_y420) {
		bpc = 8;

		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
			bpc = 16;
		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
			bpc = 12;
		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
			bpc = 10;
	} else {
		bpc = (uint8_t)connector->display_info.bpc;
		/* Assume 8 bpc by default if no bpc is specified. */
		bpc = bpc ? bpc : 8;
	}

	if (!state)
		state = connector->state;

	if (state) {
		/*
		 * Cap display bpc based on the user requested value.
		 *
		 * The value for state->max_bpc may not correctly updated
		 * depending on when the connector gets added to the state
		 * or if this was called outside of atomic check, so it
		 * can't be used directly.
		 */
		bpc = min(bpc, state->max_requested_bpc);

		/* Round down to the nearest even number. */
		bpc = bpc - (bpc & 1);
	}

	switch (bpc) {
	case 0:
		/*
		 * Temporary Work around, DRM doesn't parse color depth for
		 * EDID revision before 1.4
		 * TODO: Fix edid parsing
		 */
		return COLOR_DEPTH_888;
	case 6:
		return COLOR_DEPTH_666;
	case 8:
		return COLOR_DEPTH_888;
	case 10:
		return COLOR_DEPTH_101010;
	case 12:
		return COLOR_DEPTH_121212;
	case 14:
		return COLOR_DEPTH_141414;
	case 16:
		return COLOR_DEPTH_161616;
	default:
		return COLOR_DEPTH_UNDEFINED;
	}
}
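/*
 * Illustrative note (not part of the driver): "bpc - (bpc & 1)" rounds odd
 * depths down to even ones, so a sink advertising 7 bpc is driven at 6 bpc
 * and an 11 bpc request becomes 10 bpc; every supported dc_color_depth
 * corresponds to an even bits-per-component value.
 */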
static enum dc_aspect_ratio
get_aspect_ratio(const struct drm_display_mode *mode_in)
{
	/* 1-1 mapping, since both enums follow the HDMI spec. */
	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
}

static enum dc_color_space
get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
{
	enum dc_color_space color_space = COLOR_SPACE_SRGB;

	switch (dc_crtc_timing->pixel_encoding) {
	case PIXEL_ENCODING_YCBCR422:
	case PIXEL_ENCODING_YCBCR444:
	case PIXEL_ENCODING_YCBCR420:
	{
		/*
		 * 27030khz is the separation point between HDTV and SDTV
		 * according to HDMI spec, we use YCbCr709 and YCbCr601
		 * respectively
		 */
		if (dc_crtc_timing->pix_clk_100hz > 270300) {
			if (dc_crtc_timing->flags.Y_ONLY)
				color_space =
					COLOR_SPACE_YCBCR709_LIMITED;
			else
				color_space = COLOR_SPACE_YCBCR709;
		} else {
			if (dc_crtc_timing->flags.Y_ONLY)
				color_space =
					COLOR_SPACE_YCBCR601_LIMITED;
			else
				color_space = COLOR_SPACE_YCBCR601;
		}
	}
	break;
	case PIXEL_ENCODING_RGB:
		color_space = COLOR_SPACE_SRGB;
		break;

	default:
		WARN_ON(1);
		break;
	}

	return color_space;
}
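/*
 * Illustrative note (not part of the driver): pix_clk_100hz is in units of
 * 100 Hz, so the 270300 threshold above is 27.03 MHz. A 480p mode at 27 MHz
 * therefore gets BT.601 treatment while 720p at 74.25 MHz gets BT.709.
 */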
static bool adjust_colour_depth_from_display_info(
	struct dc_crtc_timing *timing_out,
	const struct drm_display_info *info)
{
	enum dc_color_depth depth = timing_out->display_color_depth;
	int normalized_clk;

	do {
		normalized_clk = timing_out->pix_clk_100hz / 10;
		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
			normalized_clk /= 2;
		/* Adjusting pix clock following on HDMI spec based on colour depth */
		switch (depth) {
		case COLOR_DEPTH_888:
			break;
		case COLOR_DEPTH_101010:
			normalized_clk = (normalized_clk * 30) / 24;
			break;
		case COLOR_DEPTH_121212:
			normalized_clk = (normalized_clk * 36) / 24;
			break;
		case COLOR_DEPTH_161616:
			normalized_clk = (normalized_clk * 48) / 24;
			break;
		default:
			/* The above depths are the only ones valid for HDMI. */
			return false;
		}
		if (normalized_clk <= info->max_tmds_clock) {
			timing_out->display_color_depth = depth;
			return true;
		}
	} while (--depth > COLOR_DEPTH_666);
	return false;
}
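/*
 * Illustrative worked example (not part of the driver): 4K60 RGB has a
 * 594 MHz pixel clock; at 10 bpc the normalized clock above becomes
 * 594 * 30 / 24 = 742.5 MHz, which exceeds a 600 MHz max_tmds_clock, so the
 * loop steps the depth back down to 8 bpc (594 MHz) for that mode.
 */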
static void fill_stream_properties_from_drm_display_mode(
	struct dc_stream_state *stream,
	const struct drm_display_mode *mode_in,
	const struct drm_connector *connector,
	const struct drm_connector_state *connector_state,
	const struct dc_stream_state *old_stream)
{
	struct dc_crtc_timing *timing_out = &stream->timing;
	const struct drm_display_info *info = &connector->display_info;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct hdmi_vendor_infoframe hv_frame;
	struct hdmi_avi_infoframe avi_frame;

	memset(&hv_frame, 0, sizeof(hv_frame));
	memset(&avi_frame, 0, sizeof(avi_frame));

	timing_out->h_border_left = 0;
	timing_out->h_border_right = 0;
	timing_out->v_border_top = 0;
	timing_out->v_border_bottom = 0;
	/* TODO: un-hardcode */
	if (drm_mode_is_420_only(info, mode_in)
			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
	else if (drm_mode_is_420_also(info, mode_in)
			&& aconnector->force_yuv420_output)
		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
	else
		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;

	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
	timing_out->display_color_depth = convert_color_depth_from_display_info(
		connector, connector_state,
		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420));
	timing_out->scan_type = SCANNING_TYPE_NODATA;
	timing_out->hdmi_vic = 0;

	if (old_stream) {
		timing_out->vic = old_stream->timing.vic;
		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
	} else {
		timing_out->vic = drm_match_cea_mode(mode_in);
		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
	}

	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
		timing_out->vic = avi_frame.video_code;
		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
		timing_out->hdmi_vic = hv_frame.vic;
	}

	timing_out->h_addressable = mode_in->crtc_hdisplay;
	timing_out->h_total = mode_in->crtc_htotal;
	timing_out->h_sync_width =
		mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
	timing_out->h_front_porch =
		mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
	timing_out->v_total = mode_in->crtc_vtotal;
	timing_out->v_addressable = mode_in->crtc_vdisplay;
	timing_out->v_front_porch =
		mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
	timing_out->v_sync_width =
		mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
	timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
	timing_out->aspect_ratio = get_aspect_ratio(mode_in);

	stream->output_color_space = get_output_color_space(timing_out);

	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
		    drm_mode_is_420_also(info, mode_in) &&
		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
			adjust_colour_depth_from_display_info(timing_out, info);
		}
	}
}
static void fill_audio_info(struct audio_info *audio_info,
			    const struct drm_connector *drm_connector,
			    const struct dc_sink *dc_sink)
{
	int i = 0;
	int cea_revision = 0;
	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;

	audio_info->manufacture_id = edid_caps->manufacturer_id;
	audio_info->product_id = edid_caps->product_id;

	cea_revision = drm_connector->display_info.cea_rev;

	strscpy(audio_info->display_name,
		edid_caps->display_name,
		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);

	if (cea_revision >= 3) {
		audio_info->mode_count = edid_caps->audio_mode_count;

		for (i = 0; i < audio_info->mode_count; ++i) {
			audio_info->modes[i].format_code =
					(enum audio_format_code)
					(edid_caps->audio_modes[i].format_code);
			audio_info->modes[i].channel_count =
					edid_caps->audio_modes[i].channel_count;
			audio_info->modes[i].sample_rates.all =
					edid_caps->audio_modes[i].sample_rate;
			audio_info->modes[i].sample_size =
					edid_caps->audio_modes[i].sample_size;
		}
	}

	audio_info->flags.all = edid_caps->speaker_flags;

	/* TODO: We only check for the progressive mode, check for interlace mode too */
	if (drm_connector->latency_present[0]) {
		audio_info->video_latency = drm_connector->video_latency[0];
		audio_info->audio_latency = drm_connector->audio_latency[0];
	}

	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
}
static void
copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
				      struct drm_display_mode *dst_mode)
{
	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
	dst_mode->crtc_clock = src_mode->crtc_clock;
	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
	dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
	dst_mode->crtc_htotal = src_mode->crtc_htotal;
	dst_mode->crtc_hskew = src_mode->crtc_hskew;
	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
}
4139 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode
*drm_mode
,
4140 const struct drm_display_mode
*native_mode
,
4143 if (scale_enabled
) {
4144 copy_crtc_timing_for_drm_display_mode(native_mode
, drm_mode
);
4145 } else if (native_mode
->clock
== drm_mode
->clock
&&
4146 native_mode
->htotal
== drm_mode
->htotal
&&
4147 native_mode
->vtotal
== drm_mode
->vtotal
) {
4148 copy_crtc_timing_for_drm_display_mode(native_mode
, drm_mode
);
4150 /* no scaling nor amdgpu inserted, no need to patch */
4154 static struct dc_sink
*
4155 create_fake_sink(struct amdgpu_dm_connector
*aconnector
)
4157 struct dc_sink_init_data sink_init_data
= { 0 };
4158 struct dc_sink
*sink
= NULL
;
4159 sink_init_data
.link
= aconnector
->dc_link
;
4160 sink_init_data
.sink_signal
= aconnector
->dc_link
->connector_signal
;
4162 sink
= dc_sink_create(&sink_init_data
);
4164 DRM_ERROR("Failed to create sink!\n");
4167 sink
->sink_signal
= SIGNAL_TYPE_VIRTUAL
;
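/*
 * The fake sink created above stands in when a connector has no real
 * dc_sink (e.g. a forced or headless connector), so stream creation in
 * create_stream_for_sink() can still proceed against a virtual signal.
 */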
static void set_multisync_trigger_params(
		struct dc_stream_state *stream)
{
	if (stream->triggered_crtc_reset.enabled) {
		stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
	}
}

static void set_master_stream(struct dc_stream_state *stream_set[],
			      int stream_count)
{
	int j, highest_rfr = 0, master_stream = 0;

	for (j = 0; j < stream_count; j++) {
		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
			int refresh_rate = 0;

			refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
				(stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
			if (refresh_rate > highest_rfr) {
				highest_rfr = refresh_rate;
				master_stream = j;
			}
		}
	}
	for (j = 0; j < stream_count; j++) {
		if (stream_set[j])
			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
	}
}
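/*
 * Worked example for the refresh-rate math above: a 1080p60 stream has
 * pix_clk_100hz = 1485000, h_total = 2200 and v_total = 1125, so
 * (1485000 * 100) / (2200 * 1125) = 148500000 / 2475000 = 60 Hz. The
 * stream with the highest such rate becomes the sync master.
 */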
static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
{
	int i = 0;

	if (context->stream_count < 2)
		return;
	for (i = 0; i < context->stream_count; i++) {
		if (!context->streams[i])
			continue;
		/*
		 * TODO: add a function to read AMD VSDB bits and set
		 * crtc_sync_master.multi_sync_enabled flag
		 * For now it's set to false
		 */
		set_multisync_trigger_params(context->streams[i]);
	}
	set_master_stream(context->streams, context->stream_count);
}
static struct dc_stream_state *
create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
		       const struct drm_display_mode *drm_mode,
		       const struct dm_connector_state *dm_state,
		       const struct dc_stream_state *old_stream)
{
	struct drm_display_mode *preferred_mode = NULL;
	struct drm_connector *drm_connector;
	const struct drm_connector_state *con_state =
		dm_state ? &dm_state->base : NULL;
	struct dc_stream_state *stream = NULL;
	struct drm_display_mode mode = *drm_mode;
	bool native_mode_found = false;
	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
	int mode_refresh;
	int preferred_refresh = 0;
#if defined(CONFIG_DRM_AMD_DC_DCN)
	struct dsc_dec_dpcd_caps dsc_caps;
#endif
	uint32_t link_bandwidth_kbps;

	struct dc_sink *sink = NULL;
	if (aconnector == NULL) {
		DRM_ERROR("aconnector is NULL!\n");
		return stream;
	}

	drm_connector = &aconnector->base;

	if (!aconnector->dc_sink) {
		sink = create_fake_sink(aconnector);
		if (!sink)
			return stream;
	} else {
		sink = aconnector->dc_sink;
		dc_sink_retain(sink);
	}

	stream = dc_create_stream_for_sink(sink);

	if (stream == NULL) {
		DRM_ERROR("Failed to create stream for sink!\n");
		goto finish;
	}

	stream->dm_stream_context = aconnector;

	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;

	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
		/* Search for preferred mode */
		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
			native_mode_found = true;
			break;
		}
	}
	if (!native_mode_found)
		preferred_mode = list_first_entry_or_null(
				&aconnector->base.modes,
				struct drm_display_mode,
				head);

	mode_refresh = drm_mode_vrefresh(&mode);

	if (preferred_mode == NULL) {
		/*
		 * This may not be an error, the use case is when we have no
		 * usermode calls to reset and set mode upon hotplug. In this
		 * case, we call set mode ourselves to restore the previous mode
		 * and the modelist may not be filled in in time.
		 */
		DRM_DEBUG_DRIVER("No preferred mode found\n");
	} else {
		decide_crtc_timing_for_drm_display_mode(
				&mode, preferred_mode,
				dm_state ? (dm_state->scaling != RMX_OFF) : false);
		preferred_refresh = drm_mode_vrefresh(preferred_mode);
	}

	if (!dm_state)
		drm_mode_set_crtcinfo(&mode, 0);

	/*
	 * If scaling is enabled and refresh rate didn't change
	 * we copy the vic and polarities of the old timings
	 */
	if (!scale || mode_refresh != preferred_refresh)
		fill_stream_properties_from_drm_display_mode(stream,
			&mode, &aconnector->base, con_state, NULL);
	else
		fill_stream_properties_from_drm_display_mode(stream,
			&mode, &aconnector->base, con_state, old_stream);

	stream->timing.flags.DSC = 0;

	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
#if defined(CONFIG_DRM_AMD_DC_DCN)
		dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_ext_caps.raw,
				      &dsc_caps);
#endif
		link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
							     dc_link_get_link_cap(aconnector->dc_link));

#if defined(CONFIG_DRM_AMD_DC_DCN)
		if (dsc_caps.is_dsc_supported)
			if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
						  &dsc_caps,
						  aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
						  link_bandwidth_kbps,
						  &stream->timing,
						  &stream->timing.dsc_cfg))
				stream->timing.flags.DSC = 1;
#endif
	}

	update_stream_scaling_settings(&mode, dm_state, stream);

	fill_audio_info(
		&stream->audio_info,
		drm_connector,
		sink);

	update_stream_signal(stream, sink);

	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket, false, false);
	if (stream->link->psr_settings.psr_feature_enabled) {
		struct dc *core_dc = stream->link->ctx->dc;

		if (dc_is_dmcu_initialized(core_dc)) {
			//
			// should decide stream support vsc sdp colorimetry capability
			// before building vsc info packet
			//
			stream->use_vsc_sdp_for_colorimetry = false;
			if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
				stream->use_vsc_sdp_for_colorimetry =
					aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
			} else {
				if (stream->link->dpcd_caps.dpcd_rev.raw >= 0x14 &&
				    stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED) {
					stream->use_vsc_sdp_for_colorimetry = true;
				}
			}
			mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
		}
	}
finish:
	dc_sink_release(sink);

	return stream;
}
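/*
 * Typical usage, as in amdgpu_dm_connector_mode_valid() below: build a
 * throwaway stream for a candidate mode, validate it with DC, then drop
 * the reference.
 *
 *	stream = create_stream_for_sink(aconnector, mode, NULL, NULL);
 *	if (stream) {
 *		dc_result = dc_validate_stream(adev->dm.dc, stream);
 *		dc_stream_release(stream);
 *	}
 *
 * Callers own the returned reference and must dc_stream_release() it.
 */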
static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
{
	drm_crtc_cleanup(crtc);
	kfree(crtc);
}

static void dm_crtc_destroy_state(struct drm_crtc *crtc,
				  struct drm_crtc_state *state)
{
	struct dm_crtc_state *cur = to_dm_crtc_state(state);

	/* TODO: destroy the dc_stream here once the stream object is flattened */
	if (cur->stream)
		dc_stream_release(cur->stream);

	__drm_atomic_helper_crtc_destroy_state(state);

	kfree(state);
}

static void dm_crtc_reset_state(struct drm_crtc *crtc)
{
	struct dm_crtc_state *state;

	if (crtc->state)
		dm_crtc_destroy_state(crtc, crtc->state);

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (WARN_ON(!state))
		return;

	crtc->state = &state->base;
	crtc->state->crtc = crtc;
}

static struct drm_crtc_state *
dm_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct dm_crtc_state *state, *cur;

	cur = to_dm_crtc_state(crtc->state);

	if (WARN_ON(!crtc->state))
		return NULL;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);

	if (cur->stream) {
		state->stream = cur->stream;
		dc_stream_retain(state->stream);
	}

	state->active_planes = cur->active_planes;
	state->interrupts_enabled = cur->interrupts_enabled;
	state->vrr_params = cur->vrr_params;
	state->vrr_infopacket = cur->vrr_infopacket;
	state->abm_level = cur->abm_level;
	state->vrr_supported = cur->vrr_supported;
	state->freesync_config = cur->freesync_config;
	state->crc_src = cur->crc_src;
	state->cm_has_degamma = cur->cm_has_degamma;
	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;

	/* TODO: duplicate the dc_stream here once the stream object is flattened */

	return &state->base;
}
static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
{
	enum dc_irq_source irq_source;
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
	struct amdgpu_device *adev = crtc->dev->dev_private;
	int rc;

	/* Do not set vupdate for DCN hardware */
	if (adev->family > AMDGPU_FAMILY_AI)
		return 0;

	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;

	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;

	DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
			 acrtc->crtc_id, enable ? "en" : "dis", rc);
	return rc;
}

static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
{
	enum dc_irq_source irq_source;
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
	struct amdgpu_device *adev = crtc->dev->dev_private;
	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
	int rc = 0;

	if (enable) {
		/* vblank irq on -> Only need vupdate irq in vrr mode */
		if (amdgpu_dm_vrr_active(acrtc_state))
			rc = dm_set_vupdate_irq(crtc, true);
	} else {
		/* vblank irq off -> vupdate irq off */
		rc = dm_set_vupdate_irq(crtc, false);
	}

	if (rc)
		return rc;

	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
	return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
}

static int dm_enable_vblank(struct drm_crtc *crtc)
{
	return dm_set_vblank(crtc, true);
}

static void dm_disable_vblank(struct drm_crtc *crtc)
{
	dm_set_vblank(crtc, false);
}
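/*
 * Note on the pairing above: the extra VUPDATE interrupt is only
 * requested while variable refresh rate is active, since that is the
 * one case where vblank handling needs to track the vertical update
 * position as the front porch stretches frame to frame.
 */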
/* Implemented only the options currently available for the driver */
static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
	.reset = dm_crtc_reset_state,
	.destroy = amdgpu_dm_crtc_destroy,
	.gamma_set = drm_atomic_helper_legacy_gamma_set,
	.set_config = drm_atomic_helper_set_config,
	.page_flip = drm_atomic_helper_page_flip,
	.atomic_duplicate_state = dm_crtc_duplicate_state,
	.atomic_destroy_state = dm_crtc_destroy_state,
	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
	.enable_vblank = dm_enable_vblank,
	.disable_vblank = dm_disable_vblank,
	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
};
static enum drm_connector_status
amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
{
	bool connected;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

	/*
	 * Notes:
	 * 1. This interface is NOT called in context of HPD irq.
	 * 2. This interface *is called* in context of user-mode ioctl. Which
	 * makes it a bad place for *any* MST-related activity.
	 */

	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
	    !aconnector->fake_enable)
		connected = (aconnector->dc_sink != NULL);
	else
		connected = (aconnector->base.force == DRM_FORCE_ON);

	return (connected ? connector_status_connected :
			connector_status_disconnected);
}

int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
					    struct drm_connector_state *connector_state,
					    struct drm_property *property,
					    uint64_t val)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct dm_connector_state *dm_old_state =
		to_dm_connector_state(connector->state);
	struct dm_connector_state *dm_new_state =
		to_dm_connector_state(connector_state);

	int ret = -EINVAL;

	if (property == dev->mode_config.scaling_mode_property) {
		enum amdgpu_rmx_type rmx_type;

		switch (val) {
		case DRM_MODE_SCALE_CENTER:
			rmx_type = RMX_CENTER;
			break;
		case DRM_MODE_SCALE_ASPECT:
			rmx_type = RMX_ASPECT;
			break;
		case DRM_MODE_SCALE_FULLSCREEN:
			rmx_type = RMX_FULL;
			break;
		case DRM_MODE_SCALE_NONE:
		default:
			rmx_type = RMX_OFF;
			break;
		}

		if (dm_old_state->scaling == rmx_type)
			return 0;

		dm_new_state->scaling = rmx_type;
		ret = 0;
	} else if (property == adev->mode_info.underscan_hborder_property) {
		dm_new_state->underscan_hborder = val;
		ret = 0;
	} else if (property == adev->mode_info.underscan_vborder_property) {
		dm_new_state->underscan_vborder = val;
		ret = 0;
	} else if (property == adev->mode_info.underscan_property) {
		dm_new_state->underscan_enable = val;
		ret = 0;
	} else if (property == adev->mode_info.abm_level_property) {
		dm_new_state->abm_level = val;
		ret = 0;
	}

	return ret;
}
int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
					    const struct drm_connector_state *state,
					    struct drm_property *property,
					    uint64_t *val)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct dm_connector_state *dm_state =
		to_dm_connector_state(state);
	int ret = -EINVAL;

	if (property == dev->mode_config.scaling_mode_property) {
		switch (dm_state->scaling) {
		case RMX_CENTER:
			*val = DRM_MODE_SCALE_CENTER;
			break;
		case RMX_ASPECT:
			*val = DRM_MODE_SCALE_ASPECT;
			break;
		case RMX_FULL:
			*val = DRM_MODE_SCALE_FULLSCREEN;
			break;
		case RMX_OFF:
		default:
			*val = DRM_MODE_SCALE_NONE;
			break;
		}
		ret = 0;
	} else if (property == adev->mode_info.underscan_hborder_property) {
		*val = dm_state->underscan_hborder;
		ret = 0;
	} else if (property == adev->mode_info.underscan_vborder_property) {
		*val = dm_state->underscan_vborder;
		ret = 0;
	} else if (property == adev->mode_info.underscan_property) {
		*val = dm_state->underscan_enable;
		ret = 0;
	} else if (property == adev->mode_info.abm_level_property) {
		*val = dm_state->abm_level;
		ret = 0;
	}

	return ret;
}
static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);

	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
}

static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	const struct dc_link *link = aconnector->dc_link;
	struct amdgpu_device *adev = connector->dev->dev_private;
	struct amdgpu_display_manager *dm = &adev->dm;

#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
	    link->type != dc_connection_none &&
	    dm->backlight_dev) {
		backlight_device_unregister(dm->backlight_dev);
		dm->backlight_dev = NULL;
	}
#endif

	if (aconnector->dc_em_sink)
		dc_sink_release(aconnector->dc_em_sink);
	aconnector->dc_em_sink = NULL;
	if (aconnector->dc_sink)
		dc_sink_release(aconnector->dc_sink);
	aconnector->dc_sink = NULL;

	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
	drm_connector_unregister(connector);
	drm_connector_cleanup(connector);
	if (aconnector->i2c) {
		i2c_del_adapter(&aconnector->i2c->base);
		kfree(aconnector->i2c);
	}
	kfree(aconnector->dm_dp_aux.aux.name);

	kfree(connector);
}
void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
{
	struct dm_connector_state *state =
		to_dm_connector_state(connector->state);

	if (connector->state)
		__drm_atomic_helper_connector_destroy_state(connector->state);

	kfree(state);

	state = kzalloc(sizeof(*state), GFP_KERNEL);

	if (state) {
		state->scaling = RMX_OFF;
		state->underscan_enable = false;
		state->underscan_hborder = 0;
		state->underscan_vborder = 0;
		state->base.max_requested_bpc = 8;
		state->vcpi_slots = 0;
		state->pbn = 0;
		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
			state->abm_level = amdgpu_dm_abm_level;

		__drm_atomic_helper_connector_reset(connector, &state->base);
	}
}

struct drm_connector_state *
amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
{
	struct dm_connector_state *state =
		to_dm_connector_state(connector->state);

	struct dm_connector_state *new_state =
			kmemdup(state, sizeof(*state), GFP_KERNEL);

	if (!new_state)
		return NULL;

	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);

	new_state->freesync_capable = state->freesync_capable;
	new_state->abm_level = state->abm_level;
	new_state->scaling = state->scaling;
	new_state->underscan_enable = state->underscan_enable;
	new_state->underscan_hborder = state->underscan_hborder;
	new_state->underscan_vborder = state->underscan_vborder;
	new_state->vcpi_slots = state->vcpi_slots;
	new_state->pbn = state->pbn;
	return &new_state->base;
}
static int
amdgpu_dm_connector_late_register(struct drm_connector *connector)
{
#if defined(CONFIG_DEBUG_FS)
	struct amdgpu_dm_connector *amdgpu_dm_connector =
		to_amdgpu_dm_connector(connector);
	int r;

	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
		amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
		r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
		if (r)
			return r;
	}

	connector_debugfs_init(amdgpu_dm_connector);
#endif

	return 0;
}

static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
	.reset = amdgpu_dm_connector_funcs_reset,
	.detect = amdgpu_dm_connector_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = amdgpu_dm_connector_destroy,
	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
	.late_register = amdgpu_dm_connector_late_register,
	.early_unregister = amdgpu_dm_connector_unregister
};

static int get_modes(struct drm_connector *connector)
{
	return amdgpu_dm_connector_get_modes(connector);
}
static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
{
	struct dc_sink_init_data init_params = {
			.link = aconnector->dc_link,
			.sink_signal = SIGNAL_TYPE_VIRTUAL
	};
	struct edid *edid;

	if (!aconnector->base.edid_blob_ptr) {
		DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
			  aconnector->base.name);

		aconnector->base.force = DRM_FORCE_OFF;
		aconnector->base.override_edid = false;
		return;
	}

	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;

	aconnector->edid = edid;

	aconnector->dc_em_sink = dc_link_add_remote_sink(
		aconnector->dc_link,
		(uint8_t *)edid,
		(edid->extensions + 1) * EDID_LENGTH,
		&init_params);

	if (aconnector->base.force == DRM_FORCE_ON) {
		aconnector->dc_sink = aconnector->dc_link->local_sink ?
		aconnector->dc_link->local_sink :
		aconnector->dc_em_sink;
		dc_sink_retain(aconnector->dc_sink);
	}
}

static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
{
	struct dc_link *link = (struct dc_link *)aconnector->dc_link;

	/*
	 * In case of headless boot with force on for DP managed connector
	 * Those settings have to be != 0 to get initial modeset
	 */
	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
	}

	aconnector->base.override_edid = true;
	create_eml_sink(aconnector);
}
enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
				   struct drm_display_mode *mode)
{
	int result = MODE_ERROR;
	struct dc_sink *dc_sink;
	struct amdgpu_device *adev = connector->dev->dev_private;
	/* TODO: Unhardcode stream count */
	struct dc_stream_state *stream;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	enum dc_status dc_result = DC_OK;

	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
	    (mode->flags & DRM_MODE_FLAG_DBLSCAN))
		return result;

	/*
	 * Only run this the first time mode_valid is called to initialize
	 * EDID mgmt
	 */
	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
	    !aconnector->dc_em_sink)
		handle_edid_mgmt(aconnector);

	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;

	if (dc_sink == NULL) {
		DRM_ERROR("dc_sink is NULL!\n");
		goto fail;
	}

	stream = create_stream_for_sink(aconnector, mode, NULL, NULL);
	if (stream == NULL) {
		DRM_ERROR("Failed to create stream for sink!\n");
		goto fail;
	}

	dc_result = dc_validate_stream(adev->dm.dc, stream);

	if (dc_result == DC_OK)
		result = MODE_OK;
	else
		DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d\n",
			      mode->hdisplay,
			      mode->vdisplay,
			      mode->clock,
			      dc_result);

	dc_stream_release(stream);

fail:
	/* TODO: error handling*/
	return result;
}
static int fill_hdr_info_packet(const struct drm_connector_state *state,
				struct dc_info_packet *out)
{
	struct hdmi_drm_infoframe frame;
	unsigned char buf[30]; /* 26 + 4 */
	ssize_t len;
	int ret, i;

	memset(out, 0, sizeof(*out));

	if (!state->hdr_output_metadata)
		return 0;

	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
	if (ret)
		return ret;

	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
	if (len < 0)
		return (int)len;

	/* Static metadata is a fixed 26 bytes + 4 byte header. */
	if (len != 30)
		return -EINVAL;

	/* Prepare the infopacket for DC. */
	switch (state->connector->connector_type) {
	case DRM_MODE_CONNECTOR_HDMIA:
		out->hb0 = 0x87; /* type */
		out->hb1 = 0x01; /* version */
		out->hb2 = 0x1A; /* length */
		out->sb[0] = buf[3]; /* checksum */
		i = 1;
		break;

	case DRM_MODE_CONNECTOR_DisplayPort:
	case DRM_MODE_CONNECTOR_eDP:
		out->hb0 = 0x00; /* sdp id, zero */
		out->hb1 = 0x87; /* type */
		out->hb2 = 0x1D; /* payload len - 1 */
		out->hb3 = (0x13 << 2); /* sdp version */
		out->sb[0] = 0x01; /* version */
		out->sb[1] = 0x1A; /* length */
		i = 2;
		break;

	default:
		return -EINVAL;
	}

	memcpy(&out->sb[i], &buf[4], 26);
	out->valid = true;

	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
		       sizeof(out->sb), false);

	return 0;
}
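/*
 * Layout note for the packet built above: the HDMI path carries the
 * Dynamic Range and Mastering (type 0x87) infoframe directly, with the
 * checksum byte (buf[3]) placed in sb[0]; the DP/eDP path wraps the same
 * 26 payload bytes in an SDP header instead, which is why the copy
 * offset into sb[] differs between the two cases.
 */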
static bool
is_hdr_metadata_different(const struct drm_connector_state *old_state,
			  const struct drm_connector_state *new_state)
{
	struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
	struct drm_property_blob *new_blob = new_state->hdr_output_metadata;

	if (old_blob != new_blob) {
		if (old_blob && new_blob &&
		    old_blob->length == new_blob->length)
			return memcmp(old_blob->data, new_blob->data,
				      old_blob->length);

		return true;
	}

	return false;
}

static int
amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
				 struct drm_atomic_state *state)
{
	struct drm_connector_state *new_con_state =
		drm_atomic_get_new_connector_state(state, conn);
	struct drm_connector_state *old_con_state =
		drm_atomic_get_old_connector_state(state, conn);
	struct drm_crtc *crtc = new_con_state->crtc;
	struct drm_crtc_state *new_crtc_state;
	int ret;

	if (!crtc)
		return 0;

	if (is_hdr_metadata_different(old_con_state, new_con_state)) {
		struct dc_info_packet hdr_infopacket;

		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
		if (ret)
			return ret;

		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
		if (IS_ERR(new_crtc_state))
			return PTR_ERR(new_crtc_state);

		/*
		 * DC considers the stream backends changed if the
		 * static metadata changes. Forcing the modeset also
		 * gives a simple way for userspace to switch from
		 * 8bpc to 10bpc when setting the metadata to enter
		 * or exit HDR.
		 *
		 * Changing the static metadata after it's been
		 * set is permissible, however. So only force a
		 * modeset if we're entering or exiting HDR.
		 */
		new_crtc_state->mode_changed =
			!old_con_state->hdr_output_metadata ||
			!new_con_state->hdr_output_metadata;
	}

	return 0;
}
static const struct drm_connector_helper_funcs
amdgpu_dm_connector_helper_funcs = {
	/*
	 * If hotplugging a second bigger display in FB Con mode, bigger
	 * resolution modes will be filtered by drm_mode_validate_size(),
	 * and those modes are missing after the user starts lightdm. So
	 * we need to renew the modes list in the get_modes callback,
	 * not just return the modes count.
	 */
	.get_modes = get_modes,
	.mode_valid = amdgpu_dm_connector_mode_valid,
	.atomic_check = amdgpu_dm_connector_atomic_check,
};

static void dm_crtc_helper_disable(struct drm_crtc *crtc)
{
}

static bool does_crtc_have_active_cursor(struct drm_crtc_state *new_crtc_state)
{
	struct drm_device *dev = new_crtc_state->crtc->dev;
	struct drm_plane *plane;

	drm_for_each_plane_mask(plane, dev, new_crtc_state->plane_mask) {
		if (plane->type == DRM_PLANE_TYPE_CURSOR)
			return true;
	}

	return false;
}
static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
{
	struct drm_atomic_state *state = new_crtc_state->state;
	struct drm_plane *plane;
	int num_active = 0;

	drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
		struct drm_plane_state *new_plane_state;

		/* Cursor planes are "fake". */
		if (plane->type == DRM_PLANE_TYPE_CURSOR)
			continue;

		new_plane_state = drm_atomic_get_new_plane_state(state, plane);

		if (!new_plane_state) {
			/*
			 * The plane is enabled on the CRTC and hasn't changed
			 * state. This means that it previously passed
			 * validation and is therefore enabled.
			 */
			num_active += 1;
			continue;
		}

		/* We need a framebuffer to be considered enabled. */
		num_active += (new_plane_state->fb != NULL);
	}

	return num_active;
}
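/*
 * Example: with a primary plane showing a framebuffer, one overlay with
 * no framebuffer attached, and a cursor plane, this helper reports one
 * active plane - the cursor is skipped as "fake" and the bare overlay
 * does not count without an fb.
 */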
/*
 * Sets whether interrupts should be enabled on a specific CRTC.
 * We require that the stream be enabled and that there exist active
 * DC planes on the stream.
 */
static void
dm_update_crtc_interrupt_state(struct drm_crtc *crtc,
			       struct drm_crtc_state *new_crtc_state)
{
	struct dm_crtc_state *dm_new_crtc_state =
		to_dm_crtc_state(new_crtc_state);

	dm_new_crtc_state->active_planes = 0;
	dm_new_crtc_state->interrupts_enabled = false;

	if (!dm_new_crtc_state->stream)
		return;

	dm_new_crtc_state->active_planes =
		count_crtc_active_planes(new_crtc_state);

	dm_new_crtc_state->interrupts_enabled =
		dm_new_crtc_state->active_planes > 0;
}

static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
				       struct drm_crtc_state *state)
{
	struct amdgpu_device *adev = crtc->dev->dev_private;
	struct dc *dc = adev->dm.dc;
	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
	int ret = -EINVAL;

	/*
	 * Update interrupt state for the CRTC. This needs to happen whenever
	 * the CRTC has changed or whenever any of its planes have changed.
	 * Atomic check satisfies both of these requirements since the CRTC
	 * is added to the state by DRM during drm_atomic_helper_check_planes.
	 */
	dm_update_crtc_interrupt_state(crtc, state);

	if (unlikely(!dm_crtc_state->stream &&
		     modeset_required(state, NULL, dm_crtc_state->stream))) {
		WARN_ON(1);
		return ret;
	}

	/* In some use cases, like reset, no stream is attached */
	if (!dm_crtc_state->stream)
		return 0;

	/*
	 * We want at least one hardware plane enabled to use
	 * the stream with a cursor enabled.
	 */
	if (state->enable && state->active &&
	    does_crtc_have_active_cursor(state) &&
	    dm_crtc_state->active_planes == 0)
		return -EINVAL;

	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
		return 0;

	return ret;
}
static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
				      const struct drm_display_mode *mode,
				      struct drm_display_mode *adjusted_mode)
{
	return true;
}

static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
	.disable = dm_crtc_helper_disable,
	.atomic_check = dm_crtc_helper_atomic_check,
	.mode_fixup = dm_crtc_helper_mode_fixup,
	.get_scanout_position = amdgpu_crtc_get_scanout_position,
};

static void dm_encoder_helper_disable(struct drm_encoder *encoder)
{
}

static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
{
	switch (display_color_depth) {
	case COLOR_DEPTH_666:
		return 6;
	case COLOR_DEPTH_888:
		return 8;
	case COLOR_DEPTH_101010:
		return 10;
	case COLOR_DEPTH_121212:
		return 12;
	case COLOR_DEPTH_141414:
		return 14;
	case COLOR_DEPTH_161616:
		return 16;
	default:
		break;
	}
	return 0;
}
static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
					  struct drm_crtc_state *crtc_state,
					  struct drm_connector_state *conn_state)
{
	struct drm_atomic_state *state = crtc_state->state;
	struct drm_connector *connector = conn_state->connector;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
	struct drm_dp_mst_topology_mgr *mst_mgr;
	struct drm_dp_mst_port *mst_port;
	enum dc_color_depth color_depth;
	int clock, bpp = 0;
	bool is_y420 = false;

	if (!aconnector->port || !aconnector->dc_sink)
		return 0;

	mst_port = aconnector->port;
	mst_mgr = &aconnector->mst_port->mst_mgr;

	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
		return 0;

	if (!state->duplicated) {
		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
			  aconnector->force_yuv420_output;
		color_depth = convert_color_depth_from_display_info(connector, conn_state,
								    is_y420);
		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
		clock = adjusted_mode->clock;
		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
	}
	dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
									   mst_mgr,
									   mst_port,
									   dm_new_connector_state->pbn,
									   0);
	if (dm_new_connector_state->vcpi_slots < 0) {
		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
		return dm_new_connector_state->vcpi_slots;
	}
	return 0;
}
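/*
 * Bandwidth note for the check above: bpp here is bits per pixel on the
 * wire, i.e. bpc * 3 for full-chroma RGB/YCbCr444 (e.g. 8 bpc -> 24 bpp,
 * 10 bpc -> 30 bpp). That bpp and the pixel clock feed the DP MST PBN
 * calculation, which in turn determines how many VCPI time slots the
 * stream needs on the link.
 */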
const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
	.disable = dm_encoder_helper_disable,
	.atomic_check = dm_encoder_helper_atomic_check
};

#if defined(CONFIG_DRM_AMD_DC_DCN)
static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
					    struct dc_state *dc_state)
{
	struct dc_stream_state *stream = NULL;
	struct drm_connector *connector;
	struct drm_connector_state *new_con_state, *old_con_state;
	struct amdgpu_dm_connector *aconnector;
	struct dm_connector_state *dm_conn_state;
	int i, j, clock, bpp;
	int vcpi, pbn_div, pbn = 0;

	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {

		aconnector = to_amdgpu_dm_connector(connector);

		if (!aconnector->port)
			continue;

		if (!new_con_state || !new_con_state->crtc)
			continue;

		dm_conn_state = to_dm_connector_state(new_con_state);

		for (j = 0; j < dc_state->stream_count; j++) {
			stream = dc_state->streams[j];
			if (!stream)
				continue;

			if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
				break;

			stream = NULL;
		}

		if (!stream)
			continue;

		if (stream->timing.flags.DSC != 1) {
			drm_dp_mst_atomic_enable_dsc(state,
						     aconnector->port,
						     dm_conn_state->pbn,
						     0,
						     false);
			continue;
		}

		pbn_div = dm_mst_get_pbn_divider(stream->link);
		bpp = stream->timing.dsc_cfg.bits_per_pixel;
		clock = stream->timing.pix_clk_100hz / 10;
		pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
		vcpi = drm_dp_mst_atomic_enable_dsc(state,
						    aconnector->port,
						    pbn, pbn_div,
						    true);
		if (vcpi < 0)
			return vcpi;

		dm_conn_state->pbn = pbn;
		dm_conn_state->vcpi_slots = vcpi;
	}
	return 0;
}
#endif
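/*
 * With DSC enabled, PBN is recomputed from the compressed bitrate
 * (dsc_cfg.bits_per_pixel) rather than the uncompressed stream bpp, so
 * a DSC stream reserves proportionally fewer VCPI slots on the link.
 */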
static void dm_drm_plane_reset(struct drm_plane *plane)
{
	struct dm_plane_state *amdgpu_state = NULL;

	if (plane->state)
		plane->funcs->atomic_destroy_state(plane, plane->state);

	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
	WARN_ON(amdgpu_state == NULL);

	if (amdgpu_state)
		__drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
}

static struct drm_plane_state *
dm_drm_plane_duplicate_state(struct drm_plane *plane)
{
	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;

	old_dm_plane_state = to_dm_plane_state(plane->state);
	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
	if (!dm_plane_state)
		return NULL;

	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);

	if (old_dm_plane_state->dc_state) {
		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
		dc_plane_state_retain(dm_plane_state->dc_state);
	}

	return &dm_plane_state->base;
}

void dm_drm_plane_destroy_state(struct drm_plane *plane,
				struct drm_plane_state *state)
{
	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);

	if (dm_plane_state->dc_state)
		dc_plane_state_release(dm_plane_state->dc_state);

	drm_atomic_helper_plane_destroy_state(plane, state);
}

static const struct drm_plane_funcs dm_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = drm_primary_helper_destroy,
	.reset = dm_drm_plane_reset,
	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
	.atomic_destroy_state = dm_drm_plane_destroy_state,
};
static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
				      struct drm_plane_state *new_state)
{
	struct amdgpu_framebuffer *afb;
	struct drm_gem_object *obj;
	struct amdgpu_device *adev;
	struct amdgpu_bo *rbo;
	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
	struct list_head list;
	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	uint64_t tiling_flags;
	uint32_t domain;
	int r;
	bool tmz_surface = false;
	bool force_disable_dcc = false;

	dm_plane_state_old = to_dm_plane_state(plane->state);
	dm_plane_state_new = to_dm_plane_state(new_state);

	if (!new_state->fb) {
		DRM_DEBUG_DRIVER("No FB bound\n");
		return 0;
	}

	afb = to_amdgpu_framebuffer(new_state->fb);
	obj = new_state->fb->obj[0];
	rbo = gem_to_amdgpu_bo(obj);
	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
	INIT_LIST_HEAD(&list);

	tv.bo = &rbo->tbo;
	tv.num_shared = 1;
	list_add(&tv.head, &list);

	r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
	if (r) {
		dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
		return r;
	}

	if (plane->type != DRM_PLANE_TYPE_CURSOR)
		domain = amdgpu_display_supported_domains(adev, rbo->flags);
	else
		domain = AMDGPU_GEM_DOMAIN_VRAM;

	r = amdgpu_bo_pin(rbo, domain);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
		ttm_eu_backoff_reservation(&ticket, &list);
		return r;
	}

	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
	if (unlikely(r != 0)) {
		amdgpu_bo_unpin(rbo);
		ttm_eu_backoff_reservation(&ticket, &list);
		DRM_ERROR("%p bind failed\n", rbo);
		return r;
	}

	amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);

	tmz_surface = amdgpu_bo_encrypted(rbo);

	ttm_eu_backoff_reservation(&ticket, &list);

	afb->address = amdgpu_bo_gpu_offset(rbo);

	amdgpu_bo_ref(rbo);

	if (dm_plane_state_new->dc_state &&
	    dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
		struct dc_plane_state *plane_state = dm_plane_state_new->dc_state;

		force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
		fill_plane_buffer_attributes(
			adev, afb, plane_state->format, plane_state->rotation,
			tiling_flags, &plane_state->tiling_info,
			&plane_state->plane_size, &plane_state->dcc,
			&plane_state->address, tmz_surface,
			force_disable_dcc);
	}

	return 0;
}
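/*
 * Note: prepare_fb pins the BO (VRAM, or GTT where supported) and binds
 * it into GART before scanout so the address programmed into
 * afb->address stays valid for the duration of the flip; cleanup_fb
 * below undoes the pin once the framebuffer leaves the screen.
 */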
static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
				       struct drm_plane_state *old_state)
{
	struct amdgpu_bo *rbo;
	int r;

	if (!old_state->fb)
		return;

	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
	r = amdgpu_bo_reserve(rbo, false);
	if (unlikely(r)) {
		DRM_ERROR("failed to reserve rbo before unpin\n");
		return;
	}

	amdgpu_bo_unpin(rbo);
	amdgpu_bo_unreserve(rbo);
	amdgpu_bo_unref(&rbo);
}

static int dm_plane_atomic_check(struct drm_plane *plane,
				 struct drm_plane_state *state)
{
	struct amdgpu_device *adev = plane->dev->dev_private;
	struct dc *dc = adev->dm.dc;
	struct dm_plane_state *dm_plane_state;
	struct dc_scaling_info scaling_info;
	int ret;

	dm_plane_state = to_dm_plane_state(state);

	if (!dm_plane_state->dc_state)
		return 0;

	ret = fill_dc_scaling_info(state, &scaling_info);
	if (ret)
		return ret;

	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
		return 0;

	return -EINVAL;
}

static int dm_plane_atomic_async_check(struct drm_plane *plane,
				       struct drm_plane_state *new_plane_state)
{
	/* Only support async updates on cursor planes. */
	if (plane->type != DRM_PLANE_TYPE_CURSOR)
		return -EINVAL;

	return 0;
}
static void dm_plane_atomic_async_update(struct drm_plane *plane,
					 struct drm_plane_state *new_state)
{
	struct drm_plane_state *old_state =
		drm_atomic_get_old_plane_state(new_state->state, plane);

	swap(plane->state->fb, new_state->fb);

	plane->state->src_x = new_state->src_x;
	plane->state->src_y = new_state->src_y;
	plane->state->src_w = new_state->src_w;
	plane->state->src_h = new_state->src_h;
	plane->state->crtc_x = new_state->crtc_x;
	plane->state->crtc_y = new_state->crtc_y;
	plane->state->crtc_w = new_state->crtc_w;
	plane->state->crtc_h = new_state->crtc_h;

	handle_cursor_update(plane, old_state);
}

static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
	.prepare_fb = dm_plane_helper_prepare_fb,
	.cleanup_fb = dm_plane_helper_cleanup_fb,
	.atomic_check = dm_plane_atomic_check,
	.atomic_async_check = dm_plane_atomic_async_check,
	.atomic_async_update = dm_plane_atomic_async_update
};
/*
 * TODO: these are currently initialized to rgb formats only.
 * For future use cases we should either initialize them dynamically based on
 * plane capabilities, or initialize this array to all formats, so internal drm
 * check will succeed, and let DC implement proper check
 */
static const uint32_t rgb_formats[] = {
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_RGBA8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_ARGB2101010,
	DRM_FORMAT_ABGR2101010,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ABGR8888,
};

static const uint32_t overlay_formats[] = {
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_RGBA8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ABGR8888,
};

static const u32 cursor_formats[] = {
	DRM_FORMAT_ARGB8888
};

static int get_plane_formats(const struct drm_plane *plane,
			     const struct dc_plane_cap *plane_cap,
			     uint32_t *formats, int max_formats)
{
	int i, num_formats = 0;

	/*
	 * TODO: Query support for each group of formats directly from
	 * DC plane caps. This will require adding more formats to the
	 * caps list.
	 */

	switch (plane->type) {
	case DRM_PLANE_TYPE_PRIMARY:
		for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
			if (num_formats >= max_formats)
				break;

			formats[num_formats++] = rgb_formats[i];
		}

		if (plane_cap && plane_cap->pixel_format_support.nv12)
			formats[num_formats++] = DRM_FORMAT_NV12;
		if (plane_cap && plane_cap->pixel_format_support.p010)
			formats[num_formats++] = DRM_FORMAT_P010;
		break;

	case DRM_PLANE_TYPE_OVERLAY:
		for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
			if (num_formats >= max_formats)
				break;

			formats[num_formats++] = overlay_formats[i];
		}
		break;

	case DRM_PLANE_TYPE_CURSOR:
		for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
			if (num_formats >= max_formats)
				break;

			formats[num_formats++] = cursor_formats[i];
		}
		break;
	}

	return num_formats;
}
static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct drm_plane *plane,
				unsigned long possible_crtcs,
				const struct dc_plane_cap *plane_cap)
{
	uint32_t formats[32];
	int num_formats;
	int res;

	num_formats = get_plane_formats(plane, plane_cap, formats,
					ARRAY_SIZE(formats));

	res = drm_universal_plane_init(dm->adev->ddev, plane, possible_crtcs,
				       &dm_plane_funcs, formats, num_formats,
				       NULL, plane->type, NULL);
	if (res)
		return res;

	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
	    plane_cap && plane_cap->per_pixel_alpha) {
		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
					  BIT(DRM_MODE_BLEND_PREMULTI);

		drm_plane_create_alpha_property(plane);
		drm_plane_create_blend_mode_property(plane, blend_caps);
	}

	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
	    plane_cap &&
	    (plane_cap->pixel_format_support.nv12 ||
	     plane_cap->pixel_format_support.p010)) {
		/* This only affects YUV formats. */
		drm_plane_create_color_properties(
			plane,
			BIT(DRM_COLOR_YCBCR_BT601) |
			BIT(DRM_COLOR_YCBCR_BT709) |
			BIT(DRM_COLOR_YCBCR_BT2020),
			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
	}

	drm_plane_helper_add(plane, &dm_plane_helper_funcs);

	/* Create (reset) the plane state */
	if (plane->funcs->reset)
		plane->funcs->reset(plane);

	return 0;
}
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			       struct drm_plane *plane,
			       uint32_t crtc_index)
{
	struct amdgpu_crtc *acrtc = NULL;
	struct drm_plane *cursor_plane;

	int res = -ENOMEM;

	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
	if (!cursor_plane)
		goto fail;

	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);

	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
	if (!acrtc)
		goto fail;

	res = drm_crtc_init_with_planes(
			dm->ddev,
			&acrtc->base,
			plane,
			cursor_plane,
			&amdgpu_dm_crtc_funcs, NULL);

	if (res)
		goto fail;

	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);

	/* Create (reset) the plane state */
	if (acrtc->base.funcs->reset)
		acrtc->base.funcs->reset(&acrtc->base);

	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;

	acrtc->crtc_id = crtc_index;
	acrtc->base.enabled = false;
	acrtc->otg_inst = -1;

	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
				   true, MAX_COLOR_LUT_ENTRIES);
	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);

	return 0;

fail:
	kfree(acrtc);
	kfree(cursor_plane);
	return res;
}
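/*
 * Note: each CRTC gets its own kzalloc'd cursor plane here (type
 * DRM_PLANE_TYPE_CURSOR, no plane caps), wired in via
 * drm_crtc_init_with_planes() alongside the primary plane passed by the
 * caller; max_cursor_width/height come straight from DC caps.
 */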
static int to_drm_connector_type(enum signal_type st)
{
	switch (st) {
	case SIGNAL_TYPE_HDMI_TYPE_A:
		return DRM_MODE_CONNECTOR_HDMIA;
	case SIGNAL_TYPE_EDP:
		return DRM_MODE_CONNECTOR_eDP;
	case SIGNAL_TYPE_LVDS:
		return DRM_MODE_CONNECTOR_LVDS;
	case SIGNAL_TYPE_RGB:
		return DRM_MODE_CONNECTOR_VGA;
	case SIGNAL_TYPE_DISPLAY_PORT:
	case SIGNAL_TYPE_DISPLAY_PORT_MST:
		return DRM_MODE_CONNECTOR_DisplayPort;
	case SIGNAL_TYPE_DVI_DUAL_LINK:
	case SIGNAL_TYPE_DVI_SINGLE_LINK:
		return DRM_MODE_CONNECTOR_DVID;
	case SIGNAL_TYPE_VIRTUAL:
		return DRM_MODE_CONNECTOR_VIRTUAL;
	default:
		return DRM_MODE_CONNECTOR_Unknown;
	}
}

static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
{
	struct drm_encoder *encoder;

	/* There is only one encoder per connector */
	drm_connector_for_each_possible_encoder(connector, encoder)
		return encoder;

	return NULL;
}

static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
{
	struct drm_encoder *encoder;
	struct amdgpu_encoder *amdgpu_encoder;

	encoder = amdgpu_dm_connector_to_encoder(connector);

	if (encoder == NULL)
		return;

	amdgpu_encoder = to_amdgpu_encoder(encoder);

	amdgpu_encoder->native_mode.clock = 0;

	if (!list_empty(&connector->probed_modes)) {
		struct drm_display_mode *preferred_mode = NULL;

		list_for_each_entry(preferred_mode,
				    &connector->probed_modes,
				    head) {
			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
				amdgpu_encoder->native_mode = *preferred_mode;

			break;
		}
	}
}
static struct drm_display_mode *
amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
			     char *name,
			     int hdisplay, int vdisplay)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct drm_display_mode *mode = NULL;
	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;

	mode = drm_mode_duplicate(dev, native_mode);

	if (mode == NULL)
		return NULL;

	mode->hdisplay = hdisplay;
	mode->vdisplay = vdisplay;
	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);

	return mode;
}

static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
						 struct drm_connector *connector)
{
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct drm_display_mode *mode = NULL;
	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
	struct amdgpu_dm_connector *amdgpu_dm_connector =
				to_amdgpu_dm_connector(connector);
	int i;
	int n;
	struct mode_size {
		char name[DRM_DISPLAY_MODE_LEN];
		int w;
		int h;
	} common_modes[] = {
		{  "640x480",  640,  480},
		{  "800x600",  800,  600},
		{ "1024x768", 1024,  768},
		{ "1280x720", 1280,  720},
		{ "1280x800", 1280,  800},
		{"1280x1024", 1280, 1024},
		{ "1440x900", 1440,  900},
		{"1680x1050", 1680, 1050},
		{"1600x1200", 1600, 1200},
		{"1920x1080", 1920, 1080},
		{"1920x1200", 1920, 1200}
	};

	n = ARRAY_SIZE(common_modes);

	for (i = 0; i < n; i++) {
		struct drm_display_mode *curmode = NULL;
		bool mode_existed = false;

		if (common_modes[i].w > native_mode->hdisplay ||
		    common_modes[i].h > native_mode->vdisplay ||
		    (common_modes[i].w == native_mode->hdisplay &&
		     common_modes[i].h == native_mode->vdisplay))
			continue;

		list_for_each_entry(curmode, &connector->probed_modes, head) {
			if (common_modes[i].w == curmode->hdisplay &&
			    common_modes[i].h == curmode->vdisplay) {
				mode_existed = true;
				break;
			}
		}

		if (mode_existed)
			continue;

		mode = amdgpu_dm_create_common_mode(encoder,
				common_modes[i].name, common_modes[i].w,
				common_modes[i].h);
		drm_mode_probed_add(connector, mode);
		amdgpu_dm_connector->num_modes++;
	}
}
static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
					      struct edid *edid)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector =
			to_amdgpu_dm_connector(connector);

	if (edid) {
		/* empty probed_modes */
		INIT_LIST_HEAD(&connector->probed_modes);
		amdgpu_dm_connector->num_modes =
				drm_add_edid_modes(connector, edid);

		/*
		 * Sort the probed modes before calling
		 * amdgpu_dm_get_native_mode() since an EDID can have
		 * more than one preferred mode. The modes that are
		 * later in the probed mode list could be of higher
		 * and preferred resolution. For example, 3840x2160
		 * resolution in base EDID preferred timing and 4096x2160
		 * preferred resolution in DID extension block later.
		 */
		drm_mode_sort(&connector->probed_modes);
		amdgpu_dm_get_native_mode(connector);
	} else {
		amdgpu_dm_connector->num_modes = 0;
	}
}

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector =
			to_amdgpu_dm_connector(connector);
	struct drm_encoder *encoder;
	struct edid *edid = amdgpu_dm_connector->edid;

	encoder = amdgpu_dm_connector_to_encoder(connector);

	if (!edid || !drm_edid_is_valid(edid)) {
		amdgpu_dm_connector->num_modes =
				drm_add_modes_noedid(connector, 640, 480);
	} else {
		amdgpu_dm_connector_ddc_get_modes(connector, edid);
		amdgpu_dm_connector_add_common_modes(encoder, connector);
	}
	amdgpu_dm_fbc_init(connector);

	return amdgpu_dm_connector->num_modes;
}
void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
				     struct amdgpu_dm_connector *aconnector,
				     int connector_type,
				     struct dc_link *link,
				     int link_index)
{
	struct amdgpu_device *adev = dm->ddev->dev_private;

	/*
	 * Some of the properties below require access to state, like bpc.
	 * Allocate some default initial connector state with our reset helper.
	 */
	if (aconnector->base.funcs->reset)
		aconnector->base.funcs->reset(&aconnector->base);

	aconnector->connector_id = link_index;
	aconnector->dc_link = link;
	aconnector->base.interlace_allowed = false;
	aconnector->base.doublescan_allowed = false;
	aconnector->base.stereo_allowed = false;
	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
	aconnector->audio_inst = -1;
	mutex_init(&aconnector->hpd_lock);

	/*
	 * Configure HPD hot plug support: connector->polled defaults to 0,
	 * which means HPD hot plug is not supported.
	 */
	switch (connector_type) {
	case DRM_MODE_CONNECTOR_HDMIA:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		aconnector->base.ycbcr_420_allowed =
			link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
		break;
	case DRM_MODE_CONNECTOR_DisplayPort:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		aconnector->base.ycbcr_420_allowed =
			link->link_enc->features.dp_ycbcr420_supported ? true : false;
		break;
	case DRM_MODE_CONNECTOR_DVID:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		break;
	default:
		break;
	}

	drm_object_attach_property(&aconnector->base.base,
				dm->ddev->mode_config.scaling_mode_property,
				DRM_MODE_SCALE_NONE);

	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_property,
				UNDERSCAN_OFF);
	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_hborder_property,
				0);
	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_vborder_property,
				0);

	if (!aconnector->mst_port)
		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);

	/* This defaults to the max in the range, but we want 8bpc for non-edp. */
	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;

	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
	    dc_is_dmcu_initialized(adev->dm.dc)) {
		drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.abm_level_property, 0);
	}

	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
	    connector_type == DRM_MODE_CONNECTOR_eDP) {
		drm_object_attach_property(
			&aconnector->base.base,
			dm->ddev->mode_config.hdr_output_metadata_property, 0);

		if (!aconnector->mst_port)
			drm_connector_attach_vrr_capable_property(&aconnector->base);

#ifdef CONFIG_DRM_AMD_DC_HDCP
		if (adev->dm.hdcp_workqueue)
			drm_connector_attach_content_protection_property(&aconnector->base, true);
#endif
	}
}
static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
			      struct i2c_msg *msgs, int num)
{
	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
	struct ddc_service *ddc_service = i2c->ddc_service;
	struct i2c_command cmd;
	int i;
	int result = -EIO;

	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);

	if (!cmd.payloads)
		return result;

	cmd.number_of_payloads = num;
	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
	cmd.speed = 100;

	for (i = 0; i < num; i++) {
		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
		cmd.payloads[i].address = msgs[i].addr;
		cmd.payloads[i].length = msgs[i].len;
		cmd.payloads[i].data = msgs[i].buf;
	}

	if (dc_submit_i2c(
			ddc_service->ctx->dc,
			ddc_service->ddc_pin->hw_info.ddc_channel,
			&cmd))
		result = num;

	kfree(cmd.payloads);
	return result;
}
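/*
 * Note: this adapter translates Linux i2c_msg transfers into DC's
 * i2c_command payload array and submits them over the link's DDC
 * channel; returning num on success follows the i2c_algorithm
 * master_xfer convention of reporting how many messages completed.
 */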
static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}

static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
	.master_xfer = amdgpu_dm_i2c_xfer,
	.functionality = amdgpu_dm_i2c_func,
};

static struct amdgpu_i2c_adapter *
create_i2c(struct ddc_service *ddc_service,
	   int link_index,
	   int *res)
{
	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
	struct amdgpu_i2c_adapter *i2c;

	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
	if (!i2c)
		return NULL;
	i2c->base.owner = THIS_MODULE;
	i2c->base.class = I2C_CLASS_DDC;
	i2c->base.dev.parent = &adev->pdev->dev;
	i2c->base.algo = &amdgpu_dm_i2c_algo;
	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
	i2c_set_adapdata(&i2c->base, i2c);
	i2c->ddc_service = ddc_service;
	i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;

	return i2c;
}
/*
 * Note: this function assumes that dc_link_detect() was called for the
 * dc_link which will be represented by this aconnector.
 */
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *aconnector,
				    uint32_t link_index,
				    struct amdgpu_encoder *aencoder)
{
	int res = 0;
	int connector_type;
	struct dc *dc = dm->dc;
	struct dc_link *link = dc_get_link_at_index(dc, link_index);
	struct amdgpu_i2c_adapter *i2c;

	link->priv = aconnector;

	DRM_DEBUG_DRIVER("%s()\n", __func__);

	i2c = create_i2c(link->ddc, link->link_index, &res);
	if (!i2c) {
		DRM_ERROR("Failed to create i2c adapter data\n");
		return -ENOMEM;
	}

	aconnector->i2c = i2c;
	res = i2c_add_adapter(&i2c->base);

	if (res) {
		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
		goto out_free;
	}

	connector_type = to_drm_connector_type(link->connector_signal);

	res = drm_connector_init_with_ddc(
			dm->ddev,
			&aconnector->base,
			&amdgpu_dm_connector_funcs,
			connector_type,
			&i2c->base);

	if (res) {
		DRM_ERROR("connector_init failed\n");
		aconnector->connector_id = -1;
		goto out_free;
	}

	drm_connector_helper_add(
			&aconnector->base,
			&amdgpu_dm_connector_helper_funcs);

	amdgpu_dm_connector_init_helper(
		dm,
		aconnector,
		connector_type,
		link,
		link_index);

	drm_connector_attach_encoder(
		&aconnector->base, &aencoder->base);

	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
		|| connector_type == DRM_MODE_CONNECTOR_eDP)
		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);

out_free:
	if (res) {
		kfree(i2c);
		aconnector->i2c = NULL;
	}

	return res;
}

int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
{
	switch (adev->mode_info.num_crtc) {
	case 1:
		return 0x1;
	case 2:
		return 0x3;
	case 3:
		return 0x7;
	case 4:
		return 0xf;
	case 5:
		return 0x1f;
	case 6:
	default:
		return 0x3f;
	}
}

static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index)
{
	struct amdgpu_device *adev = dev->dev_private;

	int res = drm_encoder_init(dev,
				   &aencoder->base,
				   &amdgpu_dm_encoder_funcs,
				   DRM_MODE_ENCODER_TMDS,
				   NULL);

	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);

	if (!res)
		aencoder->encoder_id = link_index;
	else
		aencoder->encoder_id = -1;

	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);

	return res;
}

static void manage_dm_interrupts(struct amdgpu_device *adev,
				 struct amdgpu_crtc *acrtc,
				 bool enable)
{
	/*
	 * this is not correct translation but will work as soon as VBLANK
	 * constant is the same as PFLIP
	 */
	int irq_type =
		amdgpu_display_crtc_idx_to_irq_type(
			adev,
			acrtc->crtc_id);

	if (enable) {
		drm_crtc_vblank_on(&acrtc->base);
		amdgpu_irq_get(
			adev,
			&adev->pageflip_irq,
			irq_type);
	} else {
		amdgpu_irq_put(
			adev,
			&adev->pageflip_irq,
			irq_type);

		drm_crtc_vblank_off(&acrtc->base);
	}
}
static bool
is_scaling_state_different(const struct dm_connector_state *dm_state,
			   const struct dm_connector_state *old_dm_state)
{
	if (dm_state->scaling != old_dm_state->scaling)
		return true;
	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
			return true;
	} else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
			return true;
	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
		return true;
	return false;
}

#ifdef CONFIG_DRM_AMD_DC_HDCP
static bool is_content_protection_different(struct drm_connector_state *state,
					    const struct drm_connector_state *old_state,
					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

	if (old_state->hdcp_content_type != state->hdcp_content_type &&
	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		return true;
	}

	/* CP is being re enabled, ignore this */
	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
		return false;
	}

	/* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED */
	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;

	/*
	 * Check if something is connected/enabled; otherwise we would start
	 * hdcp with nothing connected/enabled (hot-plug, headless s3, dpms).
	 */
	if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && connector->dpms == DRM_MODE_DPMS_ON &&
	    aconnector->dc_sink != NULL)
		return true;

	if (old_state->content_protection == state->content_protection)
		return false;

	if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
		return true;

	return false;
}
#endif
static void remove_stream(struct amdgpu_device *adev,
                          struct amdgpu_crtc *acrtc,
                          struct dc_stream_state *stream)
{
        /* this is the update mode case */

        acrtc->otg_inst = -1;
        acrtc->enabled = false;
}
static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
                               struct dc_cursor_position *position)
{
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
        int x, y;
        int xorigin = 0, yorigin = 0;

        position->enable = false;
        position->x = 0;
        position->y = 0;

        if (!crtc || !plane->state->fb)
                return 0;

        if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
            (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
                DRM_ERROR("%s: bad cursor width or height %d x %d\n",
                          __func__,
                          plane->state->crtc_w,
                          plane->state->crtc_h);
                return -EINVAL;
        }

        x = plane->state->crtc_x;
        y = plane->state->crtc_y;

        if (x <= -amdgpu_crtc->max_cursor_width ||
            y <= -amdgpu_crtc->max_cursor_height)
                return 0;

        if (x < 0) {
                xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
                x = 0;
        }
        if (y < 0) {
                yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
                y = 0;
        }
        position->enable = true;
        position->translate_by_source = true;
        position->x = x;
        position->y = y;
        position->x_hotspot = xorigin;
        position->y_hotspot = yorigin;

        return 0;
}
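/*
 * Worked example for the clamping above: a cursor at crtc_x = -16
 * (partially off the left edge) yields xorigin = 16 and position->x = 0,
 * i.e. the hotspot shifts instead of the cursor scanning out at a
 * negative position. Once crtc_x <= -max_cursor_width the cursor is
 * simply reported as disabled.
 */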
static void handle_cursor_update(struct drm_plane *plane,
                                 struct drm_plane_state *old_plane_state)
{
        struct amdgpu_device *adev = plane->dev->dev_private;
        struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
        struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
        struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
        uint64_t address = afb ? afb->address : 0;
        struct dc_cursor_position position;
        struct dc_cursor_attributes attributes;
        int ret;

        if (!plane->state->fb && !old_plane_state->fb)
                return;

        DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
                         __func__,
                         amdgpu_crtc->crtc_id,
                         plane->state->crtc_w,
                         plane->state->crtc_h);

        ret = get_cursor_position(plane, crtc, &position);
        if (ret)
                return;

        if (!position.enable) {
                /* turn off cursor */
                if (crtc_state && crtc_state->stream) {
                        mutex_lock(&adev->dm.dc_lock);
                        dc_stream_set_cursor_position(crtc_state->stream,
                                                      &position);
                        mutex_unlock(&adev->dm.dc_lock);
                }
                return;
        }

        amdgpu_crtc->cursor_width = plane->state->crtc_w;
        amdgpu_crtc->cursor_height = plane->state->crtc_h;

        memset(&attributes, 0, sizeof(attributes));
        attributes.address.high_part = upper_32_bits(address);
        attributes.address.low_part  = lower_32_bits(address);
        attributes.width             = plane->state->crtc_w;
        attributes.height            = plane->state->crtc_h;
        attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
        attributes.rotation_angle    = 0;
        attributes.attribute_flags.value = 0;

        attributes.pitch = attributes.width;

        if (crtc_state->stream) {
                mutex_lock(&adev->dm.dc_lock);
                if (!dc_stream_set_cursor_attributes(crtc_state->stream,
                                                     &attributes))
                        DRM_ERROR("DC failed to set cursor attributes\n");

                if (!dc_stream_set_cursor_position(crtc_state->stream,
                                                   &position))
                        DRM_ERROR("DC failed to set cursor position\n");
                mutex_unlock(&adev->dm.dc_lock);
        }
}
static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
{
        assert_spin_locked(&acrtc->base.dev->event_lock);
        WARN_ON(acrtc->event);

        acrtc->event = acrtc->base.state->event;

        /* Set the flip status */
        acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;

        /* Mark this event as consumed */
        acrtc->base.state->event = NULL;

        DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
                         acrtc->crtc_id);
}
static void update_freesync_state_on_stream(
        struct amdgpu_display_manager *dm,
        struct dm_crtc_state *new_crtc_state,
        struct dc_stream_state *new_stream,
        struct dc_plane_state *surface,
        u32 flip_timestamp_in_us)
{
        struct mod_vrr_params vrr_params;
        struct dc_info_packet vrr_infopacket = {0};
        struct amdgpu_device *adev = dm->adev;
        unsigned long flags;

        if (!new_stream)
                return;

        /*
         * TODO: Determine why min/max totals and vrefresh can be 0 here.
         * For now it's sufficient to just guard against these conditions.
         */

        if (!new_stream->timing.h_total || !new_stream->timing.v_total)
                return;

        spin_lock_irqsave(&adev->ddev->event_lock, flags);
        vrr_params = new_crtc_state->vrr_params;

        if (surface) {
                mod_freesync_handle_preflip(
                        dm->freesync_module,
                        surface,
                        new_stream,
                        flip_timestamp_in_us,
                        &vrr_params);

                if (adev->family < AMDGPU_FAMILY_AI &&
                    amdgpu_dm_vrr_active(new_crtc_state)) {
                        mod_freesync_handle_v_update(dm->freesync_module,
                                                     new_stream, &vrr_params);

                        /* Need to call this before the frame ends. */
                        dc_stream_adjust_vmin_vmax(dm->dc,
                                                   new_crtc_state->stream,
                                                   &vrr_params.adjust);
                }
        }

        mod_freesync_build_vrr_infopacket(
                dm->freesync_module,
                new_stream,
                &vrr_params,
                PACKET_TYPE_VRR,
                TRANSFER_FUNC_UNKNOWN,
                &vrr_infopacket);

        new_crtc_state->freesync_timing_changed |=
                (memcmp(&new_crtc_state->vrr_params.adjust,
                        &vrr_params.adjust,
                        sizeof(vrr_params.adjust)) != 0);

        new_crtc_state->freesync_vrr_info_changed |=
                (memcmp(&new_crtc_state->vrr_infopacket,
                        &vrr_infopacket,
                        sizeof(vrr_infopacket)) != 0);

        new_crtc_state->vrr_params = vrr_params;
        new_crtc_state->vrr_infopacket = vrr_infopacket;

        new_stream->adjust = new_crtc_state->vrr_params.adjust;
        new_stream->vrr_infopacket = vrr_infopacket;

        if (new_crtc_state->freesync_vrr_info_changed)
                DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
                              new_crtc_state->base.crtc->base.id,
                              (int)new_crtc_state->base.vrr_enabled,
                              (int)vrr_params.state);

        spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
}
6493 static void pre_update_freesync_state_on_stream(
6494 struct amdgpu_display_manager
*dm
,
6495 struct dm_crtc_state
*new_crtc_state
)
6497 struct dc_stream_state
*new_stream
= new_crtc_state
->stream
;
6498 struct mod_vrr_params vrr_params
;
6499 struct mod_freesync_config config
= new_crtc_state
->freesync_config
;
6500 struct amdgpu_device
*adev
= dm
->adev
;
6501 unsigned long flags
;
6507 * TODO: Determine why min/max totals and vrefresh can be 0 here.
6508 * For now it's sufficient to just guard against these conditions.
6510 if (!new_stream
->timing
.h_total
|| !new_stream
->timing
.v_total
)
6513 spin_lock_irqsave(&adev
->ddev
->event_lock
, flags
);
6514 vrr_params
= new_crtc_state
->vrr_params
;
6516 if (new_crtc_state
->vrr_supported
&&
6517 config
.min_refresh_in_uhz
&&
6518 config
.max_refresh_in_uhz
) {
6519 config
.state
= new_crtc_state
->base
.vrr_enabled
?
6520 VRR_STATE_ACTIVE_VARIABLE
:
6523 config
.state
= VRR_STATE_UNSUPPORTED
;
6526 mod_freesync_build_vrr_params(dm
->freesync_module
,
6528 &config
, &vrr_params
);
6530 new_crtc_state
->freesync_timing_changed
|=
6531 (memcmp(&new_crtc_state
->vrr_params
.adjust
,
6533 sizeof(vrr_params
.adjust
)) != 0);
6535 new_crtc_state
->vrr_params
= vrr_params
;
6536 spin_unlock_irqrestore(&adev
->ddev
->event_lock
, flags
);
static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
                                            struct dm_crtc_state *new_state)
{
        bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
        bool new_vrr_active = amdgpu_dm_vrr_active(new_state);

        if (!old_vrr_active && new_vrr_active) {
                /* Transition VRR inactive -> active:
                 * While VRR is active, we must not disable vblank irq, as a
                 * reenable after disable would compute bogus vblank/pflip
                 * timestamps if it likely happened inside display front-porch.
                 *
                 * We also need vupdate irq for the actual core vblank handling
                 * at end of vblank.
                 */
                dm_set_vupdate_irq(new_state->base.crtc, true);
                drm_crtc_vblank_get(new_state->base.crtc);
                DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
                                 __func__, new_state->base.crtc->base.id);
        } else if (old_vrr_active && !new_vrr_active) {
                /* Transition VRR active -> inactive:
                 * Allow vblank irq disable again for fixed refresh rate.
                 */
                dm_set_vupdate_irq(new_state->base.crtc, false);
                drm_crtc_vblank_put(new_state->base.crtc);
                DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
                                 __func__, new_state->base.crtc->base.id);
        }
}
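/*
 * Note: the off->on branch takes exactly one vblank reference and
 * enables the vupdate irq, and the matching on->off branch drops both,
 * so repeated VRR toggles keep the drm_crtc_vblank_get()/put()
 * accounting balanced.
 */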
static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
{
        struct drm_plane *plane;
        struct drm_plane_state *old_plane_state, *new_plane_state;
        int i;

        /*
         * TODO: Make this per-stream so we don't issue redundant updates for
         * commits with multiple streams.
         */
        for_each_oldnew_plane_in_state(state, plane, old_plane_state,
                                       new_plane_state, i)
                if (plane->type == DRM_PLANE_TYPE_CURSOR)
                        handle_cursor_update(plane, old_plane_state);
}
static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
                                    struct dc_state *dc_state,
                                    struct drm_device *dev,
                                    struct amdgpu_display_manager *dm,
                                    struct drm_crtc *pcrtc,
                                    bool wait_for_vblank)
{
        uint32_t i;
        uint64_t timestamp_ns;
        struct drm_plane *plane;
        struct drm_plane_state *old_plane_state, *new_plane_state;
        struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
        struct drm_crtc_state *new_pcrtc_state =
                        drm_atomic_get_new_crtc_state(state, pcrtc);
        struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
        struct dm_crtc_state *dm_old_crtc_state =
                        to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
        int planes_count = 0, vpos, hpos;
        long r;
        unsigned long flags;
        struct amdgpu_bo *abo;
        uint64_t tiling_flags;
        bool tmz_surface = false;
        uint32_t target_vblank, last_flip_vblank;
        bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
        bool pflip_present = false;
        struct {
                struct dc_surface_update surface_updates[MAX_SURFACES];
                struct dc_plane_info plane_infos[MAX_SURFACES];
                struct dc_scaling_info scaling_infos[MAX_SURFACES];
                struct dc_flip_addrs flip_addrs[MAX_SURFACES];
                struct dc_stream_update stream_update;
        } *bundle;

        bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);

        if (!bundle) {
                dm_error("Failed to allocate update bundle\n");
                goto cleanup;
        }

        /*
         * Disable the cursor first if we're disabling all the planes.
         * It'll remain on the screen after the planes are re-enabled
         * if we don't.
         */
        if (acrtc_state->active_planes == 0)
                amdgpu_dm_commit_cursors(state);

        /* update planes when needed */
        for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
                struct drm_crtc *crtc = new_plane_state->crtc;
                struct drm_crtc_state *new_crtc_state;
                struct drm_framebuffer *fb = new_plane_state->fb;
                bool plane_needs_flip;
                struct dc_plane_state *dc_plane;
                struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);

                /* Cursor plane is handled after stream updates */
                if (plane->type == DRM_PLANE_TYPE_CURSOR)
                        continue;

                if (!fb || !crtc || pcrtc != crtc)
                        continue;

                new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
                if (!new_crtc_state->active)
                        continue;

                dc_plane = dm_new_plane_state->dc_state;

                bundle->surface_updates[planes_count].surface = dc_plane;
                if (new_pcrtc_state->color_mgmt_changed) {
                        bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
                        bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
                        bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
                }

                fill_dc_scaling_info(new_plane_state,
                                     &bundle->scaling_infos[planes_count]);

                bundle->surface_updates[planes_count].scaling_info =
                        &bundle->scaling_infos[planes_count];

                plane_needs_flip = old_plane_state->fb && new_plane_state->fb;

                pflip_present = pflip_present || plane_needs_flip;

                if (!plane_needs_flip) {
                        planes_count += 1;
                        continue;
                }

                abo = gem_to_amdgpu_bo(fb->obj[0]);

                /*
                 * Wait for all fences on this FB. Do limited wait to avoid
                 * deadlock during GPU reset when this fence will not signal
                 * but we hold reservation lock for the BO.
                 */
                r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
                                              false,
                                              msecs_to_jiffies(5000));
                if (unlikely(r <= 0))
                        DRM_ERROR("Waiting for fences timed out!");

                /*
                 * TODO: This might fail and hence better not used. Wait
                 * explicitly on fences instead, and in general this should
                 * be called for a blocking commit, as per the framework
                 * helpers.
                 */
                r = amdgpu_bo_reserve(abo, true);
                if (unlikely(r != 0))
                        DRM_ERROR("failed to reserve buffer before flip\n");

                amdgpu_bo_get_tiling_flags(abo, &tiling_flags);

                tmz_surface = amdgpu_bo_encrypted(abo);

                amdgpu_bo_unreserve(abo);

                fill_dc_plane_info_and_addr(
                        dm->adev, new_plane_state, tiling_flags,
                        &bundle->plane_infos[planes_count],
                        &bundle->flip_addrs[planes_count].address,
                        tmz_surface,
                        false);

                DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
                                 new_plane_state->plane->index,
                                 bundle->plane_infos[planes_count].dcc.enable);

                bundle->surface_updates[planes_count].plane_info =
                        &bundle->plane_infos[planes_count];

                /*
                 * Only allow immediate flips for fast updates that don't
                 * change FB pitch, DCC state, rotation or mirroring.
                 */
                bundle->flip_addrs[planes_count].flip_immediate =
                        crtc->state->async_flip &&
                        acrtc_state->update_type == UPDATE_TYPE_FAST;

                timestamp_ns = ktime_get_ns();
                bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
                bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
                bundle->surface_updates[planes_count].surface = dc_plane;

                if (!bundle->surface_updates[planes_count].surface) {
                        DRM_ERROR("No surface for CRTC: id=%d\n",
                                        acrtc_attach->crtc_id);
                        continue;
                }

                if (plane == pcrtc->primary)
                        update_freesync_state_on_stream(
                                dm,
                                acrtc_state,
                                acrtc_state->stream,
                                dc_plane,
                                bundle->flip_addrs[planes_count].flip_timestamp_in_us);

                DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
                                 __func__,
                                 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
                                 bundle->flip_addrs[planes_count].address.grph.addr.low_part);

                planes_count += 1;
        }

        if (pflip_present) {
                if (!vrr_active) {
                        /* Use old throttling in non-vrr fixed refresh rate mode
                         * to keep flip scheduling based on target vblank counts
                         * working in a backwards compatible way, e.g., for
                         * clients using the GLX_OML_sync_control extension or
                         * DRI3/Present extension with defined target_msc.
                         */
                        last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
                } else {
                        /* For variable refresh rate mode only:
                         * Get vblank of last completed flip to avoid > 1 vrr
                         * flips per video frame by use of throttling, but allow
                         * flip programming anywhere in the possibly large
                         * variable vrr vblank interval for fine-grained flip
                         * timing control and more opportunity to avoid stutter
                         * on late submission of flips.
                         */
                        spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
                        last_flip_vblank = acrtc_attach->last_flip_vblank;
                        spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
                }

                target_vblank = last_flip_vblank + wait_for_vblank;

                /*
                 * Wait until we're out of the vertical blank period before the one
                 * targeted by the flip
                 */
                while ((acrtc_attach->enabled &&
                        (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
                                                            0, &vpos, &hpos, NULL,
                                                            NULL, &pcrtc->hwmode)
                         & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
                        (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
                        (int)(target_vblank -
                          amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
                        usleep_range(1000, 1100);
                }

                if (acrtc_attach->base.state->event) {
                        drm_crtc_vblank_get(pcrtc);

                        spin_lock_irqsave(&pcrtc->dev->event_lock, flags);

                        WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
                        prepare_flip_isr(acrtc_attach);

                        spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
                }

                if (acrtc_state->stream) {
                        if (acrtc_state->freesync_vrr_info_changed)
                                bundle->stream_update.vrr_infopacket =
                                        &acrtc_state->stream->vrr_infopacket;
                }
        }

        /* Update the planes if changed or disable if we don't have any. */
        if ((planes_count || acrtc_state->active_planes == 0) &&
                acrtc_state->stream) {
                bundle->stream_update.stream = acrtc_state->stream;
                if (new_pcrtc_state->mode_changed) {
                        bundle->stream_update.src = acrtc_state->stream->src;
                        bundle->stream_update.dst = acrtc_state->stream->dst;
                }

                if (new_pcrtc_state->color_mgmt_changed) {
                        /*
                         * TODO: This isn't fully correct since we've actually
                         * already modified the stream in place.
                         */
                        bundle->stream_update.gamut_remap =
                                &acrtc_state->stream->gamut_remap_matrix;
                        bundle->stream_update.output_csc_transform =
                                &acrtc_state->stream->csc_color_matrix;
                        bundle->stream_update.out_transfer_func =
                                acrtc_state->stream->out_transfer_func;
                }

                acrtc_state->stream->abm_level = acrtc_state->abm_level;
                if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
                        bundle->stream_update.abm_level = &acrtc_state->abm_level;

                /*
                 * If FreeSync state on the stream has changed then we need to
                 * re-adjust the min/max bounds now that DC doesn't handle this
                 * as part of commit.
                 */
                if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
                    amdgpu_dm_vrr_active(acrtc_state)) {
                        spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
                        dc_stream_adjust_vmin_vmax(
                                dm->dc, acrtc_state->stream,
                                &acrtc_state->vrr_params.adjust);
                        spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
                }
                mutex_lock(&dm->dc_lock);
                if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
                                acrtc_state->stream->link->psr_settings.psr_allow_active)
                        amdgpu_dm_psr_disable(acrtc_state->stream);

                dc_commit_updates_for_stream(dm->dc,
                                             bundle->surface_updates,
                                             planes_count,
                                             acrtc_state->stream,
                                             &bundle->stream_update,
                                             dc_state);

                if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
                                acrtc_state->stream->link->psr_settings.psr_version != PSR_VERSION_UNSUPPORTED &&
                                !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
                        amdgpu_dm_link_setup_psr(acrtc_state->stream);
                else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
                                acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
                                !acrtc_state->stream->link->psr_settings.psr_allow_active) {
                        amdgpu_dm_psr_enable(acrtc_state->stream);
                }

                mutex_unlock(&dm->dc_lock);
        }

        /*
         * Update cursor state *after* programming all the planes.
         * This avoids redundant programming in the case where we're going
         * to be disabling a single plane - those pipes are being disabled.
         */
        if (acrtc_state->active_planes)
                amdgpu_dm_commit_cursors(state);

cleanup:
        kfree(bundle);
}
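/*
 * Flip throttling above, roughly in numbers: with wait_for_vblank ==
 * true, target_vblank = last_flip_vblank + 1, so the usleep loop spins
 * in ~1 ms steps until scanout has left the vertical blank period that
 * precedes the targeted vblank.
 */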
static void amdgpu_dm_commit_audio(struct drm_device *dev,
                                   struct drm_atomic_state *state)
{
        struct amdgpu_device *adev = dev->dev_private;
        struct amdgpu_dm_connector *aconnector;
        struct drm_connector *connector;
        struct drm_connector_state *old_con_state, *new_con_state;
        struct drm_crtc_state *new_crtc_state;
        struct dm_crtc_state *new_dm_crtc_state;
        const struct dc_stream_status *status;
        int i, inst;

        /* Notify device removals. */
        for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
                if (old_con_state->crtc != new_con_state->crtc) {
                        /* CRTC changes require notification. */
                        goto notify;
                }

                if (!new_con_state->crtc)
                        continue;

                new_crtc_state = drm_atomic_get_new_crtc_state(
                        state, new_con_state->crtc);

                if (!new_crtc_state)
                        continue;

                if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
                        continue;

        notify:
                aconnector = to_amdgpu_dm_connector(connector);

                mutex_lock(&adev->dm.audio_lock);
                inst = aconnector->audio_inst;
                aconnector->audio_inst = -1;
                mutex_unlock(&adev->dm.audio_lock);

                amdgpu_dm_audio_eld_notify(adev, inst);
        }

        /* Notify audio device additions. */
        for_each_new_connector_in_state(state, connector, new_con_state, i) {
                if (!new_con_state->crtc)
                        continue;

                new_crtc_state = drm_atomic_get_new_crtc_state(
                        state, new_con_state->crtc);

                if (!new_crtc_state)
                        continue;

                if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
                        continue;

                new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
                if (!new_dm_crtc_state->stream)
                        continue;

                status = dc_stream_get_status(new_dm_crtc_state->stream);
                if (!status)
                        continue;

                aconnector = to_amdgpu_dm_connector(connector);

                mutex_lock(&adev->dm.audio_lock);
                inst = status->audio_inst;
                aconnector->audio_inst = inst;
                mutex_unlock(&adev->dm.audio_lock);

                amdgpu_dm_audio_eld_notify(adev, inst);
        }
}
/*
 * Enable interrupts on CRTCs that are newly active, have undergone
 * a modeset, or have active planes again.
 *
 * Done in two passes, based on the for_modeset flag:
 * Pass 1: For CRTCs going through modeset
 * Pass 2: For CRTCs going from 0 to n active planes
 *
 * Interrupts can only be enabled after the planes are programmed,
 * so this requires a two-pass approach since we don't want to
 * just defer the interrupts until after commit planes every time.
 */
static void amdgpu_dm_enable_crtc_interrupts(struct drm_device *dev,
                                             struct drm_atomic_state *state,
                                             bool for_modeset)
{
        struct amdgpu_device *adev = dev->dev_private;
        struct drm_crtc *crtc;
        struct drm_crtc_state *old_crtc_state, *new_crtc_state;
        int i;
#ifdef CONFIG_DEBUG_FS
        enum amdgpu_dm_pipe_crc_source source;
#endif

        for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
                                      new_crtc_state, i) {
                struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
                struct dm_crtc_state *dm_new_crtc_state =
                        to_dm_crtc_state(new_crtc_state);
                struct dm_crtc_state *dm_old_crtc_state =
                        to_dm_crtc_state(old_crtc_state);
                bool modeset = drm_atomic_crtc_needs_modeset(new_crtc_state);
                bool run_pass;

                run_pass = (for_modeset && modeset) ||
                           (!for_modeset && !modeset &&
                            !dm_old_crtc_state->interrupts_enabled);

                if (!run_pass)
                        continue;

                if (!dm_new_crtc_state->interrupts_enabled)
                        continue;

                manage_dm_interrupts(adev, acrtc, true);

#ifdef CONFIG_DEBUG_FS
                /* The stream has changed so CRC capture needs to be re-enabled. */
                source = dm_new_crtc_state->crc_src;
                if (amdgpu_dm_is_valid_crc_source(source)) {
                        amdgpu_dm_crtc_configure_crc_source(
                                crtc, dm_new_crtc_state,
                                dm_new_crtc_state->crc_src);
                }
#endif
        }
}
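/*
 * Example of the two passes above: a CRTC undergoing a full modeset is
 * picked up with for_modeset == true; a CRTC that only went from 0 to
 * n active planes (interrupts previously off, no modeset) is picked up
 * in the second pass with for_modeset == false.
 */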
/**
 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
 * @crtc_state: the DRM CRTC state
 * @stream_state: the DC stream state.
 *
 * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
 */
static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
                                                struct dc_stream_state *stream_state)
{
        stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
}
static int amdgpu_dm_atomic_commit(struct drm_device *dev,
                                   struct drm_atomic_state *state,
                                   bool nonblock)
{
        struct drm_crtc *crtc;
        struct drm_crtc_state *old_crtc_state, *new_crtc_state;
        struct amdgpu_device *adev = dev->dev_private;
        int i;

        /*
         * We evade vblank and pflip interrupts on CRTCs that are undergoing
         * a modeset, being disabled, or have no active planes.
         *
         * It's done in atomic commit rather than commit tail for now since
         * some of these interrupt handlers access the current CRTC state and
         * potentially the stream pointer itself.
         *
         * Since the atomic state is swapped within atomic commit and not
         * within commit tail, this would lead to the new state (which has not
         * been committed yet) being accessed from within the handlers.
         *
         * TODO: Fix this so we can do this in commit tail and not have to block
         * in atomic check.
         */
        for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
                struct dm_crtc_state *dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
                struct dm_crtc_state *dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
                struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

                if (dm_old_crtc_state->interrupts_enabled &&
                    (!dm_new_crtc_state->interrupts_enabled ||
                     drm_atomic_crtc_needs_modeset(new_crtc_state)))
                        manage_dm_interrupts(adev, acrtc, false);
        }
        /*
         * Add check here for SoCs that support hardware cursor plane, to
         * unset legacy_cursor_update
         */

        return drm_atomic_helper_commit(dev, state, nonblock);

        /* TODO: Handle EINTR, reenable IRQ */
}
/**
 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
 * @state: The atomic state to commit
 *
 * This will tell DC to commit the constructed DC state from atomic_check,
 * programming the hardware. Any failure here implies a hardware failure, since
 * atomic check should have filtered anything non-kosher.
 */
static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
{
        struct drm_device *dev = state->dev;
        struct amdgpu_device *adev = dev->dev_private;
        struct amdgpu_display_manager *dm = &adev->dm;
        struct dm_atomic_state *dm_state;
        struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
        uint32_t i, j;
        struct drm_crtc *crtc;
        struct drm_crtc_state *old_crtc_state, *new_crtc_state;
        unsigned long flags;
        bool wait_for_vblank = true;
        struct drm_connector *connector;
        struct drm_connector_state *old_con_state, *new_con_state;
        struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
        int crtc_disable_count = 0;

        drm_atomic_helper_update_legacy_modeset_state(dev, state);

        dm_state = dm_atomic_get_new_state(state);
        if (dm_state && dm_state->context) {
                dc_state = dm_state->context;
        } else {
                /* No state changes, retain current state. */
                dc_state_temp = dc_create_state(dm->dc);
                ASSERT(dc_state_temp);
                dc_state = dc_state_temp;
                dc_resource_state_copy_construct_current(dm->dc, dc_state);
        }

        /* update changed items */
        for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
                struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

                dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
                dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

                DRM_DEBUG_DRIVER(
                        "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
                        "planes_changed:%d, mode_changed:%d,active_changed:%d,"
                        "connectors_changed:%d\n",
                        acrtc->crtc_id,
                        new_crtc_state->enable,
                        new_crtc_state->active,
                        new_crtc_state->planes_changed,
                        new_crtc_state->mode_changed,
                        new_crtc_state->active_changed,
                        new_crtc_state->connectors_changed);

                /* Copy all transient state flags into dc state */
                if (dm_new_crtc_state->stream) {
                        amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
                                                            dm_new_crtc_state->stream);
                }

                /* handles headless hotplug case, updating new_state and
                 * aconnector as needed
                 */

                if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {

                        DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);

                        if (!dm_new_crtc_state->stream) {
                                /*
                                 * This could happen because of issues with
                                 * userspace notification delivery.
                                 * In this case userspace tries to set mode on
                                 * a display which is in fact disconnected.
                                 * dc_sink is NULL in this case on aconnector.
                                 * We expect a reset mode will come soon.
                                 *
                                 * This can also happen when an unplug is done
                                 * during the resume sequence.
                                 *
                                 * In this case, we want to pretend we still
                                 * have a sink to keep the pipe running so that
                                 * hw state is consistent with the sw state.
                                 */
                                DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
                                                __func__, acrtc->base.base.id);
                                continue;
                        }

                        if (dm_old_crtc_state->stream)
                                remove_stream(adev, acrtc, dm_old_crtc_state->stream);

                        pm_runtime_get_noresume(dev->dev);

                        acrtc->enabled = true;
                        acrtc->hw_mode = new_crtc_state->mode;
                        crtc->hwmode = new_crtc_state->mode;
                } else if (modereset_required(new_crtc_state)) {
                        DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
                        /* i.e. reset mode */
                        if (dm_old_crtc_state->stream) {
                                if (dm_old_crtc_state->stream->link->psr_settings.psr_allow_active)
                                        amdgpu_dm_psr_disable(dm_old_crtc_state->stream);

                                remove_stream(adev, acrtc, dm_old_crtc_state->stream);
                        }
                }
        } /* for_each_crtc_in_state() */

        if (dc_state) {
                dm_enable_per_frame_crtc_master_sync(dc_state);
                mutex_lock(&dm->dc_lock);
                WARN_ON(!dc_commit_state(dm->dc, dc_state));
                mutex_unlock(&dm->dc_lock);
        }

        for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
                struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

                dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

                if (dm_new_crtc_state->stream != NULL) {
                        const struct dc_stream_status *status =
                                        dc_stream_get_status(dm_new_crtc_state->stream);

                        if (!status)
                                status = dc_stream_get_status_from_state(dc_state,
                                                                         dm_new_crtc_state->stream);

                        if (!status)
                                DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
                        else
                                acrtc->otg_inst = status->primary_otg_inst;
                }
        }
#ifdef CONFIG_DRM_AMD_DC_HDCP
        for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
                struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
                struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
                struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

                new_crtc_state = NULL;

                if (acrtc)
                        new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);

                dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

                if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
                    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
                        hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
                        new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
                        continue;
                }

                if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
                        hdcp_update_display(
                                adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
                                new_con_state->hdcp_content_type,
                                new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? true
                                                                                                          : false);
        }
#endif

        /* Handle connector state changes */
        for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
                struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
                struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
                struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
                struct dc_surface_update dummy_updates[MAX_SURFACES];
                struct dc_stream_update stream_update;
                struct dc_info_packet hdr_packet;
                struct dc_stream_status *status = NULL;
                bool abm_changed, hdr_changed, scaling_changed;

                memset(&dummy_updates, 0, sizeof(dummy_updates));
                memset(&stream_update, 0, sizeof(stream_update));

                if (acrtc) {
                        new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
                        old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
                }

                /* Skip any modesets/resets */
                if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
                        continue;

                dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
                dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

                scaling_changed = is_scaling_state_different(dm_new_con_state,
                                                             dm_old_con_state);

                abm_changed = dm_new_crtc_state->abm_level !=
                              dm_old_crtc_state->abm_level;

                hdr_changed =
                        is_hdr_metadata_different(old_con_state, new_con_state);

                if (!scaling_changed && !abm_changed && !hdr_changed)
                        continue;

                stream_update.stream = dm_new_crtc_state->stream;
                if (scaling_changed) {
                        update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
                                        dm_new_con_state, dm_new_crtc_state->stream);

                        stream_update.src = dm_new_crtc_state->stream->src;
                        stream_update.dst = dm_new_crtc_state->stream->dst;
                }

                if (abm_changed) {
                        dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;

                        stream_update.abm_level = &dm_new_crtc_state->abm_level;
                }

                if (hdr_changed) {
                        fill_hdr_info_packet(new_con_state, &hdr_packet);
                        stream_update.hdr_static_metadata = &hdr_packet;
                }

                status = dc_stream_get_status(dm_new_crtc_state->stream);
                WARN_ON(!status);
                WARN_ON(!status->plane_count);

                /*
                 * TODO: DC refuses to perform stream updates without a dc_surface_update.
                 * Here we create an empty update on each plane.
                 * To fix this, DC should permit updating only stream properties.
                 */
                for (j = 0; j < status->plane_count; j++)
                        dummy_updates[j].surface = status->plane_states[0];


                mutex_lock(&dm->dc_lock);
                dc_commit_updates_for_stream(dm->dc,
                                             dummy_updates,
                                             status->plane_count,
                                             dm_new_crtc_state->stream,
                                             &stream_update,
                                             dc_state);
                mutex_unlock(&dm->dc_lock);
        }

        /* Count number of newly disabled CRTCs for dropping PM refs later. */
        for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
                                      new_crtc_state, i) {
                if (old_crtc_state->active && !new_crtc_state->active)
                        crtc_disable_count++;

                dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
                dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

                /* Update freesync active state. */
                pre_update_freesync_state_on_stream(dm, dm_new_crtc_state);

                /* Handle vrr on->off / off->on transitions */
                amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
                                                dm_new_crtc_state);
        }

        /* Enable interrupts for CRTCs going through a modeset. */
        amdgpu_dm_enable_crtc_interrupts(dev, state, true);

        for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
                if (new_crtc_state->async_flip)
                        wait_for_vblank = false;

        /* update planes when needed per crtc*/
        for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
                dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

                if (dm_new_crtc_state->stream)
                        amdgpu_dm_commit_planes(state, dc_state, dev,
                                                dm, crtc, wait_for_vblank);
        }

        /* Enable interrupts for CRTCs going from 0 to n active planes. */
        amdgpu_dm_enable_crtc_interrupts(dev, state, false);

        /* Update audio instances for each connector. */
        amdgpu_dm_commit_audio(dev, state);

        /*
         * send vblank event on all events not handled in flip and
         * mark consumed event for drm_atomic_helper_commit_hw_done
         */
        spin_lock_irqsave(&adev->ddev->event_lock, flags);
        for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {

                if (new_crtc_state->event)
                        drm_send_event_locked(dev, &new_crtc_state->event->base);

                new_crtc_state->event = NULL;
        }
        spin_unlock_irqrestore(&adev->ddev->event_lock, flags);

        /* Signal HW programming completion */
        drm_atomic_helper_commit_hw_done(state);

        if (wait_for_vblank)
                drm_atomic_helper_wait_for_flip_done(dev, state);

        drm_atomic_helper_cleanup_planes(dev, state);

        /*
         * Finally, drop a runtime PM reference for each newly disabled CRTC,
         * so we can put the GPU into runtime suspend if we're not driving any
         * displays anymore.
         */
        for (i = 0; i < crtc_disable_count; i++)
                pm_runtime_put_autosuspend(dev->dev);
        pm_runtime_mark_last_busy(dev->dev);

        if (dc_state_temp)
                dc_release_state(dc_state_temp);
}
static int dm_force_atomic_commit(struct drm_connector *connector)
{
        int ret = 0;
        struct drm_device *ddev = connector->dev;
        struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
        struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
        struct drm_plane *plane = disconnected_acrtc->base.primary;
        struct drm_connector_state *conn_state;
        struct drm_crtc_state *crtc_state;
        struct drm_plane_state *plane_state;

        if (!state)
                return -ENOMEM;

        state->acquire_ctx = ddev->mode_config.acquire_ctx;

        /* Construct an atomic state to restore previous display setting */

        /*
         * Attach connectors to drm_atomic_state
         */
        conn_state = drm_atomic_get_connector_state(state, connector);

        ret = PTR_ERR_OR_ZERO(conn_state);
        if (ret)
                goto err;

        /* Attach crtc to drm_atomic_state */
        crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);

        ret = PTR_ERR_OR_ZERO(crtc_state);
        if (ret)
                goto err;

        /* force a restore */
        crtc_state->mode_changed = true;

        /* Attach plane to drm_atomic_state */
        plane_state = drm_atomic_get_plane_state(state, plane);

        ret = PTR_ERR_OR_ZERO(plane_state);
        if (ret)
                goto err;


        /* Call commit internally with the state we just constructed */
        ret = drm_atomic_commit(state);
        if (!ret)
                return 0;

err:
        DRM_ERROR("Restoring old state failed with %i\n", ret);
        drm_atomic_state_put(state);

        return ret;
}
/*
 * This function handles all cases when a set mode does not come upon hotplug.
 * This includes when a display is unplugged then plugged back into the
 * same port, and when running without usermode desktop manager support.
 */
void dm_restore_drm_connector_state(struct drm_device *dev,
                                    struct drm_connector *connector)
{
        struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
        struct amdgpu_crtc *disconnected_acrtc;
        struct dm_crtc_state *acrtc_state;

        if (!aconnector->dc_sink || !connector->state || !connector->encoder)
                return;

        disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
        if (!disconnected_acrtc)
                return;

        acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
        if (!acrtc_state->stream)
                return;

        /*
         * If the previous sink is not released and different from the current,
         * we deduce we are in a state where we cannot rely on a usermode call
         * to turn on the display, so we do it here.
         */
        if (acrtc_state->stream->sink != aconnector->dc_sink)
                dm_force_atomic_commit(&aconnector->base);
}
/*
 * Grabs all modesetting locks to serialize against any blocking commits,
 * and waits for completion of all non-blocking commits.
 */
static int do_aquire_global_lock(struct drm_device *dev,
                                 struct drm_atomic_state *state)
{
        struct drm_crtc *crtc;
        struct drm_crtc_commit *commit;
        long ret;

        /*
         * Adding all modeset locks to acquire_ctx ensures that when the
         * framework releases it, the extra locks we took here are
         * released as well.
         */
        ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
        if (ret)
                return ret;

        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                spin_lock(&crtc->commit_lock);
                commit = list_first_entry_or_null(&crtc->commit_list,
                                struct drm_crtc_commit, commit_entry);
                if (commit)
                        drm_crtc_commit_get(commit);
                spin_unlock(&crtc->commit_lock);

                if (!commit)
                        continue;

                /*
                 * Make sure all pending HW programming completed and
                 * page flips done
                 */
                ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);

                if (ret > 0)
                        ret = wait_for_completion_interruptible_timeout(
                                        &commit->flip_done, 10*HZ);

                if (ret == 0)
                        DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n",
                                  crtc->base.id, crtc->name);

                drm_crtc_commit_put(commit);
        }

        return ret < 0 ? ret : 0;
}
static void get_freesync_config_for_crtc(
        struct dm_crtc_state *new_crtc_state,
        struct dm_connector_state *new_con_state)
{
        struct mod_freesync_config config = {0};
        struct amdgpu_dm_connector *aconnector =
                        to_amdgpu_dm_connector(new_con_state->base.connector);
        struct drm_display_mode *mode = &new_crtc_state->base.mode;
        int vrefresh = drm_mode_vrefresh(mode);

        new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
                                        vrefresh >= aconnector->min_vfreq &&
                                        vrefresh <= aconnector->max_vfreq;

        if (new_crtc_state->vrr_supported) {
                new_crtc_state->stream->ignore_msa_timing_param = true;
                config.state = new_crtc_state->base.vrr_enabled ?
                                VRR_STATE_ACTIVE_VARIABLE :
                                VRR_STATE_INACTIVE;
                config.min_refresh_in_uhz =
                                aconnector->min_vfreq * 1000000;
                config.max_refresh_in_uhz =
                                aconnector->max_vfreq * 1000000;
                config.vsif_supported = true;
                config.btr = true;
        }

        new_crtc_state->freesync_config = config;
}
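/*
 * Example: a FreeSync panel reporting a 48-144 Hz range yields
 * min_refresh_in_uhz = 48,000,000 and max_refresh_in_uhz = 144,000,000;
 * a fixed 60 Hz mode on that panel still has vrr_supported set, since
 * 48 <= 60 <= 144.
 */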
static void reset_freesync_config_for_crtc(
        struct dm_crtc_state *new_crtc_state)
{
        new_crtc_state->vrr_supported = false;

        memset(&new_crtc_state->vrr_params, 0,
               sizeof(new_crtc_state->vrr_params));
        memset(&new_crtc_state->vrr_infopacket, 0,
               sizeof(new_crtc_state->vrr_infopacket));
}
static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
                                struct drm_atomic_state *state,
                                struct drm_crtc *crtc,
                                struct drm_crtc_state *old_crtc_state,
                                struct drm_crtc_state *new_crtc_state,
                                bool enable,
                                bool *lock_and_validation_needed)
{
        struct dm_atomic_state *dm_state = NULL;
        struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
        struct dc_stream_state *new_stream;
        int ret = 0;

        /*
         * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
         * update changed items
         */
        struct amdgpu_crtc *acrtc = NULL;
        struct amdgpu_dm_connector *aconnector = NULL;
        struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
        struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;

        new_stream = NULL;

        dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
        dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
        acrtc = to_amdgpu_crtc(crtc);
        aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);

        /* TODO This hack should go away */
        if (aconnector && enable) {
                /* Make sure fake sink is created in plug-in scenario */
                drm_new_conn_state = drm_atomic_get_new_connector_state(state,
                                                                        &aconnector->base);
                drm_old_conn_state = drm_atomic_get_old_connector_state(state,
                                                                        &aconnector->base);

                if (IS_ERR(drm_new_conn_state)) {
                        ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
                        goto fail;
                }

                dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
                dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);

                if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
                        goto skip_modeset;

                new_stream = create_stream_for_sink(aconnector,
                                                    &new_crtc_state->mode,
                                                    dm_new_conn_state,
                                                    dm_old_crtc_state->stream);

                /*
                 * We can have no stream on ACTION_SET if a display
                 * was disconnected during S3; in this case it is not an
                 * error: the OS will be updated after detection and
                 * will do the right thing on the next atomic commit.
                 */

                if (!new_stream) {
                        DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
                                        __func__, acrtc->base.base.id);
                        ret = -ENOMEM;
                        goto fail;
                }

                dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;

                ret = fill_hdr_info_packet(drm_new_conn_state,
                                           &new_stream->hdr_static_metadata);
                if (ret)
                        goto fail;

                /*
                 * If we already removed the old stream from the context
                 * (and set the new stream to NULL) then we can't reuse
                 * the old stream even if the stream and scaling are unchanged.
                 * We'll hit the BUG_ON and black screen.
                 *
                 * TODO: Refactor this function to allow this check to work
                 * in all conditions.
                 */
                if (dm_new_crtc_state->stream &&
                    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
                    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
                        new_crtc_state->mode_changed = false;
                        DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
                                         new_crtc_state->mode_changed);
                }
        }

        /* mode_changed flag may get updated above, need to check again */
        if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
                goto skip_modeset;

        DRM_DEBUG_DRIVER(
                "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
                "planes_changed:%d, mode_changed:%d,active_changed:%d,"
                "connectors_changed:%d\n",
                acrtc->crtc_id,
                new_crtc_state->enable,
                new_crtc_state->active,
                new_crtc_state->planes_changed,
                new_crtc_state->mode_changed,
                new_crtc_state->active_changed,
                new_crtc_state->connectors_changed);

        /* Remove stream for any changed/disabled CRTC */
        if (!enable) {

                if (!dm_old_crtc_state->stream)
                        goto skip_modeset;

                ret = dm_atomic_get_state(state, &dm_state);
                if (ret)
                        goto fail;

                DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
                                crtc->base.id);

                /* i.e. reset mode */
                if (dc_remove_stream_from_ctx(
                                dm->dc,
                                dm_state->context,
                                dm_old_crtc_state->stream) != DC_OK) {
                        ret = -EINVAL;
                        goto fail;
                }

                dc_stream_release(dm_old_crtc_state->stream);
                dm_new_crtc_state->stream = NULL;

                reset_freesync_config_for_crtc(dm_new_crtc_state);

                *lock_and_validation_needed = true;

        } else {/* Add stream for any updated/enabled CRTC */
                /*
                 * Quick fix to prevent a NULL pointer dereference on new_stream
                 * when added MST connectors are not found in the existing
                 * crtc_state in chained (daisy-chain) mode.
                 * TODO: need to dig out the root cause of that.
                 */
                if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
                        goto skip_modeset;

                if (modereset_required(new_crtc_state))
                        goto skip_modeset;

                if (modeset_required(new_crtc_state, new_stream,
                                     dm_old_crtc_state->stream)) {

                        WARN_ON(dm_new_crtc_state->stream);

                        ret = dm_atomic_get_state(state, &dm_state);
                        if (ret)
                                goto fail;

                        dm_new_crtc_state->stream = new_stream;

                        dc_stream_retain(new_stream);

                        DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
                                                crtc->base.id);

                        if (dc_add_stream_to_ctx(
                                        dm->dc,
                                        dm_state->context,
                                        dm_new_crtc_state->stream) != DC_OK) {
                                ret = -EINVAL;
                                goto fail;
                        }

                        *lock_and_validation_needed = true;
                }
        }

skip_modeset:
        /* Release extra reference */
        if (new_stream)
                dc_stream_release(new_stream);

        /*
         * We want to do dc stream updates that do not require a
         * full modeset below.
         */
        if (!(enable && aconnector && new_crtc_state->enable &&
              new_crtc_state->active))
                return 0;
        /*
         * Given the above conditions, the dc state cannot be NULL because:
         * 1. We're in the process of enabling CRTCs (just been added
         *    to the dc context, or already is on the context)
         * 2. Has a valid connector attached, and
         * 3. Is currently active and enabled.
         * => The dc stream state currently exists.
         */
        BUG_ON(dm_new_crtc_state->stream == NULL);

        /* Scaling or underscan settings */
        if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
                update_stream_scaling_settings(
                        &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);

        /* ABM settings */
        dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;

        /*
         * Color management settings. We also update color properties
         * when a modeset is needed, to ensure it gets reprogrammed.
         */
        if (dm_new_crtc_state->base.color_mgmt_changed ||
            drm_atomic_crtc_needs_modeset(new_crtc_state)) {
                ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
                if (ret)
                        goto fail;
        }

        /* Update Freesync settings. */
        get_freesync_config_for_crtc(dm_new_crtc_state,
                                     dm_new_conn_state);

        return ret;

fail:
        if (new_stream)
                dc_stream_release(new_stream);
        return ret;
}
static bool should_reset_plane(struct drm_atomic_state *state,
                               struct drm_plane *plane,
                               struct drm_plane_state *old_plane_state,
                               struct drm_plane_state *new_plane_state)
{
        struct drm_plane *other;
        struct drm_plane_state *old_other_state, *new_other_state;
        struct drm_crtc_state *new_crtc_state;
        int i;

        /*
         * TODO: Remove this hack once the checks below are sufficient
         * to determine when we need to reset all the planes on the stream.
         */
        if (state->allow_modeset)
                return true;

        /* Exit early if we know that we're adding or removing the plane. */
        if (old_plane_state->crtc != new_plane_state->crtc)
                return true;

        /* old crtc == new_crtc == NULL, plane not in context. */
        if (!new_plane_state->crtc)
                return false;

        new_crtc_state =
                drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);

        if (!new_crtc_state)
                return true;

        /* CRTC Degamma changes currently require us to recreate planes. */
        if (new_crtc_state->color_mgmt_changed)
                return true;

        if (drm_atomic_crtc_needs_modeset(new_crtc_state))
                return true;

        /*
         * If there are any new primary or overlay planes being added or
         * removed then the z-order can potentially change. To ensure
         * correct z-order and pipe acquisition the current DC architecture
         * requires us to remove and recreate all existing planes.
         *
         * TODO: Come up with a more elegant solution for this.
         */
        for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
                if (other->type == DRM_PLANE_TYPE_CURSOR)
                        continue;

                if (old_other_state->crtc != new_plane_state->crtc &&
                    new_other_state->crtc != new_plane_state->crtc)
                        continue;

                if (old_other_state->crtc != new_other_state->crtc)
                        return true;

                /* TODO: Remove this once we can handle fast format changes. */
                if (old_other_state->fb && new_other_state->fb &&
                    old_other_state->fb->format != new_other_state->fb->format)
                        return true;
        }

        return false;
}
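/*
 * Example: enabling one overlay plane trips the loop above for every
 * other non-cursor plane on the same CRTC (the overlay's old crtc is
 * NULL while its new crtc is set), so all of those planes get reset
 * too - z-order and pipe assignment may shift when the overlay joins.
 */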
static int dm_update_plane_state(struct dc *dc,
                                 struct drm_atomic_state *state,
                                 struct drm_plane *plane,
                                 struct drm_plane_state *old_plane_state,
                                 struct drm_plane_state *new_plane_state,
                                 bool enable,
                                 bool *lock_and_validation_needed)
{

        struct dm_atomic_state *dm_state = NULL;
        struct drm_crtc *new_plane_crtc, *old_plane_crtc;
        struct drm_crtc_state *old_crtc_state, *new_crtc_state;
        struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
        struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
        bool needs_reset;
        int ret = 0;


        new_plane_crtc = new_plane_state->crtc;
        old_plane_crtc = old_plane_state->crtc;
        dm_new_plane_state = to_dm_plane_state(new_plane_state);
        dm_old_plane_state = to_dm_plane_state(old_plane_state);

        /* TODO: Implement atomic check for cursor plane */
        if (plane->type == DRM_PLANE_TYPE_CURSOR)
                return 0;

        needs_reset = should_reset_plane(state, plane, old_plane_state,
                                         new_plane_state);

        /* Remove any changed/removed planes */
        if (!enable) {
                if (!needs_reset)
                        return 0;

                if (!old_plane_crtc)
                        return 0;

                old_crtc_state = drm_atomic_get_old_crtc_state(
                                state, old_plane_crtc);
                dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

                if (!dm_old_crtc_state->stream)
                        return 0;

                DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
                                plane->base.id, old_plane_crtc->base.id);

                ret = dm_atomic_get_state(state, &dm_state);
                if (ret)
                        return ret;

                if (!dc_remove_plane_from_context(
                                dc,
                                dm_old_crtc_state->stream,
                                dm_old_plane_state->dc_state,
                                dm_state->context)) {

                        return -EINVAL;
                }


                dc_plane_state_release(dm_old_plane_state->dc_state);
                dm_new_plane_state->dc_state = NULL;

                *lock_and_validation_needed = true;

        } else { /* Add new planes */
                struct dc_plane_state *dc_new_plane_state;

                if (drm_atomic_plane_disabling(plane->state, new_plane_state))
                        return 0;

                if (!new_plane_crtc)
                        return 0;

                new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
                dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

                if (!dm_new_crtc_state->stream)
                        return 0;

                if (!needs_reset)
                        return 0;

                WARN_ON(dm_new_plane_state->dc_state);

                dc_new_plane_state = dc_create_plane_state(dc);
                if (!dc_new_plane_state)
                        return -ENOMEM;

                DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
                                plane->base.id, new_plane_crtc->base.id);

                ret = fill_dc_plane_attributes(
                        new_plane_crtc->dev->dev_private,
                        dc_new_plane_state,
                        new_plane_state,
                        new_crtc_state);
                if (ret) {
                        dc_plane_state_release(dc_new_plane_state);
                        return ret;
                }

                ret = dm_atomic_get_state(state, &dm_state);
                if (ret) {
                        dc_plane_state_release(dc_new_plane_state);
                        return ret;
                }

                /*
                 * Any atomic check errors that occur after this will
                 * not need a release. The plane state will be attached
                 * to the stream, and therefore part of the atomic
                 * state. It'll be released when the atomic state is
                 * cleaned.
                 */
                if (!dc_add_plane_to_context(
                                dc,
                                dm_new_crtc_state->stream,
                                dc_new_plane_state,
                                dm_state->context)) {

                        dc_plane_state_release(dc_new_plane_state);
                        return -EINVAL;
                }

                dm_new_plane_state->dc_state = dc_new_plane_state;

                /* Tell DC to do a full surface update every time there
                 * is a plane change. Inefficient, but works for now.
                 */
                dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;

                *lock_and_validation_needed = true;
        }


        return ret;
}
8027 dm_determine_update_type_for_commit(struct amdgpu_display_manager
*dm
,
8028 struct drm_atomic_state
*state
,
8029 enum surface_update_type
*out_type
)
8031 struct dc
*dc
= dm
->dc
;
8032 struct dm_atomic_state
*dm_state
= NULL
, *old_dm_state
= NULL
;
8033 int i
, j
, num_plane
, ret
= 0;
8034 struct drm_plane_state
*old_plane_state
, *new_plane_state
;
8035 struct dm_plane_state
*new_dm_plane_state
, *old_dm_plane_state
;
8036 struct drm_crtc
*new_plane_crtc
;
8037 struct drm_plane
*plane
;
8039 struct drm_crtc
*crtc
;
8040 struct drm_crtc_state
*new_crtc_state
, *old_crtc_state
;
8041 struct dm_crtc_state
*new_dm_crtc_state
, *old_dm_crtc_state
;
8042 struct dc_stream_status
*status
= NULL
;
8043 enum surface_update_type update_type
= UPDATE_TYPE_FAST
;
8044 struct surface_info_bundle
{
8045 struct dc_surface_update surface_updates
[MAX_SURFACES
];
8046 struct dc_plane_info plane_infos
[MAX_SURFACES
];
8047 struct dc_scaling_info scaling_infos
[MAX_SURFACES
];
8048 struct dc_flip_addrs flip_addrs
[MAX_SURFACES
];
8049 struct dc_stream_update stream_update
;
8052 bundle
= kzalloc(sizeof(*bundle
), GFP_KERNEL
);
8055 DRM_ERROR("Failed to allocate update bundle\n");
8056 /* Set type to FULL to avoid crashing in DC*/
8057 update_type
= UPDATE_TYPE_FULL
;
        for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {

                memset(bundle, 0, sizeof(struct surface_info_bundle));

                new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
                old_dm_crtc_state = to_dm_crtc_state(old_crtc_state);
                num_plane = 0;

                if (new_dm_crtc_state->stream != old_dm_crtc_state->stream) {
                        update_type = UPDATE_TYPE_FULL;
                        goto cleanup;
                }

                if (!new_dm_crtc_state->stream)
                        continue;

                for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, j) {
                        const struct amdgpu_framebuffer *amdgpu_fb =
                                to_amdgpu_framebuffer(new_plane_state->fb);
                        struct dc_plane_info *plane_info = &bundle->plane_infos[num_plane];
                        struct dc_flip_addrs *flip_addr = &bundle->flip_addrs[num_plane];
                        struct dc_scaling_info *scaling_info = &bundle->scaling_infos[num_plane];
                        uint64_t tiling_flags;
                        bool tmz_surface = false;

                        new_plane_crtc = new_plane_state->crtc;
                        new_dm_plane_state = to_dm_plane_state(new_plane_state);
                        old_dm_plane_state = to_dm_plane_state(old_plane_state);

                        if (plane->type == DRM_PLANE_TYPE_CURSOR)
                                continue;

                        if (new_dm_plane_state->dc_state != old_dm_plane_state->dc_state) {
                                update_type = UPDATE_TYPE_FULL;
                                goto cleanup;
                        }

                        if (crtc != new_plane_crtc)
                                continue;

                        bundle->surface_updates[num_plane].surface =
                                        new_dm_plane_state->dc_state;

                        if (new_crtc_state->mode_changed) {
                                bundle->stream_update.dst = new_dm_crtc_state->stream->dst;
                                bundle->stream_update.src = new_dm_crtc_state->stream->src;
                        }

                        if (new_crtc_state->color_mgmt_changed) {
                                bundle->surface_updates[num_plane].gamma =
                                                new_dm_plane_state->dc_state->gamma_correction;
                                bundle->surface_updates[num_plane].in_transfer_func =
                                                new_dm_plane_state->dc_state->in_transfer_func;
                                bundle->surface_updates[num_plane].gamut_remap_matrix =
                                                &new_dm_plane_state->dc_state->gamut_remap_matrix;
                                bundle->stream_update.gamut_remap =
                                                &new_dm_crtc_state->stream->gamut_remap_matrix;
                                bundle->stream_update.output_csc_transform =
                                                &new_dm_crtc_state->stream->csc_color_matrix;
                                bundle->stream_update.out_transfer_func =
                                                new_dm_crtc_state->stream->out_transfer_func;
                        }

                        ret = fill_dc_scaling_info(new_plane_state, scaling_info);
                        if (ret)
                                goto cleanup;

                        bundle->surface_updates[num_plane].scaling_info = scaling_info;

                        if (amdgpu_fb) {
                                ret = get_fb_info(amdgpu_fb, &tiling_flags, &tmz_surface);
                                if (ret)
                                        goto cleanup;

                                ret = fill_dc_plane_info_and_addr(
                                        dm->adev, new_plane_state, tiling_flags,
                                        plane_info, &flip_addr->address,
                                        tmz_surface, false);
                                if (ret)
                                        goto cleanup;

                                bundle->surface_updates[num_plane].plane_info = plane_info;
                                bundle->surface_updates[num_plane].flip_addr = flip_addr;
                        }

                        num_plane++;
                }

                if (num_plane == 0)
                        continue;
                ret = dm_atomic_get_state(state, &dm_state);
                if (ret)
                        goto cleanup;

                old_dm_state = dm_atomic_get_old_state(state);
                if (!old_dm_state) {
                        ret = -EINVAL;
                        goto cleanup;
                }

                status = dc_stream_get_status_from_state(old_dm_state->context,
                                                         new_dm_crtc_state->stream);
                bundle->stream_update.stream = new_dm_crtc_state->stream;
                /*
                 * TODO: DC modifies the surface during this call so we need
                 * to lock here - find a way to do this without locking.
                 */
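                /*
                 * DC classifies the update as FAST, MED, or FULL; anything
                 * costlier than MED is promoted to FULL below so the commit
                 * takes the full validation path.
                 */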
                mutex_lock(&dm->dc_lock);
                update_type = dc_check_update_surfaces_for_stream(
                                dc, bundle->surface_updates, num_plane,
                                &bundle->stream_update, status);
                mutex_unlock(&dm->dc_lock);

                if (update_type > UPDATE_TYPE_MED) {
                        update_type = UPDATE_TYPE_FULL;
                        goto cleanup;
                }
        }

cleanup:
        kfree(bundle);

        *out_type = update_type;
        return ret;
}
static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state,
                                      struct drm_crtc *crtc)
{
        struct drm_connector *connector;
        struct drm_connector_state *conn_state;
        struct amdgpu_dm_connector *aconnector = NULL;
        int i;

        for_each_new_connector_in_state(state, connector, conn_state, i) {
                if (conn_state->crtc != crtc)
                        continue;

                aconnector = to_amdgpu_dm_connector(connector);
                if (!aconnector->port || !aconnector->mst_port)
                        aconnector = NULL;
                else
                        break;
        }

        if (!aconnector)
                return 0;

        return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
}
/**
 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
 * @dev: The DRM device
 * @state: The atomic state to commit
 *
 * Validate that the given atomic state is programmable by DC into hardware.
 * This involves constructing a &struct dc_state reflecting the new hardware
 * state we wish to commit, then querying DC to see if it is programmable. It's
 * important not to modify the existing DC state. Otherwise, atomic_check
 * may unexpectedly commit hardware changes.
 *
 * When validating the DC state, it's important that the right locks are
 * acquired. For full updates, which remove/add/update streams on one CRTC
 * while flipping on another, acquiring the global lock guarantees that any
 * such full update commit will wait for completion of any outstanding flip
 * using DRM's synchronization events. See
 * dm_determine_update_type_for_commit().
 *
 * Note that DM adds the affected connectors for all CRTCs in state, when that
 * might not seem necessary. This is because DC stream creation requires the
 * DC sink, which is tied to the DRM connector state. Cleaning this up should
 * be possible but non-trivial - a possible TODO item.
 *
 * Return: -Error code if validation failed.
 */
static int amdgpu_dm_atomic_check(struct drm_device *dev,
                                  struct drm_atomic_state *state)
{
        struct amdgpu_device *adev = dev->dev_private;
        struct dm_atomic_state *dm_state = NULL;
        struct dc *dc = adev->dm.dc;
        struct drm_connector *connector;
        struct drm_connector_state *old_con_state, *new_con_state;
        struct drm_crtc *crtc;
        struct drm_crtc_state *old_crtc_state, *new_crtc_state;
        struct drm_plane *plane;
        struct drm_plane_state *old_plane_state, *new_plane_state;
        enum surface_update_type update_type = UPDATE_TYPE_FAST;
        enum surface_update_type overall_update_type = UPDATE_TYPE_FAST;
        int ret, i;

        /*
         * This bool will be set to true for any modeset/reset
         * or plane update which implies a non-fast surface update.
         */
        bool lock_and_validation_needed = false;

        ret = drm_atomic_helper_check_modeset(dev, state);
        if (ret)
                goto fail;
        if (adev->asic_type >= CHIP_NAVI10) {
                for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
                        if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
                                ret = add_affected_mst_dsc_crtcs(state, crtc);
                                if (ret)
                                        goto fail;
                        }
                }
        }

        for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
                if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
                    !new_crtc_state->color_mgmt_changed &&
                    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled)
                        continue;

                if (!new_crtc_state->enable)
                        continue;

                ret = drm_atomic_add_affected_connectors(state, crtc);
                if (ret)
                        return ret;

                ret = drm_atomic_add_affected_planes(state, crtc);
                if (ret)
                        goto fail;
        }
        /*
         * Add all primary and overlay planes on the CRTC to the state
         * whenever a plane is enabled to maintain correct z-ordering
         * and to enable fast surface updates.
         */
        drm_for_each_crtc(crtc, dev) {
                bool modified = false;

                for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
                        if (plane->type == DRM_PLANE_TYPE_CURSOR)
                                continue;

                        if (new_plane_state->crtc == crtc ||
                            old_plane_state->crtc == crtc) {
                                modified = true;
                                break;
                        }
                }

                if (!modified)
                        continue;

                drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
                        if (plane->type == DRM_PLANE_TYPE_CURSOR)
                                continue;

                        new_plane_state = drm_atomic_get_plane_state(state, plane);

                        if (IS_ERR(new_plane_state)) {
                                ret = PTR_ERR(new_plane_state);
                                goto fail;
                        }
                }
        }
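        /*
         * The next four passes run in a fixed order: existing planes are
         * removed before the CRTCs that require a disable, and CRTCs are
         * enabled before new/modified planes are added to them.
         */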
        /* Remove existing planes if they are modified */
        for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
                ret = dm_update_plane_state(dc, state, plane,
                                            old_plane_state,
                                            new_plane_state,
                                            false,
                                            &lock_and_validation_needed);
                if (ret)
                        goto fail;
        }

        /* Disable all crtcs which require disable */
        for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
                ret = dm_update_crtc_state(&adev->dm, state, crtc,
                                           old_crtc_state,
                                           new_crtc_state,
                                           false,
                                           &lock_and_validation_needed);
                if (ret)
                        goto fail;
        }

        /* Enable all crtcs which require enable */
        for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
                ret = dm_update_crtc_state(&adev->dm, state, crtc,
                                           old_crtc_state,
                                           new_crtc_state,
                                           true,
                                           &lock_and_validation_needed);
                if (ret)
                        goto fail;
        }

        /* Add new/modified planes */
        for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
                ret = dm_update_plane_state(dc, state, plane,
                                            old_plane_state,
                                            new_plane_state,
                                            true,
                                            &lock_and_validation_needed);
                if (ret)
                        goto fail;
        }

        /* Run this here since we want to validate the streams we created */
        ret = drm_atomic_helper_check_planes(dev, state);
        if (ret)
                goto fail;

        if (state->legacy_cursor_update) {
                /*
                 * This is a fast cursor update coming from the plane update
                 * helper; check if it can be done asynchronously for better
                 * performance.
                 */
                state->async_update =
                        !drm_atomic_helper_async_check(dev, state);

                /*
                 * Skip the remaining global validation if this is an async
                 * update. Cursor updates can be done without affecting
                 * state or bandwidth calcs and this avoids the performance
                 * penalty of locking the private state object and
                 * allocating a new dc_state.
                 */
                if (state->async_update)
                        return 0;
        }
        /* Check scaling and underscan changes */
        /*
         * TODO: Scaling changes validation was removed due to the inability
         * to commit a new stream into the context without causing a full
         * reset. Need to decide how to handle this.
         */
        for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
                struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
                struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
                struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);

                /* Skip any modesets/resets */
                if (!acrtc || drm_atomic_crtc_needs_modeset(
                                drm_atomic_get_new_crtc_state(state, &acrtc->base)))
                        continue;

                /* Skip anything that is not a scale or underscan change */
                if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
                        continue;

                overall_update_type = UPDATE_TYPE_FULL;
                lock_and_validation_needed = true;
        }

        ret = dm_determine_update_type_for_commit(&adev->dm, state, &update_type);
        if (ret)
                goto fail;

        if (overall_update_type < update_type)
                overall_update_type = update_type;
        /*
         * lock_and_validation_needed was an old way to determine if we need to
         * set the global lock. Leaving it in to check if we broke any corner
         * cases:
         * lock_and_validation_needed true = UPDATE_TYPE_FULL or UPDATE_TYPE_MED
         * lock_and_validation_needed false = UPDATE_TYPE_FAST
         */
        if (lock_and_validation_needed && overall_update_type <= UPDATE_TYPE_FAST)
                WARN(1, "Global lock should be set, overall_update_type should be UPDATE_TYPE_MED or UPDATE_TYPE_FULL");

        if (overall_update_type > UPDATE_TYPE_FAST) {
                ret = dm_atomic_get_state(state, &dm_state);
                if (ret)
                        goto fail;

                ret = do_aquire_global_lock(dev, state);
                if (ret)
                        goto fail;

#if defined(CONFIG_DRM_AMD_DC_DCN)
                if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
                        goto fail;

                ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
                if (ret)
                        goto fail;
#endif

                /*
                 * Perform validation of MST topology in the state:
                 * We need to perform MST atomic check before calling
                 * dc_validate_global_state(), or there is a chance
                 * to get stuck in an infinite loop and hang eventually.
                 */
                ret = drm_dp_mst_atomic_check(state);
                if (ret)
                        goto fail;

                if (dc_validate_global_state(dc, dm_state->context, false) != DC_OK) {
                        ret = -EINVAL;
                        goto fail;
                }
        } else {
                /*
                 * The commit is a fast update. Fast updates shouldn't change
                 * the DC context or affect global validation; their commit
                 * work can be done in parallel with other commits not touching
                 * the same resource. If we have a new DC context as part of
                 * the DM atomic state from validation we need to free it and
                 * retain the existing one instead.
                 */
                struct dm_atomic_state *new_dm_state, *old_dm_state;

                new_dm_state = dm_atomic_get_new_state(state);
                old_dm_state = dm_atomic_get_old_state(state);

                if (new_dm_state && old_dm_state) {
                        if (new_dm_state->context)
                                dc_release_state(new_dm_state->context);

                        new_dm_state->context = old_dm_state->context;

                        if (old_dm_state->context)
                                dc_retain_state(old_dm_state->context);
                }
        }

        /* Store the overall update type for use later in atomic check. */
        for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
                struct dm_crtc_state *dm_new_crtc_state =
                        to_dm_crtc_state(new_crtc_state);

                dm_new_crtc_state->update_type = (int)overall_update_type;
        }

        /* Must be success */
        WARN_ON(ret);
        return ret;

fail:
        if (ret == -EDEADLK)
                DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
        else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
                DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
        else
                DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);

        return ret;
}
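/*
 * As elsewhere in this file, amdgpu_dm_atomic_check() is plugged into the DRM
 * core through the .atomic_check hook of the driver's
 * &struct drm_mode_config_funcs, which pairs it with the corresponding
 * .atomic_commit implementation for the commit side.
 */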
static bool is_dp_capable_without_timing_msa(struct dc *dc,
                                             struct amdgpu_dm_connector *amdgpu_dm_connector)
{
        uint8_t dpcd_data;
        bool capable = false;

        if (amdgpu_dm_connector->dc_link &&
                dm_helpers_dp_read_dpcd(
                                NULL,
                                amdgpu_dm_connector->dc_link,
                                DP_DOWN_STREAM_PORT_COUNT,
                                &dpcd_data,
                                sizeof(dpcd_data))) {
8530 capable
= (dpcd_data
& DP_MSA_TIMING_PAR_IGNORED
) ? true:false;
void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
                                    struct edid *edid)
{
        int i = 0;
        bool edid_check_required;
        struct detailed_timing *timing;
        struct detailed_non_pixel *data;
        struct detailed_data_monitor_range *range;
        struct amdgpu_dm_connector *amdgpu_dm_connector =
                        to_amdgpu_dm_connector(connector);
        struct dm_connector_state *dm_con_state = NULL;

        struct drm_device *dev = connector->dev;
        struct amdgpu_device *adev = dev->dev_private;
        bool freesync_capable = false;

        if (!connector->state) {
                DRM_ERROR("%s - Connector has no state", __func__);
                return;
        }

        if (!edid) {
                dm_con_state = to_dm_connector_state(connector->state);

                amdgpu_dm_connector->min_vfreq = 0;
                amdgpu_dm_connector->max_vfreq = 0;
                amdgpu_dm_connector->pixel_clock_mhz = 0;

                goto update;
        }
        dm_con_state = to_dm_connector_state(connector->state);

        edid_check_required = false;
        if (!amdgpu_dm_connector->dc_sink) {
                DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
                goto update;
        }
        if (!adev->dm.freesync_module)
                goto update;
        /*
         * If the EDID is non-zero, restrict FreeSync to DP and eDP sinks.
         */
        if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
            || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
                edid_check_required = is_dp_capable_without_timing_msa(
                                        adev->dm.dc,
                                        amdgpu_dm_connector);
        }
        if (edid_check_required == true && (edid->version > 1 ||
            (edid->version == 1 && edid->revision > 1))) {
                for (i = 0; i < 4; i++) {

                        timing = &edid->detailed_timings[i];
                        data = &timing->data.other_data;
                        range = &data->data.range;
                        /*
                         * Check if monitor has continuous frequency mode
                         */
                        if (data->type != EDID_DETAIL_MONITOR_RANGE)
                                continue;
                        /*
                         * Check for flag range limits only. If flag == 1 then
                         * no additional timing information is provided.
                         * Default GTF, GTF Secondary curve and CVT are not
                         * supported.
                         */
                        if (range->flags != 1)
                                continue;

                        amdgpu_dm_connector->min_vfreq = range->min_vfreq;
                        amdgpu_dm_connector->max_vfreq = range->max_vfreq;
                        amdgpu_dm_connector->pixel_clock_mhz =
                                range->pixel_clock_mhz * 10;
                        break;
                }
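                /*
                 * Example: a monitor range descriptor of 48-144 Hz gives
                 * max_vfreq - min_vfreq = 96, comfortably above the 10 Hz
                 * minimum window required below for FreeSync.
                 */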
                if (amdgpu_dm_connector->max_vfreq -
                    amdgpu_dm_connector->min_vfreq > 10) {

                        freesync_capable = true;
                }
        }

update:
        if (dm_con_state)
                dm_con_state->freesync_capable = freesync_capable;

        if (connector->vrr_capable_property)
                drm_connector_set_vrr_capable_property(connector,
                                                       freesync_capable);
}
static void amdgpu_dm_set_psr_caps(struct dc_link *link)
{
        uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];

        if (!(link->connector_signal & SIGNAL_TYPE_EDP))
                return;
        if (link->type == dc_connection_none)
                return;
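        /*
         * DP_PSR_SUPPORT is the first byte of the sink's PSR capability
         * block in the DPCD; a value of 0 means the panel does not
         * support PSR.
         */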
        if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
                                    dpcd_data, sizeof(dpcd_data))) {
                link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];

                if (dpcd_data[0] == 0) {
                        link->psr_settings.psr_version = PSR_VERSION_UNSUPPORTED;
                        link->psr_settings.psr_feature_enabled = false;
                } else {
                        link->psr_settings.psr_version = PSR_VERSION_1;
                        link->psr_settings.psr_feature_enabled = true;
                }

                DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
        }
}
/*
 * amdgpu_dm_link_setup_psr() - configure psr link
 * @stream: stream state
 *
 * Return: true if success
 */
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
{
        struct dc_link *link = NULL;
        struct psr_config psr_config = {0};
        struct psr_context psr_context = {0};
        struct dc *dc = NULL;
        bool ret = false;

        if (stream == NULL)
                return false;

        link = stream->link;
        dc = link->ctx->dc;
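        /*
         * The PSR version used here was cached from the sink's DPCD by
         * amdgpu_dm_set_psr_caps(); a version of 0 means PSR is not
         * supported and link setup is skipped.
         */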
        psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;

        if (psr_config.psr_version > 0) {
                psr_config.psr_exit_link_training_required = 0x1;
                psr_config.psr_frame_capture_indication_req = 0;
                psr_config.psr_rfb_setup_time = 0x37;
                psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
                psr_config.allow_smu_optimizations = 0x0;

                ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
        }
        DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);

        return ret;
}
/*
 * amdgpu_dm_psr_enable() - enable psr f/w
 * @stream: stream state
 *
 * Return: true if success
 */
bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
{
        struct dc_link *link = stream->link;
        unsigned int vsync_rate_hz = 0;
        struct dc_static_screen_params params = {0};
        /*
         * Calculate the number of static frames before generating the
         * interrupt to enter PSR; start from a fail-safe default of two
         * static frames.
         */
        unsigned int num_frames_static = 2;

        DRM_DEBUG_DRIVER("Enabling psr...\n");

        vsync_rate_hz = div64_u64(div64_u64((
                        stream->timing.pix_clk_100hz * 100),
                        stream->timing.v_total),
                        stream->timing.h_total);
        /*
         * Round up: calculate the number of frames such that at least
         * 30 ms of time has passed.
         */
        if (vsync_rate_hz != 0) {
                unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;

                num_frames_static = (30000 / frame_time_microsec) + 1;
        }
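        /*
         * Example: at 60 Hz, frame_time_microsec = 16666, so
         * num_frames_static = 30000 / 16666 + 1 = 2 frames (~33 ms).
         */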
        params.triggers.cursor_update = true;
        params.triggers.overlay_update = true;
        params.triggers.surface_update = true;
        params.num_frames = num_frames_static;

        dc_stream_set_static_screen_params(link->ctx->dc,
                                           &stream, 1,
                                           &params);

        return dc_link_set_psr_allow_active(link, true, false);
}
/*
 * amdgpu_dm_psr_disable() - disable psr f/w
 * @stream: stream state
 *
 * Return: true if success
 */
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
{
        DRM_DEBUG_DRIVER("Disabling psr...\n");

        return dc_link_set_psr_allow_active(stream->link, false, true);
}