2 * Copyright 2015 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
29 #include "dm_services_types.h"
31 #include "dc/inc/core_types.h"
32 #include "dal_asic_id.h"
33 #include "dmub/dmub_srv.h"
34 #include "dc/inc/hw/dmcu.h"
35 #include "dc/inc/hw/abm.h"
36 #include "dc/dc_dmub_srv.h"
40 #include "amdgpu_display.h"
41 #include "amdgpu_ucode.h"
43 #include "amdgpu_dm.h"
44 #ifdef CONFIG_DRM_AMD_DC_HDCP
45 #include "amdgpu_dm_hdcp.h"
46 #include <drm/drm_hdcp.h>
48 #include "amdgpu_pm.h"
50 #include "amd_shared.h"
51 #include "amdgpu_dm_irq.h"
52 #include "dm_helpers.h"
53 #include "amdgpu_dm_mst_types.h"
54 #if defined(CONFIG_DEBUG_FS)
55 #include "amdgpu_dm_debugfs.h"
58 #include "ivsrcid/ivsrcid_vislands30.h"
60 #include <linux/module.h>
61 #include <linux/moduleparam.h>
62 #include <linux/version.h>
63 #include <linux/types.h>
64 #include <linux/pm_runtime.h>
65 #include <linux/pci.h>
66 #include <linux/firmware.h>
67 #include <linux/component.h>
69 #include <drm/drm_atomic.h>
70 #include <drm/drm_atomic_uapi.h>
71 #include <drm/drm_atomic_helper.h>
72 #include <drm/drm_dp_mst_helper.h>
73 #include <drm/drm_fb_helper.h>
74 #include <drm/drm_fourcc.h>
75 #include <drm/drm_edid.h>
76 #include <drm/drm_vblank.h>
77 #include <drm/drm_audio_component.h>
78 #include <drm/drm_hdcp.h>
80 #if defined(CONFIG_DRM_AMD_DC_DCN)
81 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
83 #include "dcn/dcn_1_0_offset.h"
84 #include "dcn/dcn_1_0_sh_mask.h"
85 #include "soc15_hw_ip.h"
86 #include "vega10_ip_offset.h"
88 #include "soc15_common.h"
91 #include "modules/inc/mod_freesync.h"
92 #include "modules/power/power_helpers.h"
93 #include "modules/inc/mod_info_packet.h"
95 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
96 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB
);
97 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
98 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
99 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB
);
100 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
101 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB
);
104 #define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
105 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU
);
107 #define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
108 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU
);
110 /* Number of bytes in PSP header for firmware. */
111 #define PSP_HEADER_BYTES 0x100
113 /* Number of bytes in PSP footer for firmware. */
114 #define PSP_FOOTER_BYTES 0x100
119 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
120 * **dm**) sits between DRM and DC. It acts as a liason, converting DRM
121 * requests into DC requests, and DC responses into DRM responses.
123 * The root control structure is &struct amdgpu_display_manager.
126 /* basic init/fini API */
127 static int amdgpu_dm_init(struct amdgpu_device
*adev
);
128 static void amdgpu_dm_fini(struct amdgpu_device
*adev
);
131 * initializes drm_device display related structures, based on the information
132 * provided by DAL. The drm strcutures are: drm_crtc, drm_connector,
133 * drm_encoder, drm_mode_config
135 * Returns 0 on success
137 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device
*adev
);
138 /* removes and deallocates the drm structures, created by the above function */
139 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager
*dm
);
141 static int amdgpu_dm_plane_init(struct amdgpu_display_manager
*dm
,
142 struct drm_plane
*plane
,
143 unsigned long possible_crtcs
,
144 const struct dc_plane_cap
*plane_cap
);
145 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager
*dm
,
146 struct drm_plane
*plane
,
147 uint32_t link_index
);
148 static int amdgpu_dm_connector_init(struct amdgpu_display_manager
*dm
,
149 struct amdgpu_dm_connector
*amdgpu_dm_connector
,
151 struct amdgpu_encoder
*amdgpu_encoder
);
152 static int amdgpu_dm_encoder_init(struct drm_device
*dev
,
153 struct amdgpu_encoder
*aencoder
,
154 uint32_t link_index
);
156 static int amdgpu_dm_connector_get_modes(struct drm_connector
*connector
);
158 static int amdgpu_dm_atomic_commit(struct drm_device
*dev
,
159 struct drm_atomic_state
*state
,
162 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state
*state
);
164 static int amdgpu_dm_atomic_check(struct drm_device
*dev
,
165 struct drm_atomic_state
*state
);
167 static void handle_cursor_update(struct drm_plane
*plane
,
168 struct drm_plane_state
*old_plane_state
);
170 static void amdgpu_dm_set_psr_caps(struct dc_link
*link
);
171 static bool amdgpu_dm_psr_enable(struct dc_stream_state
*stream
);
172 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state
*stream
);
173 static bool amdgpu_dm_psr_disable(struct dc_stream_state
*stream
);
177 * dm_vblank_get_counter
180 * Get counter for number of vertical blanks
183 * struct amdgpu_device *adev - [in] desired amdgpu device
184 * int disp_idx - [in] which CRTC to get the counter from
187 * Counter for vertical blanks
189 static u32
dm_vblank_get_counter(struct amdgpu_device
*adev
, int crtc
)
191 if (crtc
>= adev
->mode_info
.num_crtc
)
194 struct amdgpu_crtc
*acrtc
= adev
->mode_info
.crtcs
[crtc
];
195 struct dm_crtc_state
*acrtc_state
= to_dm_crtc_state(
199 if (acrtc_state
->stream
== NULL
) {
200 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
205 return dc_stream_get_vblank_counter(acrtc_state
->stream
);
209 static int dm_crtc_get_scanoutpos(struct amdgpu_device
*adev
, int crtc
,
210 u32
*vbl
, u32
*position
)
212 uint32_t v_blank_start
, v_blank_end
, h_position
, v_position
;
214 if ((crtc
< 0) || (crtc
>= adev
->mode_info
.num_crtc
))
217 struct amdgpu_crtc
*acrtc
= adev
->mode_info
.crtcs
[crtc
];
218 struct dm_crtc_state
*acrtc_state
= to_dm_crtc_state(
221 if (acrtc_state
->stream
== NULL
) {
222 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
228 * TODO rework base driver to use values directly.
229 * for now parse it back into reg-format
231 dc_stream_get_scanoutpos(acrtc_state
->stream
,
237 *position
= v_position
| (h_position
<< 16);
238 *vbl
= v_blank_start
| (v_blank_end
<< 16);
244 static bool dm_is_idle(void *handle
)
250 static int dm_wait_for_idle(void *handle
)
256 static bool dm_check_soft_reset(void *handle
)
261 static int dm_soft_reset(void *handle
)
267 static struct amdgpu_crtc
*
268 get_crtc_by_otg_inst(struct amdgpu_device
*adev
,
271 struct drm_device
*dev
= adev
->ddev
;
272 struct drm_crtc
*crtc
;
273 struct amdgpu_crtc
*amdgpu_crtc
;
275 if (otg_inst
== -1) {
277 return adev
->mode_info
.crtcs
[0];
280 list_for_each_entry(crtc
, &dev
->mode_config
.crtc_list
, head
) {
281 amdgpu_crtc
= to_amdgpu_crtc(crtc
);
283 if (amdgpu_crtc
->otg_inst
== otg_inst
)
290 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state
*dm_state
)
292 return dm_state
->freesync_config
.state
== VRR_STATE_ACTIVE_VARIABLE
||
293 dm_state
->freesync_config
.state
== VRR_STATE_ACTIVE_FIXED
;
297 * dm_pflip_high_irq() - Handle pageflip interrupt
298 * @interrupt_params: ignored
300 * Handles the pageflip interrupt by notifying all interested parties
301 * that the pageflip has been completed.
303 static void dm_pflip_high_irq(void *interrupt_params
)
305 struct amdgpu_crtc
*amdgpu_crtc
;
306 struct common_irq_params
*irq_params
= interrupt_params
;
307 struct amdgpu_device
*adev
= irq_params
->adev
;
309 struct drm_pending_vblank_event
*e
;
310 struct dm_crtc_state
*acrtc_state
;
311 uint32_t vpos
, hpos
, v_blank_start
, v_blank_end
;
314 amdgpu_crtc
= get_crtc_by_otg_inst(adev
, irq_params
->irq_src
- IRQ_TYPE_PFLIP
);
316 /* IRQ could occur when in initial stage */
317 /* TODO work and BO cleanup */
318 if (amdgpu_crtc
== NULL
) {
319 DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
323 spin_lock_irqsave(&adev
->ddev
->event_lock
, flags
);
325 if (amdgpu_crtc
->pflip_status
!= AMDGPU_FLIP_SUBMITTED
){
326 DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
327 amdgpu_crtc
->pflip_status
,
328 AMDGPU_FLIP_SUBMITTED
,
329 amdgpu_crtc
->crtc_id
,
331 spin_unlock_irqrestore(&adev
->ddev
->event_lock
, flags
);
335 /* page flip completed. */
336 e
= amdgpu_crtc
->event
;
337 amdgpu_crtc
->event
= NULL
;
342 acrtc_state
= to_dm_crtc_state(amdgpu_crtc
->base
.state
);
343 vrr_active
= amdgpu_dm_vrr_active(acrtc_state
);
345 /* Fixed refresh rate, or VRR scanout position outside front-porch? */
347 !dc_stream_get_scanoutpos(acrtc_state
->stream
, &v_blank_start
,
348 &v_blank_end
, &hpos
, &vpos
) ||
349 (vpos
< v_blank_start
)) {
350 /* Update to correct count and vblank timestamp if racing with
351 * vblank irq. This also updates to the correct vblank timestamp
352 * even in VRR mode, as scanout is past the front-porch atm.
354 drm_crtc_accurate_vblank_count(&amdgpu_crtc
->base
);
356 /* Wake up userspace by sending the pageflip event with proper
357 * count and timestamp of vblank of flip completion.
360 drm_crtc_send_vblank_event(&amdgpu_crtc
->base
, e
);
362 /* Event sent, so done with vblank for this flip */
363 drm_crtc_vblank_put(&amdgpu_crtc
->base
);
366 /* VRR active and inside front-porch: vblank count and
367 * timestamp for pageflip event will only be up to date after
368 * drm_crtc_handle_vblank() has been executed from late vblank
369 * irq handler after start of back-porch (vline 0). We queue the
370 * pageflip event for send-out by drm_crtc_handle_vblank() with
371 * updated timestamp and count, once it runs after us.
373 * We need to open-code this instead of using the helper
374 * drm_crtc_arm_vblank_event(), as that helper would
375 * call drm_crtc_accurate_vblank_count(), which we must
376 * not call in VRR mode while we are in front-porch!
379 /* sequence will be replaced by real count during send-out. */
380 e
->sequence
= drm_crtc_vblank_count(&amdgpu_crtc
->base
);
381 e
->pipe
= amdgpu_crtc
->crtc_id
;
383 list_add_tail(&e
->base
.link
, &adev
->ddev
->vblank_event_list
);
387 /* Keep track of vblank of this flip for flip throttling. We use the
388 * cooked hw counter, as that one incremented at start of this vblank
389 * of pageflip completion, so last_flip_vblank is the forbidden count
390 * for queueing new pageflips if vsync + VRR is enabled.
392 amdgpu_crtc
->last_flip_vblank
=
393 amdgpu_get_vblank_counter_kms(&amdgpu_crtc
->base
);
395 amdgpu_crtc
->pflip_status
= AMDGPU_FLIP_NONE
;
396 spin_unlock_irqrestore(&adev
->ddev
->event_lock
, flags
);
398 DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
399 amdgpu_crtc
->crtc_id
, amdgpu_crtc
,
400 vrr_active
, (int) !e
);
403 static void dm_vupdate_high_irq(void *interrupt_params
)
405 struct common_irq_params
*irq_params
= interrupt_params
;
406 struct amdgpu_device
*adev
= irq_params
->adev
;
407 struct amdgpu_crtc
*acrtc
;
408 struct dm_crtc_state
*acrtc_state
;
411 acrtc
= get_crtc_by_otg_inst(adev
, irq_params
->irq_src
- IRQ_TYPE_VUPDATE
);
414 acrtc_state
= to_dm_crtc_state(acrtc
->base
.state
);
416 DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
418 amdgpu_dm_vrr_active(acrtc_state
));
420 /* Core vblank handling is done here after end of front-porch in
421 * vrr mode, as vblank timestamping will give valid results
422 * while now done after front-porch. This will also deliver
423 * page-flip completion events that have been queued to us
424 * if a pageflip happened inside front-porch.
426 if (amdgpu_dm_vrr_active(acrtc_state
)) {
427 drm_crtc_handle_vblank(&acrtc
->base
);
429 /* BTR processing for pre-DCE12 ASICs */
430 if (acrtc_state
->stream
&&
431 adev
->family
< AMDGPU_FAMILY_AI
) {
432 spin_lock_irqsave(&adev
->ddev
->event_lock
, flags
);
433 mod_freesync_handle_v_update(
434 adev
->dm
.freesync_module
,
436 &acrtc_state
->vrr_params
);
438 dc_stream_adjust_vmin_vmax(
441 &acrtc_state
->vrr_params
.adjust
);
442 spin_unlock_irqrestore(&adev
->ddev
->event_lock
, flags
);
449 * dm_crtc_high_irq() - Handles CRTC interrupt
450 * @interrupt_params: used for determining the CRTC instance
452 * Handles the CRTC/VSYNC interrupt by notfying DRM's VBLANK
455 static void dm_crtc_high_irq(void *interrupt_params
)
457 struct common_irq_params
*irq_params
= interrupt_params
;
458 struct amdgpu_device
*adev
= irq_params
->adev
;
459 struct amdgpu_crtc
*acrtc
;
460 struct dm_crtc_state
*acrtc_state
;
463 acrtc
= get_crtc_by_otg_inst(adev
, irq_params
->irq_src
- IRQ_TYPE_VBLANK
);
467 acrtc_state
= to_dm_crtc_state(acrtc
->base
.state
);
469 DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc
->crtc_id
,
470 amdgpu_dm_vrr_active(acrtc_state
),
471 acrtc_state
->active_planes
);
474 * Core vblank handling at start of front-porch is only possible
475 * in non-vrr mode, as only there vblank timestamping will give
476 * valid results while done in front-porch. Otherwise defer it
477 * to dm_vupdate_high_irq after end of front-porch.
479 if (!amdgpu_dm_vrr_active(acrtc_state
))
480 drm_crtc_handle_vblank(&acrtc
->base
);
483 * Following stuff must happen at start of vblank, for crc
484 * computation and below-the-range btr support in vrr mode.
486 amdgpu_dm_crtc_handle_crc_irq(&acrtc
->base
);
488 /* BTR updates need to happen before VUPDATE on Vega and above. */
489 if (adev
->family
< AMDGPU_FAMILY_AI
)
492 spin_lock_irqsave(&adev
->ddev
->event_lock
, flags
);
494 if (acrtc_state
->stream
&& acrtc_state
->vrr_params
.supported
&&
495 acrtc_state
->freesync_config
.state
== VRR_STATE_ACTIVE_VARIABLE
) {
496 mod_freesync_handle_v_update(adev
->dm
.freesync_module
,
498 &acrtc_state
->vrr_params
);
500 dc_stream_adjust_vmin_vmax(adev
->dm
.dc
, acrtc_state
->stream
,
501 &acrtc_state
->vrr_params
.adjust
);
505 * If there aren't any active_planes then DCH HUBP may be clock-gated.
506 * In that case, pageflip completion interrupts won't fire and pageflip
507 * completion events won't get delivered. Prevent this by sending
508 * pending pageflip events from here if a flip is still pending.
510 * If any planes are enabled, use dm_pflip_high_irq() instead, to
511 * avoid race conditions between flip programming and completion,
512 * which could cause too early flip completion events.
514 if (adev
->family
>= AMDGPU_FAMILY_RV
&&
515 acrtc
->pflip_status
== AMDGPU_FLIP_SUBMITTED
&&
516 acrtc_state
->active_planes
== 0) {
518 drm_crtc_send_vblank_event(&acrtc
->base
, acrtc
->event
);
520 drm_crtc_vblank_put(&acrtc
->base
);
522 acrtc
->pflip_status
= AMDGPU_FLIP_NONE
;
525 spin_unlock_irqrestore(&adev
->ddev
->event_lock
, flags
);
528 static int dm_set_clockgating_state(void *handle
,
529 enum amd_clockgating_state state
)
534 static int dm_set_powergating_state(void *handle
,
535 enum amd_powergating_state state
)
540 /* Prototypes of private functions */
541 static int dm_early_init(void* handle
);
543 /* Allocate memory for FBC compressed data */
544 static void amdgpu_dm_fbc_init(struct drm_connector
*connector
)
546 struct drm_device
*dev
= connector
->dev
;
547 struct amdgpu_device
*adev
= dev
->dev_private
;
548 struct dm_comressor_info
*compressor
= &adev
->dm
.compressor
;
549 struct amdgpu_dm_connector
*aconn
= to_amdgpu_dm_connector(connector
);
550 struct drm_display_mode
*mode
;
551 unsigned long max_size
= 0;
553 if (adev
->dm
.dc
->fbc_compressor
== NULL
)
556 if (aconn
->dc_link
->connector_signal
!= SIGNAL_TYPE_EDP
)
559 if (compressor
->bo_ptr
)
563 list_for_each_entry(mode
, &connector
->modes
, head
) {
564 if (max_size
< mode
->htotal
* mode
->vtotal
)
565 max_size
= mode
->htotal
* mode
->vtotal
;
569 int r
= amdgpu_bo_create_kernel(adev
, max_size
* 4, PAGE_SIZE
,
570 AMDGPU_GEM_DOMAIN_GTT
, &compressor
->bo_ptr
,
571 &compressor
->gpu_addr
, &compressor
->cpu_addr
);
574 DRM_ERROR("DM: Failed to initialize FBC\n");
576 adev
->dm
.dc
->ctx
->fbc_gpu_addr
= compressor
->gpu_addr
;
577 DRM_INFO("DM: FBC alloc %lu\n", max_size
*4);
584 static int amdgpu_dm_audio_component_get_eld(struct device
*kdev
, int port
,
585 int pipe
, bool *enabled
,
586 unsigned char *buf
, int max_bytes
)
588 struct drm_device
*dev
= dev_get_drvdata(kdev
);
589 struct amdgpu_device
*adev
= dev
->dev_private
;
590 struct drm_connector
*connector
;
591 struct drm_connector_list_iter conn_iter
;
592 struct amdgpu_dm_connector
*aconnector
;
597 mutex_lock(&adev
->dm
.audio_lock
);
599 drm_connector_list_iter_begin(dev
, &conn_iter
);
600 drm_for_each_connector_iter(connector
, &conn_iter
) {
601 aconnector
= to_amdgpu_dm_connector(connector
);
602 if (aconnector
->audio_inst
!= port
)
606 ret
= drm_eld_size(connector
->eld
);
607 memcpy(buf
, connector
->eld
, min(max_bytes
, ret
));
611 drm_connector_list_iter_end(&conn_iter
);
613 mutex_unlock(&adev
->dm
.audio_lock
);
615 DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port
, ret
, *enabled
);
620 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops
= {
621 .get_eld
= amdgpu_dm_audio_component_get_eld
,
624 static int amdgpu_dm_audio_component_bind(struct device
*kdev
,
625 struct device
*hda_kdev
, void *data
)
627 struct drm_device
*dev
= dev_get_drvdata(kdev
);
628 struct amdgpu_device
*adev
= dev
->dev_private
;
629 struct drm_audio_component
*acomp
= data
;
631 acomp
->ops
= &amdgpu_dm_audio_component_ops
;
633 adev
->dm
.audio_component
= acomp
;
638 static void amdgpu_dm_audio_component_unbind(struct device
*kdev
,
639 struct device
*hda_kdev
, void *data
)
641 struct drm_device
*dev
= dev_get_drvdata(kdev
);
642 struct amdgpu_device
*adev
= dev
->dev_private
;
643 struct drm_audio_component
*acomp
= data
;
647 adev
->dm
.audio_component
= NULL
;
650 static const struct component_ops amdgpu_dm_audio_component_bind_ops
= {
651 .bind
= amdgpu_dm_audio_component_bind
,
652 .unbind
= amdgpu_dm_audio_component_unbind
,
655 static int amdgpu_dm_audio_init(struct amdgpu_device
*adev
)
662 adev
->mode_info
.audio
.enabled
= true;
664 adev
->mode_info
.audio
.num_pins
= adev
->dm
.dc
->res_pool
->audio_count
;
666 for (i
= 0; i
< adev
->mode_info
.audio
.num_pins
; i
++) {
667 adev
->mode_info
.audio
.pin
[i
].channels
= -1;
668 adev
->mode_info
.audio
.pin
[i
].rate
= -1;
669 adev
->mode_info
.audio
.pin
[i
].bits_per_sample
= -1;
670 adev
->mode_info
.audio
.pin
[i
].status_bits
= 0;
671 adev
->mode_info
.audio
.pin
[i
].category_code
= 0;
672 adev
->mode_info
.audio
.pin
[i
].connected
= false;
673 adev
->mode_info
.audio
.pin
[i
].id
=
674 adev
->dm
.dc
->res_pool
->audios
[i
]->inst
;
675 adev
->mode_info
.audio
.pin
[i
].offset
= 0;
678 ret
= component_add(adev
->dev
, &amdgpu_dm_audio_component_bind_ops
);
682 adev
->dm
.audio_registered
= true;
687 static void amdgpu_dm_audio_fini(struct amdgpu_device
*adev
)
692 if (!adev
->mode_info
.audio
.enabled
)
695 if (adev
->dm
.audio_registered
) {
696 component_del(adev
->dev
, &amdgpu_dm_audio_component_bind_ops
);
697 adev
->dm
.audio_registered
= false;
700 /* TODO: Disable audio? */
702 adev
->mode_info
.audio
.enabled
= false;
705 static void amdgpu_dm_audio_eld_notify(struct amdgpu_device
*adev
, int pin
)
707 struct drm_audio_component
*acomp
= adev
->dm
.audio_component
;
709 if (acomp
&& acomp
->audio_ops
&& acomp
->audio_ops
->pin_eld_notify
) {
710 DRM_DEBUG_KMS("Notify ELD: %d\n", pin
);
712 acomp
->audio_ops
->pin_eld_notify(acomp
->audio_ops
->audio_ptr
,
717 static int dm_dmub_hw_init(struct amdgpu_device
*adev
)
719 const struct dmcub_firmware_header_v1_0
*hdr
;
720 struct dmub_srv
*dmub_srv
= adev
->dm
.dmub_srv
;
721 struct dmub_srv_fb_info
*fb_info
= adev
->dm
.dmub_fb_info
;
722 const struct firmware
*dmub_fw
= adev
->dm
.dmub_fw
;
723 struct dmcu
*dmcu
= adev
->dm
.dc
->res_pool
->dmcu
;
724 struct abm
*abm
= adev
->dm
.dc
->res_pool
->abm
;
725 struct dmub_srv_hw_params hw_params
;
726 enum dmub_status status
;
727 const unsigned char *fw_inst_const
, *fw_bss_data
;
728 uint32_t i
, fw_inst_const_size
, fw_bss_data_size
;
732 /* DMUB isn't supported on the ASIC. */
736 DRM_ERROR("No framebuffer info for DMUB service.\n");
741 /* Firmware required for DMUB support. */
742 DRM_ERROR("No firmware provided for DMUB.\n");
746 status
= dmub_srv_has_hw_support(dmub_srv
, &has_hw_support
);
747 if (status
!= DMUB_STATUS_OK
) {
748 DRM_ERROR("Error checking HW support for DMUB: %d\n", status
);
752 if (!has_hw_support
) {
753 DRM_INFO("DMUB unsupported on ASIC\n");
757 hdr
= (const struct dmcub_firmware_header_v1_0
*)dmub_fw
->data
;
759 fw_inst_const
= dmub_fw
->data
+
760 le32_to_cpu(hdr
->header
.ucode_array_offset_bytes
) +
763 fw_bss_data
= dmub_fw
->data
+
764 le32_to_cpu(hdr
->header
.ucode_array_offset_bytes
) +
765 le32_to_cpu(hdr
->inst_const_bytes
);
767 /* Copy firmware and bios info into FB memory. */
768 fw_inst_const_size
= le32_to_cpu(hdr
->inst_const_bytes
) -
769 PSP_HEADER_BYTES
- PSP_FOOTER_BYTES
;
771 fw_bss_data_size
= le32_to_cpu(hdr
->bss_data_bytes
);
773 /* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
774 * amdgpu_ucode_init_single_fw will load dmub firmware
775 * fw_inst_const part to cw0; otherwise, the firmware back door load
776 * will be done by dm_dmub_hw_init
778 if (adev
->firmware
.load_type
!= AMDGPU_FW_LOAD_PSP
) {
779 memcpy(fb_info
->fb
[DMUB_WINDOW_0_INST_CONST
].cpu_addr
, fw_inst_const
,
783 if (fw_bss_data_size
)
784 memcpy(fb_info
->fb
[DMUB_WINDOW_2_BSS_DATA
].cpu_addr
,
785 fw_bss_data
, fw_bss_data_size
);
787 /* Copy firmware bios info into FB memory. */
788 memcpy(fb_info
->fb
[DMUB_WINDOW_3_VBIOS
].cpu_addr
, adev
->bios
,
791 /* Reset regions that need to be reset. */
792 memset(fb_info
->fb
[DMUB_WINDOW_4_MAILBOX
].cpu_addr
, 0,
793 fb_info
->fb
[DMUB_WINDOW_4_MAILBOX
].size
);
795 memset(fb_info
->fb
[DMUB_WINDOW_5_TRACEBUFF
].cpu_addr
, 0,
796 fb_info
->fb
[DMUB_WINDOW_5_TRACEBUFF
].size
);
798 memset(fb_info
->fb
[DMUB_WINDOW_6_FW_STATE
].cpu_addr
, 0,
799 fb_info
->fb
[DMUB_WINDOW_6_FW_STATE
].size
);
801 /* Initialize hardware. */
802 memset(&hw_params
, 0, sizeof(hw_params
));
803 hw_params
.fb_base
= adev
->gmc
.fb_start
;
804 hw_params
.fb_offset
= adev
->gmc
.aper_base
;
806 /* backdoor load firmware and trigger dmub running */
807 if (adev
->firmware
.load_type
!= AMDGPU_FW_LOAD_PSP
)
808 hw_params
.load_inst_const
= true;
811 hw_params
.psp_version
= dmcu
->psp_version
;
813 for (i
= 0; i
< fb_info
->num_fb
; ++i
)
814 hw_params
.fb
[i
] = &fb_info
->fb
[i
];
816 status
= dmub_srv_hw_init(dmub_srv
, &hw_params
);
817 if (status
!= DMUB_STATUS_OK
) {
818 DRM_ERROR("Error initializing DMUB HW: %d\n", status
);
822 /* Wait for firmware load to finish. */
823 status
= dmub_srv_wait_for_auto_load(dmub_srv
, 100000);
824 if (status
!= DMUB_STATUS_OK
)
825 DRM_WARN("Wait for DMUB auto-load failed: %d\n", status
);
827 /* Init DMCU and ABM if available. */
829 dmcu
->funcs
->dmcu_init(dmcu
);
830 abm
->dmcu_is_running
= dmcu
->funcs
->is_dmcu_initialized(dmcu
);
833 adev
->dm
.dc
->ctx
->dmub_srv
= dc_dmub_srv_create(adev
->dm
.dc
, dmub_srv
);
834 if (!adev
->dm
.dc
->ctx
->dmub_srv
) {
835 DRM_ERROR("Couldn't allocate DC DMUB server!\n");
839 DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
840 adev
->dm
.dmcub_fw_version
);
845 static int amdgpu_dm_init(struct amdgpu_device
*adev
)
847 struct dc_init_data init_data
;
848 #ifdef CONFIG_DRM_AMD_DC_HDCP
849 struct dc_callback_init init_params
;
853 adev
->dm
.ddev
= adev
->ddev
;
854 adev
->dm
.adev
= adev
;
856 /* Zero all the fields */
857 memset(&init_data
, 0, sizeof(init_data
));
858 #ifdef CONFIG_DRM_AMD_DC_HDCP
859 memset(&init_params
, 0, sizeof(init_params
));
862 mutex_init(&adev
->dm
.dc_lock
);
863 mutex_init(&adev
->dm
.audio_lock
);
865 if(amdgpu_dm_irq_init(adev
)) {
866 DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
870 init_data
.asic_id
.chip_family
= adev
->family
;
872 init_data
.asic_id
.pci_revision_id
= adev
->pdev
->revision
;
873 init_data
.asic_id
.hw_internal_rev
= adev
->external_rev_id
;
875 init_data
.asic_id
.vram_width
= adev
->gmc
.vram_width
;
876 /* TODO: initialize init_data.asic_id.vram_type here!!!! */
877 init_data
.asic_id
.atombios_base_address
=
878 adev
->mode_info
.atom_context
->bios
;
880 init_data
.driver
= adev
;
882 adev
->dm
.cgs_device
= amdgpu_cgs_create_device(adev
);
884 if (!adev
->dm
.cgs_device
) {
885 DRM_ERROR("amdgpu: failed to create cgs device.\n");
889 init_data
.cgs_device
= adev
->dm
.cgs_device
;
891 init_data
.dce_environment
= DCE_ENV_PRODUCTION_DRV
;
893 switch (adev
->asic_type
) {
898 init_data
.flags
.gpu_vm_support
= true;
904 if (amdgpu_dc_feature_mask
& DC_FBC_MASK
)
905 init_data
.flags
.fbc_support
= true;
907 if (amdgpu_dc_feature_mask
& DC_MULTI_MON_PP_MCLK_SWITCH_MASK
)
908 init_data
.flags
.multi_mon_pp_mclk_switch
= true;
910 if (amdgpu_dc_feature_mask
& DC_DISABLE_FRACTIONAL_PWM_MASK
)
911 init_data
.flags
.disable_fractional_pwm
= true;
913 init_data
.flags
.power_down_display_on_boot
= true;
915 init_data
.soc_bounding_box
= adev
->dm
.soc_bounding_box
;
917 /* Display Core create. */
918 adev
->dm
.dc
= dc_create(&init_data
);
921 DRM_INFO("Display Core initialized with v%s!\n", DC_VER
);
923 DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER
);
927 if (amdgpu_dc_debug_mask
& DC_DISABLE_PIPE_SPLIT
) {
928 adev
->dm
.dc
->debug
.force_single_disp_pipe_split
= false;
929 adev
->dm
.dc
->debug
.pipe_split_policy
= MPC_SPLIT_AVOID
;
932 if (adev
->asic_type
!= CHIP_CARRIZO
&& adev
->asic_type
!= CHIP_STONEY
)
933 adev
->dm
.dc
->debug
.disable_stutter
= amdgpu_pp_feature_mask
& PP_STUTTER_MODE
? false : true;
935 if (amdgpu_dc_debug_mask
& DC_DISABLE_STUTTER
)
936 adev
->dm
.dc
->debug
.disable_stutter
= true;
938 if (amdgpu_dc_debug_mask
& DC_DISABLE_DSC
)
939 adev
->dm
.dc
->debug
.disable_dsc
= true;
941 if (amdgpu_dc_debug_mask
& DC_DISABLE_CLOCK_GATING
)
942 adev
->dm
.dc
->debug
.disable_clock_gate
= true;
944 r
= dm_dmub_hw_init(adev
);
946 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r
);
950 dc_hardware_init(adev
->dm
.dc
);
952 adev
->dm
.freesync_module
= mod_freesync_create(adev
->dm
.dc
);
953 if (!adev
->dm
.freesync_module
) {
955 "amdgpu: failed to initialize freesync_module.\n");
957 DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
958 adev
->dm
.freesync_module
);
960 amdgpu_dm_init_color_mod();
962 #ifdef CONFIG_DRM_AMD_DC_HDCP
963 if (adev
->asic_type
>= CHIP_RAVEN
) {
964 adev
->dm
.hdcp_workqueue
= hdcp_create_workqueue(adev
, &init_params
.cp_psp
, adev
->dm
.dc
);
966 if (!adev
->dm
.hdcp_workqueue
)
967 DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
969 DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev
->dm
.hdcp_workqueue
);
971 dc_init_callbacks(adev
->dm
.dc
, &init_params
);
974 if (amdgpu_dm_initialize_drm_device(adev
)) {
976 "amdgpu: failed to initialize sw for display support.\n");
980 /* Update the actual used number of crtc */
981 adev
->mode_info
.num_crtc
= adev
->dm
.display_indexes_num
;
983 /* create fake encoders for MST */
984 dm_dp_create_fake_mst_encoders(adev
);
986 /* TODO: Add_display_info? */
988 /* TODO use dynamic cursor width */
989 adev
->ddev
->mode_config
.cursor_width
= adev
->dm
.dc
->caps
.max_cursor_size
;
990 adev
->ddev
->mode_config
.cursor_height
= adev
->dm
.dc
->caps
.max_cursor_size
;
992 if (drm_vblank_init(adev
->ddev
, adev
->dm
.display_indexes_num
)) {
994 "amdgpu: failed to initialize sw for display support.\n");
998 DRM_DEBUG_DRIVER("KMS initialized.\n");
1002 amdgpu_dm_fini(adev
);
1007 static void amdgpu_dm_fini(struct amdgpu_device
*adev
)
1011 for (i
= 0; i
< adev
->dm
.display_indexes_num
; i
++) {
1012 drm_encoder_cleanup(&adev
->dm
.mst_encoders
[i
].base
);
1015 amdgpu_dm_audio_fini(adev
);
1017 amdgpu_dm_destroy_drm_device(&adev
->dm
);
1019 #ifdef CONFIG_DRM_AMD_DC_HDCP
1020 if (adev
->dm
.hdcp_workqueue
) {
1021 hdcp_destroy(adev
->dm
.hdcp_workqueue
);
1022 adev
->dm
.hdcp_workqueue
= NULL
;
1026 dc_deinit_callbacks(adev
->dm
.dc
);
1028 if (adev
->dm
.dc
->ctx
->dmub_srv
) {
1029 dc_dmub_srv_destroy(&adev
->dm
.dc
->ctx
->dmub_srv
);
1030 adev
->dm
.dc
->ctx
->dmub_srv
= NULL
;
1033 if (adev
->dm
.dmub_bo
)
1034 amdgpu_bo_free_kernel(&adev
->dm
.dmub_bo
,
1035 &adev
->dm
.dmub_bo_gpu_addr
,
1036 &adev
->dm
.dmub_bo_cpu_addr
);
1038 /* DC Destroy TODO: Replace destroy DAL */
1040 dc_destroy(&adev
->dm
.dc
);
1042 * TODO: pageflip, vlank interrupt
1044 * amdgpu_dm_irq_fini(adev);
1047 if (adev
->dm
.cgs_device
) {
1048 amdgpu_cgs_destroy_device(adev
->dm
.cgs_device
);
1049 adev
->dm
.cgs_device
= NULL
;
1051 if (adev
->dm
.freesync_module
) {
1052 mod_freesync_destroy(adev
->dm
.freesync_module
);
1053 adev
->dm
.freesync_module
= NULL
;
1056 mutex_destroy(&adev
->dm
.audio_lock
);
1057 mutex_destroy(&adev
->dm
.dc_lock
);
1062 static int load_dmcu_fw(struct amdgpu_device
*adev
)
1064 const char *fw_name_dmcu
= NULL
;
1066 const struct dmcu_firmware_header_v1_0
*hdr
;
1068 switch(adev
->asic_type
) {
1069 #if defined(CONFIG_DRM_AMD_DC_SI)
1084 case CHIP_POLARIS11
:
1085 case CHIP_POLARIS10
:
1086 case CHIP_POLARIS12
:
1094 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
1095 case CHIP_SIENNA_CICHLID
:
1096 case CHIP_NAVY_FLOUNDER
:
1100 fw_name_dmcu
= FIRMWARE_NAVI12_DMCU
;
1103 if (ASICREV_IS_PICASSO(adev
->external_rev_id
))
1104 fw_name_dmcu
= FIRMWARE_RAVEN_DMCU
;
1105 else if (ASICREV_IS_RAVEN2(adev
->external_rev_id
))
1106 fw_name_dmcu
= FIRMWARE_RAVEN_DMCU
;
1111 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev
->asic_type
);
1115 if (adev
->firmware
.load_type
!= AMDGPU_FW_LOAD_PSP
) {
1116 DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1120 r
= request_firmware_direct(&adev
->dm
.fw_dmcu
, fw_name_dmcu
, adev
->dev
);
1122 /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1123 DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1124 adev
->dm
.fw_dmcu
= NULL
;
1128 dev_err(adev
->dev
, "amdgpu_dm: Can't load firmware \"%s\"\n",
1133 r
= amdgpu_ucode_validate(adev
->dm
.fw_dmcu
);
1135 dev_err(adev
->dev
, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1137 release_firmware(adev
->dm
.fw_dmcu
);
1138 adev
->dm
.fw_dmcu
= NULL
;
1142 hdr
= (const struct dmcu_firmware_header_v1_0
*)adev
->dm
.fw_dmcu
->data
;
1143 adev
->firmware
.ucode
[AMDGPU_UCODE_ID_DMCU_ERAM
].ucode_id
= AMDGPU_UCODE_ID_DMCU_ERAM
;
1144 adev
->firmware
.ucode
[AMDGPU_UCODE_ID_DMCU_ERAM
].fw
= adev
->dm
.fw_dmcu
;
1145 adev
->firmware
.fw_size
+=
1146 ALIGN(le32_to_cpu(hdr
->header
.ucode_size_bytes
) - le32_to_cpu(hdr
->intv_size_bytes
), PAGE_SIZE
);
1148 adev
->firmware
.ucode
[AMDGPU_UCODE_ID_DMCU_INTV
].ucode_id
= AMDGPU_UCODE_ID_DMCU_INTV
;
1149 adev
->firmware
.ucode
[AMDGPU_UCODE_ID_DMCU_INTV
].fw
= adev
->dm
.fw_dmcu
;
1150 adev
->firmware
.fw_size
+=
1151 ALIGN(le32_to_cpu(hdr
->intv_size_bytes
), PAGE_SIZE
);
1153 adev
->dm
.dmcu_fw_version
= le32_to_cpu(hdr
->header
.ucode_version
);
1155 DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1160 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx
, uint32_t address
)
1162 struct amdgpu_device
*adev
= ctx
;
1164 return dm_read_reg(adev
->dm
.dc
->ctx
, address
);
1167 static void amdgpu_dm_dmub_reg_write(void *ctx
, uint32_t address
,
1170 struct amdgpu_device
*adev
= ctx
;
1172 return dm_write_reg(adev
->dm
.dc
->ctx
, address
, value
);
1175 static int dm_dmub_sw_init(struct amdgpu_device
*adev
)
1177 struct dmub_srv_create_params create_params
;
1178 struct dmub_srv_region_params region_params
;
1179 struct dmub_srv_region_info region_info
;
1180 struct dmub_srv_fb_params fb_params
;
1181 struct dmub_srv_fb_info
*fb_info
;
1182 struct dmub_srv
*dmub_srv
;
1183 const struct dmcub_firmware_header_v1_0
*hdr
;
1184 const char *fw_name_dmub
;
1185 enum dmub_asic dmub_asic
;
1186 enum dmub_status status
;
1189 switch (adev
->asic_type
) {
1191 dmub_asic
= DMUB_ASIC_DCN21
;
1192 fw_name_dmub
= FIRMWARE_RENOIR_DMUB
;
1194 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
1195 case CHIP_SIENNA_CICHLID
:
1196 dmub_asic
= DMUB_ASIC_DCN30
;
1197 fw_name_dmub
= FIRMWARE_SIENNA_CICHLID_DMUB
;
1199 case CHIP_NAVY_FLOUNDER
:
1200 dmub_asic
= DMUB_ASIC_DCN30
;
1201 fw_name_dmub
= FIRMWARE_NAVY_FLOUNDER_DMUB
;
1206 /* ASIC doesn't support DMUB. */
1210 r
= request_firmware_direct(&adev
->dm
.dmub_fw
, fw_name_dmub
, adev
->dev
);
1212 DRM_ERROR("DMUB firmware loading failed: %d\n", r
);
1216 r
= amdgpu_ucode_validate(adev
->dm
.dmub_fw
);
1218 DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r
);
1222 hdr
= (const struct dmcub_firmware_header_v1_0
*)adev
->dm
.dmub_fw
->data
;
1224 if (adev
->firmware
.load_type
== AMDGPU_FW_LOAD_PSP
) {
1225 adev
->firmware
.ucode
[AMDGPU_UCODE_ID_DMCUB
].ucode_id
=
1226 AMDGPU_UCODE_ID_DMCUB
;
1227 adev
->firmware
.ucode
[AMDGPU_UCODE_ID_DMCUB
].fw
=
1229 adev
->firmware
.fw_size
+=
1230 ALIGN(le32_to_cpu(hdr
->inst_const_bytes
), PAGE_SIZE
);
1232 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1233 adev
->dm
.dmcub_fw_version
);
1236 adev
->dm
.dmcub_fw_version
= le32_to_cpu(hdr
->header
.ucode_version
);
1238 adev
->dm
.dmub_srv
= kzalloc(sizeof(*adev
->dm
.dmub_srv
), GFP_KERNEL
);
1239 dmub_srv
= adev
->dm
.dmub_srv
;
1242 DRM_ERROR("Failed to allocate DMUB service!\n");
1246 memset(&create_params
, 0, sizeof(create_params
));
1247 create_params
.user_ctx
= adev
;
1248 create_params
.funcs
.reg_read
= amdgpu_dm_dmub_reg_read
;
1249 create_params
.funcs
.reg_write
= amdgpu_dm_dmub_reg_write
;
1250 create_params
.asic
= dmub_asic
;
1252 /* Create the DMUB service. */
1253 status
= dmub_srv_create(dmub_srv
, &create_params
);
1254 if (status
!= DMUB_STATUS_OK
) {
1255 DRM_ERROR("Error creating DMUB service: %d\n", status
);
1259 /* Calculate the size of all the regions for the DMUB service. */
1260 memset(®ion_params
, 0, sizeof(region_params
));
1262 region_params
.inst_const_size
= le32_to_cpu(hdr
->inst_const_bytes
) -
1263 PSP_HEADER_BYTES
- PSP_FOOTER_BYTES
;
1264 region_params
.bss_data_size
= le32_to_cpu(hdr
->bss_data_bytes
);
1265 region_params
.vbios_size
= adev
->bios_size
;
1266 region_params
.fw_bss_data
= region_params
.bss_data_size
?
1267 adev
->dm
.dmub_fw
->data
+
1268 le32_to_cpu(hdr
->header
.ucode_array_offset_bytes
) +
1269 le32_to_cpu(hdr
->inst_const_bytes
) : NULL
;
1270 region_params
.fw_inst_const
=
1271 adev
->dm
.dmub_fw
->data
+
1272 le32_to_cpu(hdr
->header
.ucode_array_offset_bytes
) +
1275 status
= dmub_srv_calc_region_info(dmub_srv
, ®ion_params
,
1278 if (status
!= DMUB_STATUS_OK
) {
1279 DRM_ERROR("Error calculating DMUB region info: %d\n", status
);
1284 * Allocate a framebuffer based on the total size of all the regions.
1285 * TODO: Move this into GART.
1287 r
= amdgpu_bo_create_kernel(adev
, region_info
.fb_size
, PAGE_SIZE
,
1288 AMDGPU_GEM_DOMAIN_VRAM
, &adev
->dm
.dmub_bo
,
1289 &adev
->dm
.dmub_bo_gpu_addr
,
1290 &adev
->dm
.dmub_bo_cpu_addr
);
1294 /* Rebase the regions on the framebuffer address. */
1295 memset(&fb_params
, 0, sizeof(fb_params
));
1296 fb_params
.cpu_addr
= adev
->dm
.dmub_bo_cpu_addr
;
1297 fb_params
.gpu_addr
= adev
->dm
.dmub_bo_gpu_addr
;
1298 fb_params
.region_info
= ®ion_info
;
1300 adev
->dm
.dmub_fb_info
=
1301 kzalloc(sizeof(*adev
->dm
.dmub_fb_info
), GFP_KERNEL
);
1302 fb_info
= adev
->dm
.dmub_fb_info
;
1306 "Failed to allocate framebuffer info for DMUB service!\n");
1310 status
= dmub_srv_calc_fb_info(dmub_srv
, &fb_params
, fb_info
);
1311 if (status
!= DMUB_STATUS_OK
) {
1312 DRM_ERROR("Error calculating DMUB FB info: %d\n", status
);
/* IP-block sw_init hook: bring up the DMUB service, then load the DMCU
 * firmware. @handle is the amdgpu_device. */
static int dm_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = dm_dmub_sw_init(adev);
	if (r)
		return r;

	return load_dmcu_fw(adev);
}
1331 static int dm_sw_fini(void *handle
)
1333 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
1335 kfree(adev
->dm
.dmub_fb_info
);
1336 adev
->dm
.dmub_fb_info
= NULL
;
1338 if (adev
->dm
.dmub_srv
) {
1339 dmub_srv_destroy(adev
->dm
.dmub_srv
);
1340 adev
->dm
.dmub_srv
= NULL
;
1343 release_firmware(adev
->dm
.dmub_fw
);
1344 adev
->dm
.dmub_fw
= NULL
;
1346 release_firmware(adev
->dm
.fw_dmcu
);
1347 adev
->dm
.fw_dmcu
= NULL
;
1352 static int detect_mst_link_for_all_connectors(struct drm_device
*dev
)
1354 struct amdgpu_dm_connector
*aconnector
;
1355 struct drm_connector
*connector
;
1356 struct drm_connector_list_iter iter
;
1359 drm_connector_list_iter_begin(dev
, &iter
);
1360 drm_for_each_connector_iter(connector
, &iter
) {
1361 aconnector
= to_amdgpu_dm_connector(connector
);
1362 if (aconnector
->dc_link
->type
== dc_connection_mst_branch
&&
1363 aconnector
->mst_mgr
.aux
) {
1364 DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1366 aconnector
->base
.base
.id
);
1368 ret
= drm_dp_mst_topology_mgr_set_mst(&aconnector
->mst_mgr
, true);
1370 DRM_ERROR("DM_MST: Failed to start MST\n");
1371 aconnector
->dc_link
->type
=
1372 dc_connection_single
;
1377 drm_connector_list_iter_end(&iter
);
1382 static int dm_late_init(void *handle
)
1384 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
1386 struct dmcu_iram_parameters params
;
1387 unsigned int linear_lut
[16];
1389 struct dmcu
*dmcu
= NULL
;
1392 if (!adev
->dm
.fw_dmcu
)
1393 return detect_mst_link_for_all_connectors(adev
->ddev
);
1395 dmcu
= adev
->dm
.dc
->res_pool
->dmcu
;
1397 for (i
= 0; i
< 16; i
++)
1398 linear_lut
[i
] = 0xFFFF * i
/ 15;
1401 params
.backlight_ramping_start
= 0xCCCC;
1402 params
.backlight_ramping_reduction
= 0xCCCCCCCC;
1403 params
.backlight_lut_array_size
= 16;
1404 params
.backlight_lut_array
= linear_lut
;
1406 /* Min backlight level after ABM reduction, Don't allow below 1%
1407 * 0xFFFF x 0.01 = 0x28F
1409 params
.min_abm_backlight
= 0x28F;
1411 /* In the case where abm is implemented on dmcub,
1412 * dmcu object will be null.
1413 * ABM 2.4 and up are implemented on dmcub.
1416 ret
= dmcu_load_iram(dmcu
, params
);
1417 else if (adev
->dm
.dc
->ctx
->dmub_srv
)
1418 ret
= dmub_init_abm_config(adev
->dm
.dc
->res_pool
->abm
, params
);
1423 return detect_mst_link_for_all_connectors(adev
->ddev
);
1426 static void s3_handle_mst(struct drm_device
*dev
, bool suspend
)
1428 struct amdgpu_dm_connector
*aconnector
;
1429 struct drm_connector
*connector
;
1430 struct drm_connector_list_iter iter
;
1431 struct drm_dp_mst_topology_mgr
*mgr
;
1433 bool need_hotplug
= false;
1435 drm_connector_list_iter_begin(dev
, &iter
);
1436 drm_for_each_connector_iter(connector
, &iter
) {
1437 aconnector
= to_amdgpu_dm_connector(connector
);
1438 if (aconnector
->dc_link
->type
!= dc_connection_mst_branch
||
1439 aconnector
->mst_port
)
1442 mgr
= &aconnector
->mst_mgr
;
1445 drm_dp_mst_topology_mgr_suspend(mgr
);
1447 ret
= drm_dp_mst_topology_mgr_resume(mgr
, true);
1449 drm_dp_mst_topology_mgr_set_mst(mgr
, false);
1450 need_hotplug
= true;
1454 drm_connector_list_iter_end(&iter
);
1457 drm_kms_helper_hotplug_event(dev
);
1460 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device
*adev
)
1462 struct smu_context
*smu
= &adev
->smu
;
1465 if (!is_support_sw_smu(adev
))
1468 /* This interface is for dGPU Navi1x.Linux dc-pplib interface depends
1469 * on window driver dc implementation.
1470 * For Navi1x, clock settings of dcn watermarks are fixed. the settings
1471 * should be passed to smu during boot up and resume from s3.
1472 * boot up: dc calculate dcn watermark clock settings within dc_create,
1473 * dcn20_resource_construct
1474 * then call pplib functions below to pass the settings to smu:
1475 * smu_set_watermarks_for_clock_ranges
1476 * smu_set_watermarks_table
1477 * navi10_set_watermarks_table
1478 * smu_write_watermarks_table
1480 * For Renoir, clock settings of dcn watermark are also fixed values.
1481 * dc has implemented different flow for window driver:
1482 * dc_hardware_init / dc_set_power_state
1487 * smu_set_watermarks_for_clock_ranges
1488 * renoir_set_watermarks_table
1489 * smu_write_watermarks_table
1492 * dc_hardware_init -> amdgpu_dm_init
1493 * dc_set_power_state --> dm_resume
1495 * therefore, this function apply to navi10/12/14 but not Renoir
1498 switch(adev
->asic_type
) {
1507 ret
= smu_write_watermarks_table(smu
);
1509 DRM_ERROR("Failed to update WMTABLE!\n");
/**
 * dm_hw_init() - Initialize DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Initialize the &struct amdgpu_display_manager device. This involves calling
 * the initializers of each DM component, then populating the struct with them.
 *
 * Although the function implies hardware initialization, both hardware and
 * software are initialized here. Splitting them out to their relevant init
 * hooks is a future TODO item.
 *
 * Some notable things that are initialized here:
 *
 * - Display Core, both software and hardware
 * - DC modules that we need (freesync and color management)
 * - DRM software states
 * - Interrupt sources and handlers
 * - Debug FS entries, if enabled
 */
static int dm_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	/* Create DAL display manager */
	amdgpu_dm_init(adev);
	amdgpu_dm_hpd_init(adev);

	return 0;
}
/**
 * dm_hw_fini() - Teardown DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Teardown components within &struct amdgpu_display_manager that require
 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
 * were loaded. Also flush IRQ workqueues and disable them.
 */
static int dm_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dm_hpd_fini(adev);

	amdgpu_dm_irq_fini(adev);
	amdgpu_dm_fini(adev);
	return 0;
}
1566 static int dm_enable_vblank(struct drm_crtc
*crtc
);
1567 static void dm_disable_vblank(struct drm_crtc
*crtc
);
1569 static void dm_gpureset_toggle_interrupts(struct amdgpu_device
*adev
,
1570 struct dc_state
*state
, bool enable
)
1572 enum dc_irq_source irq_source
;
1573 struct amdgpu_crtc
*acrtc
;
1577 for (i
= 0; i
< state
->stream_count
; i
++) {
1578 acrtc
= get_crtc_by_otg_inst(
1579 adev
, state
->stream_status
[i
].primary_otg_inst
);
1581 if (acrtc
&& state
->stream_status
[i
].plane_count
!= 0) {
1582 irq_source
= IRQ_TYPE_PFLIP
+ acrtc
->otg_inst
;
1583 rc
= dc_interrupt_set(adev
->dm
.dc
, irq_source
, enable
) ? 0 : -EBUSY
;
1584 DRM_DEBUG("crtc %d - vupdate irq %sabling: r=%d\n",
1585 acrtc
->crtc_id
, enable
? "en" : "dis", rc
);
1587 DRM_WARN("Failed to %s pflip interrupts\n",
1588 enable
? "enable" : "disable");
1591 rc
= dm_enable_vblank(&acrtc
->base
);
1593 DRM_WARN("Failed to enable vblank interrupts\n");
1595 dm_disable_vblank(&acrtc
->base
);
1603 static enum dc_status
amdgpu_dm_commit_zero_streams(struct dc
*dc
)
1605 struct dc_state
*context
= NULL
;
1606 enum dc_status res
= DC_ERROR_UNEXPECTED
;
1608 struct dc_stream_state
*del_streams
[MAX_PIPES
];
1609 int del_streams_count
= 0;
1611 memset(del_streams
, 0, sizeof(del_streams
));
1613 context
= dc_create_state(dc
);
1614 if (context
== NULL
)
1615 goto context_alloc_fail
;
1617 dc_resource_state_copy_construct_current(dc
, context
);
1619 /* First remove from context all streams */
1620 for (i
= 0; i
< context
->stream_count
; i
++) {
1621 struct dc_stream_state
*stream
= context
->streams
[i
];
1623 del_streams
[del_streams_count
++] = stream
;
1626 /* Remove all planes for removed streams and then remove the streams */
1627 for (i
= 0; i
< del_streams_count
; i
++) {
1628 if (!dc_rem_all_planes_for_stream(dc
, del_streams
[i
], context
)) {
1629 res
= DC_FAIL_DETACH_SURFACES
;
1633 res
= dc_remove_stream_from_ctx(dc
, context
, del_streams
[i
]);
1639 res
= dc_validate_global_state(dc
, context
, false);
1642 DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__
, res
);
1646 res
= dc_commit_state(dc
, context
);
1649 dc_release_state(context
);
1655 static int dm_suspend(void *handle
)
1657 struct amdgpu_device
*adev
= handle
;
1658 struct amdgpu_display_manager
*dm
= &adev
->dm
;
1661 if (amdgpu_in_reset(adev
)) {
1662 mutex_lock(&dm
->dc_lock
);
1663 dm
->cached_dc_state
= dc_copy_state(dm
->dc
->current_state
);
1665 dm_gpureset_toggle_interrupts(adev
, dm
->cached_dc_state
, false);
1667 amdgpu_dm_commit_zero_streams(dm
->dc
);
1669 amdgpu_dm_irq_suspend(adev
);
1674 WARN_ON(adev
->dm
.cached_state
);
1675 adev
->dm
.cached_state
= drm_atomic_helper_suspend(adev
->ddev
);
1677 s3_handle_mst(adev
->ddev
, true);
1679 amdgpu_dm_irq_suspend(adev
);
1682 dc_set_power_state(dm
->dc
, DC_ACPI_CM_POWER_STATE_D3
);
1687 static struct amdgpu_dm_connector
*
1688 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state
*state
,
1689 struct drm_crtc
*crtc
)
1692 struct drm_connector_state
*new_con_state
;
1693 struct drm_connector
*connector
;
1694 struct drm_crtc
*crtc_from_state
;
1696 for_each_new_connector_in_state(state
, connector
, new_con_state
, i
) {
1697 crtc_from_state
= new_con_state
->crtc
;
1699 if (crtc_from_state
== crtc
)
1700 return to_amdgpu_dm_connector(connector
);
1706 static void emulated_link_detect(struct dc_link
*link
)
1708 struct dc_sink_init_data sink_init_data
= { 0 };
1709 struct display_sink_capability sink_caps
= { 0 };
1710 enum dc_edid_status edid_status
;
1711 struct dc_context
*dc_ctx
= link
->ctx
;
1712 struct dc_sink
*sink
= NULL
;
1713 struct dc_sink
*prev_sink
= NULL
;
1715 link
->type
= dc_connection_none
;
1716 prev_sink
= link
->local_sink
;
1718 if (prev_sink
!= NULL
)
1719 dc_sink_retain(prev_sink
);
1721 switch (link
->connector_signal
) {
1722 case SIGNAL_TYPE_HDMI_TYPE_A
: {
1723 sink_caps
.transaction_type
= DDC_TRANSACTION_TYPE_I2C
;
1724 sink_caps
.signal
= SIGNAL_TYPE_HDMI_TYPE_A
;
1728 case SIGNAL_TYPE_DVI_SINGLE_LINK
: {
1729 sink_caps
.transaction_type
= DDC_TRANSACTION_TYPE_I2C
;
1730 sink_caps
.signal
= SIGNAL_TYPE_DVI_SINGLE_LINK
;
1734 case SIGNAL_TYPE_DVI_DUAL_LINK
: {
1735 sink_caps
.transaction_type
= DDC_TRANSACTION_TYPE_I2C
;
1736 sink_caps
.signal
= SIGNAL_TYPE_DVI_DUAL_LINK
;
1740 case SIGNAL_TYPE_LVDS
: {
1741 sink_caps
.transaction_type
= DDC_TRANSACTION_TYPE_I2C
;
1742 sink_caps
.signal
= SIGNAL_TYPE_LVDS
;
1746 case SIGNAL_TYPE_EDP
: {
1747 sink_caps
.transaction_type
=
1748 DDC_TRANSACTION_TYPE_I2C_OVER_AUX
;
1749 sink_caps
.signal
= SIGNAL_TYPE_EDP
;
1753 case SIGNAL_TYPE_DISPLAY_PORT
: {
1754 sink_caps
.transaction_type
=
1755 DDC_TRANSACTION_TYPE_I2C_OVER_AUX
;
1756 sink_caps
.signal
= SIGNAL_TYPE_VIRTUAL
;
1761 DC_ERROR("Invalid connector type! signal:%d\n",
1762 link
->connector_signal
);
1766 sink_init_data
.link
= link
;
1767 sink_init_data
.sink_signal
= sink_caps
.signal
;
1769 sink
= dc_sink_create(&sink_init_data
);
1771 DC_ERROR("Failed to create sink!\n");
1775 /* dc_sink_create returns a new reference */
1776 link
->local_sink
= sink
;
1778 edid_status
= dm_helpers_read_local_edid(
1783 if (edid_status
!= EDID_OK
)
1784 DC_ERROR("Failed to read EDID");
1788 static void dm_gpureset_commit_state(struct dc_state
*dc_state
,
1789 struct amdgpu_display_manager
*dm
)
1792 struct dc_surface_update surface_updates
[MAX_SURFACES
];
1793 struct dc_plane_info plane_infos
[MAX_SURFACES
];
1794 struct dc_scaling_info scaling_infos
[MAX_SURFACES
];
1795 struct dc_flip_addrs flip_addrs
[MAX_SURFACES
];
1796 struct dc_stream_update stream_update
;
1800 bundle
= kzalloc(sizeof(*bundle
), GFP_KERNEL
);
1803 dm_error("Failed to allocate update bundle\n");
1807 for (k
= 0; k
< dc_state
->stream_count
; k
++) {
1808 bundle
->stream_update
.stream
= dc_state
->streams
[k
];
1810 for (m
= 0; m
< dc_state
->stream_status
->plane_count
; m
++) {
1811 bundle
->surface_updates
[m
].surface
=
1812 dc_state
->stream_status
->plane_states
[m
];
1813 bundle
->surface_updates
[m
].surface
->force_full_update
=
1816 dc_commit_updates_for_stream(
1817 dm
->dc
, bundle
->surface_updates
,
1818 dc_state
->stream_status
->plane_count
,
1819 dc_state
->streams
[k
], &bundle
->stream_update
, dc_state
);
1828 static int dm_resume(void *handle
)
1830 struct amdgpu_device
*adev
= handle
;
1831 struct drm_device
*ddev
= adev
->ddev
;
1832 struct amdgpu_display_manager
*dm
= &adev
->dm
;
1833 struct amdgpu_dm_connector
*aconnector
;
1834 struct drm_connector
*connector
;
1835 struct drm_connector_list_iter iter
;
1836 struct drm_crtc
*crtc
;
1837 struct drm_crtc_state
*new_crtc_state
;
1838 struct dm_crtc_state
*dm_new_crtc_state
;
1839 struct drm_plane
*plane
;
1840 struct drm_plane_state
*new_plane_state
;
1841 struct dm_plane_state
*dm_new_plane_state
;
1842 struct dm_atomic_state
*dm_state
= to_dm_atomic_state(dm
->atomic_obj
.state
);
1843 enum dc_connection_type new_connection_type
= dc_connection_none
;
1844 struct dc_state
*dc_state
;
1847 if (amdgpu_in_reset(adev
)) {
1848 dc_state
= dm
->cached_dc_state
;
1850 r
= dm_dmub_hw_init(adev
);
1852 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r
);
1854 dc_set_power_state(dm
->dc
, DC_ACPI_CM_POWER_STATE_D0
);
1857 amdgpu_dm_irq_resume_early(adev
);
1859 for (i
= 0; i
< dc_state
->stream_count
; i
++) {
1860 dc_state
->streams
[i
]->mode_changed
= true;
1861 for (j
= 0; j
< dc_state
->stream_status
->plane_count
; j
++) {
1862 dc_state
->stream_status
->plane_states
[j
]->update_flags
.raw
1867 WARN_ON(!dc_commit_state(dm
->dc
, dc_state
));
1869 dm_gpureset_commit_state(dm
->cached_dc_state
, dm
);
1871 dm_gpureset_toggle_interrupts(adev
, dm
->cached_dc_state
, true);
1873 dc_release_state(dm
->cached_dc_state
);
1874 dm
->cached_dc_state
= NULL
;
1876 amdgpu_dm_irq_resume_late(adev
);
1878 mutex_unlock(&dm
->dc_lock
);
1882 /* Recreate dc_state - DC invalidates it when setting power state to S3. */
1883 dc_release_state(dm_state
->context
);
1884 dm_state
->context
= dc_create_state(dm
->dc
);
1885 /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
1886 dc_resource_state_construct(dm
->dc
, dm_state
->context
);
1888 /* Before powering on DC we need to re-initialize DMUB. */
1889 r
= dm_dmub_hw_init(adev
);
1891 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r
);
1893 /* power on hardware */
1894 dc_set_power_state(dm
->dc
, DC_ACPI_CM_POWER_STATE_D0
);
1896 /* program HPD filter */
1900 * early enable HPD Rx IRQ, should be done before set mode as short
1901 * pulse interrupts are used for MST
1903 amdgpu_dm_irq_resume_early(adev
);
1905 /* On resume we need to rewrite the MSTM control bits to enable MST*/
1906 s3_handle_mst(ddev
, false);
1909 drm_connector_list_iter_begin(ddev
, &iter
);
1910 drm_for_each_connector_iter(connector
, &iter
) {
1911 aconnector
= to_amdgpu_dm_connector(connector
);
1914 * this is the case when traversing through already created
1915 * MST connectors, should be skipped
1917 if (aconnector
->mst_port
)
1920 mutex_lock(&aconnector
->hpd_lock
);
1921 if (!dc_link_detect_sink(aconnector
->dc_link
, &new_connection_type
))
1922 DRM_ERROR("KMS: Failed to detect connector\n");
1924 if (aconnector
->base
.force
&& new_connection_type
== dc_connection_none
)
1925 emulated_link_detect(aconnector
->dc_link
);
1927 dc_link_detect(aconnector
->dc_link
, DETECT_REASON_HPD
);
1929 if (aconnector
->fake_enable
&& aconnector
->dc_link
->local_sink
)
1930 aconnector
->fake_enable
= false;
1932 if (aconnector
->dc_sink
)
1933 dc_sink_release(aconnector
->dc_sink
);
1934 aconnector
->dc_sink
= NULL
;
1935 amdgpu_dm_update_connector_after_detect(aconnector
);
1936 mutex_unlock(&aconnector
->hpd_lock
);
1938 drm_connector_list_iter_end(&iter
);
1940 /* Force mode set in atomic commit */
1941 for_each_new_crtc_in_state(dm
->cached_state
, crtc
, new_crtc_state
, i
)
1942 new_crtc_state
->active_changed
= true;
1945 * atomic_check is expected to create the dc states. We need to release
1946 * them here, since they were duplicated as part of the suspend
1949 for_each_new_crtc_in_state(dm
->cached_state
, crtc
, new_crtc_state
, i
) {
1950 dm_new_crtc_state
= to_dm_crtc_state(new_crtc_state
);
1951 if (dm_new_crtc_state
->stream
) {
1952 WARN_ON(kref_read(&dm_new_crtc_state
->stream
->refcount
) > 1);
1953 dc_stream_release(dm_new_crtc_state
->stream
);
1954 dm_new_crtc_state
->stream
= NULL
;
1958 for_each_new_plane_in_state(dm
->cached_state
, plane
, new_plane_state
, i
) {
1959 dm_new_plane_state
= to_dm_plane_state(new_plane_state
);
1960 if (dm_new_plane_state
->dc_state
) {
1961 WARN_ON(kref_read(&dm_new_plane_state
->dc_state
->refcount
) > 1);
1962 dc_plane_state_release(dm_new_plane_state
->dc_state
);
1963 dm_new_plane_state
->dc_state
= NULL
;
1967 drm_atomic_helper_resume(ddev
, dm
->cached_state
);
1969 dm
->cached_state
= NULL
;
1971 amdgpu_dm_irq_resume_late(adev
);
1973 amdgpu_dm_smu_write_watermarks_table(adev
);
1981 * DM (and consequently DC) is registered in the amdgpu base driver as a IP
1982 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
1983 * the base driver's device list to be initialized and torn down accordingly.
1985 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
1988 static const struct amd_ip_funcs amdgpu_dm_funcs
= {
1990 .early_init
= dm_early_init
,
1991 .late_init
= dm_late_init
,
1992 .sw_init
= dm_sw_init
,
1993 .sw_fini
= dm_sw_fini
,
1994 .hw_init
= dm_hw_init
,
1995 .hw_fini
= dm_hw_fini
,
1996 .suspend
= dm_suspend
,
1997 .resume
= dm_resume
,
1998 .is_idle
= dm_is_idle
,
1999 .wait_for_idle
= dm_wait_for_idle
,
2000 .check_soft_reset
= dm_check_soft_reset
,
2001 .soft_reset
= dm_soft_reset
,
2002 .set_clockgating_state
= dm_set_clockgating_state
,
2003 .set_powergating_state
= dm_set_powergating_state
,
2006 const struct amdgpu_ip_block_version dm_ip_block
=
2008 .type
= AMD_IP_BLOCK_TYPE_DCE
,
2012 .funcs
= &amdgpu_dm_funcs
,
2022 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs
= {
2023 .fb_create
= amdgpu_display_user_framebuffer_create
,
2024 .output_poll_changed
= drm_fb_helper_output_poll_changed
,
2025 .atomic_check
= amdgpu_dm_atomic_check
,
2026 .atomic_commit
= amdgpu_dm_atomic_commit
,
2029 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs
= {
2030 .atomic_commit_tail
= amdgpu_dm_atomic_commit_tail
2033 static void update_connector_ext_caps(struct amdgpu_dm_connector
*aconnector
)
2035 u32 max_cll
, min_cll
, max
, min
, q
, r
;
2036 struct amdgpu_dm_backlight_caps
*caps
;
2037 struct amdgpu_display_manager
*dm
;
2038 struct drm_connector
*conn_base
;
2039 struct amdgpu_device
*adev
;
2040 struct dc_link
*link
= NULL
;
2041 static const u8 pre_computed_values
[] = {
2042 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2043 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2045 if (!aconnector
|| !aconnector
->dc_link
)
2048 link
= aconnector
->dc_link
;
2049 if (link
->connector_signal
!= SIGNAL_TYPE_EDP
)
2052 conn_base
= &aconnector
->base
;
2053 adev
= conn_base
->dev
->dev_private
;
2055 caps
= &dm
->backlight_caps
;
2056 caps
->ext_caps
= &aconnector
->dc_link
->dpcd_sink_ext_caps
;
2057 caps
->aux_support
= false;
2058 max_cll
= conn_base
->hdr_sink_metadata
.hdmi_type1
.max_cll
;
2059 min_cll
= conn_base
->hdr_sink_metadata
.hdmi_type1
.min_cll
;
2061 if (caps
->ext_caps
->bits
.oled
== 1 ||
2062 caps
->ext_caps
->bits
.sdr_aux_backlight_control
== 1 ||
2063 caps
->ext_caps
->bits
.hdr_aux_backlight_control
== 1)
2064 caps
->aux_support
= true;
2066 /* From the specification (CTA-861-G), for calculating the maximum
2067 * luminance we need to use:
2068 * Luminance = 50*2**(CV/32)
2069 * Where CV is a one-byte value.
2070 * For calculating this expression we may need float point precision;
2071 * to avoid this complexity level, we take advantage that CV is divided
2072 * by a constant. From the Euclids division algorithm, we know that CV
2073 * can be written as: CV = 32*q + r. Next, we replace CV in the
2074 * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
2075 * need to pre-compute the value of r/32. For pre-computing the values
2076 * We just used the following Ruby line:
2077 * (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2078 * The results of the above expressions can be verified at
2079 * pre_computed_values.
2083 max
= (1 << q
) * pre_computed_values
[r
];
2085 // min luminance: maxLum * (CV/255)^2 / 100
2086 q
= DIV_ROUND_CLOSEST(min_cll
, 255);
2087 min
= max
* DIV_ROUND_CLOSEST((q
* q
), 100);
2089 caps
->aux_max_input_signal
= max
;
2090 caps
->aux_min_input_signal
= min
;
2093 void amdgpu_dm_update_connector_after_detect(
2094 struct amdgpu_dm_connector
*aconnector
)
2096 struct drm_connector
*connector
= &aconnector
->base
;
2097 struct drm_device
*dev
= connector
->dev
;
2098 struct dc_sink
*sink
;
2100 /* MST handled by drm_mst framework */
2101 if (aconnector
->mst_mgr
.mst_state
== true)
2105 sink
= aconnector
->dc_link
->local_sink
;
2107 dc_sink_retain(sink
);
2110 * Edid mgmt connector gets first update only in mode_valid hook and then
2111 * the connector sink is set to either fake or physical sink depends on link status.
2112 * Skip if already done during boot.
2114 if (aconnector
->base
.force
!= DRM_FORCE_UNSPECIFIED
2115 && aconnector
->dc_em_sink
) {
2118 * For S3 resume with headless use eml_sink to fake stream
2119 * because on resume connector->sink is set to NULL
2121 mutex_lock(&dev
->mode_config
.mutex
);
2124 if (aconnector
->dc_sink
) {
2125 amdgpu_dm_update_freesync_caps(connector
, NULL
);
2127 * retain and release below are used to
2128 * bump up refcount for sink because the link doesn't point
2129 * to it anymore after disconnect, so on next crtc to connector
2130 * reshuffle by UMD we will get into unwanted dc_sink release
2132 dc_sink_release(aconnector
->dc_sink
);
2134 aconnector
->dc_sink
= sink
;
2135 dc_sink_retain(aconnector
->dc_sink
);
2136 amdgpu_dm_update_freesync_caps(connector
,
2139 amdgpu_dm_update_freesync_caps(connector
, NULL
);
2140 if (!aconnector
->dc_sink
) {
2141 aconnector
->dc_sink
= aconnector
->dc_em_sink
;
2142 dc_sink_retain(aconnector
->dc_sink
);
2146 mutex_unlock(&dev
->mode_config
.mutex
);
2149 dc_sink_release(sink
);
2154 * TODO: temporary guard to look for proper fix
2155 * if this sink is MST sink, we should not do anything
2157 if (sink
&& sink
->sink_signal
== SIGNAL_TYPE_DISPLAY_PORT_MST
) {
2158 dc_sink_release(sink
);
2162 if (aconnector
->dc_sink
== sink
) {
2164 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2167 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2168 aconnector
->connector_id
);
2170 dc_sink_release(sink
);
2174 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2175 aconnector
->connector_id
, aconnector
->dc_sink
, sink
);
2177 mutex_lock(&dev
->mode_config
.mutex
);
2180 * 1. Update status of the drm connector
2181 * 2. Send an event and let userspace tell us what to do
2185 * TODO: check if we still need the S3 mode update workaround.
2186 * If yes, put it here.
2188 if (aconnector
->dc_sink
)
2189 amdgpu_dm_update_freesync_caps(connector
, NULL
);
2191 aconnector
->dc_sink
= sink
;
2192 dc_sink_retain(aconnector
->dc_sink
);
2193 if (sink
->dc_edid
.length
== 0) {
2194 aconnector
->edid
= NULL
;
2195 if (aconnector
->dc_link
->aux_mode
) {
2196 drm_dp_cec_unset_edid(
2197 &aconnector
->dm_dp_aux
.aux
);
2201 (struct edid
*)sink
->dc_edid
.raw_edid
;
2203 drm_connector_update_edid_property(connector
,
2206 if (aconnector
->dc_link
->aux_mode
)
2207 drm_dp_cec_set_edid(&aconnector
->dm_dp_aux
.aux
,
2211 amdgpu_dm_update_freesync_caps(connector
, aconnector
->edid
);
2212 update_connector_ext_caps(aconnector
);
2214 drm_dp_cec_unset_edid(&aconnector
->dm_dp_aux
.aux
);
2215 amdgpu_dm_update_freesync_caps(connector
, NULL
);
2216 drm_connector_update_edid_property(connector
, NULL
);
2217 aconnector
->num_modes
= 0;
2218 dc_sink_release(aconnector
->dc_sink
);
2219 aconnector
->dc_sink
= NULL
;
2220 aconnector
->edid
= NULL
;
2221 #ifdef CONFIG_DRM_AMD_DC_HDCP
2222 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2223 if (connector
->state
->content_protection
== DRM_MODE_CONTENT_PROTECTION_ENABLED
)
2224 connector
->state
->content_protection
= DRM_MODE_CONTENT_PROTECTION_DESIRED
;
2228 mutex_unlock(&dev
->mode_config
.mutex
);
2231 dc_sink_release(sink
);
2234 static void handle_hpd_irq(void *param
)
2236 struct amdgpu_dm_connector
*aconnector
= (struct amdgpu_dm_connector
*)param
;
2237 struct drm_connector
*connector
= &aconnector
->base
;
2238 struct drm_device
*dev
= connector
->dev
;
2239 enum dc_connection_type new_connection_type
= dc_connection_none
;
2240 #ifdef CONFIG_DRM_AMD_DC_HDCP
2241 struct amdgpu_device
*adev
= dev
->dev_private
;
2245 * In case of failure or MST no need to update connector status or notify the OS
2246 * since (for MST case) MST does this in its own context.
2248 mutex_lock(&aconnector
->hpd_lock
);
2250 #ifdef CONFIG_DRM_AMD_DC_HDCP
2251 if (adev
->dm
.hdcp_workqueue
)
2252 hdcp_reset_display(adev
->dm
.hdcp_workqueue
, aconnector
->dc_link
->link_index
);
2254 if (aconnector
->fake_enable
)
2255 aconnector
->fake_enable
= false;
2257 if (!dc_link_detect_sink(aconnector
->dc_link
, &new_connection_type
))
2258 DRM_ERROR("KMS: Failed to detect connector\n");
2260 if (aconnector
->base
.force
&& new_connection_type
== dc_connection_none
) {
2261 emulated_link_detect(aconnector
->dc_link
);
2264 drm_modeset_lock_all(dev
);
2265 dm_restore_drm_connector_state(dev
, connector
);
2266 drm_modeset_unlock_all(dev
);
2268 if (aconnector
->base
.force
== DRM_FORCE_UNSPECIFIED
)
2269 drm_kms_helper_hotplug_event(dev
);
2271 } else if (dc_link_detect(aconnector
->dc_link
, DETECT_REASON_HPD
)) {
2272 amdgpu_dm_update_connector_after_detect(aconnector
);
2275 drm_modeset_lock_all(dev
);
2276 dm_restore_drm_connector_state(dev
, connector
);
2277 drm_modeset_unlock_all(dev
);
2279 if (aconnector
->base
.force
== DRM_FORCE_UNSPECIFIED
)
2280 drm_kms_helper_hotplug_event(dev
);
2282 mutex_unlock(&aconnector
->hpd_lock
);
2286 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector
*aconnector
)
2288 uint8_t esi
[DP_PSR_ERROR_STATUS
- DP_SINK_COUNT_ESI
] = { 0 };
2290 bool new_irq_handled
= false;
2292 int dpcd_bytes_to_read
;
2294 const int max_process_count
= 30;
2295 int process_count
= 0;
2297 const struct dc_link_status
*link_status
= dc_link_get_status(aconnector
->dc_link
);
2299 if (link_status
->dpcd_caps
->dpcd_rev
.raw
< 0x12) {
2300 dpcd_bytes_to_read
= DP_LANE0_1_STATUS
- DP_SINK_COUNT
;
2301 /* DPCD 0x200 - 0x201 for downstream IRQ */
2302 dpcd_addr
= DP_SINK_COUNT
;
2304 dpcd_bytes_to_read
= DP_PSR_ERROR_STATUS
- DP_SINK_COUNT_ESI
;
2305 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
2306 dpcd_addr
= DP_SINK_COUNT_ESI
;
2309 dret
= drm_dp_dpcd_read(
2310 &aconnector
->dm_dp_aux
.aux
,
2313 dpcd_bytes_to_read
);
2315 while (dret
== dpcd_bytes_to_read
&&
2316 process_count
< max_process_count
) {
2322 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi
[0], esi
[1], esi
[2]);
2323 /* handle HPD short pulse irq */
2324 if (aconnector
->mst_mgr
.mst_state
)
2326 &aconnector
->mst_mgr
,
2330 if (new_irq_handled
) {
2331 /* ACK at DPCD to notify down stream */
2332 const int ack_dpcd_bytes_to_write
=
2333 dpcd_bytes_to_read
- 1;
2335 for (retry
= 0; retry
< 3; retry
++) {
2338 wret
= drm_dp_dpcd_write(
2339 &aconnector
->dm_dp_aux
.aux
,
2342 ack_dpcd_bytes_to_write
);
2343 if (wret
== ack_dpcd_bytes_to_write
)
2347 /* check if there is new irq to be handled */
2348 dret
= drm_dp_dpcd_read(
2349 &aconnector
->dm_dp_aux
.aux
,
2352 dpcd_bytes_to_read
);
2354 new_irq_handled
= false;
2360 if (process_count
== max_process_count
)
2361 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2364 static void handle_hpd_rx_irq(void *param
)
2366 struct amdgpu_dm_connector
*aconnector
= (struct amdgpu_dm_connector
*)param
;
2367 struct drm_connector
*connector
= &aconnector
->base
;
2368 struct drm_device
*dev
= connector
->dev
;
2369 struct dc_link
*dc_link
= aconnector
->dc_link
;
2370 bool is_mst_root_connector
= aconnector
->mst_mgr
.mst_state
;
2371 enum dc_connection_type new_connection_type
= dc_connection_none
;
2372 #ifdef CONFIG_DRM_AMD_DC_HDCP
2373 union hpd_irq_data hpd_irq_data
;
2374 struct amdgpu_device
*adev
= dev
->dev_private
;
2376 memset(&hpd_irq_data
, 0, sizeof(hpd_irq_data
));
2380 * TODO:Temporary add mutex to protect hpd interrupt not have a gpio
2381 * conflict, after implement i2c helper, this mutex should be
2384 if (dc_link
->type
!= dc_connection_mst_branch
)
2385 mutex_lock(&aconnector
->hpd_lock
);
2388 #ifdef CONFIG_DRM_AMD_DC_HDCP
2389 if (dc_link_handle_hpd_rx_irq(dc_link
, &hpd_irq_data
, NULL
) &&
2391 if (dc_link_handle_hpd_rx_irq(dc_link
, NULL
, NULL
) &&
2393 !is_mst_root_connector
) {
2394 /* Downstream Port status changed. */
2395 if (!dc_link_detect_sink(dc_link
, &new_connection_type
))
2396 DRM_ERROR("KMS: Failed to detect connector\n");
2398 if (aconnector
->base
.force
&& new_connection_type
== dc_connection_none
) {
2399 emulated_link_detect(dc_link
);
2401 if (aconnector
->fake_enable
)
2402 aconnector
->fake_enable
= false;
2404 amdgpu_dm_update_connector_after_detect(aconnector
);
2407 drm_modeset_lock_all(dev
);
2408 dm_restore_drm_connector_state(dev
, connector
);
2409 drm_modeset_unlock_all(dev
);
2411 drm_kms_helper_hotplug_event(dev
);
2412 } else if (dc_link_detect(dc_link
, DETECT_REASON_HPDRX
)) {
2414 if (aconnector
->fake_enable
)
2415 aconnector
->fake_enable
= false;
2417 amdgpu_dm_update_connector_after_detect(aconnector
);
2420 drm_modeset_lock_all(dev
);
2421 dm_restore_drm_connector_state(dev
, connector
);
2422 drm_modeset_unlock_all(dev
);
2424 drm_kms_helper_hotplug_event(dev
);
2427 #ifdef CONFIG_DRM_AMD_DC_HDCP
2428 if (hpd_irq_data
.bytes
.device_service_irq
.bits
.CP_IRQ
) {
2429 if (adev
->dm
.hdcp_workqueue
)
2430 hdcp_handle_cpirq(adev
->dm
.hdcp_workqueue
, aconnector
->base
.index
);
2433 if ((dc_link
->cur_link_settings
.lane_count
!= LANE_COUNT_UNKNOWN
) ||
2434 (dc_link
->type
== dc_connection_mst_branch
))
2435 dm_handle_hpd_rx_irq(aconnector
);
2437 if (dc_link
->type
!= dc_connection_mst_branch
) {
2438 drm_dp_cec_irq(&aconnector
->dm_dp_aux
.aux
);
2439 mutex_unlock(&aconnector
->hpd_lock
);
2443 static void register_hpd_handlers(struct amdgpu_device
*adev
)
2445 struct drm_device
*dev
= adev
->ddev
;
2446 struct drm_connector
*connector
;
2447 struct amdgpu_dm_connector
*aconnector
;
2448 const struct dc_link
*dc_link
;
2449 struct dc_interrupt_params int_params
= {0};
2451 int_params
.requested_polarity
= INTERRUPT_POLARITY_DEFAULT
;
2452 int_params
.current_polarity
= INTERRUPT_POLARITY_DEFAULT
;
2454 list_for_each_entry(connector
,
2455 &dev
->mode_config
.connector_list
, head
) {
2457 aconnector
= to_amdgpu_dm_connector(connector
);
2458 dc_link
= aconnector
->dc_link
;
2460 if (DC_IRQ_SOURCE_INVALID
!= dc_link
->irq_source_hpd
) {
2461 int_params
.int_context
= INTERRUPT_LOW_IRQ_CONTEXT
;
2462 int_params
.irq_source
= dc_link
->irq_source_hpd
;
2464 amdgpu_dm_irq_register_interrupt(adev
, &int_params
,
2466 (void *) aconnector
);
2469 if (DC_IRQ_SOURCE_INVALID
!= dc_link
->irq_source_hpd_rx
) {
2471 /* Also register for DP short pulse (hpd_rx). */
2472 int_params
.int_context
= INTERRUPT_LOW_IRQ_CONTEXT
;
2473 int_params
.irq_source
= dc_link
->irq_source_hpd_rx
;
2475 amdgpu_dm_irq_register_interrupt(adev
, &int_params
,
2477 (void *) aconnector
);
2482 #if defined(CONFIG_DRM_AMD_DC_SI)
2483 /* Register IRQ sources and initialize IRQ callbacks */
2484 static int dce60_register_irq_handlers(struct amdgpu_device
*adev
)
2486 struct dc
*dc
= adev
->dm
.dc
;
2487 struct common_irq_params
*c_irq_params
;
2488 struct dc_interrupt_params int_params
= {0};
2491 unsigned client_id
= AMDGPU_IRQ_CLIENTID_LEGACY
;
2493 int_params
.requested_polarity
= INTERRUPT_POLARITY_DEFAULT
;
2494 int_params
.current_polarity
= INTERRUPT_POLARITY_DEFAULT
;
2497 * Actions of amdgpu_irq_add_id():
2498 * 1. Register a set() function with base driver.
2499 * Base driver will call set() function to enable/disable an
2500 * interrupt in DC hardware.
2501 * 2. Register amdgpu_dm_irq_handler().
2502 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2503 * coming from DC hardware.
2504 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2505 * for acknowledging and handling. */
2507 /* Use VBLANK interrupt */
2508 for (i
= 0; i
< adev
->mode_info
.num_crtc
; i
++) {
2509 r
= amdgpu_irq_add_id(adev
, client_id
, i
+1 , &adev
->crtc_irq
);
2511 DRM_ERROR("Failed to add crtc irq id!\n");
2515 int_params
.int_context
= INTERRUPT_HIGH_IRQ_CONTEXT
;
2516 int_params
.irq_source
=
2517 dc_interrupt_to_irq_source(dc
, i
+1 , 0);
2519 c_irq_params
= &adev
->dm
.vblank_params
[int_params
.irq_source
- DC_IRQ_SOURCE_VBLANK1
];
2521 c_irq_params
->adev
= adev
;
2522 c_irq_params
->irq_src
= int_params
.irq_source
;
2524 amdgpu_dm_irq_register_interrupt(adev
, &int_params
,
2525 dm_crtc_high_irq
, c_irq_params
);
2528 /* Use GRPH_PFLIP interrupt */
2529 for (i
= VISLANDS30_IV_SRCID_D1_GRPH_PFLIP
;
2530 i
<= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP
; i
+= 2) {
2531 r
= amdgpu_irq_add_id(adev
, client_id
, i
, &adev
->pageflip_irq
);
2533 DRM_ERROR("Failed to add page flip irq id!\n");
2537 int_params
.int_context
= INTERRUPT_HIGH_IRQ_CONTEXT
;
2538 int_params
.irq_source
=
2539 dc_interrupt_to_irq_source(dc
, i
, 0);
2541 c_irq_params
= &adev
->dm
.pflip_params
[int_params
.irq_source
- DC_IRQ_SOURCE_PFLIP_FIRST
];
2543 c_irq_params
->adev
= adev
;
2544 c_irq_params
->irq_src
= int_params
.irq_source
;
2546 amdgpu_dm_irq_register_interrupt(adev
, &int_params
,
2547 dm_pflip_high_irq
, c_irq_params
);
2552 r
= amdgpu_irq_add_id(adev
, client_id
,
2553 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A
, &adev
->hpd_irq
);
2555 DRM_ERROR("Failed to add hpd irq id!\n");
2559 register_hpd_handlers(adev
);
2565 /* Register IRQ sources and initialize IRQ callbacks */
2566 static int dce110_register_irq_handlers(struct amdgpu_device
*adev
)
2568 struct dc
*dc
= adev
->dm
.dc
;
2569 struct common_irq_params
*c_irq_params
;
2570 struct dc_interrupt_params int_params
= {0};
2573 unsigned client_id
= AMDGPU_IRQ_CLIENTID_LEGACY
;
2575 if (adev
->asic_type
>= CHIP_VEGA10
)
2576 client_id
= SOC15_IH_CLIENTID_DCE
;
2578 int_params
.requested_polarity
= INTERRUPT_POLARITY_DEFAULT
;
2579 int_params
.current_polarity
= INTERRUPT_POLARITY_DEFAULT
;
2582 * Actions of amdgpu_irq_add_id():
2583 * 1. Register a set() function with base driver.
2584 * Base driver will call set() function to enable/disable an
2585 * interrupt in DC hardware.
2586 * 2. Register amdgpu_dm_irq_handler().
2587 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2588 * coming from DC hardware.
2589 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2590 * for acknowledging and handling. */
2592 /* Use VBLANK interrupt */
2593 for (i
= VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0
; i
<= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0
; i
++) {
2594 r
= amdgpu_irq_add_id(adev
, client_id
, i
, &adev
->crtc_irq
);
2596 DRM_ERROR("Failed to add crtc irq id!\n");
2600 int_params
.int_context
= INTERRUPT_HIGH_IRQ_CONTEXT
;
2601 int_params
.irq_source
=
2602 dc_interrupt_to_irq_source(dc
, i
, 0);
2604 c_irq_params
= &adev
->dm
.vblank_params
[int_params
.irq_source
- DC_IRQ_SOURCE_VBLANK1
];
2606 c_irq_params
->adev
= adev
;
2607 c_irq_params
->irq_src
= int_params
.irq_source
;
2609 amdgpu_dm_irq_register_interrupt(adev
, &int_params
,
2610 dm_crtc_high_irq
, c_irq_params
);
2613 /* Use VUPDATE interrupt */
2614 for (i
= VISLANDS30_IV_SRCID_D1_V_UPDATE_INT
; i
<= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT
; i
+= 2) {
2615 r
= amdgpu_irq_add_id(adev
, client_id
, i
, &adev
->vupdate_irq
);
2617 DRM_ERROR("Failed to add vupdate irq id!\n");
2621 int_params
.int_context
= INTERRUPT_HIGH_IRQ_CONTEXT
;
2622 int_params
.irq_source
=
2623 dc_interrupt_to_irq_source(dc
, i
, 0);
2625 c_irq_params
= &adev
->dm
.vupdate_params
[int_params
.irq_source
- DC_IRQ_SOURCE_VUPDATE1
];
2627 c_irq_params
->adev
= adev
;
2628 c_irq_params
->irq_src
= int_params
.irq_source
;
2630 amdgpu_dm_irq_register_interrupt(adev
, &int_params
,
2631 dm_vupdate_high_irq
, c_irq_params
);
2634 /* Use GRPH_PFLIP interrupt */
2635 for (i
= VISLANDS30_IV_SRCID_D1_GRPH_PFLIP
;
2636 i
<= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP
; i
+= 2) {
2637 r
= amdgpu_irq_add_id(adev
, client_id
, i
, &adev
->pageflip_irq
);
2639 DRM_ERROR("Failed to add page flip irq id!\n");
2643 int_params
.int_context
= INTERRUPT_HIGH_IRQ_CONTEXT
;
2644 int_params
.irq_source
=
2645 dc_interrupt_to_irq_source(dc
, i
, 0);
2647 c_irq_params
= &adev
->dm
.pflip_params
[int_params
.irq_source
- DC_IRQ_SOURCE_PFLIP_FIRST
];
2649 c_irq_params
->adev
= adev
;
2650 c_irq_params
->irq_src
= int_params
.irq_source
;
2652 amdgpu_dm_irq_register_interrupt(adev
, &int_params
,
2653 dm_pflip_high_irq
, c_irq_params
);
2658 r
= amdgpu_irq_add_id(adev
, client_id
,
2659 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A
, &adev
->hpd_irq
);
2661 DRM_ERROR("Failed to add hpd irq id!\n");
2665 register_hpd_handlers(adev
);
2670 #if defined(CONFIG_DRM_AMD_DC_DCN)
2671 /* Register IRQ sources and initialize IRQ callbacks */
2672 static int dcn10_register_irq_handlers(struct amdgpu_device
*adev
)
2674 struct dc
*dc
= adev
->dm
.dc
;
2675 struct common_irq_params
*c_irq_params
;
2676 struct dc_interrupt_params int_params
= {0};
2680 int_params
.requested_polarity
= INTERRUPT_POLARITY_DEFAULT
;
2681 int_params
.current_polarity
= INTERRUPT_POLARITY_DEFAULT
;
2684 * Actions of amdgpu_irq_add_id():
2685 * 1. Register a set() function with base driver.
2686 * Base driver will call set() function to enable/disable an
2687 * interrupt in DC hardware.
2688 * 2. Register amdgpu_dm_irq_handler().
2689 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2690 * coming from DC hardware.
2691 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2692 * for acknowledging and handling.
2695 /* Use VSTARTUP interrupt */
2696 for (i
= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP
;
2697 i
<= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP
+ adev
->mode_info
.num_crtc
- 1;
2699 r
= amdgpu_irq_add_id(adev
, SOC15_IH_CLIENTID_DCE
, i
, &adev
->crtc_irq
);
2702 DRM_ERROR("Failed to add crtc irq id!\n");
2706 int_params
.int_context
= INTERRUPT_HIGH_IRQ_CONTEXT
;
2707 int_params
.irq_source
=
2708 dc_interrupt_to_irq_source(dc
, i
, 0);
2710 c_irq_params
= &adev
->dm
.vblank_params
[int_params
.irq_source
- DC_IRQ_SOURCE_VBLANK1
];
2712 c_irq_params
->adev
= adev
;
2713 c_irq_params
->irq_src
= int_params
.irq_source
;
2715 amdgpu_dm_irq_register_interrupt(
2716 adev
, &int_params
, dm_crtc_high_irq
, c_irq_params
);
2719 /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
2720 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
2721 * to trigger at end of each vblank, regardless of state of the lock,
2722 * matching DCE behaviour.
2724 for (i
= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT
;
2725 i
<= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT
+ adev
->mode_info
.num_crtc
- 1;
2727 r
= amdgpu_irq_add_id(adev
, SOC15_IH_CLIENTID_DCE
, i
, &adev
->vupdate_irq
);
2730 DRM_ERROR("Failed to add vupdate irq id!\n");
2734 int_params
.int_context
= INTERRUPT_HIGH_IRQ_CONTEXT
;
2735 int_params
.irq_source
=
2736 dc_interrupt_to_irq_source(dc
, i
, 0);
2738 c_irq_params
= &adev
->dm
.vupdate_params
[int_params
.irq_source
- DC_IRQ_SOURCE_VUPDATE1
];
2740 c_irq_params
->adev
= adev
;
2741 c_irq_params
->irq_src
= int_params
.irq_source
;
2743 amdgpu_dm_irq_register_interrupt(adev
, &int_params
,
2744 dm_vupdate_high_irq
, c_irq_params
);
2747 /* Use GRPH_PFLIP interrupt */
2748 for (i
= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT
;
2749 i
<= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT
+ adev
->mode_info
.num_crtc
- 1;
2751 r
= amdgpu_irq_add_id(adev
, SOC15_IH_CLIENTID_DCE
, i
, &adev
->pageflip_irq
);
2753 DRM_ERROR("Failed to add page flip irq id!\n");
2757 int_params
.int_context
= INTERRUPT_HIGH_IRQ_CONTEXT
;
2758 int_params
.irq_source
=
2759 dc_interrupt_to_irq_source(dc
, i
, 0);
2761 c_irq_params
= &adev
->dm
.pflip_params
[int_params
.irq_source
- DC_IRQ_SOURCE_PFLIP_FIRST
];
2763 c_irq_params
->adev
= adev
;
2764 c_irq_params
->irq_src
= int_params
.irq_source
;
2766 amdgpu_dm_irq_register_interrupt(adev
, &int_params
,
2767 dm_pflip_high_irq
, c_irq_params
);
2772 r
= amdgpu_irq_add_id(adev
, SOC15_IH_CLIENTID_DCE
, DCN_1_0__SRCID__DC_HPD1_INT
,
2775 DRM_ERROR("Failed to add hpd irq id!\n");
2779 register_hpd_handlers(adev
);
2786 * Acquires the lock for the atomic state object and returns
2787 * the new atomic state.
2789 * This should only be called during atomic check.
2791 static int dm_atomic_get_state(struct drm_atomic_state
*state
,
2792 struct dm_atomic_state
**dm_state
)
2794 struct drm_device
*dev
= state
->dev
;
2795 struct amdgpu_device
*adev
= dev
->dev_private
;
2796 struct amdgpu_display_manager
*dm
= &adev
->dm
;
2797 struct drm_private_state
*priv_state
;
2802 priv_state
= drm_atomic_get_private_obj_state(state
, &dm
->atomic_obj
);
2803 if (IS_ERR(priv_state
))
2804 return PTR_ERR(priv_state
);
2806 *dm_state
= to_dm_atomic_state(priv_state
);
2811 static struct dm_atomic_state
*
2812 dm_atomic_get_new_state(struct drm_atomic_state
*state
)
2814 struct drm_device
*dev
= state
->dev
;
2815 struct amdgpu_device
*adev
= dev
->dev_private
;
2816 struct amdgpu_display_manager
*dm
= &adev
->dm
;
2817 struct drm_private_obj
*obj
;
2818 struct drm_private_state
*new_obj_state
;
2821 for_each_new_private_obj_in_state(state
, obj
, new_obj_state
, i
) {
2822 if (obj
->funcs
== dm
->atomic_obj
.funcs
)
2823 return to_dm_atomic_state(new_obj_state
);
2829 static struct dm_atomic_state
*
2830 dm_atomic_get_old_state(struct drm_atomic_state
*state
)
2832 struct drm_device
*dev
= state
->dev
;
2833 struct amdgpu_device
*adev
= dev
->dev_private
;
2834 struct amdgpu_display_manager
*dm
= &adev
->dm
;
2835 struct drm_private_obj
*obj
;
2836 struct drm_private_state
*old_obj_state
;
2839 for_each_old_private_obj_in_state(state
, obj
, old_obj_state
, i
) {
2840 if (obj
->funcs
== dm
->atomic_obj
.funcs
)
2841 return to_dm_atomic_state(old_obj_state
);
2847 static struct drm_private_state
*
2848 dm_atomic_duplicate_state(struct drm_private_obj
*obj
)
2850 struct dm_atomic_state
*old_state
, *new_state
;
2852 new_state
= kzalloc(sizeof(*new_state
), GFP_KERNEL
);
2856 __drm_atomic_helper_private_obj_duplicate_state(obj
, &new_state
->base
);
2858 old_state
= to_dm_atomic_state(obj
->state
);
2860 if (old_state
&& old_state
->context
)
2861 new_state
->context
= dc_copy_state(old_state
->context
);
2863 if (!new_state
->context
) {
2868 return &new_state
->base
;
2871 static void dm_atomic_destroy_state(struct drm_private_obj
*obj
,
2872 struct drm_private_state
*state
)
2874 struct dm_atomic_state
*dm_state
= to_dm_atomic_state(state
);
2876 if (dm_state
&& dm_state
->context
)
2877 dc_release_state(dm_state
->context
);
2882 static struct drm_private_state_funcs dm_atomic_state_funcs
= {
2883 .atomic_duplicate_state
= dm_atomic_duplicate_state
,
2884 .atomic_destroy_state
= dm_atomic_destroy_state
,
2887 static int amdgpu_dm_mode_config_init(struct amdgpu_device
*adev
)
2889 struct dm_atomic_state
*state
;
2892 adev
->mode_info
.mode_config_initialized
= true;
2894 adev
->ddev
->mode_config
.funcs
= (void *)&amdgpu_dm_mode_funcs
;
2895 adev
->ddev
->mode_config
.helper_private
= &amdgpu_dm_mode_config_helperfuncs
;
2897 adev
->ddev
->mode_config
.max_width
= 16384;
2898 adev
->ddev
->mode_config
.max_height
= 16384;
2900 adev
->ddev
->mode_config
.preferred_depth
= 24;
2901 adev
->ddev
->mode_config
.prefer_shadow
= 1;
2902 /* indicates support for immediate flip */
2903 adev
->ddev
->mode_config
.async_page_flip
= true;
2905 adev
->ddev
->mode_config
.fb_base
= adev
->gmc
.aper_base
;
2907 state
= kzalloc(sizeof(*state
), GFP_KERNEL
);
2911 state
->context
= dc_create_state(adev
->dm
.dc
);
2912 if (!state
->context
) {
2917 dc_resource_state_copy_construct_current(adev
->dm
.dc
, state
->context
);
2919 drm_atomic_private_obj_init(adev
->ddev
,
2920 &adev
->dm
.atomic_obj
,
2922 &dm_atomic_state_funcs
);
2924 r
= amdgpu_display_modeset_create_props(adev
);
2928 r
= amdgpu_dm_audio_init(adev
);
2935 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
2936 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
2937 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
2939 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
2940 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
2942 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager
*dm
)
2944 #if defined(CONFIG_ACPI)
2945 struct amdgpu_dm_backlight_caps caps
;
2947 if (dm
->backlight_caps
.caps_valid
)
2950 amdgpu_acpi_get_backlight_caps(dm
->adev
, &caps
);
2951 if (caps
.caps_valid
) {
2952 dm
->backlight_caps
.caps_valid
= true;
2953 if (caps
.aux_support
)
2955 dm
->backlight_caps
.min_input_signal
= caps
.min_input_signal
;
2956 dm
->backlight_caps
.max_input_signal
= caps
.max_input_signal
;
2958 dm
->backlight_caps
.min_input_signal
=
2959 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT
;
2960 dm
->backlight_caps
.max_input_signal
=
2961 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT
;
2964 if (dm
->backlight_caps
.aux_support
)
2967 dm
->backlight_caps
.min_input_signal
= AMDGPU_DM_DEFAULT_MIN_BACKLIGHT
;
2968 dm
->backlight_caps
.max_input_signal
= AMDGPU_DM_DEFAULT_MAX_BACKLIGHT
;
2972 static int set_backlight_via_aux(struct dc_link
*link
, uint32_t brightness
)
2979 rc
= dc_link_set_backlight_level_nits(link
, true, brightness
,
2980 AUX_BL_DEFAULT_TRANSITION_TIME_MS
);
2985 static u32
convert_brightness(const struct amdgpu_dm_backlight_caps
*caps
,
2986 const uint32_t user_brightness
)
2988 u32 min
, max
, conversion_pace
;
2989 u32 brightness
= user_brightness
;
2994 if (!caps
->aux_support
) {
2995 max
= caps
->max_input_signal
;
2996 min
= caps
->min_input_signal
;
2998 * The brightness input is in the range 0-255
2999 * It needs to be rescaled to be between the
3000 * requested min and max input signal
3001 * It also needs to be scaled up by 0x101 to
3002 * match the DC interface which has a range of
3005 conversion_pace
= 0x101;
3010 / AMDGPU_MAX_BL_LEVEL
3011 + min
* conversion_pace
;
3014 * We are doing a linear interpolation here, which is OK but
3015 * does not provide the optimal result. We probably want
3016 * something close to the Perceptual Quantizer (PQ) curve.
3018 max
= caps
->aux_max_input_signal
;
3019 min
= caps
->aux_min_input_signal
;
3021 brightness
= (AMDGPU_MAX_BL_LEVEL
- user_brightness
) * min
3022 + user_brightness
* max
;
3023 // Multiple the value by 1000 since we use millinits
3025 brightness
= DIV_ROUND_CLOSEST(brightness
, AMDGPU_MAX_BL_LEVEL
);
3032 static int amdgpu_dm_backlight_update_status(struct backlight_device
*bd
)
3034 struct amdgpu_display_manager
*dm
= bl_get_data(bd
);
3035 struct amdgpu_dm_backlight_caps caps
;
3036 struct dc_link
*link
= NULL
;
3040 amdgpu_dm_update_backlight_caps(dm
);
3041 caps
= dm
->backlight_caps
;
3043 link
= (struct dc_link
*)dm
->backlight_link
;
3045 brightness
= convert_brightness(&caps
, bd
->props
.brightness
);
3046 // Change brightness based on AUX property
3047 if (caps
.aux_support
)
3048 return set_backlight_via_aux(link
, brightness
);
3050 rc
= dc_link_set_backlight_level(dm
->backlight_link
, brightness
, 0);
3055 static int amdgpu_dm_backlight_get_brightness(struct backlight_device
*bd
)
3057 struct amdgpu_display_manager
*dm
= bl_get_data(bd
);
3058 int ret
= dc_link_get_backlight_level(dm
->backlight_link
);
3060 if (ret
== DC_ERROR_UNEXPECTED
)
3061 return bd
->props
.brightness
;
3065 static const struct backlight_ops amdgpu_dm_backlight_ops
= {
3066 .options
= BL_CORE_SUSPENDRESUME
,
3067 .get_brightness
= amdgpu_dm_backlight_get_brightness
,
3068 .update_status
= amdgpu_dm_backlight_update_status
,
3072 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager
*dm
)
3075 struct backlight_properties props
= { 0 };
3077 amdgpu_dm_update_backlight_caps(dm
);
3079 props
.max_brightness
= AMDGPU_MAX_BL_LEVEL
;
3080 props
.brightness
= AMDGPU_MAX_BL_LEVEL
;
3081 props
.type
= BACKLIGHT_RAW
;
3083 snprintf(bl_name
, sizeof(bl_name
), "amdgpu_bl%d",
3084 dm
->adev
->ddev
->primary
->index
);
3086 dm
->backlight_dev
= backlight_device_register(bl_name
,
3087 dm
->adev
->ddev
->dev
,
3089 &amdgpu_dm_backlight_ops
,
3092 if (IS_ERR(dm
->backlight_dev
))
3093 DRM_ERROR("DM: Backlight registration failed!\n");
3095 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name
);
3100 static int initialize_plane(struct amdgpu_display_manager
*dm
,
3101 struct amdgpu_mode_info
*mode_info
, int plane_id
,
3102 enum drm_plane_type plane_type
,
3103 const struct dc_plane_cap
*plane_cap
)
3105 struct drm_plane
*plane
;
3106 unsigned long possible_crtcs
;
3109 plane
= kzalloc(sizeof(struct drm_plane
), GFP_KERNEL
);
3111 DRM_ERROR("KMS: Failed to allocate plane\n");
3114 plane
->type
= plane_type
;
3117 * HACK: IGT tests expect that the primary plane for a CRTC
3118 * can only have one possible CRTC. Only expose support for
3119 * any CRTC if they're not going to be used as a primary plane
3120 * for a CRTC - like overlay or underlay planes.
3122 possible_crtcs
= 1 << plane_id
;
3123 if (plane_id
>= dm
->dc
->caps
.max_streams
)
3124 possible_crtcs
= 0xff;
3126 ret
= amdgpu_dm_plane_init(dm
, plane
, possible_crtcs
, plane_cap
);
3129 DRM_ERROR("KMS: Failed to initialize plane\n");
3135 mode_info
->planes
[plane_id
] = plane
;
/*
 * Register the backlight device for a connected eDP/LVDS link and
 * remember which link owns it. No-op when the backlight class is not
 * built in.
 */
static void register_backlight_device(struct amdgpu_display_manager *dm,
				      struct dc_link *link)
{
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
	    link->type != dc_connection_none) {
		/*
		 * Event if registration failed, we should continue with
		 * DM initialization because not having a backlight control
		 * is better then a black screen.
		 */
		amdgpu_dm_register_backlight_device(dm);

		if (dm->backlight_dev)
			dm->backlight_link = link;
	}
#endif
}
3164 * In this architecture, the association
3165 * connector -> encoder -> crtc
3166 * id not really requried. The crtc and connector will hold the
3167 * display_index as an abstraction to use with DAL component
3169 * Returns 0 on success
3171 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device
*adev
)
3173 struct amdgpu_display_manager
*dm
= &adev
->dm
;
3175 struct amdgpu_dm_connector
*aconnector
= NULL
;
3176 struct amdgpu_encoder
*aencoder
= NULL
;
3177 struct amdgpu_mode_info
*mode_info
= &adev
->mode_info
;
3179 int32_t primary_planes
;
3180 enum dc_connection_type new_connection_type
= dc_connection_none
;
3181 const struct dc_plane_cap
*plane
;
3183 link_cnt
= dm
->dc
->caps
.max_links
;
3184 if (amdgpu_dm_mode_config_init(dm
->adev
)) {
3185 DRM_ERROR("DM: Failed to initialize mode config\n");
3189 /* There is one primary plane per CRTC */
3190 primary_planes
= dm
->dc
->caps
.max_streams
;
3191 ASSERT(primary_planes
<= AMDGPU_MAX_PLANES
);
3194 * Initialize primary planes, implicit planes for legacy IOCTLS.
3195 * Order is reversed to match iteration order in atomic check.
3197 for (i
= (primary_planes
- 1); i
>= 0; i
--) {
3198 plane
= &dm
->dc
->caps
.planes
[i
];
3200 if (initialize_plane(dm
, mode_info
, i
,
3201 DRM_PLANE_TYPE_PRIMARY
, plane
)) {
3202 DRM_ERROR("KMS: Failed to initialize primary plane\n");
3208 * Initialize overlay planes, index starting after primary planes.
3209 * These planes have a higher DRM index than the primary planes since
3210 * they should be considered as having a higher z-order.
3211 * Order is reversed to match iteration order in atomic check.
3213 * Only support DCN for now, and only expose one so we don't encourage
3214 * userspace to use up all the pipes.
3216 for (i
= 0; i
< dm
->dc
->caps
.max_planes
; ++i
) {
3217 struct dc_plane_cap
*plane
= &dm
->dc
->caps
.planes
[i
];
3219 if (plane
->type
!= DC_PLANE_TYPE_DCN_UNIVERSAL
)
3222 if (!plane
->blends_with_above
|| !plane
->blends_with_below
)
3225 if (!plane
->pixel_format_support
.argb8888
)
3228 if (initialize_plane(dm
, NULL
, primary_planes
+ i
,
3229 DRM_PLANE_TYPE_OVERLAY
, plane
)) {
3230 DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3234 /* Only create one overlay plane. */
3238 for (i
= 0; i
< dm
->dc
->caps
.max_streams
; i
++)
3239 if (amdgpu_dm_crtc_init(dm
, mode_info
->planes
[i
], i
)) {
3240 DRM_ERROR("KMS: Failed to initialize crtc\n");
3244 dm
->display_indexes_num
= dm
->dc
->caps
.max_streams
;
3246 /* loops over all connectors on the board */
3247 for (i
= 0; i
< link_cnt
; i
++) {
3248 struct dc_link
*link
= NULL
;
3250 if (i
> AMDGPU_DM_MAX_DISPLAY_INDEX
) {
3252 "KMS: Cannot support more than %d display indexes\n",
3253 AMDGPU_DM_MAX_DISPLAY_INDEX
);
3257 aconnector
= kzalloc(sizeof(*aconnector
), GFP_KERNEL
);
3261 aencoder
= kzalloc(sizeof(*aencoder
), GFP_KERNEL
);
3265 if (amdgpu_dm_encoder_init(dm
->ddev
, aencoder
, i
)) {
3266 DRM_ERROR("KMS: Failed to initialize encoder\n");
3270 if (amdgpu_dm_connector_init(dm
, aconnector
, i
, aencoder
)) {
3271 DRM_ERROR("KMS: Failed to initialize connector\n");
3275 link
= dc_get_link_at_index(dm
->dc
, i
);
3277 if (!dc_link_detect_sink(link
, &new_connection_type
))
3278 DRM_ERROR("KMS: Failed to detect connector\n");
3280 if (aconnector
->base
.force
&& new_connection_type
== dc_connection_none
) {
3281 emulated_link_detect(link
);
3282 amdgpu_dm_update_connector_after_detect(aconnector
);
3284 } else if (dc_link_detect(link
, DETECT_REASON_BOOT
)) {
3285 amdgpu_dm_update_connector_after_detect(aconnector
);
3286 register_backlight_device(dm
, link
);
3287 if (amdgpu_dc_feature_mask
& DC_PSR_MASK
)
3288 amdgpu_dm_set_psr_caps(link
);
3294 /* Software is initialized. Now we can register interrupt handlers. */
3295 switch (adev
->asic_type
) {
3296 #if defined(CONFIG_DRM_AMD_DC_SI)
3301 if (dce60_register_irq_handlers(dm
->adev
)) {
3302 DRM_ERROR("DM: Failed to initialize IRQ\n");
3316 case CHIP_POLARIS11
:
3317 case CHIP_POLARIS10
:
3318 case CHIP_POLARIS12
:
3323 if (dce110_register_irq_handlers(dm
->adev
)) {
3324 DRM_ERROR("DM: Failed to initialize IRQ\n");
3328 #if defined(CONFIG_DRM_AMD_DC_DCN)
3334 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3335 case CHIP_SIENNA_CICHLID
:
3336 case CHIP_NAVY_FLOUNDER
:
3338 if (dcn10_register_irq_handlers(dm
->adev
)) {
3339 DRM_ERROR("DM: Failed to initialize IRQ\n");
3345 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev
->asic_type
);
3349 /* No userspace support. */
3350 dm
->dc
->debug
.disable_tri_buf
= true;
3360 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager
*dm
)
3362 drm_mode_config_cleanup(dm
->ddev
);
3363 drm_atomic_private_obj_fini(&dm
->atomic_obj
);
3367 /******************************************************************************
3368 * amdgpu_display_funcs functions
3369 *****************************************************************************/
/*
 * dm_bandwidth_update - program display watermarks
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate and program the display watermarks and line buffer allocation.
 */
static void dm_bandwidth_update(struct amdgpu_device *adev)
{
	/* TODO: implement later */
}
3383 static const struct amdgpu_display_funcs dm_display_funcs
= {
3384 .bandwidth_update
= dm_bandwidth_update
, /* called unconditionally */
3385 .vblank_get_counter
= dm_vblank_get_counter
,/* called unconditionally */
3386 .backlight_set_level
= NULL
, /* never called for DC */
3387 .backlight_get_level
= NULL
, /* never called for DC */
3388 .hpd_sense
= NULL
,/* called unconditionally */
3389 .hpd_set_polarity
= NULL
, /* called unconditionally */
3390 .hpd_get_gpio_reg
= NULL
, /* VBIOS parsing. DAL does it. */
3391 .page_flip_get_scanoutpos
=
3392 dm_crtc_get_scanoutpos
,/* called unconditionally */
3393 .add_encoder
= NULL
, /* VBIOS parsing. DAL does it. */
3394 .add_connector
= NULL
, /* VBIOS parsing. DAL does it. */
3397 #if defined(CONFIG_DEBUG_KERNEL_DC)
3399 static ssize_t
s3_debug_store(struct device
*device
,
3400 struct device_attribute
*attr
,
3406 struct drm_device
*drm_dev
= dev_get_drvdata(device
);
3407 struct amdgpu_device
*adev
= drm_dev
->dev_private
;
3409 ret
= kstrtoint(buf
, 0, &s3_state
);
3414 drm_kms_helper_hotplug_event(adev
->ddev
);
3419 return ret
== 0 ? count
: 0;
3422 DEVICE_ATTR_WO(s3_debug
);
3426 static int dm_early_init(void *handle
)
3428 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
3430 switch (adev
->asic_type
) {
3431 #if defined(CONFIG_DRM_AMD_DC_SI)
3435 adev
->mode_info
.num_crtc
= 6;
3436 adev
->mode_info
.num_hpd
= 6;
3437 adev
->mode_info
.num_dig
= 6;
3440 adev
->mode_info
.num_crtc
= 2;
3441 adev
->mode_info
.num_hpd
= 2;
3442 adev
->mode_info
.num_dig
= 2;
3447 adev
->mode_info
.num_crtc
= 6;
3448 adev
->mode_info
.num_hpd
= 6;
3449 adev
->mode_info
.num_dig
= 6;
3452 adev
->mode_info
.num_crtc
= 4;
3453 adev
->mode_info
.num_hpd
= 6;
3454 adev
->mode_info
.num_dig
= 7;
3458 adev
->mode_info
.num_crtc
= 2;
3459 adev
->mode_info
.num_hpd
= 6;
3460 adev
->mode_info
.num_dig
= 6;
3464 adev
->mode_info
.num_crtc
= 6;
3465 adev
->mode_info
.num_hpd
= 6;
3466 adev
->mode_info
.num_dig
= 7;
3469 adev
->mode_info
.num_crtc
= 3;
3470 adev
->mode_info
.num_hpd
= 6;
3471 adev
->mode_info
.num_dig
= 9;
3474 adev
->mode_info
.num_crtc
= 2;
3475 adev
->mode_info
.num_hpd
= 6;
3476 adev
->mode_info
.num_dig
= 9;
3478 case CHIP_POLARIS11
:
3479 case CHIP_POLARIS12
:
3480 adev
->mode_info
.num_crtc
= 5;
3481 adev
->mode_info
.num_hpd
= 5;
3482 adev
->mode_info
.num_dig
= 5;
3484 case CHIP_POLARIS10
:
3486 adev
->mode_info
.num_crtc
= 6;
3487 adev
->mode_info
.num_hpd
= 6;
3488 adev
->mode_info
.num_dig
= 6;
3493 adev
->mode_info
.num_crtc
= 6;
3494 adev
->mode_info
.num_hpd
= 6;
3495 adev
->mode_info
.num_dig
= 6;
3497 #if defined(CONFIG_DRM_AMD_DC_DCN)
3499 adev
->mode_info
.num_crtc
= 4;
3500 adev
->mode_info
.num_hpd
= 4;
3501 adev
->mode_info
.num_dig
= 4;
3506 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3507 case CHIP_SIENNA_CICHLID
:
3508 case CHIP_NAVY_FLOUNDER
:
3510 adev
->mode_info
.num_crtc
= 6;
3511 adev
->mode_info
.num_hpd
= 6;
3512 adev
->mode_info
.num_dig
= 6;
3515 adev
->mode_info
.num_crtc
= 5;
3516 adev
->mode_info
.num_hpd
= 5;
3517 adev
->mode_info
.num_dig
= 5;
3520 adev
->mode_info
.num_crtc
= 4;
3521 adev
->mode_info
.num_hpd
= 4;
3522 adev
->mode_info
.num_dig
= 4;
3525 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev
->asic_type
);
3529 amdgpu_dm_set_irq_funcs(adev
);
3531 if (adev
->mode_info
.funcs
== NULL
)
3532 adev
->mode_info
.funcs
= &dm_display_funcs
;
3535 * Note: Do NOT change adev->audio_endpt_rreg and
3536 * adev->audio_endpt_wreg because they are initialised in
3537 * amdgpu_device_init()
3539 #if defined(CONFIG_DEBUG_KERNEL_DC)
3542 &dev_attr_s3_debug
);
3548 static bool modeset_required(struct drm_crtc_state
*crtc_state
,
3549 struct dc_stream_state
*new_stream
,
3550 struct dc_stream_state
*old_stream
)
3552 return crtc_state
->active
&& drm_atomic_crtc_needs_modeset(crtc_state
);
3555 static bool modereset_required(struct drm_crtc_state
*crtc_state
)
3557 return !crtc_state
->active
&& drm_atomic_crtc_needs_modeset(crtc_state
);
/*
 * drm_encoder_funcs.destroy callback.
 *
 * Unregisters the encoder from the DRM core and releases its memory.
 * The visible code only called drm_encoder_cleanup(); without the kfree()
 * the encoder object allocated at connector init time is leaked on
 * destroy, so free it here after the core is done with it.
 */
static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
	kfree(encoder);
}
3566 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs
= {
3567 .destroy
= amdgpu_dm_encoder_destroy
,
3571 static int fill_dc_scaling_info(const struct drm_plane_state
*state
,
3572 struct dc_scaling_info
*scaling_info
)
3574 int scale_w
, scale_h
;
3576 memset(scaling_info
, 0, sizeof(*scaling_info
));
3578 /* Source is fixed 16.16 but we ignore mantissa for now... */
3579 scaling_info
->src_rect
.x
= state
->src_x
>> 16;
3580 scaling_info
->src_rect
.y
= state
->src_y
>> 16;
3582 scaling_info
->src_rect
.width
= state
->src_w
>> 16;
3583 if (scaling_info
->src_rect
.width
== 0)
3586 scaling_info
->src_rect
.height
= state
->src_h
>> 16;
3587 if (scaling_info
->src_rect
.height
== 0)
3590 scaling_info
->dst_rect
.x
= state
->crtc_x
;
3591 scaling_info
->dst_rect
.y
= state
->crtc_y
;
3593 if (state
->crtc_w
== 0)
3596 scaling_info
->dst_rect
.width
= state
->crtc_w
;
3598 if (state
->crtc_h
== 0)
3601 scaling_info
->dst_rect
.height
= state
->crtc_h
;
3603 /* DRM doesn't specify clipping on destination output. */
3604 scaling_info
->clip_rect
= scaling_info
->dst_rect
;
3606 /* TODO: Validate scaling per-format with DC plane caps */
3607 scale_w
= scaling_info
->dst_rect
.width
* 1000 /
3608 scaling_info
->src_rect
.width
;
3610 if (scale_w
< 250 || scale_w
> 16000)
3613 scale_h
= scaling_info
->dst_rect
.height
* 1000 /
3614 scaling_info
->src_rect
.height
;
3616 if (scale_h
< 250 || scale_h
> 16000)
3620 * The "scaling_quality" can be ignored for now, quality = 0 has DC
3621 * assume reasonable defaults based on the format.
3627 static int get_fb_info(const struct amdgpu_framebuffer
*amdgpu_fb
,
3628 uint64_t *tiling_flags
, bool *tmz_surface
)
3630 struct amdgpu_bo
*rbo
= gem_to_amdgpu_bo(amdgpu_fb
->base
.obj
[0]);
3631 int r
= amdgpu_bo_reserve(rbo
, false);
3634 /* Don't show error message when returning -ERESTARTSYS */
3635 if (r
!= -ERESTARTSYS
)
3636 DRM_ERROR("Unable to reserve buffer: %d\n", r
);
3641 amdgpu_bo_get_tiling_flags(rbo
, tiling_flags
);
3644 *tmz_surface
= amdgpu_bo_encrypted(rbo
);
3646 amdgpu_bo_unreserve(rbo
);
3651 static inline uint64_t get_dcc_address(uint64_t address
, uint64_t tiling_flags
)
3653 uint32_t offset
= AMDGPU_TILING_GET(tiling_flags
, DCC_OFFSET_256B
);
3655 return offset
? (address
+ offset
* 256) : 0;
3659 fill_plane_dcc_attributes(struct amdgpu_device
*adev
,
3660 const struct amdgpu_framebuffer
*afb
,
3661 const enum surface_pixel_format format
,
3662 const enum dc_rotation_angle rotation
,
3663 const struct plane_size
*plane_size
,
3664 const union dc_tiling_info
*tiling_info
,
3665 const uint64_t info
,
3666 struct dc_plane_dcc_param
*dcc
,
3667 struct dc_plane_address
*address
,
3668 bool force_disable_dcc
)
3670 struct dc
*dc
= adev
->dm
.dc
;
3671 struct dc_dcc_surface_param input
;
3672 struct dc_surface_dcc_cap output
;
3673 uint32_t offset
= AMDGPU_TILING_GET(info
, DCC_OFFSET_256B
);
3674 uint32_t i64b
= AMDGPU_TILING_GET(info
, DCC_INDEPENDENT_64B
) != 0;
3675 uint64_t dcc_address
;
3677 memset(&input
, 0, sizeof(input
));
3678 memset(&output
, 0, sizeof(output
));
3680 if (force_disable_dcc
)
3686 if (format
>= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN
)
3689 if (!dc
->cap_funcs
.get_dcc_compression_cap
)
3692 input
.format
= format
;
3693 input
.surface_size
.width
= plane_size
->surface_size
.width
;
3694 input
.surface_size
.height
= plane_size
->surface_size
.height
;
3695 input
.swizzle_mode
= tiling_info
->gfx9
.swizzle
;
3697 if (rotation
== ROTATION_ANGLE_0
|| rotation
== ROTATION_ANGLE_180
)
3698 input
.scan
= SCAN_DIRECTION_HORIZONTAL
;
3699 else if (rotation
== ROTATION_ANGLE_90
|| rotation
== ROTATION_ANGLE_270
)
3700 input
.scan
= SCAN_DIRECTION_VERTICAL
;
3702 if (!dc
->cap_funcs
.get_dcc_compression_cap(dc
, &input
, &output
))
3705 if (!output
.capable
)
3708 if (i64b
== 0 && output
.grph
.rgb
.independent_64b_blks
!= 0)
3713 AMDGPU_TILING_GET(info
, DCC_PITCH_MAX
) + 1;
3714 dcc
->independent_64b_blks
= i64b
;
3716 dcc_address
= get_dcc_address(afb
->address
, info
);
3717 address
->grph
.meta_addr
.low_part
= lower_32_bits(dcc_address
);
3718 address
->grph
.meta_addr
.high_part
= upper_32_bits(dcc_address
);
3724 fill_plane_buffer_attributes(struct amdgpu_device
*adev
,
3725 const struct amdgpu_framebuffer
*afb
,
3726 const enum surface_pixel_format format
,
3727 const enum dc_rotation_angle rotation
,
3728 const uint64_t tiling_flags
,
3729 union dc_tiling_info
*tiling_info
,
3730 struct plane_size
*plane_size
,
3731 struct dc_plane_dcc_param
*dcc
,
3732 struct dc_plane_address
*address
,
3734 bool force_disable_dcc
)
3736 const struct drm_framebuffer
*fb
= &afb
->base
;
3739 memset(tiling_info
, 0, sizeof(*tiling_info
));
3740 memset(plane_size
, 0, sizeof(*plane_size
));
3741 memset(dcc
, 0, sizeof(*dcc
));
3742 memset(address
, 0, sizeof(*address
));
3744 address
->tmz_surface
= tmz_surface
;
3746 if (format
< SURFACE_PIXEL_FORMAT_VIDEO_BEGIN
) {
3747 plane_size
->surface_size
.x
= 0;
3748 plane_size
->surface_size
.y
= 0;
3749 plane_size
->surface_size
.width
= fb
->width
;
3750 plane_size
->surface_size
.height
= fb
->height
;
3751 plane_size
->surface_pitch
=
3752 fb
->pitches
[0] / fb
->format
->cpp
[0];
3754 address
->type
= PLN_ADDR_TYPE_GRAPHICS
;
3755 address
->grph
.addr
.low_part
= lower_32_bits(afb
->address
);
3756 address
->grph
.addr
.high_part
= upper_32_bits(afb
->address
);
3757 } else if (format
< SURFACE_PIXEL_FORMAT_INVALID
) {
3758 uint64_t chroma_addr
= afb
->address
+ fb
->offsets
[1];
3760 plane_size
->surface_size
.x
= 0;
3761 plane_size
->surface_size
.y
= 0;
3762 plane_size
->surface_size
.width
= fb
->width
;
3763 plane_size
->surface_size
.height
= fb
->height
;
3764 plane_size
->surface_pitch
=
3765 fb
->pitches
[0] / fb
->format
->cpp
[0];
3767 plane_size
->chroma_size
.x
= 0;
3768 plane_size
->chroma_size
.y
= 0;
3769 /* TODO: set these based on surface format */
3770 plane_size
->chroma_size
.width
= fb
->width
/ 2;
3771 plane_size
->chroma_size
.height
= fb
->height
/ 2;
3773 plane_size
->chroma_pitch
=
3774 fb
->pitches
[1] / fb
->format
->cpp
[1];
3776 address
->type
= PLN_ADDR_TYPE_VIDEO_PROGRESSIVE
;
3777 address
->video_progressive
.luma_addr
.low_part
=
3778 lower_32_bits(afb
->address
);
3779 address
->video_progressive
.luma_addr
.high_part
=
3780 upper_32_bits(afb
->address
);
3781 address
->video_progressive
.chroma_addr
.low_part
=
3782 lower_32_bits(chroma_addr
);
3783 address
->video_progressive
.chroma_addr
.high_part
=
3784 upper_32_bits(chroma_addr
);
3787 /* Fill GFX8 params */
3788 if (AMDGPU_TILING_GET(tiling_flags
, ARRAY_MODE
) == DC_ARRAY_2D_TILED_THIN1
) {
3789 unsigned int bankw
, bankh
, mtaspect
, tile_split
, num_banks
;
3791 bankw
= AMDGPU_TILING_GET(tiling_flags
, BANK_WIDTH
);
3792 bankh
= AMDGPU_TILING_GET(tiling_flags
, BANK_HEIGHT
);
3793 mtaspect
= AMDGPU_TILING_GET(tiling_flags
, MACRO_TILE_ASPECT
);
3794 tile_split
= AMDGPU_TILING_GET(tiling_flags
, TILE_SPLIT
);
3795 num_banks
= AMDGPU_TILING_GET(tiling_flags
, NUM_BANKS
);
3797 /* XXX fix me for VI */
3798 tiling_info
->gfx8
.num_banks
= num_banks
;
3799 tiling_info
->gfx8
.array_mode
=
3800 DC_ARRAY_2D_TILED_THIN1
;
3801 tiling_info
->gfx8
.tile_split
= tile_split
;
3802 tiling_info
->gfx8
.bank_width
= bankw
;
3803 tiling_info
->gfx8
.bank_height
= bankh
;
3804 tiling_info
->gfx8
.tile_aspect
= mtaspect
;
3805 tiling_info
->gfx8
.tile_mode
=
3806 DC_ADDR_SURF_MICRO_TILING_DISPLAY
;
3807 } else if (AMDGPU_TILING_GET(tiling_flags
, ARRAY_MODE
)
3808 == DC_ARRAY_1D_TILED_THIN1
) {
3809 tiling_info
->gfx8
.array_mode
= DC_ARRAY_1D_TILED_THIN1
;
3812 tiling_info
->gfx8
.pipe_config
=
3813 AMDGPU_TILING_GET(tiling_flags
, PIPE_CONFIG
);
3815 if (adev
->asic_type
== CHIP_VEGA10
||
3816 adev
->asic_type
== CHIP_VEGA12
||
3817 adev
->asic_type
== CHIP_VEGA20
||
3818 adev
->asic_type
== CHIP_NAVI10
||
3819 adev
->asic_type
== CHIP_NAVI14
||
3820 adev
->asic_type
== CHIP_NAVI12
||
3821 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3822 adev
->asic_type
== CHIP_SIENNA_CICHLID
||
3823 adev
->asic_type
== CHIP_NAVY_FLOUNDER
||
3825 adev
->asic_type
== CHIP_RENOIR
||
3826 adev
->asic_type
== CHIP_RAVEN
) {
3827 /* Fill GFX9 params */
3828 tiling_info
->gfx9
.num_pipes
=
3829 adev
->gfx
.config
.gb_addr_config_fields
.num_pipes
;
3830 tiling_info
->gfx9
.num_banks
=
3831 adev
->gfx
.config
.gb_addr_config_fields
.num_banks
;
3832 tiling_info
->gfx9
.pipe_interleave
=
3833 adev
->gfx
.config
.gb_addr_config_fields
.pipe_interleave_size
;
3834 tiling_info
->gfx9
.num_shader_engines
=
3835 adev
->gfx
.config
.gb_addr_config_fields
.num_se
;
3836 tiling_info
->gfx9
.max_compressed_frags
=
3837 adev
->gfx
.config
.gb_addr_config_fields
.max_compress_frags
;
3838 tiling_info
->gfx9
.num_rb_per_se
=
3839 adev
->gfx
.config
.gb_addr_config_fields
.num_rb_per_se
;
3840 tiling_info
->gfx9
.swizzle
=
3841 AMDGPU_TILING_GET(tiling_flags
, SWIZZLE_MODE
);
3842 tiling_info
->gfx9
.shaderEnable
= 1;
3844 #ifdef CONFIG_DRM_AMD_DC_DCN3_0
3845 if (adev
->asic_type
== CHIP_SIENNA_CICHLID
||
3846 adev
->asic_type
== CHIP_NAVY_FLOUNDER
)
3847 tiling_info
->gfx9
.num_pkrs
= adev
->gfx
.config
.gb_addr_config_fields
.num_pkrs
;
3849 ret
= fill_plane_dcc_attributes(adev
, afb
, format
, rotation
,
3850 plane_size
, tiling_info
,
3851 tiling_flags
, dcc
, address
,
3861 fill_blending_from_plane_state(const struct drm_plane_state
*plane_state
,
3862 bool *per_pixel_alpha
, bool *global_alpha
,
3863 int *global_alpha_value
)
3865 *per_pixel_alpha
= false;
3866 *global_alpha
= false;
3867 *global_alpha_value
= 0xff;
3869 if (plane_state
->plane
->type
!= DRM_PLANE_TYPE_OVERLAY
)
3872 if (plane_state
->pixel_blend_mode
== DRM_MODE_BLEND_PREMULTI
) {
3873 static const uint32_t alpha_formats
[] = {
3874 DRM_FORMAT_ARGB8888
,
3875 DRM_FORMAT_RGBA8888
,
3876 DRM_FORMAT_ABGR8888
,
3878 uint32_t format
= plane_state
->fb
->format
->format
;
3881 for (i
= 0; i
< ARRAY_SIZE(alpha_formats
); ++i
) {
3882 if (format
== alpha_formats
[i
]) {
3883 *per_pixel_alpha
= true;
3889 if (plane_state
->alpha
< 0xffff) {
3890 *global_alpha
= true;
3891 *global_alpha_value
= plane_state
->alpha
>> 8;
3896 fill_plane_color_attributes(const struct drm_plane_state
*plane_state
,
3897 const enum surface_pixel_format format
,
3898 enum dc_color_space
*color_space
)
3902 *color_space
= COLOR_SPACE_SRGB
;
3904 /* DRM color properties only affect non-RGB formats. */
3905 if (format
< SURFACE_PIXEL_FORMAT_VIDEO_BEGIN
)
3908 full_range
= (plane_state
->color_range
== DRM_COLOR_YCBCR_FULL_RANGE
);
3910 switch (plane_state
->color_encoding
) {
3911 case DRM_COLOR_YCBCR_BT601
:
3913 *color_space
= COLOR_SPACE_YCBCR601
;
3915 *color_space
= COLOR_SPACE_YCBCR601_LIMITED
;
3918 case DRM_COLOR_YCBCR_BT709
:
3920 *color_space
= COLOR_SPACE_YCBCR709
;
3922 *color_space
= COLOR_SPACE_YCBCR709_LIMITED
;
3925 case DRM_COLOR_YCBCR_BT2020
:
3927 *color_space
= COLOR_SPACE_2020_YCBCR
;
3940 fill_dc_plane_info_and_addr(struct amdgpu_device
*adev
,
3941 const struct drm_plane_state
*plane_state
,
3942 const uint64_t tiling_flags
,
3943 struct dc_plane_info
*plane_info
,
3944 struct dc_plane_address
*address
,
3946 bool force_disable_dcc
)
3948 const struct drm_framebuffer
*fb
= plane_state
->fb
;
3949 const struct amdgpu_framebuffer
*afb
=
3950 to_amdgpu_framebuffer(plane_state
->fb
);
3951 struct drm_format_name_buf format_name
;
3954 memset(plane_info
, 0, sizeof(*plane_info
));
3956 switch (fb
->format
->format
) {
3958 plane_info
->format
=
3959 SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS
;
3961 case DRM_FORMAT_RGB565
:
3962 plane_info
->format
= SURFACE_PIXEL_FORMAT_GRPH_RGB565
;
3964 case DRM_FORMAT_XRGB8888
:
3965 case DRM_FORMAT_ARGB8888
:
3966 plane_info
->format
= SURFACE_PIXEL_FORMAT_GRPH_ARGB8888
;
3968 case DRM_FORMAT_XRGB2101010
:
3969 case DRM_FORMAT_ARGB2101010
:
3970 plane_info
->format
= SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010
;
3972 case DRM_FORMAT_XBGR2101010
:
3973 case DRM_FORMAT_ABGR2101010
:
3974 plane_info
->format
= SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010
;
3976 case DRM_FORMAT_XBGR8888
:
3977 case DRM_FORMAT_ABGR8888
:
3978 plane_info
->format
= SURFACE_PIXEL_FORMAT_GRPH_ABGR8888
;
3980 case DRM_FORMAT_NV21
:
3981 plane_info
->format
= SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr
;
3983 case DRM_FORMAT_NV12
:
3984 plane_info
->format
= SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb
;
3986 case DRM_FORMAT_P010
:
3987 plane_info
->format
= SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb
;
3989 case DRM_FORMAT_XRGB16161616F
:
3990 case DRM_FORMAT_ARGB16161616F
:
3991 plane_info
->format
= SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F
;
3993 case DRM_FORMAT_XBGR16161616F
:
3994 case DRM_FORMAT_ABGR16161616F
:
3995 plane_info
->format
= SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F
;
3999 "Unsupported screen format %s\n",
4000 drm_get_format_name(fb
->format
->format
, &format_name
));
4004 switch (plane_state
->rotation
& DRM_MODE_ROTATE_MASK
) {
4005 case DRM_MODE_ROTATE_0
:
4006 plane_info
->rotation
= ROTATION_ANGLE_0
;
4008 case DRM_MODE_ROTATE_90
:
4009 plane_info
->rotation
= ROTATION_ANGLE_90
;
4011 case DRM_MODE_ROTATE_180
:
4012 plane_info
->rotation
= ROTATION_ANGLE_180
;
4014 case DRM_MODE_ROTATE_270
:
4015 plane_info
->rotation
= ROTATION_ANGLE_270
;
4018 plane_info
->rotation
= ROTATION_ANGLE_0
;
4022 plane_info
->visible
= true;
4023 plane_info
->stereo_format
= PLANE_STEREO_FORMAT_NONE
;
4025 plane_info
->layer_index
= 0;
4027 ret
= fill_plane_color_attributes(plane_state
, plane_info
->format
,
4028 &plane_info
->color_space
);
4032 ret
= fill_plane_buffer_attributes(adev
, afb
, plane_info
->format
,
4033 plane_info
->rotation
, tiling_flags
,
4034 &plane_info
->tiling_info
,
4035 &plane_info
->plane_size
,
4036 &plane_info
->dcc
, address
, tmz_surface
,
4041 fill_blending_from_plane_state(
4042 plane_state
, &plane_info
->per_pixel_alpha
,
4043 &plane_info
->global_alpha
, &plane_info
->global_alpha_value
);
4048 static int fill_dc_plane_attributes(struct amdgpu_device
*adev
,
4049 struct dc_plane_state
*dc_plane_state
,
4050 struct drm_plane_state
*plane_state
,
4051 struct drm_crtc_state
*crtc_state
)
4053 struct dm_crtc_state
*dm_crtc_state
= to_dm_crtc_state(crtc_state
);
4054 const struct amdgpu_framebuffer
*amdgpu_fb
=
4055 to_amdgpu_framebuffer(plane_state
->fb
);
4056 struct dc_scaling_info scaling_info
;
4057 struct dc_plane_info plane_info
;
4058 uint64_t tiling_flags
;
4060 bool tmz_surface
= false;
4061 bool force_disable_dcc
= false;
4063 ret
= fill_dc_scaling_info(plane_state
, &scaling_info
);
4067 dc_plane_state
->src_rect
= scaling_info
.src_rect
;
4068 dc_plane_state
->dst_rect
= scaling_info
.dst_rect
;
4069 dc_plane_state
->clip_rect
= scaling_info
.clip_rect
;
4070 dc_plane_state
->scaling_quality
= scaling_info
.scaling_quality
;
4072 ret
= get_fb_info(amdgpu_fb
, &tiling_flags
, &tmz_surface
);
4076 force_disable_dcc
= adev
->asic_type
== CHIP_RAVEN
&& adev
->in_suspend
;
4077 ret
= fill_dc_plane_info_and_addr(adev
, plane_state
, tiling_flags
,
4079 &dc_plane_state
->address
,
4085 dc_plane_state
->format
= plane_info
.format
;
4086 dc_plane_state
->color_space
= plane_info
.color_space
;
4087 dc_plane_state
->format
= plane_info
.format
;
4088 dc_plane_state
->plane_size
= plane_info
.plane_size
;
4089 dc_plane_state
->rotation
= plane_info
.rotation
;
4090 dc_plane_state
->horizontal_mirror
= plane_info
.horizontal_mirror
;
4091 dc_plane_state
->stereo_format
= plane_info
.stereo_format
;
4092 dc_plane_state
->tiling_info
= plane_info
.tiling_info
;
4093 dc_plane_state
->visible
= plane_info
.visible
;
4094 dc_plane_state
->per_pixel_alpha
= plane_info
.per_pixel_alpha
;
4095 dc_plane_state
->global_alpha
= plane_info
.global_alpha
;
4096 dc_plane_state
->global_alpha_value
= plane_info
.global_alpha_value
;
4097 dc_plane_state
->dcc
= plane_info
.dcc
;
4098 dc_plane_state
->layer_index
= plane_info
.layer_index
; // Always returns 0
4101 * Always set input transfer function, since plane state is refreshed
4104 ret
= amdgpu_dm_update_plane_color_mgmt(dm_crtc_state
, dc_plane_state
);
4111 static void update_stream_scaling_settings(const struct drm_display_mode
*mode
,
4112 const struct dm_connector_state
*dm_state
,
4113 struct dc_stream_state
*stream
)
4115 enum amdgpu_rmx_type rmx_type
;
4117 struct rect src
= { 0 }; /* viewport in composition space*/
4118 struct rect dst
= { 0 }; /* stream addressable area */
4120 /* no mode. nothing to be done */
4124 /* Full screen scaling by default */
4125 src
.width
= mode
->hdisplay
;
4126 src
.height
= mode
->vdisplay
;
4127 dst
.width
= stream
->timing
.h_addressable
;
4128 dst
.height
= stream
->timing
.v_addressable
;
4131 rmx_type
= dm_state
->scaling
;
4132 if (rmx_type
== RMX_ASPECT
|| rmx_type
== RMX_OFF
) {
4133 if (src
.width
* dst
.height
<
4134 src
.height
* dst
.width
) {
4135 /* height needs less upscaling/more downscaling */
4136 dst
.width
= src
.width
*
4137 dst
.height
/ src
.height
;
4139 /* width needs less upscaling/more downscaling */
4140 dst
.height
= src
.height
*
4141 dst
.width
/ src
.width
;
4143 } else if (rmx_type
== RMX_CENTER
) {
4147 dst
.x
= (stream
->timing
.h_addressable
- dst
.width
) / 2;
4148 dst
.y
= (stream
->timing
.v_addressable
- dst
.height
) / 2;
4150 if (dm_state
->underscan_enable
) {
4151 dst
.x
+= dm_state
->underscan_hborder
/ 2;
4152 dst
.y
+= dm_state
->underscan_vborder
/ 2;
4153 dst
.width
-= dm_state
->underscan_hborder
;
4154 dst
.height
-= dm_state
->underscan_vborder
;
4161 DRM_DEBUG_DRIVER("Destination Rectangle x:%d y:%d width:%d height:%d\n",
4162 dst
.x
, dst
.y
, dst
.width
, dst
.height
);
4166 static enum dc_color_depth
4167 convert_color_depth_from_display_info(const struct drm_connector
*connector
,
4168 bool is_y420
, int requested_bpc
)
4175 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
4176 if (connector
->display_info
.hdmi
.y420_dc_modes
& DRM_EDID_YCBCR420_DC_48
)
4178 else if (connector
->display_info
.hdmi
.y420_dc_modes
& DRM_EDID_YCBCR420_DC_36
)
4180 else if (connector
->display_info
.hdmi
.y420_dc_modes
& DRM_EDID_YCBCR420_DC_30
)
4183 bpc
= (uint8_t)connector
->display_info
.bpc
;
4184 /* Assume 8 bpc by default if no bpc is specified. */
4185 bpc
= bpc
? bpc
: 8;
4188 if (requested_bpc
> 0) {
4190 * Cap display bpc based on the user requested value.
4192 * The value for state->max_bpc may not correctly updated
4193 * depending on when the connector gets added to the state
4194 * or if this was called outside of atomic check, so it
4195 * can't be used directly.
4197 bpc
= min_t(u8
, bpc
, requested_bpc
);
4199 /* Round down to the nearest even number. */
4200 bpc
= bpc
- (bpc
& 1);
4206 * Temporary Work around, DRM doesn't parse color depth for
4207 * EDID revision before 1.4
4208 * TODO: Fix edid parsing
4210 return COLOR_DEPTH_888
;
4212 return COLOR_DEPTH_666
;
4214 return COLOR_DEPTH_888
;
4216 return COLOR_DEPTH_101010
;
4218 return COLOR_DEPTH_121212
;
4220 return COLOR_DEPTH_141414
;
4222 return COLOR_DEPTH_161616
;
4224 return COLOR_DEPTH_UNDEFINED
;
4228 static enum dc_aspect_ratio
4229 get_aspect_ratio(const struct drm_display_mode
*mode_in
)
4231 /* 1-1 mapping, since both enums follow the HDMI spec. */
4232 return (enum dc_aspect_ratio
) mode_in
->picture_aspect_ratio
;
4235 static enum dc_color_space
4236 get_output_color_space(const struct dc_crtc_timing
*dc_crtc_timing
)
4238 enum dc_color_space color_space
= COLOR_SPACE_SRGB
;
4240 switch (dc_crtc_timing
->pixel_encoding
) {
4241 case PIXEL_ENCODING_YCBCR422
:
4242 case PIXEL_ENCODING_YCBCR444
:
4243 case PIXEL_ENCODING_YCBCR420
:
4246 * 27030khz is the separation point between HDTV and SDTV
4247 * according to HDMI spec, we use YCbCr709 and YCbCr601
4250 if (dc_crtc_timing
->pix_clk_100hz
> 270300) {
4251 if (dc_crtc_timing
->flags
.Y_ONLY
)
4253 COLOR_SPACE_YCBCR709_LIMITED
;
4255 color_space
= COLOR_SPACE_YCBCR709
;
4257 if (dc_crtc_timing
->flags
.Y_ONLY
)
4259 COLOR_SPACE_YCBCR601_LIMITED
;
4261 color_space
= COLOR_SPACE_YCBCR601
;
4266 case PIXEL_ENCODING_RGB
:
4267 color_space
= COLOR_SPACE_SRGB
;
4278 static bool adjust_colour_depth_from_display_info(
4279 struct dc_crtc_timing
*timing_out
,
4280 const struct drm_display_info
*info
)
4282 enum dc_color_depth depth
= timing_out
->display_color_depth
;
4285 normalized_clk
= timing_out
->pix_clk_100hz
/ 10;
4286 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
4287 if (timing_out
->pixel_encoding
== PIXEL_ENCODING_YCBCR420
)
4288 normalized_clk
/= 2;
4289 /* Adjusting pix clock following on HDMI spec based on colour depth */
4291 case COLOR_DEPTH_888
:
4293 case COLOR_DEPTH_101010
:
4294 normalized_clk
= (normalized_clk
* 30) / 24;
4296 case COLOR_DEPTH_121212
:
4297 normalized_clk
= (normalized_clk
* 36) / 24;
4299 case COLOR_DEPTH_161616
:
4300 normalized_clk
= (normalized_clk
* 48) / 24;
4303 /* The above depths are the only ones valid for HDMI. */
4306 if (normalized_clk
<= info
->max_tmds_clock
) {
4307 timing_out
->display_color_depth
= depth
;
4310 } while (--depth
> COLOR_DEPTH_666
);
4314 static void fill_stream_properties_from_drm_display_mode(
4315 struct dc_stream_state
*stream
,
4316 const struct drm_display_mode
*mode_in
,
4317 const struct drm_connector
*connector
,
4318 const struct drm_connector_state
*connector_state
,
4319 const struct dc_stream_state
*old_stream
,
4322 struct dc_crtc_timing
*timing_out
= &stream
->timing
;
4323 const struct drm_display_info
*info
= &connector
->display_info
;
4324 struct amdgpu_dm_connector
*aconnector
= to_amdgpu_dm_connector(connector
);
4325 struct hdmi_vendor_infoframe hv_frame
;
4326 struct hdmi_avi_infoframe avi_frame
;
4328 memset(&hv_frame
, 0, sizeof(hv_frame
));
4329 memset(&avi_frame
, 0, sizeof(avi_frame
));
4331 timing_out
->h_border_left
= 0;
4332 timing_out
->h_border_right
= 0;
4333 timing_out
->v_border_top
= 0;
4334 timing_out
->v_border_bottom
= 0;
4335 /* TODO: un-hardcode */
4336 if (drm_mode_is_420_only(info
, mode_in
)
4337 && stream
->signal
== SIGNAL_TYPE_HDMI_TYPE_A
)
4338 timing_out
->pixel_encoding
= PIXEL_ENCODING_YCBCR420
;
4339 else if (drm_mode_is_420_also(info
, mode_in
)
4340 && aconnector
->force_yuv420_output
)
4341 timing_out
->pixel_encoding
= PIXEL_ENCODING_YCBCR420
;
4342 else if ((connector
->display_info
.color_formats
& DRM_COLOR_FORMAT_YCRCB444
)
4343 && stream
->signal
== SIGNAL_TYPE_HDMI_TYPE_A
)
4344 timing_out
->pixel_encoding
= PIXEL_ENCODING_YCBCR444
;
4346 timing_out
->pixel_encoding
= PIXEL_ENCODING_RGB
;
4348 timing_out
->timing_3d_format
= TIMING_3D_FORMAT_NONE
;
4349 timing_out
->display_color_depth
= convert_color_depth_from_display_info(
4351 (timing_out
->pixel_encoding
== PIXEL_ENCODING_YCBCR420
),
4353 timing_out
->scan_type
= SCANNING_TYPE_NODATA
;
4354 timing_out
->hdmi_vic
= 0;
4357 timing_out
->vic
= old_stream
->timing
.vic
;
4358 timing_out
->flags
.HSYNC_POSITIVE_POLARITY
= old_stream
->timing
.flags
.HSYNC_POSITIVE_POLARITY
;
4359 timing_out
->flags
.VSYNC_POSITIVE_POLARITY
= old_stream
->timing
.flags
.VSYNC_POSITIVE_POLARITY
;
4361 timing_out
->vic
= drm_match_cea_mode(mode_in
);
4362 if (mode_in
->flags
& DRM_MODE_FLAG_PHSYNC
)
4363 timing_out
->flags
.HSYNC_POSITIVE_POLARITY
= 1;
4364 if (mode_in
->flags
& DRM_MODE_FLAG_PVSYNC
)
4365 timing_out
->flags
.VSYNC_POSITIVE_POLARITY
= 1;
4368 if (stream
->signal
== SIGNAL_TYPE_HDMI_TYPE_A
) {
4369 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame
, (struct drm_connector
*)connector
, mode_in
);
4370 timing_out
->vic
= avi_frame
.video_code
;
4371 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame
, (struct drm_connector
*)connector
, mode_in
);
4372 timing_out
->hdmi_vic
= hv_frame
.vic
;
4375 timing_out
->h_addressable
= mode_in
->crtc_hdisplay
;
4376 timing_out
->h_total
= mode_in
->crtc_htotal
;
4377 timing_out
->h_sync_width
=
4378 mode_in
->crtc_hsync_end
- mode_in
->crtc_hsync_start
;
4379 timing_out
->h_front_porch
=
4380 mode_in
->crtc_hsync_start
- mode_in
->crtc_hdisplay
;
4381 timing_out
->v_total
= mode_in
->crtc_vtotal
;
4382 timing_out
->v_addressable
= mode_in
->crtc_vdisplay
;
4383 timing_out
->v_front_porch
=
4384 mode_in
->crtc_vsync_start
- mode_in
->crtc_vdisplay
;
4385 timing_out
->v_sync_width
=
4386 mode_in
->crtc_vsync_end
- mode_in
->crtc_vsync_start
;
4387 timing_out
->pix_clk_100hz
= mode_in
->crtc_clock
* 10;
4388 timing_out
->aspect_ratio
= get_aspect_ratio(mode_in
);
4390 stream
->output_color_space
= get_output_color_space(timing_out
);
4392 stream
->out_transfer_func
->type
= TF_TYPE_PREDEFINED
;
4393 stream
->out_transfer_func
->tf
= TRANSFER_FUNCTION_SRGB
;
4394 if (stream
->signal
== SIGNAL_TYPE_HDMI_TYPE_A
) {
4395 if (!adjust_colour_depth_from_display_info(timing_out
, info
) &&
4396 drm_mode_is_420_also(info
, mode_in
) &&
4397 timing_out
->pixel_encoding
!= PIXEL_ENCODING_YCBCR420
) {
4398 timing_out
->pixel_encoding
= PIXEL_ENCODING_YCBCR420
;
4399 adjust_colour_depth_from_display_info(timing_out
, info
);
4404 static void fill_audio_info(struct audio_info
*audio_info
,
4405 const struct drm_connector
*drm_connector
,
4406 const struct dc_sink
*dc_sink
)
4409 int cea_revision
= 0;
4410 const struct dc_edid_caps
*edid_caps
= &dc_sink
->edid_caps
;
4412 audio_info
->manufacture_id
= edid_caps
->manufacturer_id
;
4413 audio_info
->product_id
= edid_caps
->product_id
;
4415 cea_revision
= drm_connector
->display_info
.cea_rev
;
4417 strscpy(audio_info
->display_name
,
4418 edid_caps
->display_name
,
4419 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS
);
4421 if (cea_revision
>= 3) {
4422 audio_info
->mode_count
= edid_caps
->audio_mode_count
;
4424 for (i
= 0; i
< audio_info
->mode_count
; ++i
) {
4425 audio_info
->modes
[i
].format_code
=
4426 (enum audio_format_code
)
4427 (edid_caps
->audio_modes
[i
].format_code
);
4428 audio_info
->modes
[i
].channel_count
=
4429 edid_caps
->audio_modes
[i
].channel_count
;
4430 audio_info
->modes
[i
].sample_rates
.all
=
4431 edid_caps
->audio_modes
[i
].sample_rate
;
4432 audio_info
->modes
[i
].sample_size
=
4433 edid_caps
->audio_modes
[i
].sample_size
;
4437 audio_info
->flags
.all
= edid_caps
->speaker_flags
;
4439 /* TODO: We only check for the progressive mode, check for interlace mode too */
4440 if (drm_connector
->latency_present
[0]) {
4441 audio_info
->video_latency
= drm_connector
->video_latency
[0];
4442 audio_info
->audio_latency
= drm_connector
->audio_latency
[0];
4445 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
4450 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode
*src_mode
,
4451 struct drm_display_mode
*dst_mode
)
4453 dst_mode
->crtc_hdisplay
= src_mode
->crtc_hdisplay
;
4454 dst_mode
->crtc_vdisplay
= src_mode
->crtc_vdisplay
;
4455 dst_mode
->crtc_clock
= src_mode
->crtc_clock
;
4456 dst_mode
->crtc_hblank_start
= src_mode
->crtc_hblank_start
;
4457 dst_mode
->crtc_hblank_end
= src_mode
->crtc_hblank_end
;
4458 dst_mode
->crtc_hsync_start
= src_mode
->crtc_hsync_start
;
4459 dst_mode
->crtc_hsync_end
= src_mode
->crtc_hsync_end
;
4460 dst_mode
->crtc_htotal
= src_mode
->crtc_htotal
;
4461 dst_mode
->crtc_hskew
= src_mode
->crtc_hskew
;
4462 dst_mode
->crtc_vblank_start
= src_mode
->crtc_vblank_start
;
4463 dst_mode
->crtc_vblank_end
= src_mode
->crtc_vblank_end
;
4464 dst_mode
->crtc_vsync_start
= src_mode
->crtc_vsync_start
;
4465 dst_mode
->crtc_vsync_end
= src_mode
->crtc_vsync_end
;
4466 dst_mode
->crtc_vtotal
= src_mode
->crtc_vtotal
;
4470 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode
*drm_mode
,
4471 const struct drm_display_mode
*native_mode
,
4474 if (scale_enabled
) {
4475 copy_crtc_timing_for_drm_display_mode(native_mode
, drm_mode
);
4476 } else if (native_mode
->clock
== drm_mode
->clock
&&
4477 native_mode
->htotal
== drm_mode
->htotal
&&
4478 native_mode
->vtotal
== drm_mode
->vtotal
) {
4479 copy_crtc_timing_for_drm_display_mode(native_mode
, drm_mode
);
4481 /* no scaling nor amdgpu inserted, no need to patch */
4485 static struct dc_sink
*
4486 create_fake_sink(struct amdgpu_dm_connector
*aconnector
)
4488 struct dc_sink_init_data sink_init_data
= { 0 };
4489 struct dc_sink
*sink
= NULL
;
4490 sink_init_data
.link
= aconnector
->dc_link
;
4491 sink_init_data
.sink_signal
= aconnector
->dc_link
->connector_signal
;
4493 sink
= dc_sink_create(&sink_init_data
);
4495 DRM_ERROR("Failed to create sink!\n");
4498 sink
->sink_signal
= SIGNAL_TYPE_VIRTUAL
;
4503 static void set_multisync_trigger_params(
4504 struct dc_stream_state
*stream
)
4506 if (stream
->triggered_crtc_reset
.enabled
) {
4507 stream
->triggered_crtc_reset
.event
= CRTC_EVENT_VSYNC_RISING
;
4508 stream
->triggered_crtc_reset
.delay
= TRIGGER_DELAY_NEXT_LINE
;
4512 static void set_master_stream(struct dc_stream_state
*stream_set
[],
4515 int j
, highest_rfr
= 0, master_stream
= 0;
4517 for (j
= 0; j
< stream_count
; j
++) {
4518 if (stream_set
[j
] && stream_set
[j
]->triggered_crtc_reset
.enabled
) {
4519 int refresh_rate
= 0;
4521 refresh_rate
= (stream_set
[j
]->timing
.pix_clk_100hz
*100)/
4522 (stream_set
[j
]->timing
.h_total
*stream_set
[j
]->timing
.v_total
);
4523 if (refresh_rate
> highest_rfr
) {
4524 highest_rfr
= refresh_rate
;
4529 for (j
= 0; j
< stream_count
; j
++) {
4531 stream_set
[j
]->triggered_crtc_reset
.event_source
= stream_set
[master_stream
];
4535 static void dm_enable_per_frame_crtc_master_sync(struct dc_state
*context
)
4539 if (context
->stream_count
< 2)
4541 for (i
= 0; i
< context
->stream_count
; i
++) {
4542 if (!context
->streams
[i
])
4545 * TODO: add a function to read AMD VSDB bits and set
4546 * crtc_sync_master.multi_sync_enabled flag
4547 * For now it's set to false
4549 set_multisync_trigger_params(context
->streams
[i
]);
4551 set_master_stream(context
->streams
, context
->stream_count
);
4554 static struct dc_stream_state
*
4555 create_stream_for_sink(struct amdgpu_dm_connector
*aconnector
,
4556 const struct drm_display_mode
*drm_mode
,
4557 const struct dm_connector_state
*dm_state
,
4558 const struct dc_stream_state
*old_stream
,
4561 struct drm_display_mode
*preferred_mode
= NULL
;
4562 struct drm_connector
*drm_connector
;
4563 const struct drm_connector_state
*con_state
=
4564 dm_state
? &dm_state
->base
: NULL
;
4565 struct dc_stream_state
*stream
= NULL
;
4566 struct drm_display_mode mode
= *drm_mode
;
4567 bool native_mode_found
= false;
4568 bool scale
= dm_state
? (dm_state
->scaling
!= RMX_OFF
) : false;
4570 int preferred_refresh
= 0;
4571 #if defined(CONFIG_DRM_AMD_DC_DCN)
4572 struct dsc_dec_dpcd_caps dsc_caps
;
4574 uint32_t link_bandwidth_kbps
;
4576 struct dc_sink
*sink
= NULL
;
4577 if (aconnector
== NULL
) {
4578 DRM_ERROR("aconnector is NULL!\n");
4582 drm_connector
= &aconnector
->base
;
4584 if (!aconnector
->dc_sink
) {
4585 sink
= create_fake_sink(aconnector
);
4589 sink
= aconnector
->dc_sink
;
4590 dc_sink_retain(sink
);
4593 stream
= dc_create_stream_for_sink(sink
);
4595 if (stream
== NULL
) {
4596 DRM_ERROR("Failed to create stream for sink!\n");
4600 stream
->dm_stream_context
= aconnector
;
4602 stream
->timing
.flags
.LTE_340MCSC_SCRAMBLE
=
4603 drm_connector
->display_info
.hdmi
.scdc
.scrambling
.low_rates
;
4605 list_for_each_entry(preferred_mode
, &aconnector
->base
.modes
, head
) {
4606 /* Search for preferred mode */
4607 if (preferred_mode
->type
& DRM_MODE_TYPE_PREFERRED
) {
4608 native_mode_found
= true;
4612 if (!native_mode_found
)
4613 preferred_mode
= list_first_entry_or_null(
4614 &aconnector
->base
.modes
,
4615 struct drm_display_mode
,
4618 mode_refresh
= drm_mode_vrefresh(&mode
);
4620 if (preferred_mode
== NULL
) {
4622 * This may not be an error, the use case is when we have no
4623 * usermode calls to reset and set mode upon hotplug. In this
4624 * case, we call set mode ourselves to restore the previous mode
4625 * and the modelist may not be filled in in time.
4627 DRM_DEBUG_DRIVER("No preferred mode found\n");
4629 decide_crtc_timing_for_drm_display_mode(
4630 &mode
, preferred_mode
,
4631 dm_state
? (dm_state
->scaling
!= RMX_OFF
) : false);
4632 preferred_refresh
= drm_mode_vrefresh(preferred_mode
);
4636 drm_mode_set_crtcinfo(&mode
, 0);
4639 * If scaling is enabled and refresh rate didn't change
4640 * we copy the vic and polarities of the old timings
4642 if (!scale
|| mode_refresh
!= preferred_refresh
)
4643 fill_stream_properties_from_drm_display_mode(stream
,
4644 &mode
, &aconnector
->base
, con_state
, NULL
, requested_bpc
);
4646 fill_stream_properties_from_drm_display_mode(stream
,
4647 &mode
, &aconnector
->base
, con_state
, old_stream
, requested_bpc
);
4649 stream
->timing
.flags
.DSC
= 0;
4651 if (aconnector
->dc_link
&& sink
->sink_signal
== SIGNAL_TYPE_DISPLAY_PORT
) {
4652 #if defined(CONFIG_DRM_AMD_DC_DCN)
4653 dc_dsc_parse_dsc_dpcd(aconnector
->dc_link
->ctx
->dc
,
4654 aconnector
->dc_link
->dpcd_caps
.dsc_caps
.dsc_basic_caps
.raw
,
4655 aconnector
->dc_link
->dpcd_caps
.dsc_caps
.dsc_branch_decoder_caps
.raw
,
4658 link_bandwidth_kbps
= dc_link_bandwidth_kbps(aconnector
->dc_link
,
4659 dc_link_get_link_cap(aconnector
->dc_link
));
4661 #if defined(CONFIG_DRM_AMD_DC_DCN)
4662 if (dsc_caps
.is_dsc_supported
) {
4663 if (dc_dsc_compute_config(aconnector
->dc_link
->ctx
->dc
->res_pool
->dscs
[0],
4665 aconnector
->dc_link
->ctx
->dc
->debug
.dsc_min_slice_height_override
,
4666 link_bandwidth_kbps
,
4668 &stream
->timing
.dsc_cfg
))
4669 stream
->timing
.flags
.DSC
= 1;
4670 /* Overwrite the stream flag if DSC is enabled through debugfs */
4671 if (aconnector
->dsc_settings
.dsc_clock_en
)
4672 stream
->timing
.flags
.DSC
= 1;
4674 if (stream
->timing
.flags
.DSC
&& aconnector
->dsc_settings
.dsc_slice_width
)
4675 stream
->timing
.dsc_cfg
.num_slices_h
= DIV_ROUND_UP(stream
->timing
.h_addressable
,
4676 aconnector
->dsc_settings
.dsc_slice_width
);
4678 if (stream
->timing
.flags
.DSC
&& aconnector
->dsc_settings
.dsc_slice_height
)
4679 stream
->timing
.dsc_cfg
.num_slices_v
= DIV_ROUND_UP(stream
->timing
.v_addressable
,
4680 aconnector
->dsc_settings
.dsc_slice_height
);
4685 update_stream_scaling_settings(&mode
, dm_state
, stream
);
4688 &stream
->audio_info
,
4692 update_stream_signal(stream
, sink
);
4694 if (stream
->signal
== SIGNAL_TYPE_HDMI_TYPE_A
)
4695 mod_build_hf_vsif_infopacket(stream
, &stream
->vsp_infopacket
, false, false);
4696 if (stream
->link
->psr_settings
.psr_feature_enabled
) {
4698 // should decide stream support vsc sdp colorimetry capability
4699 // before building vsc info packet
4701 stream
->use_vsc_sdp_for_colorimetry
= false;
4702 if (aconnector
->dc_sink
->sink_signal
== SIGNAL_TYPE_DISPLAY_PORT_MST
) {
4703 stream
->use_vsc_sdp_for_colorimetry
=
4704 aconnector
->dc_sink
->is_vsc_sdp_colorimetry_supported
;
4706 if (stream
->link
->dpcd_caps
.dprx_feature
.bits
.VSC_SDP_COLORIMETRY_SUPPORTED
)
4707 stream
->use_vsc_sdp_for_colorimetry
= true;
4709 mod_build_vsc_infopacket(stream
, &stream
->vsc_infopacket
);
4712 dc_sink_release(sink
);
/* Tear down the DRM core state for a CRTC and free its amdgpu wrapper. */
static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
{
	drm_crtc_cleanup(crtc);
	kfree(crtc);
}
4723 static void dm_crtc_destroy_state(struct drm_crtc
*crtc
,
4724 struct drm_crtc_state
*state
)
4726 struct dm_crtc_state
*cur
= to_dm_crtc_state(state
);
4728 /* TODO Destroy dc_stream objects are stream object is flattened */
4730 dc_stream_release(cur
->stream
);
4733 __drm_atomic_helper_crtc_destroy_state(state
);
4739 static void dm_crtc_reset_state(struct drm_crtc
*crtc
)
4741 struct dm_crtc_state
*state
;
4744 dm_crtc_destroy_state(crtc
, crtc
->state
);
4746 state
= kzalloc(sizeof(*state
), GFP_KERNEL
);
4747 if (WARN_ON(!state
))
4750 crtc
->state
= &state
->base
;
4751 crtc
->state
->crtc
= crtc
;
4755 static struct drm_crtc_state
*
4756 dm_crtc_duplicate_state(struct drm_crtc
*crtc
)
4758 struct dm_crtc_state
*state
, *cur
;
4760 cur
= to_dm_crtc_state(crtc
->state
);
4762 if (WARN_ON(!crtc
->state
))
4765 state
= kzalloc(sizeof(*state
), GFP_KERNEL
);
4769 __drm_atomic_helper_crtc_duplicate_state(crtc
, &state
->base
);
4772 state
->stream
= cur
->stream
;
4773 dc_stream_retain(state
->stream
);
4776 state
->active_planes
= cur
->active_planes
;
4777 state
->vrr_params
= cur
->vrr_params
;
4778 state
->vrr_infopacket
= cur
->vrr_infopacket
;
4779 state
->abm_level
= cur
->abm_level
;
4780 state
->vrr_supported
= cur
->vrr_supported
;
4781 state
->freesync_config
= cur
->freesync_config
;
4782 state
->crc_src
= cur
->crc_src
;
4783 state
->cm_has_degamma
= cur
->cm_has_degamma
;
4784 state
->cm_is_degamma_srgb
= cur
->cm_is_degamma_srgb
;
4786 /* TODO Duplicate dc_stream after objects are stream object is flattened */
4788 return &state
->base
;
4791 static inline int dm_set_vupdate_irq(struct drm_crtc
*crtc
, bool enable
)
4793 enum dc_irq_source irq_source
;
4794 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(crtc
);
4795 struct amdgpu_device
*adev
= crtc
->dev
->dev_private
;
4798 irq_source
= IRQ_TYPE_VUPDATE
+ acrtc
->otg_inst
;
4800 rc
= dc_interrupt_set(adev
->dm
.dc
, irq_source
, enable
) ? 0 : -EBUSY
;
4802 DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
4803 acrtc
->crtc_id
, enable
? "en" : "dis", rc
);
4807 static inline int dm_set_vblank(struct drm_crtc
*crtc
, bool enable
)
4809 enum dc_irq_source irq_source
;
4810 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(crtc
);
4811 struct amdgpu_device
*adev
= crtc
->dev
->dev_private
;
4812 struct dm_crtc_state
*acrtc_state
= to_dm_crtc_state(crtc
->state
);
4816 /* vblank irq on -> Only need vupdate irq in vrr mode */
4817 if (amdgpu_dm_vrr_active(acrtc_state
))
4818 rc
= dm_set_vupdate_irq(crtc
, true);
4820 /* vblank irq off -> vupdate irq off */
4821 rc
= dm_set_vupdate_irq(crtc
, false);
4827 irq_source
= IRQ_TYPE_VBLANK
+ acrtc
->otg_inst
;
4828 return dc_interrupt_set(adev
->dm
.dc
, irq_source
, enable
) ? 0 : -EBUSY
;
4831 static int dm_enable_vblank(struct drm_crtc
*crtc
)
4833 return dm_set_vblank(crtc
, true);
4836 static void dm_disable_vblank(struct drm_crtc
*crtc
)
4838 dm_set_vblank(crtc
, false);
4841 /* Implemented only the options currently availible for the driver */
4842 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs
= {
4843 .reset
= dm_crtc_reset_state
,
4844 .destroy
= amdgpu_dm_crtc_destroy
,
4845 .gamma_set
= drm_atomic_helper_legacy_gamma_set
,
4846 .set_config
= drm_atomic_helper_set_config
,
4847 .page_flip
= drm_atomic_helper_page_flip
,
4848 .atomic_duplicate_state
= dm_crtc_duplicate_state
,
4849 .atomic_destroy_state
= dm_crtc_destroy_state
,
4850 .set_crc_source
= amdgpu_dm_crtc_set_crc_source
,
4851 .verify_crc_source
= amdgpu_dm_crtc_verify_crc_source
,
4852 .get_crc_sources
= amdgpu_dm_crtc_get_crc_sources
,
4853 .get_vblank_counter
= amdgpu_get_vblank_counter_kms
,
4854 .enable_vblank
= dm_enable_vblank
,
4855 .disable_vblank
= dm_disable_vblank
,
4856 .get_vblank_timestamp
= drm_crtc_vblank_helper_get_vblank_timestamp
,
4859 static enum drm_connector_status
4860 amdgpu_dm_connector_detect(struct drm_connector
*connector
, bool force
)
4863 struct amdgpu_dm_connector
*aconnector
= to_amdgpu_dm_connector(connector
);
4867 * 1. This interface is NOT called in context of HPD irq.
4868 * 2. This interface *is called* in context of user-mode ioctl. Which
4869 * makes it a bad place for *any* MST-related activity.
4872 if (aconnector
->base
.force
== DRM_FORCE_UNSPECIFIED
&&
4873 !aconnector
->fake_enable
)
4874 connected
= (aconnector
->dc_sink
!= NULL
);
4876 connected
= (aconnector
->base
.force
== DRM_FORCE_ON
);
4878 return (connected
? connector_status_connected
:
4879 connector_status_disconnected
);
4882 int amdgpu_dm_connector_atomic_set_property(struct drm_connector
*connector
,
4883 struct drm_connector_state
*connector_state
,
4884 struct drm_property
*property
,
4887 struct drm_device
*dev
= connector
->dev
;
4888 struct amdgpu_device
*adev
= dev
->dev_private
;
4889 struct dm_connector_state
*dm_old_state
=
4890 to_dm_connector_state(connector
->state
);
4891 struct dm_connector_state
*dm_new_state
=
4892 to_dm_connector_state(connector_state
);
4896 if (property
== dev
->mode_config
.scaling_mode_property
) {
4897 enum amdgpu_rmx_type rmx_type
;
4900 case DRM_MODE_SCALE_CENTER
:
4901 rmx_type
= RMX_CENTER
;
4903 case DRM_MODE_SCALE_ASPECT
:
4904 rmx_type
= RMX_ASPECT
;
4906 case DRM_MODE_SCALE_FULLSCREEN
:
4907 rmx_type
= RMX_FULL
;
4909 case DRM_MODE_SCALE_NONE
:
4915 if (dm_old_state
->scaling
== rmx_type
)
4918 dm_new_state
->scaling
= rmx_type
;
4920 } else if (property
== adev
->mode_info
.underscan_hborder_property
) {
4921 dm_new_state
->underscan_hborder
= val
;
4923 } else if (property
== adev
->mode_info
.underscan_vborder_property
) {
4924 dm_new_state
->underscan_vborder
= val
;
4926 } else if (property
== adev
->mode_info
.underscan_property
) {
4927 dm_new_state
->underscan_enable
= val
;
4929 } else if (property
== adev
->mode_info
.abm_level_property
) {
4930 dm_new_state
->abm_level
= val
;
4937 int amdgpu_dm_connector_atomic_get_property(struct drm_connector
*connector
,
4938 const struct drm_connector_state
*state
,
4939 struct drm_property
*property
,
4942 struct drm_device
*dev
= connector
->dev
;
4943 struct amdgpu_device
*adev
= dev
->dev_private
;
4944 struct dm_connector_state
*dm_state
=
4945 to_dm_connector_state(state
);
4948 if (property
== dev
->mode_config
.scaling_mode_property
) {
4949 switch (dm_state
->scaling
) {
4951 *val
= DRM_MODE_SCALE_CENTER
;
4954 *val
= DRM_MODE_SCALE_ASPECT
;
4957 *val
= DRM_MODE_SCALE_FULLSCREEN
;
4961 *val
= DRM_MODE_SCALE_NONE
;
4965 } else if (property
== adev
->mode_info
.underscan_hborder_property
) {
4966 *val
= dm_state
->underscan_hborder
;
4968 } else if (property
== adev
->mode_info
.underscan_vborder_property
) {
4969 *val
= dm_state
->underscan_vborder
;
4971 } else if (property
== adev
->mode_info
.underscan_property
) {
4972 *val
= dm_state
->underscan_enable
;
4974 } else if (property
== adev
->mode_info
.abm_level_property
) {
4975 *val
= dm_state
->abm_level
;
4982 static void amdgpu_dm_connector_unregister(struct drm_connector
*connector
)
4984 struct amdgpu_dm_connector
*amdgpu_dm_connector
= to_amdgpu_dm_connector(connector
);
4986 drm_dp_aux_unregister(&amdgpu_dm_connector
->dm_dp_aux
.aux
);
4989 static void amdgpu_dm_connector_destroy(struct drm_connector
*connector
)
4991 struct amdgpu_dm_connector
*aconnector
= to_amdgpu_dm_connector(connector
);
4992 const struct dc_link
*link
= aconnector
->dc_link
;
4993 struct amdgpu_device
*adev
= connector
->dev
->dev_private
;
4994 struct amdgpu_display_manager
*dm
= &adev
->dm
;
4996 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
4997 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
4999 if ((link
->connector_signal
& (SIGNAL_TYPE_EDP
| SIGNAL_TYPE_LVDS
)) &&
5000 link
->type
!= dc_connection_none
&&
5001 dm
->backlight_dev
) {
5002 backlight_device_unregister(dm
->backlight_dev
);
5003 dm
->backlight_dev
= NULL
;
5007 if (aconnector
->dc_em_sink
)
5008 dc_sink_release(aconnector
->dc_em_sink
);
5009 aconnector
->dc_em_sink
= NULL
;
5010 if (aconnector
->dc_sink
)
5011 dc_sink_release(aconnector
->dc_sink
);
5012 aconnector
->dc_sink
= NULL
;
5014 drm_dp_cec_unregister_connector(&aconnector
->dm_dp_aux
.aux
);
5015 drm_connector_unregister(connector
);
5016 drm_connector_cleanup(connector
);
5017 if (aconnector
->i2c
) {
5018 i2c_del_adapter(&aconnector
->i2c
->base
);
5019 kfree(aconnector
->i2c
);
5021 kfree(aconnector
->dm_dp_aux
.aux
.name
);
5026 void amdgpu_dm_connector_funcs_reset(struct drm_connector
*connector
)
5028 struct dm_connector_state
*state
=
5029 to_dm_connector_state(connector
->state
);
5031 if (connector
->state
)
5032 __drm_atomic_helper_connector_destroy_state(connector
->state
);
5036 state
= kzalloc(sizeof(*state
), GFP_KERNEL
);
5039 state
->scaling
= RMX_OFF
;
5040 state
->underscan_enable
= false;
5041 state
->underscan_hborder
= 0;
5042 state
->underscan_vborder
= 0;
5043 state
->base
.max_requested_bpc
= 8;
5044 state
->vcpi_slots
= 0;
5046 if (connector
->connector_type
== DRM_MODE_CONNECTOR_eDP
)
5047 state
->abm_level
= amdgpu_dm_abm_level
;
5049 __drm_atomic_helper_connector_reset(connector
, &state
->base
);
5053 struct drm_connector_state
*
5054 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector
*connector
)
5056 struct dm_connector_state
*state
=
5057 to_dm_connector_state(connector
->state
);
5059 struct dm_connector_state
*new_state
=
5060 kmemdup(state
, sizeof(*state
), GFP_KERNEL
);
5065 __drm_atomic_helper_connector_duplicate_state(connector
, &new_state
->base
);
5067 new_state
->freesync_capable
= state
->freesync_capable
;
5068 new_state
->abm_level
= state
->abm_level
;
5069 new_state
->scaling
= state
->scaling
;
5070 new_state
->underscan_enable
= state
->underscan_enable
;
5071 new_state
->underscan_hborder
= state
->underscan_hborder
;
5072 new_state
->underscan_vborder
= state
->underscan_vborder
;
5073 new_state
->vcpi_slots
= state
->vcpi_slots
;
5074 new_state
->pbn
= state
->pbn
;
5075 return &new_state
->base
;
5079 amdgpu_dm_connector_late_register(struct drm_connector
*connector
)
5081 struct amdgpu_dm_connector
*amdgpu_dm_connector
=
5082 to_amdgpu_dm_connector(connector
);
5085 if ((connector
->connector_type
== DRM_MODE_CONNECTOR_DisplayPort
) ||
5086 (connector
->connector_type
== DRM_MODE_CONNECTOR_eDP
)) {
5087 amdgpu_dm_connector
->dm_dp_aux
.aux
.dev
= connector
->kdev
;
5088 r
= drm_dp_aux_register(&amdgpu_dm_connector
->dm_dp_aux
.aux
);
5093 #if defined(CONFIG_DEBUG_FS)
5094 connector_debugfs_init(amdgpu_dm_connector
);
5100 static const struct drm_connector_funcs amdgpu_dm_connector_funcs
= {
5101 .reset
= amdgpu_dm_connector_funcs_reset
,
5102 .detect
= amdgpu_dm_connector_detect
,
5103 .fill_modes
= drm_helper_probe_single_connector_modes
,
5104 .destroy
= amdgpu_dm_connector_destroy
,
5105 .atomic_duplicate_state
= amdgpu_dm_connector_atomic_duplicate_state
,
5106 .atomic_destroy_state
= drm_atomic_helper_connector_destroy_state
,
5107 .atomic_set_property
= amdgpu_dm_connector_atomic_set_property
,
5108 .atomic_get_property
= amdgpu_dm_connector_atomic_get_property
,
5109 .late_register
= amdgpu_dm_connector_late_register
,
5110 .early_unregister
= amdgpu_dm_connector_unregister
/* drm_connector_helper_funcs.get_modes trampoline. */
static int get_modes(struct drm_connector *connector)
{
	return amdgpu_dm_connector_get_modes(connector);
}
5118 static void create_eml_sink(struct amdgpu_dm_connector
*aconnector
)
5120 struct dc_sink_init_data init_params
= {
5121 .link
= aconnector
->dc_link
,
5122 .sink_signal
= SIGNAL_TYPE_VIRTUAL
5126 if (!aconnector
->base
.edid_blob_ptr
) {
5127 DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
5128 aconnector
->base
.name
);
5130 aconnector
->base
.force
= DRM_FORCE_OFF
;
5131 aconnector
->base
.override_edid
= false;
5135 edid
= (struct edid
*) aconnector
->base
.edid_blob_ptr
->data
;
5137 aconnector
->edid
= edid
;
5139 aconnector
->dc_em_sink
= dc_link_add_remote_sink(
5140 aconnector
->dc_link
,
5142 (edid
->extensions
+ 1) * EDID_LENGTH
,
5145 if (aconnector
->base
.force
== DRM_FORCE_ON
) {
5146 aconnector
->dc_sink
= aconnector
->dc_link
->local_sink
?
5147 aconnector
->dc_link
->local_sink
:
5148 aconnector
->dc_em_sink
;
5149 dc_sink_retain(aconnector
->dc_sink
);
5153 static void handle_edid_mgmt(struct amdgpu_dm_connector
*aconnector
)
5155 struct dc_link
*link
= (struct dc_link
*)aconnector
->dc_link
;
5158 * In case of headless boot with force on for DP managed connector
5159 * Those settings have to be != 0 to get initial modeset
5161 if (link
->connector_signal
== SIGNAL_TYPE_DISPLAY_PORT
) {
5162 link
->verified_link_cap
.lane_count
= LANE_COUNT_FOUR
;
5163 link
->verified_link_cap
.link_rate
= LINK_RATE_HIGH2
;
5167 aconnector
->base
.override_edid
= true;
5168 create_eml_sink(aconnector
);
5171 static struct dc_stream_state
*
5172 create_validate_stream_for_sink(struct amdgpu_dm_connector
*aconnector
,
5173 const struct drm_display_mode
*drm_mode
,
5174 const struct dm_connector_state
*dm_state
,
5175 const struct dc_stream_state
*old_stream
)
5177 struct drm_connector
*connector
= &aconnector
->base
;
5178 struct amdgpu_device
*adev
= connector
->dev
->dev_private
;
5179 struct dc_stream_state
*stream
;
5180 const struct drm_connector_state
*drm_state
= dm_state
? &dm_state
->base
: NULL
;
5181 int requested_bpc
= drm_state
? drm_state
->max_requested_bpc
: 8;
5182 enum dc_status dc_result
= DC_OK
;
5185 stream
= create_stream_for_sink(aconnector
, drm_mode
,
5186 dm_state
, old_stream
,
5188 if (stream
== NULL
) {
5189 DRM_ERROR("Failed to create stream for sink!\n");
5193 dc_result
= dc_validate_stream(adev
->dm
.dc
, stream
);
5195 if (dc_result
!= DC_OK
) {
5196 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
5201 dc_status_to_str(dc_result
));
5203 dc_stream_release(stream
);
5205 requested_bpc
-= 2; /* lower bpc to retry validation */
5208 } while (stream
== NULL
&& requested_bpc
>= 6);
5213 enum drm_mode_status
amdgpu_dm_connector_mode_valid(struct drm_connector
*connector
,
5214 struct drm_display_mode
*mode
)
5216 int result
= MODE_ERROR
;
5217 struct dc_sink
*dc_sink
;
5218 /* TODO: Unhardcode stream count */
5219 struct dc_stream_state
*stream
;
5220 struct amdgpu_dm_connector
*aconnector
= to_amdgpu_dm_connector(connector
);
5222 if ((mode
->flags
& DRM_MODE_FLAG_INTERLACE
) ||
5223 (mode
->flags
& DRM_MODE_FLAG_DBLSCAN
))
5227 * Only run this the first time mode_valid is called to initilialize
5230 if (aconnector
->base
.force
!= DRM_FORCE_UNSPECIFIED
&&
5231 !aconnector
->dc_em_sink
)
5232 handle_edid_mgmt(aconnector
);
5234 dc_sink
= to_amdgpu_dm_connector(connector
)->dc_sink
;
5236 if (dc_sink
== NULL
) {
5237 DRM_ERROR("dc_sink is NULL!\n");
5241 stream
= create_validate_stream_for_sink(aconnector
, mode
, NULL
, NULL
);
5243 dc_stream_release(stream
);
5248 /* TODO: error handling*/
5252 static int fill_hdr_info_packet(const struct drm_connector_state
*state
,
5253 struct dc_info_packet
*out
)
5255 struct hdmi_drm_infoframe frame
;
5256 unsigned char buf
[30]; /* 26 + 4 */
5260 memset(out
, 0, sizeof(*out
));
5262 if (!state
->hdr_output_metadata
)
5265 ret
= drm_hdmi_infoframe_set_hdr_metadata(&frame
, state
);
5269 len
= hdmi_drm_infoframe_pack_only(&frame
, buf
, sizeof(buf
));
5273 /* Static metadata is a fixed 26 bytes + 4 byte header. */
5277 /* Prepare the infopacket for DC. */
5278 switch (state
->connector
->connector_type
) {
5279 case DRM_MODE_CONNECTOR_HDMIA
:
5280 out
->hb0
= 0x87; /* type */
5281 out
->hb1
= 0x01; /* version */
5282 out
->hb2
= 0x1A; /* length */
5283 out
->sb
[0] = buf
[3]; /* checksum */
5287 case DRM_MODE_CONNECTOR_DisplayPort
:
5288 case DRM_MODE_CONNECTOR_eDP
:
5289 out
->hb0
= 0x00; /* sdp id, zero */
5290 out
->hb1
= 0x87; /* type */
5291 out
->hb2
= 0x1D; /* payload len - 1 */
5292 out
->hb3
= (0x13 << 2); /* sdp version */
5293 out
->sb
[0] = 0x01; /* version */
5294 out
->sb
[1] = 0x1A; /* length */
5302 memcpy(&out
->sb
[i
], &buf
[4], 26);
5305 print_hex_dump(KERN_DEBUG
, "HDR SB:", DUMP_PREFIX_NONE
, 16, 1, out
->sb
,
5306 sizeof(out
->sb
), false);
5312 is_hdr_metadata_different(const struct drm_connector_state
*old_state
,
5313 const struct drm_connector_state
*new_state
)
5315 struct drm_property_blob
*old_blob
= old_state
->hdr_output_metadata
;
5316 struct drm_property_blob
*new_blob
= new_state
->hdr_output_metadata
;
5318 if (old_blob
!= new_blob
) {
5319 if (old_blob
&& new_blob
&&
5320 old_blob
->length
== new_blob
->length
)
5321 return memcmp(old_blob
->data
, new_blob
->data
,
5331 amdgpu_dm_connector_atomic_check(struct drm_connector
*conn
,
5332 struct drm_atomic_state
*state
)
5334 struct drm_connector_state
*new_con_state
=
5335 drm_atomic_get_new_connector_state(state
, conn
);
5336 struct drm_connector_state
*old_con_state
=
5337 drm_atomic_get_old_connector_state(state
, conn
);
5338 struct drm_crtc
*crtc
= new_con_state
->crtc
;
5339 struct drm_crtc_state
*new_crtc_state
;
5345 if (is_hdr_metadata_different(old_con_state
, new_con_state
)) {
5346 struct dc_info_packet hdr_infopacket
;
5348 ret
= fill_hdr_info_packet(new_con_state
, &hdr_infopacket
);
5352 new_crtc_state
= drm_atomic_get_crtc_state(state
, crtc
);
5353 if (IS_ERR(new_crtc_state
))
5354 return PTR_ERR(new_crtc_state
);
5357 * DC considers the stream backends changed if the
5358 * static metadata changes. Forcing the modeset also
5359 * gives a simple way for userspace to switch from
5360 * 8bpc to 10bpc when setting the metadata to enter
5363 * Changing the static metadata after it's been
5364 * set is permissible, however. So only force a
5365 * modeset if we're entering or exiting HDR.
5367 new_crtc_state
->mode_changed
=
5368 !old_con_state
->hdr_output_metadata
||
5369 !new_con_state
->hdr_output_metadata
;
5375 static const struct drm_connector_helper_funcs
5376 amdgpu_dm_connector_helper_funcs
= {
5378 * If hotplugging a second bigger display in FB Con mode, bigger resolution
5379 * modes will be filtered by drm_mode_validate_size(), and those modes
5380 * are missing after user start lightdm. So we need to renew modes list.
5381 * in get_modes call back, not just return the modes count
5383 .get_modes
= get_modes
,
5384 .mode_valid
= amdgpu_dm_connector_mode_valid
,
5385 .atomic_check
= amdgpu_dm_connector_atomic_check
,
/* Intentionally empty: CRTC disable is handled through the atomic commit path. */
static void dm_crtc_helper_disable(struct drm_crtc *crtc)
{
}
5392 static bool does_crtc_have_active_cursor(struct drm_crtc_state
*new_crtc_state
)
5394 struct drm_device
*dev
= new_crtc_state
->crtc
->dev
;
5395 struct drm_plane
*plane
;
5397 drm_for_each_plane_mask(plane
, dev
, new_crtc_state
->plane_mask
) {
5398 if (plane
->type
== DRM_PLANE_TYPE_CURSOR
)
5405 static int count_crtc_active_planes(struct drm_crtc_state
*new_crtc_state
)
5407 struct drm_atomic_state
*state
= new_crtc_state
->state
;
5408 struct drm_plane
*plane
;
5411 drm_for_each_plane_mask(plane
, state
->dev
, new_crtc_state
->plane_mask
) {
5412 struct drm_plane_state
*new_plane_state
;
5414 /* Cursor planes are "fake". */
5415 if (plane
->type
== DRM_PLANE_TYPE_CURSOR
)
5418 new_plane_state
= drm_atomic_get_new_plane_state(state
, plane
);
5420 if (!new_plane_state
) {
5422 * The plane is enable on the CRTC and hasn't changed
5423 * state. This means that it previously passed
5424 * validation and is therefore enabled.
5430 /* We need a framebuffer to be considered enabled. */
5431 num_active
+= (new_plane_state
->fb
!= NULL
);
5437 static void dm_update_crtc_active_planes(struct drm_crtc
*crtc
,
5438 struct drm_crtc_state
*new_crtc_state
)
5440 struct dm_crtc_state
*dm_new_crtc_state
=
5441 to_dm_crtc_state(new_crtc_state
);
5443 dm_new_crtc_state
->active_planes
= 0;
5445 if (!dm_new_crtc_state
->stream
)
5448 dm_new_crtc_state
->active_planes
=
5449 count_crtc_active_planes(new_crtc_state
);
5452 static int dm_crtc_helper_atomic_check(struct drm_crtc
*crtc
,
5453 struct drm_crtc_state
*state
)
5455 struct amdgpu_device
*adev
= crtc
->dev
->dev_private
;
5456 struct dc
*dc
= adev
->dm
.dc
;
5457 struct dm_crtc_state
*dm_crtc_state
= to_dm_crtc_state(state
);
5460 dm_update_crtc_active_planes(crtc
, state
);
5462 if (unlikely(!dm_crtc_state
->stream
&&
5463 modeset_required(state
, NULL
, dm_crtc_state
->stream
))) {
5468 /* In some use cases, like reset, no stream is attached */
5469 if (!dm_crtc_state
->stream
)
5473 * We want at least one hardware plane enabled to use
5474 * the stream with a cursor enabled.
5476 if (state
->enable
&& state
->active
&&
5477 does_crtc_have_active_cursor(state
) &&
5478 dm_crtc_state
->active_planes
== 0)
5481 if (dc_validate_stream(dc
, dm_crtc_state
->stream
) == DC_OK
)
5487 static bool dm_crtc_helper_mode_fixup(struct drm_crtc
*crtc
,
5488 const struct drm_display_mode
*mode
,
5489 struct drm_display_mode
*adjusted_mode
)
5494 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs
= {
5495 .disable
= dm_crtc_helper_disable
,
5496 .atomic_check
= dm_crtc_helper_atomic_check
,
5497 .mode_fixup
= dm_crtc_helper_mode_fixup
,
5498 .get_scanout_position
= amdgpu_crtc_get_scanout_position
,
/* Intentionally empty: encoder disable is handled through the commit path. */
static void dm_encoder_helper_disable(struct drm_encoder *encoder)
{
}
5506 static int convert_dc_color_depth_into_bpc (enum dc_color_depth display_color_depth
)
5508 switch (display_color_depth
) {
5509 case COLOR_DEPTH_666
:
5511 case COLOR_DEPTH_888
:
5513 case COLOR_DEPTH_101010
:
5515 case COLOR_DEPTH_121212
:
5517 case COLOR_DEPTH_141414
:
5519 case COLOR_DEPTH_161616
:
5527 static int dm_encoder_helper_atomic_check(struct drm_encoder
*encoder
,
5528 struct drm_crtc_state
*crtc_state
,
5529 struct drm_connector_state
*conn_state
)
5531 struct drm_atomic_state
*state
= crtc_state
->state
;
5532 struct drm_connector
*connector
= conn_state
->connector
;
5533 struct amdgpu_dm_connector
*aconnector
= to_amdgpu_dm_connector(connector
);
5534 struct dm_connector_state
*dm_new_connector_state
= to_dm_connector_state(conn_state
);
5535 const struct drm_display_mode
*adjusted_mode
= &crtc_state
->adjusted_mode
;
5536 struct drm_dp_mst_topology_mgr
*mst_mgr
;
5537 struct drm_dp_mst_port
*mst_port
;
5538 enum dc_color_depth color_depth
;
5540 bool is_y420
= false;
5542 if (!aconnector
->port
|| !aconnector
->dc_sink
)
5545 mst_port
= aconnector
->port
;
5546 mst_mgr
= &aconnector
->mst_port
->mst_mgr
;
5548 if (!crtc_state
->connectors_changed
&& !crtc_state
->mode_changed
)
5551 if (!state
->duplicated
) {
5552 int max_bpc
= conn_state
->max_requested_bpc
;
5553 is_y420
= drm_mode_is_420_also(&connector
->display_info
, adjusted_mode
) &&
5554 aconnector
->force_yuv420_output
;
5555 color_depth
= convert_color_depth_from_display_info(connector
,
5558 bpp
= convert_dc_color_depth_into_bpc(color_depth
) * 3;
5559 clock
= adjusted_mode
->clock
;
5560 dm_new_connector_state
->pbn
= drm_dp_calc_pbn_mode(clock
, bpp
, false);
5562 dm_new_connector_state
->vcpi_slots
= drm_dp_atomic_find_vcpi_slots(state
,
5565 dm_new_connector_state
->pbn
,
5566 dm_mst_get_pbn_divider(aconnector
->dc_link
));
5567 if (dm_new_connector_state
->vcpi_slots
< 0) {
5568 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state
->vcpi_slots
);
5569 return dm_new_connector_state
->vcpi_slots
;
5574 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs
= {
5575 .disable
= dm_encoder_helper_disable
,
5576 .atomic_check
= dm_encoder_helper_atomic_check
#if defined(CONFIG_DRM_AMD_DC_DCN)
/*
 * For every MST connector in the atomic state, match it to its DC stream
 * and (re)program DSC on the MST port: disable DSC when the stream doesn't
 * use it, otherwise recompute PBN from the DSC bpp and enable DSC with the
 * reserved VCPI. Returns 0 or a negative error from the MST helpers.
 */
static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
					    struct dc_state *dc_state)
{
	struct dc_stream_state *stream = NULL;
	struct drm_connector *connector;
	struct drm_connector_state *new_con_state, *old_con_state;
	struct amdgpu_dm_connector *aconnector;
	struct dm_connector_state *dm_conn_state;
	int i, j, clock, bpp;
	int vcpi, pbn_div, pbn = 0;

	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {

		aconnector = to_amdgpu_dm_connector(connector);

		if (!aconnector->port)
			continue;

		if (!new_con_state || !new_con_state->crtc)
			continue;

		dm_conn_state = to_dm_connector_state(new_con_state);

		/* Find the DC stream backing this connector. */
		for (j = 0; j < dc_state->stream_count; j++) {
			stream = dc_state->streams[j];
			if (!stream)
				continue;

			if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
				break;

			stream = NULL;
		}

		if (!stream)
			continue;

		if (stream->timing.flags.DSC != 1) {
			drm_dp_mst_atomic_enable_dsc(state,
						     aconnector->port,
						     dm_conn_state->pbn,
						     0,
						     false);
			continue;
		}

		pbn_div = dm_mst_get_pbn_divider(stream->link);
		bpp = stream->timing.dsc_cfg.bits_per_pixel;
		clock = stream->timing.pix_clk_100hz / 10;
		pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
		vcpi = drm_dp_mst_atomic_enable_dsc(state,
						    aconnector->port,
						    pbn, pbn_div,
						    true);
		if (vcpi < 0)
			return vcpi;

		dm_conn_state->pbn = pbn;
		dm_conn_state->vcpi_slots = vcpi;
	}
	return 0;
}
#endif
5644 static void dm_drm_plane_reset(struct drm_plane
*plane
)
5646 struct dm_plane_state
*amdgpu_state
= NULL
;
5649 plane
->funcs
->atomic_destroy_state(plane
, plane
->state
);
5651 amdgpu_state
= kzalloc(sizeof(*amdgpu_state
), GFP_KERNEL
);
5652 WARN_ON(amdgpu_state
== NULL
);
5655 __drm_atomic_helper_plane_reset(plane
, &amdgpu_state
->base
);
5658 static struct drm_plane_state
*
5659 dm_drm_plane_duplicate_state(struct drm_plane
*plane
)
5661 struct dm_plane_state
*dm_plane_state
, *old_dm_plane_state
;
5663 old_dm_plane_state
= to_dm_plane_state(plane
->state
);
5664 dm_plane_state
= kzalloc(sizeof(*dm_plane_state
), GFP_KERNEL
);
5665 if (!dm_plane_state
)
5668 __drm_atomic_helper_plane_duplicate_state(plane
, &dm_plane_state
->base
);
5670 if (old_dm_plane_state
->dc_state
) {
5671 dm_plane_state
->dc_state
= old_dm_plane_state
->dc_state
;
5672 dc_plane_state_retain(dm_plane_state
->dc_state
);
5675 return &dm_plane_state
->base
;
5678 static void dm_drm_plane_destroy_state(struct drm_plane
*plane
,
5679 struct drm_plane_state
*state
)
5681 struct dm_plane_state
*dm_plane_state
= to_dm_plane_state(state
);
5683 if (dm_plane_state
->dc_state
)
5684 dc_plane_state_release(dm_plane_state
->dc_state
);
5686 drm_atomic_helper_plane_destroy_state(plane
, state
);
5689 static const struct drm_plane_funcs dm_plane_funcs
= {
5690 .update_plane
= drm_atomic_helper_update_plane
,
5691 .disable_plane
= drm_atomic_helper_disable_plane
,
5692 .destroy
= drm_primary_helper_destroy
,
5693 .reset
= dm_drm_plane_reset
,
5694 .atomic_duplicate_state
= dm_drm_plane_duplicate_state
,
5695 .atomic_destroy_state
= dm_drm_plane_destroy_state
,
5698 static int dm_plane_helper_prepare_fb(struct drm_plane
*plane
,
5699 struct drm_plane_state
*new_state
)
5701 struct amdgpu_framebuffer
*afb
;
5702 struct drm_gem_object
*obj
;
5703 struct amdgpu_device
*adev
;
5704 struct amdgpu_bo
*rbo
;
5705 struct dm_plane_state
*dm_plane_state_new
, *dm_plane_state_old
;
5706 struct list_head list
;
5707 struct ttm_validate_buffer tv
;
5708 struct ww_acquire_ctx ticket
;
5709 uint64_t tiling_flags
;
5712 bool tmz_surface
= false;
5713 bool force_disable_dcc
= false;
5715 dm_plane_state_old
= to_dm_plane_state(plane
->state
);
5716 dm_plane_state_new
= to_dm_plane_state(new_state
);
5718 if (!new_state
->fb
) {
5719 DRM_DEBUG_DRIVER("No FB bound\n");
5723 afb
= to_amdgpu_framebuffer(new_state
->fb
);
5724 obj
= new_state
->fb
->obj
[0];
5725 rbo
= gem_to_amdgpu_bo(obj
);
5726 adev
= amdgpu_ttm_adev(rbo
->tbo
.bdev
);
5727 INIT_LIST_HEAD(&list
);
5731 list_add(&tv
.head
, &list
);
5733 r
= ttm_eu_reserve_buffers(&ticket
, &list
, false, NULL
);
5735 dev_err(adev
->dev
, "fail to reserve bo (%d)\n", r
);
5739 if (plane
->type
!= DRM_PLANE_TYPE_CURSOR
)
5740 domain
= amdgpu_display_supported_domains(adev
, rbo
->flags
);
5742 domain
= AMDGPU_GEM_DOMAIN_VRAM
;
5744 r
= amdgpu_bo_pin(rbo
, domain
);
5745 if (unlikely(r
!= 0)) {
5746 if (r
!= -ERESTARTSYS
)
5747 DRM_ERROR("Failed to pin framebuffer with error %d\n", r
);
5748 ttm_eu_backoff_reservation(&ticket
, &list
);
5752 r
= amdgpu_ttm_alloc_gart(&rbo
->tbo
);
5753 if (unlikely(r
!= 0)) {
5754 amdgpu_bo_unpin(rbo
);
5755 ttm_eu_backoff_reservation(&ticket
, &list
);
5756 DRM_ERROR("%p bind failed\n", rbo
);
5760 amdgpu_bo_get_tiling_flags(rbo
, &tiling_flags
);
5762 tmz_surface
= amdgpu_bo_encrypted(rbo
);
5764 ttm_eu_backoff_reservation(&ticket
, &list
);
5766 afb
->address
= amdgpu_bo_gpu_offset(rbo
);
5770 if (dm_plane_state_new
->dc_state
&&
5771 dm_plane_state_old
->dc_state
!= dm_plane_state_new
->dc_state
) {
5772 struct dc_plane_state
*plane_state
= dm_plane_state_new
->dc_state
;
5774 force_disable_dcc
= adev
->asic_type
== CHIP_RAVEN
&& adev
->in_suspend
;
5775 fill_plane_buffer_attributes(
5776 adev
, afb
, plane_state
->format
, plane_state
->rotation
,
5777 tiling_flags
, &plane_state
->tiling_info
,
5778 &plane_state
->plane_size
, &plane_state
->dcc
,
5779 &plane_state
->address
, tmz_surface
,
5786 static void dm_plane_helper_cleanup_fb(struct drm_plane
*plane
,
5787 struct drm_plane_state
*old_state
)
5789 struct amdgpu_bo
*rbo
;
5795 rbo
= gem_to_amdgpu_bo(old_state
->fb
->obj
[0]);
5796 r
= amdgpu_bo_reserve(rbo
, false);
5798 DRM_ERROR("failed to reserve rbo before unpin\n");
5802 amdgpu_bo_unpin(rbo
);
5803 amdgpu_bo_unreserve(rbo
);
5804 amdgpu_bo_unref(&rbo
);
5807 static int dm_plane_helper_check_state(struct drm_plane_state
*state
,
5808 struct drm_crtc_state
*new_crtc_state
)
5810 int max_downscale
= 0;
5811 int max_upscale
= INT_MAX
;
5813 /* TODO: These should be checked against DC plane caps */
5814 return drm_atomic_helper_check_plane_state(
5815 state
, new_crtc_state
, max_downscale
, max_upscale
, true, true);
5818 static int dm_plane_atomic_check(struct drm_plane
*plane
,
5819 struct drm_plane_state
*state
)
5821 struct amdgpu_device
*adev
= plane
->dev
->dev_private
;
5822 struct dc
*dc
= adev
->dm
.dc
;
5823 struct dm_plane_state
*dm_plane_state
;
5824 struct dc_scaling_info scaling_info
;
5825 struct drm_crtc_state
*new_crtc_state
;
5828 dm_plane_state
= to_dm_plane_state(state
);
5830 if (!dm_plane_state
->dc_state
)
5834 drm_atomic_get_new_crtc_state(state
->state
, state
->crtc
);
5835 if (!new_crtc_state
)
5838 ret
= dm_plane_helper_check_state(state
, new_crtc_state
);
5842 ret
= fill_dc_scaling_info(state
, &scaling_info
);
5846 if (dc_validate_plane(dc
, dm_plane_state
->dc_state
) == DC_OK
)
5852 static int dm_plane_atomic_async_check(struct drm_plane
*plane
,
5853 struct drm_plane_state
*new_plane_state
)
5855 /* Only support async updates on cursor planes. */
5856 if (plane
->type
!= DRM_PLANE_TYPE_CURSOR
)
5862 static void dm_plane_atomic_async_update(struct drm_plane
*plane
,
5863 struct drm_plane_state
*new_state
)
5865 struct drm_plane_state
*old_state
=
5866 drm_atomic_get_old_plane_state(new_state
->state
, plane
);
5868 swap(plane
->state
->fb
, new_state
->fb
);
5870 plane
->state
->src_x
= new_state
->src_x
;
5871 plane
->state
->src_y
= new_state
->src_y
;
5872 plane
->state
->src_w
= new_state
->src_w
;
5873 plane
->state
->src_h
= new_state
->src_h
;
5874 plane
->state
->crtc_x
= new_state
->crtc_x
;
5875 plane
->state
->crtc_y
= new_state
->crtc_y
;
5876 plane
->state
->crtc_w
= new_state
->crtc_w
;
5877 plane
->state
->crtc_h
= new_state
->crtc_h
;
5879 handle_cursor_update(plane
, old_state
);
5882 static const struct drm_plane_helper_funcs dm_plane_helper_funcs
= {
5883 .prepare_fb
= dm_plane_helper_prepare_fb
,
5884 .cleanup_fb
= dm_plane_helper_cleanup_fb
,
5885 .atomic_check
= dm_plane_atomic_check
,
5886 .atomic_async_check
= dm_plane_atomic_async_check
,
5887 .atomic_async_update
= dm_plane_atomic_async_update
5891 * TODO: these are currently initialized to rgb formats only.
5892 * For future use cases we should either initialize them dynamically based on
5893 * plane capabilities, or initialize this array to all formats, so internal drm
5894 * check will succeed, and let DC implement proper check
5896 static const uint32_t rgb_formats
[] = {
5897 DRM_FORMAT_XRGB8888
,
5898 DRM_FORMAT_ARGB8888
,
5899 DRM_FORMAT_RGBA8888
,
5900 DRM_FORMAT_XRGB2101010
,
5901 DRM_FORMAT_XBGR2101010
,
5902 DRM_FORMAT_ARGB2101010
,
5903 DRM_FORMAT_ABGR2101010
,
5904 DRM_FORMAT_XBGR8888
,
5905 DRM_FORMAT_ABGR8888
,
5909 static const uint32_t overlay_formats
[] = {
5910 DRM_FORMAT_XRGB8888
,
5911 DRM_FORMAT_ARGB8888
,
5912 DRM_FORMAT_RGBA8888
,
5913 DRM_FORMAT_XBGR8888
,
5914 DRM_FORMAT_ABGR8888
,
5918 static const u32 cursor_formats
[] = {
5922 static int get_plane_formats(const struct drm_plane
*plane
,
5923 const struct dc_plane_cap
*plane_cap
,
5924 uint32_t *formats
, int max_formats
)
5926 int i
, num_formats
= 0;
5929 * TODO: Query support for each group of formats directly from
5930 * DC plane caps. This will require adding more formats to the
5934 switch (plane
->type
) {
5935 case DRM_PLANE_TYPE_PRIMARY
:
5936 for (i
= 0; i
< ARRAY_SIZE(rgb_formats
); ++i
) {
5937 if (num_formats
>= max_formats
)
5940 formats
[num_formats
++] = rgb_formats
[i
];
5943 if (plane_cap
&& plane_cap
->pixel_format_support
.nv12
)
5944 formats
[num_formats
++] = DRM_FORMAT_NV12
;
5945 if (plane_cap
&& plane_cap
->pixel_format_support
.p010
)
5946 formats
[num_formats
++] = DRM_FORMAT_P010
;
5947 if (plane_cap
&& plane_cap
->pixel_format_support
.fp16
) {
5948 formats
[num_formats
++] = DRM_FORMAT_XRGB16161616F
;
5949 formats
[num_formats
++] = DRM_FORMAT_ARGB16161616F
;
5950 formats
[num_formats
++] = DRM_FORMAT_XBGR16161616F
;
5951 formats
[num_formats
++] = DRM_FORMAT_ABGR16161616F
;
5955 case DRM_PLANE_TYPE_OVERLAY
:
5956 for (i
= 0; i
< ARRAY_SIZE(overlay_formats
); ++i
) {
5957 if (num_formats
>= max_formats
)
5960 formats
[num_formats
++] = overlay_formats
[i
];
5964 case DRM_PLANE_TYPE_CURSOR
:
5965 for (i
= 0; i
< ARRAY_SIZE(cursor_formats
); ++i
) {
5966 if (num_formats
>= max_formats
)
5969 formats
[num_formats
++] = cursor_formats
[i
];
5977 static int amdgpu_dm_plane_init(struct amdgpu_display_manager
*dm
,
5978 struct drm_plane
*plane
,
5979 unsigned long possible_crtcs
,
5980 const struct dc_plane_cap
*plane_cap
)
5982 uint32_t formats
[32];
5985 unsigned int supported_rotations
;
5987 num_formats
= get_plane_formats(plane
, plane_cap
, formats
,
5988 ARRAY_SIZE(formats
));
5990 res
= drm_universal_plane_init(dm
->adev
->ddev
, plane
, possible_crtcs
,
5991 &dm_plane_funcs
, formats
, num_formats
,
5992 NULL
, plane
->type
, NULL
);
5996 if (plane
->type
== DRM_PLANE_TYPE_OVERLAY
&&
5997 plane_cap
&& plane_cap
->per_pixel_alpha
) {
5998 unsigned int blend_caps
= BIT(DRM_MODE_BLEND_PIXEL_NONE
) |
5999 BIT(DRM_MODE_BLEND_PREMULTI
);
6001 drm_plane_create_alpha_property(plane
);
6002 drm_plane_create_blend_mode_property(plane
, blend_caps
);
6005 if (plane
->type
== DRM_PLANE_TYPE_PRIMARY
&&
6007 (plane_cap
->pixel_format_support
.nv12
||
6008 plane_cap
->pixel_format_support
.p010
)) {
6009 /* This only affects YUV formats. */
6010 drm_plane_create_color_properties(
6012 BIT(DRM_COLOR_YCBCR_BT601
) |
6013 BIT(DRM_COLOR_YCBCR_BT709
) |
6014 BIT(DRM_COLOR_YCBCR_BT2020
),
6015 BIT(DRM_COLOR_YCBCR_LIMITED_RANGE
) |
6016 BIT(DRM_COLOR_YCBCR_FULL_RANGE
),
6017 DRM_COLOR_YCBCR_BT709
, DRM_COLOR_YCBCR_LIMITED_RANGE
);
6020 supported_rotations
=
6021 DRM_MODE_ROTATE_0
| DRM_MODE_ROTATE_90
|
6022 DRM_MODE_ROTATE_180
| DRM_MODE_ROTATE_270
;
6024 if (dm
->adev
->asic_type
>= CHIP_BONAIRE
)
6025 drm_plane_create_rotation_property(plane
, DRM_MODE_ROTATE_0
,
6026 supported_rotations
);
6028 drm_plane_helper_add(plane
, &dm_plane_helper_funcs
);
6030 /* Create (reset) the plane state */
6031 if (plane
->funcs
->reset
)
6032 plane
->funcs
->reset(plane
);
6037 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager
*dm
,
6038 struct drm_plane
*plane
,
6039 uint32_t crtc_index
)
6041 struct amdgpu_crtc
*acrtc
= NULL
;
6042 struct drm_plane
*cursor_plane
;
6046 cursor_plane
= kzalloc(sizeof(*cursor_plane
), GFP_KERNEL
);
6050 cursor_plane
->type
= DRM_PLANE_TYPE_CURSOR
;
6051 res
= amdgpu_dm_plane_init(dm
, cursor_plane
, 0, NULL
);
6053 acrtc
= kzalloc(sizeof(struct amdgpu_crtc
), GFP_KERNEL
);
6057 res
= drm_crtc_init_with_planes(
6062 &amdgpu_dm_crtc_funcs
, NULL
);
6067 drm_crtc_helper_add(&acrtc
->base
, &amdgpu_dm_crtc_helper_funcs
);
6069 /* Create (reset) the plane state */
6070 if (acrtc
->base
.funcs
->reset
)
6071 acrtc
->base
.funcs
->reset(&acrtc
->base
);
6073 acrtc
->max_cursor_width
= dm
->adev
->dm
.dc
->caps
.max_cursor_size
;
6074 acrtc
->max_cursor_height
= dm
->adev
->dm
.dc
->caps
.max_cursor_size
;
6076 acrtc
->crtc_id
= crtc_index
;
6077 acrtc
->base
.enabled
= false;
6078 acrtc
->otg_inst
= -1;
6080 dm
->adev
->mode_info
.crtcs
[crtc_index
] = acrtc
;
6081 drm_crtc_enable_color_mgmt(&acrtc
->base
, MAX_COLOR_LUT_ENTRIES
,
6082 true, MAX_COLOR_LUT_ENTRIES
);
6083 drm_mode_crtc_set_gamma_size(&acrtc
->base
, MAX_COLOR_LEGACY_LUT_ENTRIES
);
6089 kfree(cursor_plane
);
6094 static int to_drm_connector_type(enum signal_type st
)
6097 case SIGNAL_TYPE_HDMI_TYPE_A
:
6098 return DRM_MODE_CONNECTOR_HDMIA
;
6099 case SIGNAL_TYPE_EDP
:
6100 return DRM_MODE_CONNECTOR_eDP
;
6101 case SIGNAL_TYPE_LVDS
:
6102 return DRM_MODE_CONNECTOR_LVDS
;
6103 case SIGNAL_TYPE_RGB
:
6104 return DRM_MODE_CONNECTOR_VGA
;
6105 case SIGNAL_TYPE_DISPLAY_PORT
:
6106 case SIGNAL_TYPE_DISPLAY_PORT_MST
:
6107 return DRM_MODE_CONNECTOR_DisplayPort
;
6108 case SIGNAL_TYPE_DVI_DUAL_LINK
:
6109 case SIGNAL_TYPE_DVI_SINGLE_LINK
:
6110 return DRM_MODE_CONNECTOR_DVID
;
6111 case SIGNAL_TYPE_VIRTUAL
:
6112 return DRM_MODE_CONNECTOR_VIRTUAL
;
6115 return DRM_MODE_CONNECTOR_Unknown
;
6119 static struct drm_encoder
*amdgpu_dm_connector_to_encoder(struct drm_connector
*connector
)
6121 struct drm_encoder
*encoder
;
6123 /* There is only one encoder per connector */
6124 drm_connector_for_each_possible_encoder(connector
, encoder
)
6130 static void amdgpu_dm_get_native_mode(struct drm_connector
*connector
)
6132 struct drm_encoder
*encoder
;
6133 struct amdgpu_encoder
*amdgpu_encoder
;
6135 encoder
= amdgpu_dm_connector_to_encoder(connector
);
6137 if (encoder
== NULL
)
6140 amdgpu_encoder
= to_amdgpu_encoder(encoder
);
6142 amdgpu_encoder
->native_mode
.clock
= 0;
6144 if (!list_empty(&connector
->probed_modes
)) {
6145 struct drm_display_mode
*preferred_mode
= NULL
;
6147 list_for_each_entry(preferred_mode
,
6148 &connector
->probed_modes
,
6150 if (preferred_mode
->type
& DRM_MODE_TYPE_PREFERRED
)
6151 amdgpu_encoder
->native_mode
= *preferred_mode
;
6159 static struct drm_display_mode
*
6160 amdgpu_dm_create_common_mode(struct drm_encoder
*encoder
,
6162 int hdisplay
, int vdisplay
)
6164 struct drm_device
*dev
= encoder
->dev
;
6165 struct amdgpu_encoder
*amdgpu_encoder
= to_amdgpu_encoder(encoder
);
6166 struct drm_display_mode
*mode
= NULL
;
6167 struct drm_display_mode
*native_mode
= &amdgpu_encoder
->native_mode
;
6169 mode
= drm_mode_duplicate(dev
, native_mode
);
6174 mode
->hdisplay
= hdisplay
;
6175 mode
->vdisplay
= vdisplay
;
6176 mode
->type
&= ~DRM_MODE_TYPE_PREFERRED
;
6177 strscpy(mode
->name
, name
, DRM_DISPLAY_MODE_LEN
);
6183 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder
*encoder
,
6184 struct drm_connector
*connector
)
6186 struct amdgpu_encoder
*amdgpu_encoder
= to_amdgpu_encoder(encoder
);
6187 struct drm_display_mode
*mode
= NULL
;
6188 struct drm_display_mode
*native_mode
= &amdgpu_encoder
->native_mode
;
6189 struct amdgpu_dm_connector
*amdgpu_dm_connector
=
6190 to_amdgpu_dm_connector(connector
);
6194 char name
[DRM_DISPLAY_MODE_LEN
];
6197 } common_modes
[] = {
6198 { "640x480", 640, 480},
6199 { "800x600", 800, 600},
6200 { "1024x768", 1024, 768},
6201 { "1280x720", 1280, 720},
6202 { "1280x800", 1280, 800},
6203 {"1280x1024", 1280, 1024},
6204 { "1440x900", 1440, 900},
6205 {"1680x1050", 1680, 1050},
6206 {"1600x1200", 1600, 1200},
6207 {"1920x1080", 1920, 1080},
6208 {"1920x1200", 1920, 1200}
6211 n
= ARRAY_SIZE(common_modes
);
6213 for (i
= 0; i
< n
; i
++) {
6214 struct drm_display_mode
*curmode
= NULL
;
6215 bool mode_existed
= false;
6217 if (common_modes
[i
].w
> native_mode
->hdisplay
||
6218 common_modes
[i
].h
> native_mode
->vdisplay
||
6219 (common_modes
[i
].w
== native_mode
->hdisplay
&&
6220 common_modes
[i
].h
== native_mode
->vdisplay
))
6223 list_for_each_entry(curmode
, &connector
->probed_modes
, head
) {
6224 if (common_modes
[i
].w
== curmode
->hdisplay
&&
6225 common_modes
[i
].h
== curmode
->vdisplay
) {
6226 mode_existed
= true;
6234 mode
= amdgpu_dm_create_common_mode(encoder
,
6235 common_modes
[i
].name
, common_modes
[i
].w
,
6237 drm_mode_probed_add(connector
, mode
);
6238 amdgpu_dm_connector
->num_modes
++;
6242 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector
*connector
,
6245 struct amdgpu_dm_connector
*amdgpu_dm_connector
=
6246 to_amdgpu_dm_connector(connector
);
6249 /* empty probed_modes */
6250 INIT_LIST_HEAD(&connector
->probed_modes
);
6251 amdgpu_dm_connector
->num_modes
=
6252 drm_add_edid_modes(connector
, edid
);
6254 /* sorting the probed modes before calling function
6255 * amdgpu_dm_get_native_mode() since EDID can have
6256 * more than one preferred mode. The modes that are
6257 * later in the probed mode list could be of higher
6258 * and preferred resolution. For example, 3840x2160
6259 * resolution in base EDID preferred timing and 4096x2160
6260 * preferred resolution in DID extension block later.
6262 drm_mode_sort(&connector
->probed_modes
);
6263 amdgpu_dm_get_native_mode(connector
);
6265 amdgpu_dm_connector
->num_modes
= 0;
6269 static int amdgpu_dm_connector_get_modes(struct drm_connector
*connector
)
6271 struct amdgpu_dm_connector
*amdgpu_dm_connector
=
6272 to_amdgpu_dm_connector(connector
);
6273 struct drm_encoder
*encoder
;
6274 struct edid
*edid
= amdgpu_dm_connector
->edid
;
6276 encoder
= amdgpu_dm_connector_to_encoder(connector
);
6278 if (!edid
|| !drm_edid_is_valid(edid
)) {
6279 amdgpu_dm_connector
->num_modes
=
6280 drm_add_modes_noedid(connector
, 640, 480);
6282 amdgpu_dm_connector_ddc_get_modes(connector
, edid
);
6283 amdgpu_dm_connector_add_common_modes(encoder
, connector
);
6285 amdgpu_dm_fbc_init(connector
);
6287 return amdgpu_dm_connector
->num_modes
;
6290 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager
*dm
,
6291 struct amdgpu_dm_connector
*aconnector
,
6293 struct dc_link
*link
,
6296 struct amdgpu_device
*adev
= dm
->ddev
->dev_private
;
6299 * Some of the properties below require access to state, like bpc.
6300 * Allocate some default initial connector state with our reset helper.
6302 if (aconnector
->base
.funcs
->reset
)
6303 aconnector
->base
.funcs
->reset(&aconnector
->base
);
6305 aconnector
->connector_id
= link_index
;
6306 aconnector
->dc_link
= link
;
6307 aconnector
->base
.interlace_allowed
= false;
6308 aconnector
->base
.doublescan_allowed
= false;
6309 aconnector
->base
.stereo_allowed
= false;
6310 aconnector
->base
.dpms
= DRM_MODE_DPMS_OFF
;
6311 aconnector
->hpd
.hpd
= AMDGPU_HPD_NONE
; /* not used */
6312 aconnector
->audio_inst
= -1;
6313 mutex_init(&aconnector
->hpd_lock
);
6316 * configure support HPD hot plug connector_>polled default value is 0
6317 * which means HPD hot plug not supported
6319 switch (connector_type
) {
6320 case DRM_MODE_CONNECTOR_HDMIA
:
6321 aconnector
->base
.polled
= DRM_CONNECTOR_POLL_HPD
;
6322 aconnector
->base
.ycbcr_420_allowed
=
6323 link
->link_enc
->features
.hdmi_ycbcr420_supported
? true : false;
6325 case DRM_MODE_CONNECTOR_DisplayPort
:
6326 aconnector
->base
.polled
= DRM_CONNECTOR_POLL_HPD
;
6327 aconnector
->base
.ycbcr_420_allowed
=
6328 link
->link_enc
->features
.dp_ycbcr420_supported
? true : false;
6330 case DRM_MODE_CONNECTOR_DVID
:
6331 aconnector
->base
.polled
= DRM_CONNECTOR_POLL_HPD
;
6337 drm_object_attach_property(&aconnector
->base
.base
,
6338 dm
->ddev
->mode_config
.scaling_mode_property
,
6339 DRM_MODE_SCALE_NONE
);
6341 drm_object_attach_property(&aconnector
->base
.base
,
6342 adev
->mode_info
.underscan_property
,
6344 drm_object_attach_property(&aconnector
->base
.base
,
6345 adev
->mode_info
.underscan_hborder_property
,
6347 drm_object_attach_property(&aconnector
->base
.base
,
6348 adev
->mode_info
.underscan_vborder_property
,
6351 if (!aconnector
->mst_port
)
6352 drm_connector_attach_max_bpc_property(&aconnector
->base
, 8, 16);
6354 /* This defaults to the max in the range, but we want 8bpc for non-edp. */
6355 aconnector
->base
.state
->max_bpc
= (connector_type
== DRM_MODE_CONNECTOR_eDP
) ? 16 : 8;
6356 aconnector
->base
.state
->max_requested_bpc
= aconnector
->base
.state
->max_bpc
;
6358 if (connector_type
== DRM_MODE_CONNECTOR_eDP
&&
6359 (dc_is_dmcu_initialized(adev
->dm
.dc
) || adev
->dm
.dc
->ctx
->dmub_srv
)) {
6360 drm_object_attach_property(&aconnector
->base
.base
,
6361 adev
->mode_info
.abm_level_property
, 0);
6364 if (connector_type
== DRM_MODE_CONNECTOR_HDMIA
||
6365 connector_type
== DRM_MODE_CONNECTOR_DisplayPort
||
6366 connector_type
== DRM_MODE_CONNECTOR_eDP
) {
6367 drm_object_attach_property(
6368 &aconnector
->base
.base
,
6369 dm
->ddev
->mode_config
.hdr_output_metadata_property
, 0);
6371 if (!aconnector
->mst_port
)
6372 drm_connector_attach_vrr_capable_property(&aconnector
->base
);
6374 #ifdef CONFIG_DRM_AMD_DC_HDCP
6375 if (adev
->dm
.hdcp_workqueue
)
6376 drm_connector_attach_content_protection_property(&aconnector
->base
, true);
6381 static int amdgpu_dm_i2c_xfer(struct i2c_adapter
*i2c_adap
,
6382 struct i2c_msg
*msgs
, int num
)
6384 struct amdgpu_i2c_adapter
*i2c
= i2c_get_adapdata(i2c_adap
);
6385 struct ddc_service
*ddc_service
= i2c
->ddc_service
;
6386 struct i2c_command cmd
;
6390 cmd
.payloads
= kcalloc(num
, sizeof(struct i2c_payload
), GFP_KERNEL
);
6395 cmd
.number_of_payloads
= num
;
6396 cmd
.engine
= I2C_COMMAND_ENGINE_DEFAULT
;
6399 for (i
= 0; i
< num
; i
++) {
6400 cmd
.payloads
[i
].write
= !(msgs
[i
].flags
& I2C_M_RD
);
6401 cmd
.payloads
[i
].address
= msgs
[i
].addr
;
6402 cmd
.payloads
[i
].length
= msgs
[i
].len
;
6403 cmd
.payloads
[i
].data
= msgs
[i
].buf
;
6407 ddc_service
->ctx
->dc
,
6408 ddc_service
->ddc_pin
->hw_info
.ddc_channel
,
6412 kfree(cmd
.payloads
);
6416 static u32
amdgpu_dm_i2c_func(struct i2c_adapter
*adap
)
6418 return I2C_FUNC_I2C
| I2C_FUNC_SMBUS_EMUL
;
6421 static const struct i2c_algorithm amdgpu_dm_i2c_algo
= {
6422 .master_xfer
= amdgpu_dm_i2c_xfer
,
6423 .functionality
= amdgpu_dm_i2c_func
,
6426 static struct amdgpu_i2c_adapter
*
6427 create_i2c(struct ddc_service
*ddc_service
,
6431 struct amdgpu_device
*adev
= ddc_service
->ctx
->driver_context
;
6432 struct amdgpu_i2c_adapter
*i2c
;
6434 i2c
= kzalloc(sizeof(struct amdgpu_i2c_adapter
), GFP_KERNEL
);
6437 i2c
->base
.owner
= THIS_MODULE
;
6438 i2c
->base
.class = I2C_CLASS_DDC
;
6439 i2c
->base
.dev
.parent
= &adev
->pdev
->dev
;
6440 i2c
->base
.algo
= &amdgpu_dm_i2c_algo
;
6441 snprintf(i2c
->base
.name
, sizeof(i2c
->base
.name
), "AMDGPU DM i2c hw bus %d", link_index
);
6442 i2c_set_adapdata(&i2c
->base
, i2c
);
6443 i2c
->ddc_service
= ddc_service
;
6444 i2c
->ddc_service
->ddc_pin
->hw_info
.ddc_channel
= link_index
;
6451 * Note: this function assumes that dc_link_detect() was called for the
6452 * dc_link which will be represented by this aconnector.
6454 static int amdgpu_dm_connector_init(struct amdgpu_display_manager
*dm
,
6455 struct amdgpu_dm_connector
*aconnector
,
6456 uint32_t link_index
,
6457 struct amdgpu_encoder
*aencoder
)
6461 struct dc
*dc
= dm
->dc
;
6462 struct dc_link
*link
= dc_get_link_at_index(dc
, link_index
);
6463 struct amdgpu_i2c_adapter
*i2c
;
6465 link
->priv
= aconnector
;
6467 DRM_DEBUG_DRIVER("%s()\n", __func__
);
6469 i2c
= create_i2c(link
->ddc
, link
->link_index
, &res
);
6471 DRM_ERROR("Failed to create i2c adapter data\n");
6475 aconnector
->i2c
= i2c
;
6476 res
= i2c_add_adapter(&i2c
->base
);
6479 DRM_ERROR("Failed to register hw i2c %d\n", link
->link_index
);
6483 connector_type
= to_drm_connector_type(link
->connector_signal
);
6485 res
= drm_connector_init_with_ddc(
6488 &amdgpu_dm_connector_funcs
,
6493 DRM_ERROR("connector_init failed\n");
6494 aconnector
->connector_id
= -1;
6498 drm_connector_helper_add(
6500 &amdgpu_dm_connector_helper_funcs
);
6502 amdgpu_dm_connector_init_helper(
6509 drm_connector_attach_encoder(
6510 &aconnector
->base
, &aencoder
->base
);
6512 if (connector_type
== DRM_MODE_CONNECTOR_DisplayPort
6513 || connector_type
== DRM_MODE_CONNECTOR_eDP
)
6514 amdgpu_dm_initialize_dp_connector(dm
, aconnector
, link
->link_index
);
6519 aconnector
->i2c
= NULL
;
6524 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device
*adev
)
6526 switch (adev
->mode_info
.num_crtc
) {
6543 static int amdgpu_dm_encoder_init(struct drm_device
*dev
,
6544 struct amdgpu_encoder
*aencoder
,
6545 uint32_t link_index
)
6547 struct amdgpu_device
*adev
= dev
->dev_private
;
6549 int res
= drm_encoder_init(dev
,
6551 &amdgpu_dm_encoder_funcs
,
6552 DRM_MODE_ENCODER_TMDS
,
6555 aencoder
->base
.possible_crtcs
= amdgpu_dm_get_encoder_crtc_mask(adev
);
6558 aencoder
->encoder_id
= link_index
;
6560 aencoder
->encoder_id
= -1;
6562 drm_encoder_helper_add(&aencoder
->base
, &amdgpu_dm_encoder_helper_funcs
);
6567 static void manage_dm_interrupts(struct amdgpu_device
*adev
,
6568 struct amdgpu_crtc
*acrtc
,
6572 * We have no guarantee that the frontend index maps to the same
6573 * backend index - some even map to more than one.
6575 * TODO: Use a different interrupt or check DC itself for the mapping.
6578 amdgpu_display_crtc_idx_to_irq_type(
6583 drm_crtc_vblank_on(&acrtc
->base
);
6586 &adev
->pageflip_irq
,
6592 &adev
->pageflip_irq
,
6594 drm_crtc_vblank_off(&acrtc
->base
);
6598 static void dm_update_pflip_irq_state(struct amdgpu_device
*adev
,
6599 struct amdgpu_crtc
*acrtc
)
6602 amdgpu_display_crtc_idx_to_irq_type(adev
, acrtc
->crtc_id
);
6605 * This reads the current state for the IRQ and force reapplies
6606 * the setting to hardware.
6608 amdgpu_irq_update(adev
, &adev
->pageflip_irq
, irq_type
);
6612 is_scaling_state_different(const struct dm_connector_state
*dm_state
,
6613 const struct dm_connector_state
*old_dm_state
)
6615 if (dm_state
->scaling
!= old_dm_state
->scaling
)
6617 if (!dm_state
->underscan_enable
&& old_dm_state
->underscan_enable
) {
6618 if (old_dm_state
->underscan_hborder
!= 0 && old_dm_state
->underscan_vborder
!= 0)
6620 } else if (dm_state
->underscan_enable
&& !old_dm_state
->underscan_enable
) {
6621 if (dm_state
->underscan_hborder
!= 0 && dm_state
->underscan_vborder
!= 0)
6623 } else if (dm_state
->underscan_hborder
!= old_dm_state
->underscan_hborder
||
6624 dm_state
->underscan_vborder
!= old_dm_state
->underscan_vborder
)
#ifdef CONFIG_DRM_AMD_DC_HDCP
/*
 * Decide whether HDCP must be (re)started for this connector, normalizing
 * the requested content_protection state on the way (re-enable, S3 resume
 * and hot-plug/headless cases).
 *
 * Returns true when HDCP work needs to run.
 */
static bool is_content_protection_different(struct drm_connector_state *state,
					    const struct drm_connector_state *old_state,
					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

	/* Content type changed while protection is requested: restart. */
	if (old_state->hdcp_content_type != state->hdcp_content_type &&
	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		return true;
	}

	/* CP is being re enabled, ignore this */
	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
		return false;
	}

	/* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED */
	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;

	/* Check if something is connected/enabled, otherwise we start hdcp but nothing is connected/enabled
	 * hot-plug, headless s3, dpms
	 */
	if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
	    connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL)
		return true;

	if (old_state->content_protection == state->content_protection)
		return false;

	if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
		return true;

	return false;
}
#endif
6671 static void remove_stream(struct amdgpu_device
*adev
,
6672 struct amdgpu_crtc
*acrtc
,
6673 struct dc_stream_state
*stream
)
6675 /* this is the update mode case */
6677 acrtc
->otg_inst
= -1;
6678 acrtc
->enabled
= false;
6681 static int get_cursor_position(struct drm_plane
*plane
, struct drm_crtc
*crtc
,
6682 struct dc_cursor_position
*position
)
6684 struct amdgpu_crtc
*amdgpu_crtc
= to_amdgpu_crtc(crtc
);
6686 int xorigin
= 0, yorigin
= 0;
6688 position
->enable
= false;
6692 if (!crtc
|| !plane
->state
->fb
)
6695 if ((plane
->state
->crtc_w
> amdgpu_crtc
->max_cursor_width
) ||
6696 (plane
->state
->crtc_h
> amdgpu_crtc
->max_cursor_height
)) {
6697 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
6699 plane
->state
->crtc_w
,
6700 plane
->state
->crtc_h
);
6704 x
= plane
->state
->crtc_x
;
6705 y
= plane
->state
->crtc_y
;
6707 if (x
<= -amdgpu_crtc
->max_cursor_width
||
6708 y
<= -amdgpu_crtc
->max_cursor_height
)
6712 xorigin
= min(-x
, amdgpu_crtc
->max_cursor_width
- 1);
6716 yorigin
= min(-y
, amdgpu_crtc
->max_cursor_height
- 1);
6719 position
->enable
= true;
6720 position
->translate_by_source
= true;
6723 position
->x_hotspot
= xorigin
;
6724 position
->y_hotspot
= yorigin
;
6729 static void handle_cursor_update(struct drm_plane
*plane
,
6730 struct drm_plane_state
*old_plane_state
)
6732 struct amdgpu_device
*adev
= plane
->dev
->dev_private
;
6733 struct amdgpu_framebuffer
*afb
= to_amdgpu_framebuffer(plane
->state
->fb
);
6734 struct drm_crtc
*crtc
= afb
? plane
->state
->crtc
: old_plane_state
->crtc
;
6735 struct dm_crtc_state
*crtc_state
= crtc
? to_dm_crtc_state(crtc
->state
) : NULL
;
6736 struct amdgpu_crtc
*amdgpu_crtc
= to_amdgpu_crtc(crtc
);
6737 uint64_t address
= afb
? afb
->address
: 0;
6738 struct dc_cursor_position position
;
6739 struct dc_cursor_attributes attributes
;
6742 if (!plane
->state
->fb
&& !old_plane_state
->fb
)
6745 DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
6747 amdgpu_crtc
->crtc_id
,
6748 plane
->state
->crtc_w
,
6749 plane
->state
->crtc_h
);
6751 ret
= get_cursor_position(plane
, crtc
, &position
);
6755 if (!position
.enable
) {
6756 /* turn off cursor */
6757 if (crtc_state
&& crtc_state
->stream
) {
6758 mutex_lock(&adev
->dm
.dc_lock
);
6759 dc_stream_set_cursor_position(crtc_state
->stream
,
6761 mutex_unlock(&adev
->dm
.dc_lock
);
6766 amdgpu_crtc
->cursor_width
= plane
->state
->crtc_w
;
6767 amdgpu_crtc
->cursor_height
= plane
->state
->crtc_h
;
6769 memset(&attributes
, 0, sizeof(attributes
));
6770 attributes
.address
.high_part
= upper_32_bits(address
);
6771 attributes
.address
.low_part
= lower_32_bits(address
);
6772 attributes
.width
= plane
->state
->crtc_w
;
6773 attributes
.height
= plane
->state
->crtc_h
;
6774 attributes
.color_format
= CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA
;
6775 attributes
.rotation_angle
= 0;
6776 attributes
.attribute_flags
.value
= 0;
6778 attributes
.pitch
= attributes
.width
;
6780 if (crtc_state
->stream
) {
6781 mutex_lock(&adev
->dm
.dc_lock
);
6782 if (!dc_stream_set_cursor_attributes(crtc_state
->stream
,
6784 DRM_ERROR("DC failed to set cursor attributes\n");
6786 if (!dc_stream_set_cursor_position(crtc_state
->stream
,
6788 DRM_ERROR("DC failed to set cursor position\n");
6789 mutex_unlock(&adev
->dm
.dc_lock
);
6793 static void prepare_flip_isr(struct amdgpu_crtc
*acrtc
)
6796 assert_spin_locked(&acrtc
->base
.dev
->event_lock
);
6797 WARN_ON(acrtc
->event
);
6799 acrtc
->event
= acrtc
->base
.state
->event
;
6801 /* Set the flip status */
6802 acrtc
->pflip_status
= AMDGPU_FLIP_SUBMITTED
;
6804 /* Mark this event as consumed */
6805 acrtc
->base
.state
->event
= NULL
;
6807 DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
6811 static void update_freesync_state_on_stream(
6812 struct amdgpu_display_manager
*dm
,
6813 struct dm_crtc_state
*new_crtc_state
,
6814 struct dc_stream_state
*new_stream
,
6815 struct dc_plane_state
*surface
,
6816 u32 flip_timestamp_in_us
)
6818 struct mod_vrr_params vrr_params
;
6819 struct dc_info_packet vrr_infopacket
= {0};
6820 struct amdgpu_device
*adev
= dm
->adev
;
6821 unsigned long flags
;
6827 * TODO: Determine why min/max totals and vrefresh can be 0 here.
6828 * For now it's sufficient to just guard against these conditions.
6831 if (!new_stream
->timing
.h_total
|| !new_stream
->timing
.v_total
)
6834 spin_lock_irqsave(&adev
->ddev
->event_lock
, flags
);
6835 vrr_params
= new_crtc_state
->vrr_params
;
6838 mod_freesync_handle_preflip(
6839 dm
->freesync_module
,
6842 flip_timestamp_in_us
,
6845 if (adev
->family
< AMDGPU_FAMILY_AI
&&
6846 amdgpu_dm_vrr_active(new_crtc_state
)) {
6847 mod_freesync_handle_v_update(dm
->freesync_module
,
6848 new_stream
, &vrr_params
);
6850 /* Need to call this before the frame ends. */
6851 dc_stream_adjust_vmin_vmax(dm
->dc
,
6852 new_crtc_state
->stream
,
6853 &vrr_params
.adjust
);
6857 mod_freesync_build_vrr_infopacket(
6858 dm
->freesync_module
,
6862 TRANSFER_FUNC_UNKNOWN
,
6865 new_crtc_state
->freesync_timing_changed
|=
6866 (memcmp(&new_crtc_state
->vrr_params
.adjust
,
6868 sizeof(vrr_params
.adjust
)) != 0);
6870 new_crtc_state
->freesync_vrr_info_changed
|=
6871 (memcmp(&new_crtc_state
->vrr_infopacket
,
6873 sizeof(vrr_infopacket
)) != 0);
6875 new_crtc_state
->vrr_params
= vrr_params
;
6876 new_crtc_state
->vrr_infopacket
= vrr_infopacket
;
6878 new_stream
->adjust
= new_crtc_state
->vrr_params
.adjust
;
6879 new_stream
->vrr_infopacket
= vrr_infopacket
;
6881 if (new_crtc_state
->freesync_vrr_info_changed
)
6882 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
6883 new_crtc_state
->base
.crtc
->base
.id
,
6884 (int)new_crtc_state
->base
.vrr_enabled
,
6885 (int)vrr_params
.state
);
6887 spin_unlock_irqrestore(&adev
->ddev
->event_lock
, flags
);
6890 static void pre_update_freesync_state_on_stream(
6891 struct amdgpu_display_manager
*dm
,
6892 struct dm_crtc_state
*new_crtc_state
)
6894 struct dc_stream_state
*new_stream
= new_crtc_state
->stream
;
6895 struct mod_vrr_params vrr_params
;
6896 struct mod_freesync_config config
= new_crtc_state
->freesync_config
;
6897 struct amdgpu_device
*adev
= dm
->adev
;
6898 unsigned long flags
;
6904 * TODO: Determine why min/max totals and vrefresh can be 0 here.
6905 * For now it's sufficient to just guard against these conditions.
6907 if (!new_stream
->timing
.h_total
|| !new_stream
->timing
.v_total
)
6910 spin_lock_irqsave(&adev
->ddev
->event_lock
, flags
);
6911 vrr_params
= new_crtc_state
->vrr_params
;
6913 if (new_crtc_state
->vrr_supported
&&
6914 config
.min_refresh_in_uhz
&&
6915 config
.max_refresh_in_uhz
) {
6916 config
.state
= new_crtc_state
->base
.vrr_enabled
?
6917 VRR_STATE_ACTIVE_VARIABLE
:
6920 config
.state
= VRR_STATE_UNSUPPORTED
;
6923 mod_freesync_build_vrr_params(dm
->freesync_module
,
6925 &config
, &vrr_params
);
6927 new_crtc_state
->freesync_timing_changed
|=
6928 (memcmp(&new_crtc_state
->vrr_params
.adjust
,
6930 sizeof(vrr_params
.adjust
)) != 0);
6932 new_crtc_state
->vrr_params
= vrr_params
;
6933 spin_unlock_irqrestore(&adev
->ddev
->event_lock
, flags
);
6936 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state
*old_state
,
6937 struct dm_crtc_state
*new_state
)
6939 bool old_vrr_active
= amdgpu_dm_vrr_active(old_state
);
6940 bool new_vrr_active
= amdgpu_dm_vrr_active(new_state
);
6942 if (!old_vrr_active
&& new_vrr_active
) {
6943 /* Transition VRR inactive -> active:
6944 * While VRR is active, we must not disable vblank irq, as a
6945 * reenable after disable would compute bogus vblank/pflip
6946 * timestamps if it likely happened inside display front-porch.
6948 * We also need vupdate irq for the actual core vblank handling
6951 dm_set_vupdate_irq(new_state
->base
.crtc
, true);
6952 drm_crtc_vblank_get(new_state
->base
.crtc
);
6953 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
6954 __func__
, new_state
->base
.crtc
->base
.id
);
6955 } else if (old_vrr_active
&& !new_vrr_active
) {
6956 /* Transition VRR active -> inactive:
6957 * Allow vblank irq disable again for fixed refresh rate.
6959 dm_set_vupdate_irq(new_state
->base
.crtc
, false);
6960 drm_crtc_vblank_put(new_state
->base
.crtc
);
6961 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
6962 __func__
, new_state
->base
.crtc
->base
.id
);
6966 static void amdgpu_dm_commit_cursors(struct drm_atomic_state
*state
)
6968 struct drm_plane
*plane
;
6969 struct drm_plane_state
*old_plane_state
, *new_plane_state
;
6973 * TODO: Make this per-stream so we don't issue redundant updates for
6974 * commits with multiple streams.
6976 for_each_oldnew_plane_in_state(state
, plane
, old_plane_state
,
6978 if (plane
->type
== DRM_PLANE_TYPE_CURSOR
)
6979 handle_cursor_update(plane
, old_plane_state
);
/*
 * amdgpu_dm_commit_planes() - program all non-cursor planes of one CRTC.
 *
 * Builds a bundle of DC surface/stream updates from the atomic state for
 * @pcrtc, throttles page flips against the target vblank, arms the pageflip
 * interrupt, and hands everything to DC in a single
 * dc_commit_updates_for_stream() call. Cursor planes are committed
 * separately (amdgpu_dm_commit_cursors), before plane disable and after
 * plane programming.
 */
static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
				    struct dc_state *dc_state,
				    struct drm_device *dev,
				    struct amdgpu_display_manager *dm,
				    struct drm_crtc *pcrtc,
				    bool wait_for_vblank)
{
	uint32_t i;
	uint64_t timestamp_ns;
	struct drm_plane *plane;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
	struct drm_crtc_state *new_pcrtc_state =
			drm_atomic_get_new_crtc_state(state, pcrtc);
	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
	struct dm_crtc_state *dm_old_crtc_state =
			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
	int planes_count = 0, vpos, hpos;
	long r;
	unsigned long flags;
	struct amdgpu_bo *abo;
	uint64_t tiling_flags;
	bool tmz_surface = false;
	uint32_t target_vblank, last_flip_vblank;
	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
	bool pflip_present = false;
	/*
	 * Heap-allocated below: the per-plane update arrays are too large to
	 * live on the kernel stack.
	 */
	struct {
		struct dc_surface_update surface_updates[MAX_SURFACES];
		struct dc_plane_info plane_infos[MAX_SURFACES];
		struct dc_scaling_info scaling_infos[MAX_SURFACES];
		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
		struct dc_stream_update stream_update;
	} *bundle;

	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);

	if (!bundle) {
		dm_error("Failed to allocate update bundle\n");
		goto cleanup;
	}

	/*
	 * Disable the cursor first if we're disabling all the planes.
	 * It'll remain on the screen after the planes are re-enabled
	 * if we don't.
	 */
	if (acrtc_state->active_planes == 0)
		amdgpu_dm_commit_cursors(state);

	/* update planes when needed */
	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
		struct drm_crtc *crtc = new_plane_state->crtc;
		struct drm_crtc_state *new_crtc_state;
		struct drm_framebuffer *fb = new_plane_state->fb;
		bool plane_needs_flip;
		struct dc_plane_state *dc_plane;
		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);

		/* Cursor plane is handled after stream updates */
		if (plane->type == DRM_PLANE_TYPE_CURSOR)
			continue;

		/* Only planes attached to this CRTC with a framebuffer count. */
		if (!fb || !crtc || pcrtc != crtc)
			continue;

		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
		if (!new_crtc_state->active)
			continue;

		dc_plane = dm_new_plane_state->dc_state;

		bundle->surface_updates[planes_count].surface = dc_plane;
		if (new_pcrtc_state->color_mgmt_changed) {
			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
			bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
		}

		fill_dc_scaling_info(new_plane_state,
				     &bundle->scaling_infos[planes_count]);

		bundle->surface_updates[planes_count].scaling_info =
			&bundle->scaling_infos[planes_count];

		/* A flip is only needed when both old and new state have a fb. */
		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;

		pflip_present = pflip_present || plane_needs_flip;

		if (!plane_needs_flip) {
			planes_count += 1;
			continue;
		}

		abo = gem_to_amdgpu_bo(fb->obj[0]);

		/*
		 * Wait for all fences on this FB. Do limited wait to avoid
		 * deadlock during GPU reset when this fence will not signal
		 * but we hold reservation lock for the BO.
		 */
		r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
					      false,
					      msecs_to_jiffies(5000));
		if (unlikely(r <= 0))
			DRM_ERROR("Waiting for fences timed out!");

		/*
		 * We cannot reserve buffers here, which means the normal flag
		 * access functions don't work. Paper over this with READ_ONCE,
		 * but maybe the flags are invariant enough that not even that
		 * would be needed.
		 */
		tiling_flags = READ_ONCE(abo->tiling_flags);
		tmz_surface = READ_ONCE(abo->flags) & AMDGPU_GEM_CREATE_ENCRYPTED;

		fill_dc_plane_info_and_addr(
			dm->adev, new_plane_state, tiling_flags,
			&bundle->plane_infos[planes_count],
			&bundle->flip_addrs[planes_count].address,
			tmz_surface,
			false);

		DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
				 new_plane_state->plane->index,
				 bundle->plane_infos[planes_count].dcc.enable);

		bundle->surface_updates[planes_count].plane_info =
			&bundle->plane_infos[planes_count];

		/*
		 * Only allow immediate flips for fast updates that don't
		 * change FB pitch, DCC state, rotation or mirroing.
		 */
		bundle->flip_addrs[planes_count].flip_immediate =
			crtc->state->async_flip &&
			acrtc_state->update_type == UPDATE_TYPE_FAST;

		timestamp_ns = ktime_get_ns();
		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
		bundle->surface_updates[planes_count].surface = dc_plane;

		if (!bundle->surface_updates[planes_count].surface) {
			DRM_ERROR("No surface for CRTC: id=%d\n",
					acrtc_attach->crtc_id);
			continue;
		}

		/* Freesync state is tracked against the primary plane flip. */
		if (plane == pcrtc->primary)
			update_freesync_state_on_stream(
				dm,
				acrtc_state,
				acrtc_state->stream,
				dc_plane,
				bundle->flip_addrs[planes_count].flip_timestamp_in_us);

		DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
				 __func__,
				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);

		planes_count += 1;
	}

	if (pflip_present) {
		if (!vrr_active) {
			/* Use old throttling in non-vrr fixed refresh rate mode
			 * to keep flip scheduling based on target vblank counts
			 * working in a backwards compatible way, e.g., for
			 * clients using the GLX_OML_sync_control extension or
			 * DRI3/Present extension with defined target_msc.
			 */
			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
		} else {
			/* For variable refresh rate mode only:
			 * Get vblank of last completed flip to avoid > 1 vrr
			 * flips per video frame by use of throttling, but allow
			 * flip programming anywhere in the possibly large
			 * variable vrr vblank interval for fine-grained flip
			 * timing control and more opportunity to avoid stutter
			 * on late submission of flips.
			 */
			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
			last_flip_vblank = acrtc_attach->last_flip_vblank;
			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
		}

		target_vblank = last_flip_vblank + wait_for_vblank;

		/*
		 * Wait until we're out of the vertical blank period before the one
		 * targeted by the flip
		 */
		while ((acrtc_attach->enabled &&
			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
							    0, &vpos, &hpos, NULL,
							    NULL, &pcrtc->hwmode)
			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
			(int)(target_vblank -
			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
			usleep_range(1000, 1100);
		}

		/*
		 * Prepare the flip event for the pageflip interrupt to handle.
		 *
		 * This only works in the case where we've already turned on the
		 * appropriate hardware blocks (eg. HUBP) so in the transition case
		 * from 0 -> n planes we have to skip a hardware generated event
		 * and rely on sending it from software.
		 */
		if (acrtc_attach->base.state->event &&
		    acrtc_state->active_planes > 0) {
			drm_crtc_vblank_get(pcrtc);

			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);

			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
			prepare_flip_isr(acrtc_attach);

			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
		}

		if (acrtc_state->stream) {
			if (acrtc_state->freesync_vrr_info_changed)
				bundle->stream_update.vrr_infopacket =
					&acrtc_state->stream->vrr_infopacket;
		}
	}

	/* Update the planes if changed or disable if we don't have any. */
	if ((planes_count || acrtc_state->active_planes == 0) &&
	    acrtc_state->stream) {
		bundle->stream_update.stream = acrtc_state->stream;
		if (new_pcrtc_state->mode_changed) {
			bundle->stream_update.src = acrtc_state->stream->src;
			bundle->stream_update.dst = acrtc_state->stream->dst;
		}

		if (new_pcrtc_state->color_mgmt_changed) {
			/*
			 * TODO: This isn't fully correct since we've actually
			 * already modified the stream in place.
			 */
			bundle->stream_update.gamut_remap =
				&acrtc_state->stream->gamut_remap_matrix;
			bundle->stream_update.output_csc_transform =
				&acrtc_state->stream->csc_color_matrix;
			bundle->stream_update.out_transfer_func =
				acrtc_state->stream->out_transfer_func;
		}

		acrtc_state->stream->abm_level = acrtc_state->abm_level;
		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
			bundle->stream_update.abm_level = &acrtc_state->abm_level;

		/*
		 * If FreeSync state on the stream has changed then we need to
		 * re-adjust the min/max bounds now that DC doesn't handle this
		 * as part of commit.
		 */
		if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
		    amdgpu_dm_vrr_active(acrtc_state)) {
			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
			dc_stream_adjust_vmin_vmax(
				dm->dc, acrtc_state->stream,
				&acrtc_state->vrr_params.adjust);
			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
		}
		mutex_lock(&dm->dc_lock);
		/* PSR must be disabled before a non-fast (full) update. */
		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
				acrtc_state->stream->link->psr_settings.psr_allow_active)
			amdgpu_dm_psr_disable(acrtc_state->stream);

		dc_commit_updates_for_stream(dm->dc,
						     bundle->surface_updates,
						     planes_count,
						     acrtc_state->stream,
						     &bundle->stream_update,
						     dc_state);

		/*
		 * Enable or disable the interrupts on the backend.
		 *
		 * Most pipes are put into power gating when unused.
		 *
		 * When power gating is enabled on a pipe we lose the
		 * interrupt enablement state when power gating is disabled.
		 *
		 * So we need to update the IRQ control state in hardware
		 * whenever the pipe turns on (since it could be previously
		 * power gated) or off (since some pipes can't be power gated
		 * on some ASICs).
		 */
		if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
			dm_update_pflip_irq_state(
				(struct amdgpu_device *)dev->dev_private,
				acrtc_attach);

		/* Set up PSR on full updates, (re)enable it on fast ones. */
		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
				acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
				!acrtc_state->stream->link->psr_settings.psr_feature_enabled)
			amdgpu_dm_link_setup_psr(acrtc_state->stream);
		else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
				acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
				!acrtc_state->stream->link->psr_settings.psr_allow_active) {
			amdgpu_dm_psr_enable(acrtc_state->stream);
		}

		mutex_unlock(&dm->dc_lock);
	}

	/*
	 * Update cursor state *after* programming all the planes.
	 * This avoids redundant programming in the case where we're going
	 * to be disabling a single plane - those pipes are being disabled.
	 */
	if (acrtc_state->active_planes)
		amdgpu_dm_commit_cursors(state);

cleanup:
	kfree(bundle);
}
/*
 * amdgpu_dm_commit_audio() - notify the audio driver about ELD changes.
 *
 * First pass tears down audio for connectors that lost or changed their
 * CRTC (notify with the old instance, then invalidate it); second pass
 * advertises the audio instance for connectors that went through a modeset
 * and have an active stream. adev->dm.audio_lock serializes access to the
 * per-connector audio_inst field.
 */
static void amdgpu_dm_commit_audio(struct drm_device *dev,
				   struct drm_atomic_state *state)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_state *old_con_state, *new_con_state;
	struct drm_crtc_state *new_crtc_state;
	struct dm_crtc_state *new_dm_crtc_state;
	const struct dc_stream_status *status;
	int i, inst;

	/* Notify device removals. */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		if (old_con_state->crtc != new_con_state->crtc) {
			/* CRTC changes require notification. */
			goto notify;
		}

		if (!new_con_state->crtc)
			continue;

		new_crtc_state = drm_atomic_get_new_crtc_state(
			state, new_con_state->crtc);

		if (!new_crtc_state)
			continue;

		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
			continue;

	notify:
		aconnector = to_amdgpu_dm_connector(connector);

		mutex_lock(&adev->dm.audio_lock);
		inst = aconnector->audio_inst;
		/* -1 marks "no audio instance" until the add pass below. */
		aconnector->audio_inst = -1;
		mutex_unlock(&adev->dm.audio_lock);

		amdgpu_dm_audio_eld_notify(adev, inst);
	}

	/* Notify audio device additions. */
	for_each_new_connector_in_state(state, connector, new_con_state, i) {
		if (!new_con_state->crtc)
			continue;

		new_crtc_state = drm_atomic_get_new_crtc_state(
			state, new_con_state->crtc);

		if (!new_crtc_state)
			continue;

		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
			continue;

		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
		if (!new_dm_crtc_state->stream)
			continue;

		status = dc_stream_get_status(new_dm_crtc_state->stream);
		if (!status)
			continue;

		aconnector = to_amdgpu_dm_connector(connector);

		mutex_lock(&adev->dm.audio_lock);
		inst = status->audio_inst;
		aconnector->audio_inst = inst;
		mutex_unlock(&adev->dm.audio_lock);

		amdgpu_dm_audio_eld_notify(adev, inst);
	}
}
7385 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
7386 * @crtc_state: the DRM CRTC state
7387 * @stream_state: the DC stream state.
7389 * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
7390 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
7392 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state
*crtc_state
,
7393 struct dc_stream_state
*stream_state
)
7395 stream_state
->mode_changed
= drm_atomic_crtc_needs_modeset(crtc_state
);
/*
 * amdgpu_dm_atomic_commit() - DM's drm_mode_config_funcs.atomic_commit hook.
 *
 * Disables vblank/pflip interrupts on CRTCs that are being turned off or
 * modeset before delegating the actual commit to the DRM atomic helper.
 * Returns the helper's result (0 on success, negative errno on failure).
 */
static int amdgpu_dm_atomic_commit(struct drm_device *dev,
				   struct drm_atomic_state *state,
				   bool nonblock)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct amdgpu_device *adev = dev->dev_private;
	int i;

	/*
	 * We evade vblank and pflip interrupts on CRTCs that are undergoing
	 * a modeset, being disabled, or have no active planes.
	 *
	 * It's done in atomic commit rather than commit tail for now since
	 * some of these interrupt handlers access the current CRTC state and
	 * potentially the stream pointer itself.
	 *
	 * Since the atomic state is swapped within atomic commit and not within
	 * commit tail this would leave to new state (that hasn't been committed yet)
	 * being accesssed from within the handlers.
	 *
	 * TODO: Fix this so we can do this in commit tail and not have to block
	 * in atomic check.
	 */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

		/* IRQs off for CRTCs going inactive or through a modeset. */
		if (old_crtc_state->active &&
		    (!new_crtc_state->active ||
		     drm_atomic_crtc_needs_modeset(new_crtc_state)))
			manage_dm_interrupts(adev, acrtc, false);
	}
	/*
	 * Add check here for SoC's that support hardware cursor plane, to
	 * unset legacy_cursor_update
	 */

	return drm_atomic_helper_commit(dev, state, nonblock);

	/*TODO Handle EINTR, reenable IRQ*/
}
/**
 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
 * @state: The atomic state to commit
 *
 * This will tell DC to commit the constructed DC state from atomic_check,
 * programming the hardware. Any failures here implies a hardware failure, since
 * atomic check should have filtered anything non-kosher.
 */
static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct dm_atomic_state *dm_state;
	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
	uint32_t i, j;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	unsigned long flags;
	bool wait_for_vblank = true;
	struct drm_connector *connector;
	struct drm_connector_state *old_con_state, *new_con_state;
	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
	int crtc_disable_count = 0;

	drm_atomic_helper_update_legacy_modeset_state(dev, state);

	dm_state = dm_atomic_get_new_state(state);
	if (dm_state && dm_state->context) {
		dc_state = dm_state->context;
	} else {
		/* No state changes, retain current state. */
		dc_state_temp = dc_create_state(dm->dc);
		ASSERT(dc_state_temp);
		dc_state = dc_state_temp;
		dc_resource_state_copy_construct_current(dm->dc, dc_state);
	}

	/* update changed items */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		DRM_DEBUG_DRIVER(
			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
			"planes_changed:%d, mode_changed:%d,active_changed:%d,"
			"connectors_changed:%d\n",
			acrtc->crtc_id,
			new_crtc_state->enable,
			new_crtc_state->active,
			new_crtc_state->planes_changed,
			new_crtc_state->mode_changed,
			new_crtc_state->active_changed,
			new_crtc_state->connectors_changed);

		/* Copy all transient state flags into dc state */
		if (dm_new_crtc_state->stream) {
			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
							    dm_new_crtc_state->stream);
		}

		/* handles headless hotplug case, updating new_state and
		 * aconnector as needed
		 */

		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {

			DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);

			if (!dm_new_crtc_state->stream) {
				/*
				 * this could happen because of issues with
				 * userspace notifications delivery.
				 * In this case userspace tries to set mode on
				 * display which is disconnected in fact.
				 * dc_sink is NULL in this case on aconnector.
				 * We expect reset mode will come soon.
				 *
				 * This can also happen when unplug is done
				 * during resume sequence ended
				 *
				 * In this case, we want to pretend we still
				 * have a sink to keep the pipe running so that
				 * hw state is consistent with the sw state
				 */
				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
						__func__, acrtc->base.base.id);
				continue;
			}

			if (dm_old_crtc_state->stream)
				remove_stream(adev, acrtc, dm_old_crtc_state->stream);

			pm_runtime_get_noresume(dev->dev);

			acrtc->enabled = true;
			acrtc->hw_mode = new_crtc_state->mode;
			crtc->hwmode = new_crtc_state->mode;
		} else if (modereset_required(new_crtc_state)) {
			DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
			/* i.e. reset mode */
			if (dm_old_crtc_state->stream) {
				/* PSR must be off before the stream goes away. */
				if (dm_old_crtc_state->stream->link->psr_settings.psr_allow_active)
					amdgpu_dm_psr_disable(dm_old_crtc_state->stream);

				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
			}
		}
	} /* for_each_crtc_in_state() */

	if (dc_state) {
		dm_enable_per_frame_crtc_master_sync(dc_state);
		mutex_lock(&dm->dc_lock);
		WARN_ON(!dc_commit_state(dm->dc, dc_state));
		mutex_unlock(&dm->dc_lock);
	}

	/* Record which OTG each enabled stream ended up on. */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (dm_new_crtc_state->stream != NULL) {
			const struct dc_stream_status *status =
					dc_stream_get_status(dm_new_crtc_state->stream);

			if (!status)
				status = dc_stream_get_status_from_state(dc_state,
									 dm_new_crtc_state->stream);

			if (!status)
				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
			else
				acrtc->otg_inst = status->primary_otg_inst;
		}
	}
#ifdef CONFIG_DRM_AMD_DC_HDCP
	/* Propagate content-protection changes to the HDCP workqueue. */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

		new_crtc_state = NULL;

		if (acrtc)
			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		/* Stream went away while protection was enabled: reset HDCP. */
		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
			continue;
		}

		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
			hdcp_update_display(
				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
				new_con_state->hdcp_content_type,
				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? true
													 : false);
	}
#endif

	/* Handle connector state changes */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
		struct dc_surface_update dummy_updates[MAX_SURFACES];
		struct dc_stream_update stream_update;
		struct dc_info_packet hdr_packet;
		struct dc_stream_status *status = NULL;
		bool abm_changed, hdr_changed, scaling_changed;

		memset(&dummy_updates, 0, sizeof(dummy_updates));
		memset(&stream_update, 0, sizeof(stream_update));

		if (acrtc) {
			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
		}

		/* Skip any modesets/resets */
		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
			continue;

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		scaling_changed = is_scaling_state_different(dm_new_con_state,
							     dm_old_con_state);

		abm_changed = dm_new_crtc_state->abm_level !=
			      dm_old_crtc_state->abm_level;

		hdr_changed =
			is_hdr_metadata_different(old_con_state, new_con_state);

		if (!scaling_changed && !abm_changed && !hdr_changed)
			continue;

		stream_update.stream = dm_new_crtc_state->stream;
		if (scaling_changed) {
			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
					dm_new_con_state, dm_new_crtc_state->stream);

			stream_update.src = dm_new_crtc_state->stream->src;
			stream_update.dst = dm_new_crtc_state->stream->dst;
		}

		if (abm_changed) {
			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;

			stream_update.abm_level = &dm_new_crtc_state->abm_level;
		}

		if (hdr_changed) {
			fill_hdr_info_packet(new_con_state, &hdr_packet);
			stream_update.hdr_static_metadata = &hdr_packet;
		}

		status = dc_stream_get_status(dm_new_crtc_state->stream);
		WARN_ON(!status);
		WARN_ON(!status->plane_count);

		/*
		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
		 * Here we create an empty update on each plane.
		 * To fix this, DC should permit updating only stream properties.
		 */
		for (j = 0; j < status->plane_count; j++)
			dummy_updates[j].surface = status->plane_states[0];


		mutex_lock(&dm->dc_lock);
		dc_commit_updates_for_stream(dm->dc,
						     dummy_updates,
						     status->plane_count,
						     dm_new_crtc_state->stream,
						     &stream_update,
						     dc_state);
		mutex_unlock(&dm->dc_lock);
	}

	/* Count number of newly disabled CRTCs for dropping PM refs later. */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
				      new_crtc_state, i) {
		if (old_crtc_state->active && !new_crtc_state->active)
			crtc_disable_count++;

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		/* Update freesync active state. */
		pre_update_freesync_state_on_stream(dm, dm_new_crtc_state);

		/* Handle vrr on->off / off->on transitions */
		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
						dm_new_crtc_state);
	}

	/*
	 * Enable interrupts for CRTCs that are newly enabled or went through
	 * a modeset. It was intentionally deferred until after the front end
	 * state was modified to wait until the OTG was on and so the IRQ
	 * handlers didn't access stale or invalid state.
	 */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

		if (new_crtc_state->active &&
		    (!old_crtc_state->active ||
		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
			manage_dm_interrupts(adev, acrtc, true);
#ifdef CONFIG_DEBUG_FS
			/*
			 * Frontend may have changed so reapply the CRC capture
			 * settings for the stream.
			 */
			dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

			if (amdgpu_dm_is_valid_crc_source(dm_new_crtc_state->crc_src)) {
				amdgpu_dm_crtc_configure_crc_source(
					crtc, dm_new_crtc_state,
					dm_new_crtc_state->crc_src);
			}
#endif
		}
	}

	/* An async flip anywhere in the state skips the vblank wait below. */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
		if (new_crtc_state->async_flip)
			wait_for_vblank = false;

	/* update planes when needed per crtc*/
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (dm_new_crtc_state->stream)
			amdgpu_dm_commit_planes(state, dc_state, dev,
						dm, crtc, wait_for_vblank);
	}

	/* Update audio instances for each connector. */
	amdgpu_dm_commit_audio(dev, state);

	/*
	 * send vblank event on all events not handled in flip and
	 * mark consumed event for drm_atomic_helper_commit_hw_done
	 */
	spin_lock_irqsave(&adev->ddev->event_lock, flags);
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {

		if (new_crtc_state->event)
			drm_send_event_locked(dev, &new_crtc_state->event->base);

		new_crtc_state->event = NULL;
	}
	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);

	/* Signal HW programming completion */
	drm_atomic_helper_commit_hw_done(state);

	if (wait_for_vblank)
		drm_atomic_helper_wait_for_flip_done(dev, state);

	drm_atomic_helper_cleanup_planes(dev, state);

	/*
	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
	 * so we can put the GPU into runtime suspend if we're not driving any
	 * displays anymore
	 */
	for (i = 0; i < crtc_disable_count; i++)
		pm_runtime_put_autosuspend(dev->dev);
	pm_runtime_mark_last_busy(dev->dev);

	if (dc_state_temp)
		dc_release_state(dc_state_temp);
}
7786 static int dm_force_atomic_commit(struct drm_connector
*connector
)
7789 struct drm_device
*ddev
= connector
->dev
;
7790 struct drm_atomic_state
*state
= drm_atomic_state_alloc(ddev
);
7791 struct amdgpu_crtc
*disconnected_acrtc
= to_amdgpu_crtc(connector
->encoder
->crtc
);
7792 struct drm_plane
*plane
= disconnected_acrtc
->base
.primary
;
7793 struct drm_connector_state
*conn_state
;
7794 struct drm_crtc_state
*crtc_state
;
7795 struct drm_plane_state
*plane_state
;
7800 state
->acquire_ctx
= ddev
->mode_config
.acquire_ctx
;
7802 /* Construct an atomic state to restore previous display setting */
7805 * Attach connectors to drm_atomic_state
7807 conn_state
= drm_atomic_get_connector_state(state
, connector
);
7809 ret
= PTR_ERR_OR_ZERO(conn_state
);
7813 /* Attach crtc to drm_atomic_state*/
7814 crtc_state
= drm_atomic_get_crtc_state(state
, &disconnected_acrtc
->base
);
7816 ret
= PTR_ERR_OR_ZERO(crtc_state
);
7820 /* force a restore */
7821 crtc_state
->mode_changed
= true;
7823 /* Attach plane to drm_atomic_state */
7824 plane_state
= drm_atomic_get_plane_state(state
, plane
);
7826 ret
= PTR_ERR_OR_ZERO(plane_state
);
7831 /* Call commit internally with the state we just constructed */
7832 ret
= drm_atomic_commit(state
);
7837 DRM_ERROR("Restoring old state failed with %i\n", ret
);
7838 drm_atomic_state_put(state
);
/*
 * This function handles all cases when set mode does not come upon hotplug.
 * This includes when a display is unplugged then plugged back into the
 * same port and when running without usermode desktop manager supprot
 */
void dm_restore_drm_connector_state(struct drm_device *dev,
				    struct drm_connector *connector)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct amdgpu_crtc *disconnected_acrtc;
	struct dm_crtc_state *acrtc_state;

	/* Nothing to restore without a sink, state and encoder. */
	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
		return;

	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
	if (!disconnected_acrtc)
		return;

	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
	if (!acrtc_state->stream)
		return;

	/*
	 * If the previous sink is not released and different from the current,
	 * we deduce we are in a state where we can not rely on usermode call
	 * to turn on the display, so we do it here
	 */
	if (acrtc_state->stream->sink != aconnector->dc_sink)
		dm_force_atomic_commit(&aconnector->base);
}
7876 * Grabs all modesetting locks to serialize against any blocking commits,
7877 * Waits for completion of all non blocking commits.
7879 static int do_aquire_global_lock(struct drm_device
*dev
,
7880 struct drm_atomic_state
*state
)
7882 struct drm_crtc
*crtc
;
7883 struct drm_crtc_commit
*commit
;
7887 * Adding all modeset locks to aquire_ctx will
7888 * ensure that when the framework release it the
7889 * extra locks we are locking here will get released to
7891 ret
= drm_modeset_lock_all_ctx(dev
, state
->acquire_ctx
);
7895 list_for_each_entry(crtc
, &dev
->mode_config
.crtc_list
, head
) {
7896 spin_lock(&crtc
->commit_lock
);
7897 commit
= list_first_entry_or_null(&crtc
->commit_list
,
7898 struct drm_crtc_commit
, commit_entry
);
7900 drm_crtc_commit_get(commit
);
7901 spin_unlock(&crtc
->commit_lock
);
7907 * Make sure all pending HW programming completed and
7910 ret
= wait_for_completion_interruptible_timeout(&commit
->hw_done
, 10*HZ
);
7913 ret
= wait_for_completion_interruptible_timeout(
7914 &commit
->flip_done
, 10*HZ
);
7917 DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
7918 "timed out\n", crtc
->base
.id
, crtc
->name
);
7920 drm_crtc_commit_put(commit
);
7923 return ret
< 0 ? ret
: 0;
/*
 * get_freesync_config_for_crtc() - derive the FreeSync (VRR) configuration
 * for @new_crtc_state from the connector's advertised refresh range.
 *
 * VRR is supported when the connector reports freesync capability and the
 * mode's nominal refresh rate falls within [min_vfreq, max_vfreq]. The
 * resulting config is stored on the CRTC state; when unsupported it is left
 * zeroed (config = {0}).
 */
static void get_freesync_config_for_crtc(
	struct dm_crtc_state *new_crtc_state,
	struct dm_connector_state *new_con_state)
{
	struct mod_freesync_config config = {0};
	struct amdgpu_dm_connector *aconnector =
			to_amdgpu_dm_connector(new_con_state->base.connector);
	struct drm_display_mode *mode = &new_crtc_state->base.mode;
	int vrefresh = drm_mode_vrefresh(mode);

	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
					vrefresh >= aconnector->min_vfreq &&
					vrefresh <= aconnector->max_vfreq;

	if (new_crtc_state->vrr_supported) {
		new_crtc_state->stream->ignore_msa_timing_param = true;
		config.state = new_crtc_state->base.vrr_enabled ?
				VRR_STATE_ACTIVE_VARIABLE :
				VRR_STATE_INACTIVE;
		/* Connector range is in Hz; freesync module wants uHz. */
		config.min_refresh_in_uhz =
				aconnector->min_vfreq * 1000000;
		config.max_refresh_in_uhz =
				aconnector->max_vfreq * 1000000;
		config.vsif_supported = true;
		config.btr = true;
	}

	new_crtc_state->freesync_config = config;
}
/*
 * reset_freesync_config_for_crtc() - clear all VRR bookkeeping on a CRTC
 * state: mark VRR unsupported and zero the cached parameters/infopacket.
 */
static void reset_freesync_config_for_crtc(
	struct dm_crtc_state *new_crtc_state)
{
	new_crtc_state->vrr_supported = false;

	memset(&new_crtc_state->vrr_params, 0,
	       sizeof(new_crtc_state->vrr_params));
	memset(&new_crtc_state->vrr_infopacket, 0,
	       sizeof(new_crtc_state->vrr_infopacket));
}
7967 static int dm_update_crtc_state(struct amdgpu_display_manager
*dm
,
7968 struct drm_atomic_state
*state
,
7969 struct drm_crtc
*crtc
,
7970 struct drm_crtc_state
*old_crtc_state
,
7971 struct drm_crtc_state
*new_crtc_state
,
7973 bool *lock_and_validation_needed
)
7975 struct dm_atomic_state
*dm_state
= NULL
;
7976 struct dm_crtc_state
*dm_old_crtc_state
, *dm_new_crtc_state
;
7977 struct dc_stream_state
*new_stream
;
7981 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
7982 * update changed items
7984 struct amdgpu_crtc
*acrtc
= NULL
;
7985 struct amdgpu_dm_connector
*aconnector
= NULL
;
7986 struct drm_connector_state
*drm_new_conn_state
= NULL
, *drm_old_conn_state
= NULL
;
7987 struct dm_connector_state
*dm_new_conn_state
= NULL
, *dm_old_conn_state
= NULL
;
7991 dm_old_crtc_state
= to_dm_crtc_state(old_crtc_state
);
7992 dm_new_crtc_state
= to_dm_crtc_state(new_crtc_state
);
7993 acrtc
= to_amdgpu_crtc(crtc
);
7994 aconnector
= amdgpu_dm_find_first_crtc_matching_connector(state
, crtc
);
7996 /* TODO This hack should go away */
7997 if (aconnector
&& enable
) {
7998 /* Make sure fake sink is created in plug-in scenario */
7999 drm_new_conn_state
= drm_atomic_get_new_connector_state(state
,
8001 drm_old_conn_state
= drm_atomic_get_old_connector_state(state
,
8004 if (IS_ERR(drm_new_conn_state
)) {
8005 ret
= PTR_ERR_OR_ZERO(drm_new_conn_state
);
8009 dm_new_conn_state
= to_dm_connector_state(drm_new_conn_state
);
8010 dm_old_conn_state
= to_dm_connector_state(drm_old_conn_state
);
8012 if (!drm_atomic_crtc_needs_modeset(new_crtc_state
))
8015 new_stream
= create_validate_stream_for_sink(aconnector
,
8016 &new_crtc_state
->mode
,
8018 dm_old_crtc_state
->stream
);
8021 * we can have no stream on ACTION_SET if a display
8022 * was disconnected during S3, in this case it is not an
8023 * error, the OS will be updated after detection, and
8024 * will do the right thing on next atomic commit
8028 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8029 __func__
, acrtc
->base
.base
.id
);
8034 dm_new_crtc_state
->abm_level
= dm_new_conn_state
->abm_level
;
8036 ret
= fill_hdr_info_packet(drm_new_conn_state
,
8037 &new_stream
->hdr_static_metadata
);
8042 * If we already removed the old stream from the context
8043 * (and set the new stream to NULL) then we can't reuse
8044 * the old stream even if the stream and scaling are unchanged.
8045 * We'll hit the BUG_ON and black screen.
8047 * TODO: Refactor this function to allow this check to work
8048 * in all conditions.
8050 if (dm_new_crtc_state
->stream
&&
8051 dc_is_stream_unchanged(new_stream
, dm_old_crtc_state
->stream
) &&
8052 dc_is_stream_scaling_unchanged(new_stream
, dm_old_crtc_state
->stream
)) {
8053 new_crtc_state
->mode_changed
= false;
8054 DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
8055 new_crtc_state
->mode_changed
);
8059 /* mode_changed flag may get updated above, need to check again */
8060 if (!drm_atomic_crtc_needs_modeset(new_crtc_state
))
8064 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8065 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
8066 "connectors_changed:%d\n",
8068 new_crtc_state
->enable
,
8069 new_crtc_state
->active
,
8070 new_crtc_state
->planes_changed
,
8071 new_crtc_state
->mode_changed
,
8072 new_crtc_state
->active_changed
,
8073 new_crtc_state
->connectors_changed
);
8075 /* Remove stream for any changed/disabled CRTC */
8078 if (!dm_old_crtc_state
->stream
)
8081 ret
= dm_atomic_get_state(state
, &dm_state
);
8085 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
8088 /* i.e. reset mode */
8089 if (dc_remove_stream_from_ctx(
8092 dm_old_crtc_state
->stream
) != DC_OK
) {
8097 dc_stream_release(dm_old_crtc_state
->stream
);
8098 dm_new_crtc_state
->stream
= NULL
;
8100 reset_freesync_config_for_crtc(dm_new_crtc_state
);
8102 *lock_and_validation_needed
= true;
8104 } else {/* Add stream for any updated/enabled CRTC */
8106 * Quick fix to prevent NULL pointer on new_stream when
8107 * added MST connectors not found in existing crtc_state in the chained mode
8108 * TODO: need to dig out the root cause of that
8110 if (!aconnector
|| (!aconnector
->dc_sink
&& aconnector
->mst_port
))
8113 if (modereset_required(new_crtc_state
))
8116 if (modeset_required(new_crtc_state
, new_stream
,
8117 dm_old_crtc_state
->stream
)) {
8119 WARN_ON(dm_new_crtc_state
->stream
);
8121 ret
= dm_atomic_get_state(state
, &dm_state
);
8125 dm_new_crtc_state
->stream
= new_stream
;
8127 dc_stream_retain(new_stream
);
8129 DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
8132 if (dc_add_stream_to_ctx(
8135 dm_new_crtc_state
->stream
) != DC_OK
) {
8140 *lock_and_validation_needed
= true;
8145 /* Release extra reference */
8147 dc_stream_release(new_stream
);
8150 * We want to do dc stream updates that do not require a
8151 * full modeset below.
8153 if (!(enable
&& aconnector
&& new_crtc_state
->active
))
8156 * Given above conditions, the dc state cannot be NULL because:
8157 * 1. We're in the process of enabling CRTCs (just been added
8158 * to the dc context, or already is on the context)
8159 * 2. Has a valid connector attached, and
8160 * 3. Is currently active and enabled.
8161 * => The dc stream state currently exists.
8163 BUG_ON(dm_new_crtc_state
->stream
== NULL
);
8165 /* Scaling or underscan settings */
8166 if (is_scaling_state_different(dm_old_conn_state
, dm_new_conn_state
))
8167 update_stream_scaling_settings(
8168 &new_crtc_state
->mode
, dm_new_conn_state
, dm_new_crtc_state
->stream
);
8171 dm_new_crtc_state
->abm_level
= dm_new_conn_state
->abm_level
;
8174 * Color management settings. We also update color properties
8175 * when a modeset is needed, to ensure it gets reprogrammed.
8177 if (dm_new_crtc_state
->base
.color_mgmt_changed
||
8178 drm_atomic_crtc_needs_modeset(new_crtc_state
)) {
8179 ret
= amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state
);
8184 /* Update Freesync settings. */
8185 get_freesync_config_for_crtc(dm_new_crtc_state
,
8192 dc_stream_release(new_stream
);
8196 static bool should_reset_plane(struct drm_atomic_state
*state
,
8197 struct drm_plane
*plane
,
8198 struct drm_plane_state
*old_plane_state
,
8199 struct drm_plane_state
*new_plane_state
)
8201 struct drm_plane
*other
;
8202 struct drm_plane_state
*old_other_state
, *new_other_state
;
8203 struct drm_crtc_state
*new_crtc_state
;
8207 * TODO: Remove this hack once the checks below are sufficient
8208 * enough to determine when we need to reset all the planes on
8211 if (state
->allow_modeset
)
8214 /* Exit early if we know that we're adding or removing the plane. */
8215 if (old_plane_state
->crtc
!= new_plane_state
->crtc
)
8218 /* old crtc == new_crtc == NULL, plane not in context. */
8219 if (!new_plane_state
->crtc
)
8223 drm_atomic_get_new_crtc_state(state
, new_plane_state
->crtc
);
8225 if (!new_crtc_state
)
8228 /* CRTC Degamma changes currently require us to recreate planes. */
8229 if (new_crtc_state
->color_mgmt_changed
)
8232 if (drm_atomic_crtc_needs_modeset(new_crtc_state
))
8236 * If there are any new primary or overlay planes being added or
8237 * removed then the z-order can potentially change. To ensure
8238 * correct z-order and pipe acquisition the current DC architecture
8239 * requires us to remove and recreate all existing planes.
8241 * TODO: Come up with a more elegant solution for this.
8243 for_each_oldnew_plane_in_state(state
, other
, old_other_state
, new_other_state
, i
) {
8244 if (other
->type
== DRM_PLANE_TYPE_CURSOR
)
8247 if (old_other_state
->crtc
!= new_plane_state
->crtc
&&
8248 new_other_state
->crtc
!= new_plane_state
->crtc
)
8251 if (old_other_state
->crtc
!= new_other_state
->crtc
)
8254 /* TODO: Remove this once we can handle fast format changes. */
8255 if (old_other_state
->fb
&& new_other_state
->fb
&&
8256 old_other_state
->fb
->format
!= new_other_state
->fb
->format
)
8263 static int dm_update_plane_state(struct dc
*dc
,
8264 struct drm_atomic_state
*state
,
8265 struct drm_plane
*plane
,
8266 struct drm_plane_state
*old_plane_state
,
8267 struct drm_plane_state
*new_plane_state
,
8269 bool *lock_and_validation_needed
)
8272 struct dm_atomic_state
*dm_state
= NULL
;
8273 struct drm_crtc
*new_plane_crtc
, *old_plane_crtc
;
8274 struct drm_crtc_state
*old_crtc_state
, *new_crtc_state
;
8275 struct dm_crtc_state
*dm_new_crtc_state
, *dm_old_crtc_state
;
8276 struct dm_plane_state
*dm_new_plane_state
, *dm_old_plane_state
;
8277 struct amdgpu_crtc
*new_acrtc
;
8282 new_plane_crtc
= new_plane_state
->crtc
;
8283 old_plane_crtc
= old_plane_state
->crtc
;
8284 dm_new_plane_state
= to_dm_plane_state(new_plane_state
);
8285 dm_old_plane_state
= to_dm_plane_state(old_plane_state
);
8287 /*TODO Implement better atomic check for cursor plane */
8288 if (plane
->type
== DRM_PLANE_TYPE_CURSOR
) {
8289 if (!enable
|| !new_plane_crtc
||
8290 drm_atomic_plane_disabling(plane
->state
, new_plane_state
))
8293 new_acrtc
= to_amdgpu_crtc(new_plane_crtc
);
8295 if ((new_plane_state
->crtc_w
> new_acrtc
->max_cursor_width
) ||
8296 (new_plane_state
->crtc_h
> new_acrtc
->max_cursor_height
)) {
8297 DRM_DEBUG_ATOMIC("Bad cursor size %d x %d\n",
8298 new_plane_state
->crtc_w
, new_plane_state
->crtc_h
);
8305 needs_reset
= should_reset_plane(state
, plane
, old_plane_state
,
8308 /* Remove any changed/removed planes */
8313 if (!old_plane_crtc
)
8316 old_crtc_state
= drm_atomic_get_old_crtc_state(
8317 state
, old_plane_crtc
);
8318 dm_old_crtc_state
= to_dm_crtc_state(old_crtc_state
);
8320 if (!dm_old_crtc_state
->stream
)
8323 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
8324 plane
->base
.id
, old_plane_crtc
->base
.id
);
8326 ret
= dm_atomic_get_state(state
, &dm_state
);
8330 if (!dc_remove_plane_from_context(
8332 dm_old_crtc_state
->stream
,
8333 dm_old_plane_state
->dc_state
,
8334 dm_state
->context
)) {
8341 dc_plane_state_release(dm_old_plane_state
->dc_state
);
8342 dm_new_plane_state
->dc_state
= NULL
;
8344 *lock_and_validation_needed
= true;
8346 } else { /* Add new planes */
8347 struct dc_plane_state
*dc_new_plane_state
;
8349 if (drm_atomic_plane_disabling(plane
->state
, new_plane_state
))
8352 if (!new_plane_crtc
)
8355 new_crtc_state
= drm_atomic_get_new_crtc_state(state
, new_plane_crtc
);
8356 dm_new_crtc_state
= to_dm_crtc_state(new_crtc_state
);
8358 if (!dm_new_crtc_state
->stream
)
8364 ret
= dm_plane_helper_check_state(new_plane_state
, new_crtc_state
);
8368 WARN_ON(dm_new_plane_state
->dc_state
);
8370 dc_new_plane_state
= dc_create_plane_state(dc
);
8371 if (!dc_new_plane_state
)
8374 DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
8375 plane
->base
.id
, new_plane_crtc
->base
.id
);
8377 ret
= fill_dc_plane_attributes(
8378 new_plane_crtc
->dev
->dev_private
,
8383 dc_plane_state_release(dc_new_plane_state
);
8387 ret
= dm_atomic_get_state(state
, &dm_state
);
8389 dc_plane_state_release(dc_new_plane_state
);
8394 * Any atomic check errors that occur after this will
8395 * not need a release. The plane state will be attached
8396 * to the stream, and therefore part of the atomic
8397 * state. It'll be released when the atomic state is
8400 if (!dc_add_plane_to_context(
8402 dm_new_crtc_state
->stream
,
8404 dm_state
->context
)) {
8406 dc_plane_state_release(dc_new_plane_state
);
8410 dm_new_plane_state
->dc_state
= dc_new_plane_state
;
8412 /* Tell DC to do a full surface update every time there
8413 * is a plane change. Inefficient, but works for now.
8415 dm_new_plane_state
->dc_state
->update_flags
.bits
.full_update
= 1;
8417 *lock_and_validation_needed
= true;
8425 dm_determine_update_type_for_commit(struct amdgpu_display_manager
*dm
,
8426 struct drm_atomic_state
*state
,
8427 enum surface_update_type
*out_type
)
8429 struct dc
*dc
= dm
->dc
;
8430 struct dm_atomic_state
*dm_state
= NULL
, *old_dm_state
= NULL
;
8431 int i
, j
, num_plane
, ret
= 0;
8432 struct drm_plane_state
*old_plane_state
, *new_plane_state
;
8433 struct dm_plane_state
*new_dm_plane_state
, *old_dm_plane_state
;
8434 struct drm_crtc
*new_plane_crtc
;
8435 struct drm_plane
*plane
;
8437 struct drm_crtc
*crtc
;
8438 struct drm_crtc_state
*new_crtc_state
, *old_crtc_state
;
8439 struct dm_crtc_state
*new_dm_crtc_state
, *old_dm_crtc_state
;
8440 struct dc_stream_status
*status
= NULL
;
8441 enum surface_update_type update_type
= UPDATE_TYPE_FAST
;
8442 struct surface_info_bundle
{
8443 struct dc_surface_update surface_updates
[MAX_SURFACES
];
8444 struct dc_plane_info plane_infos
[MAX_SURFACES
];
8445 struct dc_scaling_info scaling_infos
[MAX_SURFACES
];
8446 struct dc_flip_addrs flip_addrs
[MAX_SURFACES
];
8447 struct dc_stream_update stream_update
;
8450 bundle
= kzalloc(sizeof(*bundle
), GFP_KERNEL
);
8453 DRM_ERROR("Failed to allocate update bundle\n");
8454 /* Set type to FULL to avoid crashing in DC*/
8455 update_type
= UPDATE_TYPE_FULL
;
8459 for_each_oldnew_crtc_in_state(state
, crtc
, old_crtc_state
, new_crtc_state
, i
) {
8461 memset(bundle
, 0, sizeof(struct surface_info_bundle
));
8463 new_dm_crtc_state
= to_dm_crtc_state(new_crtc_state
);
8464 old_dm_crtc_state
= to_dm_crtc_state(old_crtc_state
);
8467 if (new_dm_crtc_state
->stream
!= old_dm_crtc_state
->stream
) {
8468 update_type
= UPDATE_TYPE_FULL
;
8472 if (!new_dm_crtc_state
->stream
)
8475 for_each_oldnew_plane_in_state(state
, plane
, old_plane_state
, new_plane_state
, j
) {
8476 const struct amdgpu_framebuffer
*amdgpu_fb
=
8477 to_amdgpu_framebuffer(new_plane_state
->fb
);
8478 struct dc_plane_info
*plane_info
= &bundle
->plane_infos
[num_plane
];
8479 struct dc_flip_addrs
*flip_addr
= &bundle
->flip_addrs
[num_plane
];
8480 struct dc_scaling_info
*scaling_info
= &bundle
->scaling_infos
[num_plane
];
8481 uint64_t tiling_flags
;
8482 bool tmz_surface
= false;
8484 new_plane_crtc
= new_plane_state
->crtc
;
8485 new_dm_plane_state
= to_dm_plane_state(new_plane_state
);
8486 old_dm_plane_state
= to_dm_plane_state(old_plane_state
);
8488 if (plane
->type
== DRM_PLANE_TYPE_CURSOR
)
8491 if (new_dm_plane_state
->dc_state
!= old_dm_plane_state
->dc_state
) {
8492 update_type
= UPDATE_TYPE_FULL
;
8496 if (crtc
!= new_plane_crtc
)
8499 bundle
->surface_updates
[num_plane
].surface
=
8500 new_dm_plane_state
->dc_state
;
8502 if (new_crtc_state
->mode_changed
) {
8503 bundle
->stream_update
.dst
= new_dm_crtc_state
->stream
->dst
;
8504 bundle
->stream_update
.src
= new_dm_crtc_state
->stream
->src
;
8507 if (new_crtc_state
->color_mgmt_changed
) {
8508 bundle
->surface_updates
[num_plane
].gamma
=
8509 new_dm_plane_state
->dc_state
->gamma_correction
;
8510 bundle
->surface_updates
[num_plane
].in_transfer_func
=
8511 new_dm_plane_state
->dc_state
->in_transfer_func
;
8512 bundle
->surface_updates
[num_plane
].gamut_remap_matrix
=
8513 &new_dm_plane_state
->dc_state
->gamut_remap_matrix
;
8514 bundle
->stream_update
.gamut_remap
=
8515 &new_dm_crtc_state
->stream
->gamut_remap_matrix
;
8516 bundle
->stream_update
.output_csc_transform
=
8517 &new_dm_crtc_state
->stream
->csc_color_matrix
;
8518 bundle
->stream_update
.out_transfer_func
=
8519 new_dm_crtc_state
->stream
->out_transfer_func
;
8522 ret
= fill_dc_scaling_info(new_plane_state
,
8527 bundle
->surface_updates
[num_plane
].scaling_info
= scaling_info
;
8530 ret
= get_fb_info(amdgpu_fb
, &tiling_flags
, &tmz_surface
);
8534 ret
= fill_dc_plane_info_and_addr(
8535 dm
->adev
, new_plane_state
, tiling_flags
,
8537 &flip_addr
->address
, tmz_surface
,
8542 bundle
->surface_updates
[num_plane
].plane_info
= plane_info
;
8543 bundle
->surface_updates
[num_plane
].flip_addr
= flip_addr
;
8552 ret
= dm_atomic_get_state(state
, &dm_state
);
8556 old_dm_state
= dm_atomic_get_old_state(state
);
8557 if (!old_dm_state
) {
8562 status
= dc_stream_get_status_from_state(old_dm_state
->context
,
8563 new_dm_crtc_state
->stream
);
8564 bundle
->stream_update
.stream
= new_dm_crtc_state
->stream
;
8566 * TODO: DC modifies the surface during this call so we need
8567 * to lock here - find a way to do this without locking.
8569 mutex_lock(&dm
->dc_lock
);
8570 update_type
= dc_check_update_surfaces_for_stream(
8571 dc
, bundle
->surface_updates
, num_plane
,
8572 &bundle
->stream_update
, status
);
8573 mutex_unlock(&dm
->dc_lock
);
8575 if (update_type
> UPDATE_TYPE_MED
) {
8576 update_type
= UPDATE_TYPE_FULL
;
8584 *out_type
= update_type
;
8587 #if defined(CONFIG_DRM_AMD_DC_DCN)
8588 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state
*state
, struct drm_crtc
*crtc
)
8590 struct drm_connector
*connector
;
8591 struct drm_connector_state
*conn_state
;
8592 struct amdgpu_dm_connector
*aconnector
= NULL
;
8594 for_each_new_connector_in_state(state
, connector
, conn_state
, i
) {
8595 if (conn_state
->crtc
!= crtc
)
8598 aconnector
= to_amdgpu_dm_connector(connector
);
8599 if (!aconnector
->port
|| !aconnector
->mst_port
)
8608 return drm_dp_mst_add_affected_dsc_crtcs(state
, &aconnector
->mst_port
->mst_mgr
);
8613 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
8614 * @dev: The DRM device
8615 * @state: The atomic state to commit
8617 * Validate that the given atomic state is programmable by DC into hardware.
8618 * This involves constructing a &struct dc_state reflecting the new hardware
8619 * state we wish to commit, then querying DC to see if it is programmable. It's
8620 * important not to modify the existing DC state. Otherwise, atomic_check
8621 * may unexpectedly commit hardware changes.
8623 * When validating the DC state, it's important that the right locks are
8624 * acquired. For full updates case which removes/adds/updates streams on one
8625 * CRTC while flipping on another CRTC, acquiring global lock will guarantee
8626 * that any such full update commit will wait for completion of any outstanding
8627 * flip using DRMs synchronization events. See
8628 * dm_determine_update_type_for_commit()
8630 * Note that DM adds the affected connectors for all CRTCs in state, when that
8631 * might not seem necessary. This is because DC stream creation requires the
8632 * DC sink, which is tied to the DRM connector state. Cleaning this up should
8633 * be possible but non-trivial - a possible TODO item.
8635 * Return: -Error code if validation failed.
8637 static int amdgpu_dm_atomic_check(struct drm_device
*dev
,
8638 struct drm_atomic_state
*state
)
8640 struct amdgpu_device
*adev
= dev
->dev_private
;
8641 struct dm_atomic_state
*dm_state
= NULL
;
8642 struct dc
*dc
= adev
->dm
.dc
;
8643 struct drm_connector
*connector
;
8644 struct drm_connector_state
*old_con_state
, *new_con_state
;
8645 struct drm_crtc
*crtc
;
8646 struct drm_crtc_state
*old_crtc_state
, *new_crtc_state
;
8647 struct drm_plane
*plane
;
8648 struct drm_plane_state
*old_plane_state
, *new_plane_state
;
8649 enum surface_update_type update_type
= UPDATE_TYPE_FAST
;
8650 enum surface_update_type overall_update_type
= UPDATE_TYPE_FAST
;
8651 enum dc_status status
;
8655 * This bool will be set for true for any modeset/reset
8656 * or plane update which implies non fast surface update.
8658 bool lock_and_validation_needed
= false;
8660 ret
= drm_atomic_helper_check_modeset(dev
, state
);
8664 /* Check connector changes */
8665 for_each_oldnew_connector_in_state(state
, connector
, old_con_state
, new_con_state
, i
) {
8666 struct dm_connector_state
*dm_old_con_state
= to_dm_connector_state(old_con_state
);
8667 struct dm_connector_state
*dm_new_con_state
= to_dm_connector_state(new_con_state
);
8669 /* Skip connectors that are disabled or part of modeset already. */
8670 if (!old_con_state
->crtc
&& !new_con_state
->crtc
)
8673 if (!new_con_state
->crtc
)
8676 new_crtc_state
= drm_atomic_get_crtc_state(state
, new_con_state
->crtc
);
8677 if (IS_ERR(new_crtc_state
)) {
8678 ret
= PTR_ERR(new_crtc_state
);
8682 if (dm_old_con_state
->abm_level
!=
8683 dm_new_con_state
->abm_level
)
8684 new_crtc_state
->connectors_changed
= true;
8687 #if defined(CONFIG_DRM_AMD_DC_DCN)
8688 if (adev
->asic_type
>= CHIP_NAVI10
) {
8689 for_each_oldnew_crtc_in_state(state
, crtc
, old_crtc_state
, new_crtc_state
, i
) {
8690 if (drm_atomic_crtc_needs_modeset(new_crtc_state
)) {
8691 ret
= add_affected_mst_dsc_crtcs(state
, crtc
);
8698 for_each_oldnew_crtc_in_state(state
, crtc
, old_crtc_state
, new_crtc_state
, i
) {
8699 if (!drm_atomic_crtc_needs_modeset(new_crtc_state
) &&
8700 !new_crtc_state
->color_mgmt_changed
&&
8701 old_crtc_state
->vrr_enabled
== new_crtc_state
->vrr_enabled
)
8704 if (!new_crtc_state
->enable
)
8707 ret
= drm_atomic_add_affected_connectors(state
, crtc
);
8711 ret
= drm_atomic_add_affected_planes(state
, crtc
);
8717 * Add all primary and overlay planes on the CRTC to the state
8718 * whenever a plane is enabled to maintain correct z-ordering
8719 * and to enable fast surface updates.
8721 drm_for_each_crtc(crtc
, dev
) {
8722 bool modified
= false;
8724 for_each_oldnew_plane_in_state(state
, plane
, old_plane_state
, new_plane_state
, i
) {
8725 if (plane
->type
== DRM_PLANE_TYPE_CURSOR
)
8728 if (new_plane_state
->crtc
== crtc
||
8729 old_plane_state
->crtc
== crtc
) {
8738 drm_for_each_plane_mask(plane
, state
->dev
, crtc
->state
->plane_mask
) {
8739 if (plane
->type
== DRM_PLANE_TYPE_CURSOR
)
8743 drm_atomic_get_plane_state(state
, plane
);
8745 if (IS_ERR(new_plane_state
)) {
8746 ret
= PTR_ERR(new_plane_state
);
8752 /* Remove exiting planes if they are modified */
8753 for_each_oldnew_plane_in_state_reverse(state
, plane
, old_plane_state
, new_plane_state
, i
) {
8754 ret
= dm_update_plane_state(dc
, state
, plane
,
8758 &lock_and_validation_needed
);
8763 /* Disable all crtcs which require disable */
8764 for_each_oldnew_crtc_in_state(state
, crtc
, old_crtc_state
, new_crtc_state
, i
) {
8765 ret
= dm_update_crtc_state(&adev
->dm
, state
, crtc
,
8769 &lock_and_validation_needed
);
8774 /* Enable all crtcs which require enable */
8775 for_each_oldnew_crtc_in_state(state
, crtc
, old_crtc_state
, new_crtc_state
, i
) {
8776 ret
= dm_update_crtc_state(&adev
->dm
, state
, crtc
,
8780 &lock_and_validation_needed
);
8785 /* Add new/modified planes */
8786 for_each_oldnew_plane_in_state_reverse(state
, plane
, old_plane_state
, new_plane_state
, i
) {
8787 ret
= dm_update_plane_state(dc
, state
, plane
,
8791 &lock_and_validation_needed
);
8796 /* Run this here since we want to validate the streams we created */
8797 ret
= drm_atomic_helper_check_planes(dev
, state
);
8801 if (state
->legacy_cursor_update
) {
8803 * This is a fast cursor update coming from the plane update
8804 * helper, check if it can be done asynchronously for better
8807 state
->async_update
=
8808 !drm_atomic_helper_async_check(dev
, state
);
8811 * Skip the remaining global validation if this is an async
8812 * update. Cursor updates can be done without affecting
8813 * state or bandwidth calcs and this avoids the performance
8814 * penalty of locking the private state object and
8815 * allocating a new dc_state.
8817 if (state
->async_update
)
8821 /* Check scaling and underscan changes*/
8822 /* TODO Removed scaling changes validation due to inability to commit
8823 * new stream into context w\o causing full reset. Need to
8824 * decide how to handle.
8826 for_each_oldnew_connector_in_state(state
, connector
, old_con_state
, new_con_state
, i
) {
8827 struct dm_connector_state
*dm_old_con_state
= to_dm_connector_state(old_con_state
);
8828 struct dm_connector_state
*dm_new_con_state
= to_dm_connector_state(new_con_state
);
8829 struct amdgpu_crtc
*acrtc
= to_amdgpu_crtc(dm_new_con_state
->base
.crtc
);
8831 /* Skip any modesets/resets */
8832 if (!acrtc
|| drm_atomic_crtc_needs_modeset(
8833 drm_atomic_get_new_crtc_state(state
, &acrtc
->base
)))
8836 /* Skip any thing not scale or underscan changes */
8837 if (!is_scaling_state_different(dm_new_con_state
, dm_old_con_state
))
8840 overall_update_type
= UPDATE_TYPE_FULL
;
8841 lock_and_validation_needed
= true;
8844 ret
= dm_determine_update_type_for_commit(&adev
->dm
, state
, &update_type
);
8848 if (overall_update_type
< update_type
)
8849 overall_update_type
= update_type
;
8852 * lock_and_validation_needed was an old way to determine if we need to set
8853 * the global lock. Leaving it in to check if we broke any corner cases
8854 * lock_and_validation_needed true = UPDATE_TYPE_FULL or UPDATE_TYPE_MED
8855 * lock_and_validation_needed false = UPDATE_TYPE_FAST
8857 if (lock_and_validation_needed
&& overall_update_type
<= UPDATE_TYPE_FAST
)
8858 WARN(1, "Global lock should be Set, overall_update_type should be UPDATE_TYPE_MED or UPDATE_TYPE_FULL");
8860 if (overall_update_type
> UPDATE_TYPE_FAST
) {
8861 ret
= dm_atomic_get_state(state
, &dm_state
);
8865 ret
= do_aquire_global_lock(dev
, state
);
8869 #if defined(CONFIG_DRM_AMD_DC_DCN)
8870 if (!compute_mst_dsc_configs_for_state(state
, dm_state
->context
))
8873 ret
= dm_update_mst_vcpi_slots_for_dsc(state
, dm_state
->context
);
8879 * Perform validation of MST topology in the state:
8880 * We need to perform MST atomic check before calling
8881 * dc_validate_global_state(), or there is a chance
8882 * to get stuck in an infinite loop and hang eventually.
8884 ret
= drm_dp_mst_atomic_check(state
);
8887 status
= dc_validate_global_state(dc
, dm_state
->context
, false);
8888 if (status
!= DC_OK
) {
8889 DC_LOG_WARNING("DC global validation failure: %s (%d)",
8890 dc_status_to_str(status
), status
);
8896 * The commit is a fast update. Fast updates shouldn't change
8897 * the DC context, affect global validation, and can have their
8898 * commit work done in parallel with other commits not touching
8899 * the same resource. If we have a new DC context as part of
8900 * the DM atomic state from validation we need to free it and
8901 * retain the existing one instead.
8903 * Furthermore, since the DM atomic state only contains the DC
8904 * context and can safely be annulled, we can free the state
8905 * and clear the associated private object now to free
8906 * some memory and avoid a possible use-after-free later.
8909 for (i
= 0; i
< state
->num_private_objs
; i
++) {
8910 struct drm_private_obj
*obj
= state
->private_objs
[i
].ptr
;
8912 if (obj
->funcs
== adev
->dm
.atomic_obj
.funcs
) {
8913 int j
= state
->num_private_objs
-1;
8915 dm_atomic_destroy_state(obj
,
8916 state
->private_objs
[i
].state
);
8918 /* If i is not at the end of the array then the
8919 * last element needs to be moved to where i was
8920 * before the array can safely be truncated.
8923 state
->private_objs
[i
] =
8924 state
->private_objs
[j
];
8926 state
->private_objs
[j
].ptr
= NULL
;
8927 state
->private_objs
[j
].state
= NULL
;
8928 state
->private_objs
[j
].old_state
= NULL
;
8929 state
->private_objs
[j
].new_state
= NULL
;
8931 state
->num_private_objs
= j
;
8937 /* Store the overall update type for use later in atomic check. */
8938 for_each_new_crtc_in_state (state
, crtc
, new_crtc_state
, i
) {
8939 struct dm_crtc_state
*dm_new_crtc_state
=
8940 to_dm_crtc_state(new_crtc_state
);
8942 dm_new_crtc_state
->update_type
= (int)overall_update_type
;
8945 /* Must be success */
8950 if (ret
== -EDEADLK
)
8951 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
8952 else if (ret
== -EINTR
|| ret
== -EAGAIN
|| ret
== -ERESTARTSYS
)
8953 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
8955 DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret
);
8960 static bool is_dp_capable_without_timing_msa(struct dc
*dc
,
8961 struct amdgpu_dm_connector
*amdgpu_dm_connector
)
8964 bool capable
= false;
8966 if (amdgpu_dm_connector
->dc_link
&&
8967 dm_helpers_dp_read_dpcd(
8969 amdgpu_dm_connector
->dc_link
,
8970 DP_DOWN_STREAM_PORT_COUNT
,
8972 sizeof(dpcd_data
))) {
8973 capable
= (dpcd_data
& DP_MSA_TIMING_PAR_IGNORED
) ? true:false;
8978 void amdgpu_dm_update_freesync_caps(struct drm_connector
*connector
,
8982 bool edid_check_required
;
8983 struct detailed_timing
*timing
;
8984 struct detailed_non_pixel
*data
;
8985 struct detailed_data_monitor_range
*range
;
8986 struct amdgpu_dm_connector
*amdgpu_dm_connector
=
8987 to_amdgpu_dm_connector(connector
);
8988 struct dm_connector_state
*dm_con_state
= NULL
;
8990 struct drm_device
*dev
= connector
->dev
;
8991 struct amdgpu_device
*adev
= dev
->dev_private
;
8992 bool freesync_capable
= false;
8994 if (!connector
->state
) {
8995 DRM_ERROR("%s - Connector has no state", __func__
);
9000 dm_con_state
= to_dm_connector_state(connector
->state
);
9002 amdgpu_dm_connector
->min_vfreq
= 0;
9003 amdgpu_dm_connector
->max_vfreq
= 0;
9004 amdgpu_dm_connector
->pixel_clock_mhz
= 0;
9009 dm_con_state
= to_dm_connector_state(connector
->state
);
9011 edid_check_required
= false;
9012 if (!amdgpu_dm_connector
->dc_sink
) {
9013 DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
9016 if (!adev
->dm
.freesync_module
)
9019 * if edid non zero restrict freesync only for dp and edp
9022 if (amdgpu_dm_connector
->dc_sink
->sink_signal
== SIGNAL_TYPE_DISPLAY_PORT
9023 || amdgpu_dm_connector
->dc_sink
->sink_signal
== SIGNAL_TYPE_EDP
) {
9024 edid_check_required
= is_dp_capable_without_timing_msa(
9026 amdgpu_dm_connector
);
9029 if (edid_check_required
== true && (edid
->version
> 1 ||
9030 (edid
->version
== 1 && edid
->revision
> 1))) {
9031 for (i
= 0; i
< 4; i
++) {
9033 timing
= &edid
->detailed_timings
[i
];
9034 data
= &timing
->data
.other_data
;
9035 range
= &data
->data
.range
;
9037 * Check if monitor has continuous frequency mode
9039 if (data
->type
!= EDID_DETAIL_MONITOR_RANGE
)
9042 * Check for flag range limits only. If flag == 1 then
9043 * no additional timing information provided.
9044 * Default GTF, GTF Secondary curve and CVT are not
9047 if (range
->flags
!= 1)
9050 amdgpu_dm_connector
->min_vfreq
= range
->min_vfreq
;
9051 amdgpu_dm_connector
->max_vfreq
= range
->max_vfreq
;
9052 amdgpu_dm_connector
->pixel_clock_mhz
=
9053 range
->pixel_clock_mhz
* 10;
9057 if (amdgpu_dm_connector
->max_vfreq
-
9058 amdgpu_dm_connector
->min_vfreq
> 10) {
9060 freesync_capable
= true;
9066 dm_con_state
->freesync_capable
= freesync_capable
;
9068 if (connector
->vrr_capable_property
)
9069 drm_connector_set_vrr_capable_property(connector
,
9073 static void amdgpu_dm_set_psr_caps(struct dc_link
*link
)
9075 uint8_t dpcd_data
[EDP_PSR_RECEIVER_CAP_SIZE
];
9077 if (!(link
->connector_signal
& SIGNAL_TYPE_EDP
))
9079 if (link
->type
== dc_connection_none
)
9081 if (dm_helpers_dp_read_dpcd(NULL
, link
, DP_PSR_SUPPORT
,
9082 dpcd_data
, sizeof(dpcd_data
))) {
9083 link
->dpcd_caps
.psr_caps
.psr_version
= dpcd_data
[0];
9085 if (dpcd_data
[0] == 0) {
9086 link
->psr_settings
.psr_version
= DC_PSR_VERSION_UNSUPPORTED
;
9087 link
->psr_settings
.psr_feature_enabled
= false;
9089 link
->psr_settings
.psr_version
= DC_PSR_VERSION_1
;
9090 link
->psr_settings
.psr_feature_enabled
= true;
9093 DRM_INFO("PSR support:%d\n", link
->psr_settings
.psr_feature_enabled
);
9098 * amdgpu_dm_link_setup_psr() - configure psr link
9099 * @stream: stream state
9101 * Return: true if success
9103 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state
*stream
)
9105 struct dc_link
*link
= NULL
;
9106 struct psr_config psr_config
= {0};
9107 struct psr_context psr_context
= {0};
9113 link
= stream
->link
;
9115 psr_config
.psr_version
= link
->dpcd_caps
.psr_caps
.psr_version
;
9117 if (psr_config
.psr_version
> 0) {
9118 psr_config
.psr_exit_link_training_required
= 0x1;
9119 psr_config
.psr_frame_capture_indication_req
= 0;
9120 psr_config
.psr_rfb_setup_time
= 0x37;
9121 psr_config
.psr_sdp_transmit_line_num_deadline
= 0x20;
9122 psr_config
.allow_smu_optimizations
= 0x0;
9124 ret
= dc_link_setup_psr(link
, stream
, &psr_config
, &psr_context
);
9127 DRM_DEBUG_DRIVER("PSR link: %d\n", link
->psr_settings
.psr_feature_enabled
);
9133 * amdgpu_dm_psr_enable() - enable psr f/w
9134 * @stream: stream state
9136 * Return: true if success
9138 bool amdgpu_dm_psr_enable(struct dc_stream_state
*stream
)
9140 struct dc_link
*link
= stream
->link
;
9141 unsigned int vsync_rate_hz
= 0;
9142 struct dc_static_screen_params params
= {0};
9143 /* Calculate number of static frames before generating interrupt to
9146 // Init fail safe of 2 frames static
9147 unsigned int num_frames_static
= 2;
9149 DRM_DEBUG_DRIVER("Enabling psr...\n");
9151 vsync_rate_hz
= div64_u64(div64_u64((
9152 stream
->timing
.pix_clk_100hz
* 100),
9153 stream
->timing
.v_total
),
9154 stream
->timing
.h_total
);
9157 * Calculate number of frames such that at least 30 ms of time has
9160 if (vsync_rate_hz
!= 0) {
9161 unsigned int frame_time_microsec
= 1000000 / vsync_rate_hz
;
9162 num_frames_static
= (30000 / frame_time_microsec
) + 1;
9165 params
.triggers
.cursor_update
= true;
9166 params
.triggers
.overlay_update
= true;
9167 params
.triggers
.surface_update
= true;
9168 params
.num_frames
= num_frames_static
;
9170 dc_stream_set_static_screen_params(link
->ctx
->dc
,
9174 return dc_link_set_psr_allow_active(link
, true, false);
9178 * amdgpu_dm_psr_disable() - disable psr f/w
9179 * @stream: stream state
9181 * Return: true if success
9183 static bool amdgpu_dm_psr_disable(struct dc_stream_state
*stream
)
9186 DRM_DEBUG_DRIVER("Disabling psr...\n");
9188 return dc_link_set_psr_allow_active(stream
->link
, false, true);