/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"

#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"

#include "amdgpu_display.h"
#include "amdgpu_ucode.h"

#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/drm_hdcp.h>
#endif
#include "amdgpu_pm.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif

#include "ivsrcid/ivsrcid_vislands30.h"

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/version.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>
#include <drm/drm_hdcp.h>

#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"
#endif

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"

#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
#endif

#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100
/**
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */
/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);

/*
 * initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder, drm_mode_config
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct drm_plane *plane,
				unsigned long possible_crtcs,
				const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			       struct drm_plane *plane,
			       uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *amdgpu_dm_connector,
				    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static int amdgpu_dm_atomic_commit(struct drm_device *dev,
				   struct drm_atomic_state *state,
				   bool nonblock);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state);

static void amdgpu_dm_set_psr_caps(struct dc_link *link);
static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
/**
 * dm_vblank_get_counter
 *
 * Get counter for number of vertical blanks
 *
 * struct amdgpu_device *adev - [in] desired amdgpu device
 * int disp_idx - [in] which CRTC to get the counter from
 *
 * Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
		struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
				acrtc->base.state);

		if (acrtc_state->stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		return dc_stream_get_vblank_counter(acrtc_state->stream);
	}
}
static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	uint32_t v_blank_start, v_blank_end, h_position, v_position;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
		struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
				acrtc->base.state);

		if (acrtc_state->stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		/*
		 * TODO rework base driver to use values directly.
		 * for now parse it back into reg-format
		 */
		dc_stream_get_scanoutpos(acrtc_state->stream,
					 &v_blank_start,
					 &v_blank_end,
					 &h_position,
					 &v_position);

		*position = v_position | (h_position << 16);
		*vbl = v_blank_start | (v_blank_end << 16);
	}

	return 0;
}
static bool dm_is_idle(void *handle)
{
	return true;
}

static int dm_wait_for_idle(void *handle)
{
	return 0;
}

static bool dm_check_soft_reset(void *handle)
{
	return false;
}

static int dm_soft_reset(void *handle)
{
	return 0;
}
static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
		     int otg_inst)
{
	struct drm_device *dev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	if (otg_inst == -1) {
		return adev->mode_info.crtcs[0];
	}

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}
static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}
/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: ignored
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;
	struct drm_pending_vblank_event *e;
	struct dm_crtc_state *acrtc_state;
	uint32_t vpos, hpos, v_blank_start, v_blank_end;
	bool vrr_active;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev->ddev->event_lock, flags);

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
				 amdgpu_crtc->pflip_status,
				 AMDGPU_FLIP_SUBMITTED,
				 amdgpu_crtc->crtc_id,
				 amdgpu_crtc);
		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
		return;
	}

	/* page flip completed. */
	e = amdgpu_crtc->event;
	amdgpu_crtc->event = NULL;

	acrtc_state = to_dm_crtc_state(amdgpu_crtc->base.state);
	vrr_active = amdgpu_dm_vrr_active(acrtc_state);

	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
	if (!vrr_active ||
	    !dc_stream_get_scanoutpos(acrtc_state->stream, &v_blank_start,
				      &v_blank_end, &hpos, &vpos) ||
	    (vpos < v_blank_start)) {
		/* Update to correct count and vblank timestamp if racing with
		 * vblank irq. This also updates to the correct vblank timestamp
		 * even in VRR mode, as scanout is past the front-porch atm.
		 */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		/* Wake up userspace by sending the pageflip event with proper
		 * count and timestamp of vblank of flip completion.
		 */
		if (e)
			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

		/* Event sent, so done with vblank for this flip */
		drm_crtc_vblank_put(&amdgpu_crtc->base);
	} else if (e) {
		/* VRR active and inside front-porch: vblank count and
		 * timestamp for pageflip event will only be up to date after
		 * drm_crtc_handle_vblank() has been executed from late vblank
		 * irq handler after start of back-porch (vline 0). We queue the
		 * pageflip event for send-out by drm_crtc_handle_vblank() with
		 * updated timestamp and count, once it runs after us.
		 *
		 * We need to open-code this instead of using the helper
		 * drm_crtc_arm_vblank_event(), as that helper would
		 * call drm_crtc_accurate_vblank_count(), which we must
		 * not call in VRR mode while we are in front-porch!
		 */

		/* sequence will be replaced by real count during send-out. */
		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
		e->pipe = amdgpu_crtc->crtc_id;

		list_add_tail(&e->base.link, &adev->ddev->vblank_event_list);
	}

	/* Keep track of vblank of this flip for flip throttling. We use the
	 * cooked hw counter, as that one incremented at start of this vblank
	 * of pageflip completion, so last_flip_vblank is the forbidden count
	 * for queueing new pageflips if vsync + VRR is enabled.
	 */
	amdgpu_crtc->last_flip_vblank =
		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);

	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
			 amdgpu_crtc->crtc_id, amdgpu_crtc,
			 vrr_active, (int) !e);
}
static void dm_vupdate_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct dm_crtc_state *acrtc_state;
	unsigned long flags;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

	if (acrtc) {
		acrtc_state = to_dm_crtc_state(acrtc->base.state);

		DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
			      acrtc->crtc_id,
			      amdgpu_dm_vrr_active(acrtc_state));

		/* Core vblank handling is done here after end of front-porch in
		 * vrr mode, as vblank timestamping will give valid results
		 * while now done after front-porch. This will also deliver
		 * page-flip completion events that have been queued to us
		 * if a pageflip happened inside front-porch.
		 */
		if (amdgpu_dm_vrr_active(acrtc_state)) {
			drm_crtc_handle_vblank(&acrtc->base);

			/* BTR processing for pre-DCE12 ASICs */
			if (acrtc_state->stream &&
			    adev->family < AMDGPU_FAMILY_AI) {
				spin_lock_irqsave(&adev->ddev->event_lock, flags);
				mod_freesync_handle_v_update(
						adev->dm.freesync_module,
						acrtc_state->stream,
						&acrtc_state->vrr_params);

				dc_stream_adjust_vmin_vmax(
						adev->dm.dc,
						acrtc_state->stream,
						&acrtc_state->vrr_params.adjust);
				spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
			}
		}
	}
}
/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct dm_crtc_state *acrtc_state;
	unsigned long flags;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
	if (!acrtc)
		return;

	acrtc_state = to_dm_crtc_state(acrtc->base.state);

	DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
		      amdgpu_dm_vrr_active(acrtc_state),
		      acrtc_state->active_planes);

	/*
	 * Core vblank handling at start of front-porch is only possible
	 * in non-vrr mode, as only there vblank timestamping will give
	 * valid results while done in front-porch. Otherwise defer it
	 * to dm_vupdate_high_irq after end of front-porch.
	 */
	if (!amdgpu_dm_vrr_active(acrtc_state))
		drm_crtc_handle_vblank(&acrtc->base);

	/*
	 * Following stuff must happen at start of vblank, for crc
	 * computation and below-the-range btr support in vrr mode.
	 */
	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

	/* BTR updates need to happen before VUPDATE on Vega and above. */
	if (adev->family < AMDGPU_FAMILY_AI)
		return;

	spin_lock_irqsave(&adev->ddev->event_lock, flags);

	if (acrtc_state->stream && acrtc_state->vrr_params.supported &&
	    acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
		mod_freesync_handle_v_update(adev->dm.freesync_module,
					     acrtc_state->stream,
					     &acrtc_state->vrr_params);

		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc_state->stream,
					   &acrtc_state->vrr_params.adjust);
	}

	/*
	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
	 * In that case, pageflip completion interrupts won't fire and pageflip
	 * completion events won't get delivered. Prevent this by sending
	 * pending pageflip events from here if a flip is still pending.
	 *
	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
	 * avoid race conditions between flip programming and completion,
	 * which could cause too early flip completion events.
	 */
	if (adev->family >= AMDGPU_FAMILY_RV &&
	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
	    acrtc_state->active_planes == 0) {
		if (acrtc->event) {
			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
			acrtc->event = NULL;
			drm_crtc_vblank_put(&acrtc->base);
		}
		acrtc->pflip_status = AMDGPU_FLIP_NONE;
	}

	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
}
static int dm_set_clockgating_state(void *handle,
				    enum amd_clockgating_state state)
{
	return 0;
}

static int dm_set_powergating_state(void *handle,
				    enum amd_powergating_state state)
{
	return 0;
}
/* Prototypes of private functions */
static int dm_early_init(void *handle);
/* Allocate memory for FBC compressed data  */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct dm_comressor_info *compressor = &adev->dm.compressor;
	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
	struct drm_display_mode *mode;
	unsigned long max_size = 0;

	if (adev->dm.dc->fbc_compressor == NULL)
		return;

	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	if (compressor->bo_ptr)
		return;

	list_for_each_entry(mode, &connector->modes, head) {
		if (max_size < mode->htotal * mode->vtotal)
			max_size = mode->htotal * mode->vtotal;
	}

	if (max_size) {
		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
			    &compressor->gpu_addr, &compressor->cpu_addr);

		if (r)
			DRM_ERROR("DM: Failed to initialize FBC\n");
		else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size * 4);
		}
	}
}
static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
					     int pipe, bool *enabled,
					     unsigned char *buf, int max_bytes)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct amdgpu_dm_connector *aconnector;
	int ret = 0;

	*enabled = false;

	mutex_lock(&adev->dm.audio_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->audio_inst != port)
			continue;

		*enabled = true;
		ret = drm_eld_size(connector->eld);
		memcpy(buf, connector->eld, min(max_bytes, ret));

		break;
	}
	drm_connector_list_iter_end(&conn_iter);

	mutex_unlock(&adev->dm.audio_lock);

	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

	return ret;
}

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
	.get_eld = amdgpu_dm_audio_component_get_eld,
};
static int amdgpu_dm_audio_component_bind(struct device *kdev,
					  struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_audio_component *acomp = data;

	acomp->ops = &amdgpu_dm_audio_component_ops;
	adev->dm.audio_component = acomp;

	return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
					     struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_audio_component *acomp = data;

	adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
	.bind	= amdgpu_dm_audio_component_bind,
	.unbind	= amdgpu_dm_audio_component_unbind,
};
static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
	int i, ret;

	adev->mode_info.audio.enabled = true;

	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].id =
			adev->dm.dc->res_pool->audios[i]->inst;
		adev->mode_info.audio.pin[i].offset = 0;
	}

	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
	if (ret < 0)
		return ret;

	adev->dm.audio_registered = true;

	return 0;
}
static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
	if (!adev->mode_info.audio.enabled)
		return;

	if (adev->dm.audio_registered) {
		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
		adev->dm.audio_registered = false;
	}

	/* TODO: Disable audio? */

	adev->mode_info.audio.enabled = false;
}
static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
	struct drm_audio_component *acomp = adev->dm.audio_component;

	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
						 pin, -1);
	}
}
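
/*
 * Copy the DMUB firmware and VBIOS image into the reserved framebuffer
 * windows, bring up the DMUB hardware and wait for its auto-load, then
 * register the service with DC. Returns 0 on success or a negative errno.
 */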
static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
	const struct dmcub_firmware_header_v1_0 *hdr;
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
	const struct firmware *dmub_fw = adev->dm.dmub_fw;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	struct abm *abm = adev->dm.dc->res_pool->abm;
	struct dmub_srv_hw_params hw_params;
	enum dmub_status status;
	const unsigned char *fw_inst_const, *fw_bss_data;
	uint32_t i, fw_inst_const_size, fw_bss_data_size;
	bool has_hw_support;

	if (!dmub_srv)
		/* DMUB isn't supported on the ASIC. */
		return 0;

	if (!fb_info) {
		DRM_ERROR("No framebuffer info for DMUB service.\n");
		return -EINVAL;
	}

	if (!dmub_fw) {
		/* Firmware required for DMUB support. */
		DRM_ERROR("No firmware provided for DMUB.\n");
		return -EINVAL;
	}

	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
		return -EINVAL;
	}

	if (!has_hw_support) {
		DRM_INFO("DMUB unsupported on ASIC\n");
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

	fw_inst_const = dmub_fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
			PSP_HEADER_BYTES;

	fw_bss_data = dmub_fw->data +
		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		      le32_to_cpu(hdr->inst_const_bytes);

	/* Copy firmware and bios info into FB memory. */
	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
	 * amdgpu_ucode_init_single_fw will load dmub firmware
	 * fw_inst_const part to cw0; otherwise, the firmware back door load
	 * will be done by dm_dmub_hw_init
	 */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
		       fw_inst_const_size);
	}

	if (fw_bss_data_size)
		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
		       fw_bss_data, fw_bss_data_size);

	/* Copy firmware bios info into FB memory. */
	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
	       adev->bios_size);

	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

	/* Initialize hardware. */
	memset(&hw_params, 0, sizeof(hw_params));
	hw_params.fb_base = adev->gmc.fb_start;
	hw_params.fb_offset = adev->gmc.aper_base;

	/* backdoor load firmware and trigger dmub running */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		hw_params.load_inst_const = true;

	if (dmcu)
		hw_params.psp_version = dmcu->psp_version;

	for (i = 0; i < fb_info->num_fb; ++i)
		hw_params.fb[i] = &fb_info->fb[i];

	status = dmub_srv_hw_init(dmub_srv, &hw_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
		return -EINVAL;
	}

	/* Wait for firmware load to finish. */
	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

	/* Init DMCU and ABM if available. */
	if (dmcu && abm) {
		dmcu->funcs->dmcu_init(dmcu);
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
	}

	adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
	if (!adev->dm.dc->ctx->dmub_srv) {
		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
		return -ENOMEM;
	}

	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
		 adev->dm.dmcub_fw_version);

	return 0;
}
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
	struct dc_init_data init_data;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct dc_callback_init init_params;
#endif
	int r;

	adev->dm.ddev = adev->ddev;
	adev->dm.adev = adev;

	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));
#ifdef CONFIG_DRM_AMD_DC_HDCP
	memset(&init_params, 0, sizeof(init_params));
#endif

	mutex_init(&adev->dm.dc_lock);
	mutex_init(&adev->dm.audio_lock);

	if (amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
		goto error;
	}

	init_data.asic_id.chip_family = adev->family;

	init_data.asic_id.pci_revision_id = adev->pdev->revision;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;

	init_data.asic_id.vram_width = adev->gmc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;

	init_data.driver = adev;

	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		DRM_ERROR("amdgpu: failed to create cgs device.\n");
		goto error;
	}

	init_data.cgs_device = adev->dm.cgs_device;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

	switch (adev->asic_type) {
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_RAVEN:
	case CHIP_RENOIR:
		init_data.flags.gpu_vm_support = true;
		break;
	default:
		break;
	}

	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
		init_data.flags.fbc_support = true;

	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
		init_data.flags.multi_mon_pp_mclk_switch = true;

	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
		init_data.flags.disable_fractional_pwm = true;

	init_data.flags.power_down_display_on_boot = true;

	init_data.soc_bounding_box = adev->dm.soc_bounding_box;

	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

	if (adev->dm.dc) {
		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
	} else {
		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
		goto error;
	}

	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
		adev->dm.dc->debug.force_single_disp_pipe_split = false;
		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
	}

	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
		adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
		adev->dm.dc->debug.disable_stutter = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
		adev->dm.dc->debug.disable_dsc = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
		adev->dm.dc->debug.disable_clock_gate = true;

	r = dm_dmub_hw_init(adev);
	if (r) {
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
		goto error;
	}

	dc_hardware_init(adev->dm.dc);

	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module) {
		DRM_ERROR(
		"amdgpu: failed to initialize freesync_module.\n");
	} else
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				adev->dm.freesync_module);

	amdgpu_dm_init_color_mod();

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->asic_type >= CHIP_RAVEN) {
		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);

		if (!adev->dm.hdcp_workqueue)
			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
		else
			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);

		dc_init_callbacks(adev->dm.dc, &init_params);
	}
#endif
	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	/* Update the actual used number of crtc */
	adev->mode_info.num_crtc = adev->dm.display_indexes_num;

	/* create fake encoders for MST */
	dm_dp_create_fake_mst_encoders(adev);

	/* TODO: Add_display_info? */

	/* TODO use dynamic cursor width */
	adev->ddev->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
	adev->ddev->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

	if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	DRM_DEBUG_DRIVER("KMS initialized.\n");

	return 0;

error:
	amdgpu_dm_fini(adev);

	return -EINVAL;
}
static void amdgpu_dm_fini(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->dm.display_indexes_num; i++) {
		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
	}

	amdgpu_dm_audio_fini(adev);

	amdgpu_dm_destroy_drm_device(&adev->dm);

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.hdcp_workqueue) {
		hdcp_destroy(adev->dm.hdcp_workqueue);
		adev->dm.hdcp_workqueue = NULL;
	}

	dc_deinit_callbacks(adev->dm.dc);
#endif
	if (adev->dm.dc->ctx->dmub_srv) {
		dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
		adev->dm.dc->ctx->dmub_srv = NULL;
	}

	if (adev->dm.dmub_bo)
		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
				      &adev->dm.dmub_bo_gpu_addr,
				      &adev->dm.dmub_bo_cpu_addr);

	/* DC Destroy TODO: Replace destroy DAL */
	dc_destroy(&adev->dm.dc);
	/*
	 * TODO: pageflip, vblank interrupt
	 *
	 * amdgpu_dm_irq_fini(adev);
	 */

	if (adev->dm.cgs_device) {
		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
		adev->dm.cgs_device = NULL;
	}
	if (adev->dm.freesync_module) {
		mod_freesync_destroy(adev->dm.freesync_module);
		adev->dm.freesync_module = NULL;
	}

	mutex_destroy(&adev->dm.audio_lock);
	mutex_destroy(&adev->dm.dc_lock);
}
static int load_dmcu_fw(struct amdgpu_device *adev)
{
	const char *fw_name_dmcu = NULL;
	int r;
	const struct dmcu_firmware_header_v1_0 *hdr;

	switch (adev->asic_type) {
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
#endif
		return 0;
	case CHIP_NAVI12:
		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
		break;
	case CHIP_RAVEN:
		if (ASICREV_IS_PICASSO(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else
			return 0;
		break;
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
		return 0;
	}

	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
	if (r == -ENOENT) {
		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
		adev->dm.fw_dmcu = NULL;
		return 0;
	}
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
			fw_name_dmcu);
		return r;
	}

	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
			fw_name_dmcu);
		release_firmware(adev->dm.fw_dmcu);
		adev->dm.fw_dmcu = NULL;
		return r;
	}

	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);

	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");

	return 0;
}
static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
{
	struct amdgpu_device *adev = ctx;

	return dm_read_reg(adev->dm.dc->ctx, address);
}

static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
				     uint32_t value)
{
	struct amdgpu_device *adev = ctx;

	return dm_write_reg(adev->dm.dc->ctx, address, value);
}
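
/*
 * Pick and request the DMUB firmware for this ASIC, create the DMUB service
 * instance, and allocate the VRAM buffer that backs its memory regions.
 */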
static int dm_dmub_sw_init(struct amdgpu_device *adev)
{
	struct dmub_srv_create_params create_params;
	struct dmub_srv_region_params region_params;
	struct dmub_srv_region_info region_info;
	struct dmub_srv_fb_params fb_params;
	struct dmub_srv_fb_info *fb_info;
	struct dmub_srv *dmub_srv;
	const struct dmcub_firmware_header_v1_0 *hdr;
	const char *fw_name_dmub;
	enum dmub_asic dmub_asic;
	enum dmub_status status;
	int r;

	switch (adev->asic_type) {
	case CHIP_RENOIR:
		dmub_asic = DMUB_ASIC_DCN21;
		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
		break;
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
	case CHIP_SIENNA_CICHLID:
		dmub_asic = DMUB_ASIC_DCN30;
		fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
		break;
	case CHIP_NAVY_FLOUNDER:
		dmub_asic = DMUB_ASIC_DCN30;
		fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
		break;
#endif

	default:
		/* ASIC doesn't support DMUB. */
		return 0;
	}

	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
	if (r) {
		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
		return 0;
	}

	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
	if (r) {
		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
			AMDGPU_UCODE_ID_DMCUB;
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
			adev->dm.dmub_fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);

		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
			 adev->dm.dmcub_fw_version);
	}

	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);

	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
	dmub_srv = adev->dm.dmub_srv;

	if (!dmub_srv) {
		DRM_ERROR("Failed to allocate DMUB service!\n");
		return -ENOMEM;
	}

	memset(&create_params, 0, sizeof(create_params));
	create_params.user_ctx = adev;
	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
	create_params.asic = dmub_asic;

	/* Create the DMUB service. */
	status = dmub_srv_create(dmub_srv, &create_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error creating DMUB service: %d\n", status);
		return -EINVAL;
	}

	/* Calculate the size of all the regions for the DMUB service. */
	memset(&region_params, 0, sizeof(region_params));

	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
	region_params.vbios_size = adev->bios_size;
	region_params.fw_bss_data = region_params.bss_data_size ?
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		le32_to_cpu(hdr->inst_const_bytes) : NULL;
	region_params.fw_inst_const =
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		PSP_HEADER_BYTES;

	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
					   &region_info);

	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
		return -EINVAL;
	}

	/*
	 * Allocate a framebuffer based on the total size of all the regions.
	 * TODO: Move this into GART.
	 */
	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
				    &adev->dm.dmub_bo_gpu_addr,
				    &adev->dm.dmub_bo_cpu_addr);
	if (r)
		return r;

	/* Rebase the regions on the framebuffer address. */
	memset(&fb_params, 0, sizeof(fb_params));
	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
	fb_params.region_info = &region_info;

	adev->dm.dmub_fb_info =
		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
	fb_info = adev->dm.dmub_fb_info;

	if (!fb_info) {
		DRM_ERROR(
			"Failed to allocate framebuffer info for DMUB service!\n");
		return -ENOMEM;
	}

	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
		return -EINVAL;
	}

	return 0;
}
static int dm_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = dm_dmub_sw_init(adev);
	if (r)
		return r;

	return load_dmcu_fw(adev);
}
static int dm_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	kfree(adev->dm.dmub_fb_info);
	adev->dm.dmub_fb_info = NULL;

	if (adev->dm.dmub_srv) {
		dmub_srv_destroy(adev->dm.dmub_srv);
		adev->dm.dmub_srv = NULL;
	}

	release_firmware(adev->dm.dmub_fw);
	adev->dm.dmub_fw = NULL;

	release_firmware(adev->dm.fw_dmcu);
	adev->dm.fw_dmcu = NULL;

	return 0;
}
static int detect_mst_link_for_all_connectors(struct drm_device *dev)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	int ret = 0;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type == dc_connection_mst_branch &&
		    aconnector->mst_mgr.aux) {
			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
					 aconnector,
					 aconnector->base.base.id);

			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
			if (ret < 0) {
				DRM_ERROR("DM_MST: Failed to start MST\n");
				aconnector->dc_link->type =
					dc_connection_single;
				break;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	return ret;
}
static int dm_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	struct dmcu_iram_parameters params;
	unsigned int linear_lut[16];
	int i;
	struct dmcu *dmcu = NULL;
	bool ret = true;

	if (!adev->dm.fw_dmcu && !adev->dm.dmub_fw)
		return detect_mst_link_for_all_connectors(adev->ddev);

	dmcu = adev->dm.dc->res_pool->dmcu;

	for (i = 0; i < 16; i++)
		linear_lut[i] = 0xFFFF * i / 15;

	params.backlight_ramping_start = 0xCCCC;
	params.backlight_ramping_reduction = 0xCCCCCCCC;
	params.backlight_lut_array_size = 16;
	params.backlight_lut_array = linear_lut;

	/* Min backlight level after ABM reduction, Don't allow below 1%
	 * 0xFFFF x 0.01 = 0x28F
	 */
	params.min_abm_backlight = 0x28F;

	/* In the case where abm is implemented on dmcub,
	 * dmcu object will be null.
	 * ABM 2.4 and up are implemented on dmcub.
	 */
	if (dmcu)
		ret = dmcu_load_iram(dmcu, params);
	else if (adev->dm.dc->ctx->dmub_srv)
		ret = dmub_init_abm_config(adev->dm.dc->res_pool->abm, params);

	if (!ret)
		return -EINVAL;

	return detect_mst_link_for_all_connectors(adev->ddev);
}
static void s3_handle_mst(struct drm_device *dev, bool suspend)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct drm_dp_mst_topology_mgr *mgr;
	int ret;
	bool need_hotplug = false;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type != dc_connection_mst_branch ||
		    aconnector->mst_port)
			continue;

		mgr = &aconnector->mst_mgr;

		if (suspend) {
			drm_dp_mst_topology_mgr_suspend(mgr);
		} else {
			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
			if (ret < 0) {
				drm_dp_mst_topology_mgr_set_mst(mgr, false);
				need_hotplug = true;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	if (need_hotplug)
		drm_kms_helper_hotplug_event(dev);
}
static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
{
	struct smu_context *smu = &adev->smu;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return 0;

	/* This interface is for dGPU Navi1x. Linux dc-pplib interface depends
	 * on window driver dc implementation.
	 * For Navi1x, clock settings of dcn watermarks are fixed. The settings
	 * should be passed to smu during boot up and resume from s3.
	 * boot up: dc calculates dcn watermark clock settings within dc_create,
	 * dcn20_resource_construct
	 * then calls pplib functions below to pass the settings to smu:
	 * smu_set_watermarks_for_clock_ranges
	 * smu_set_watermarks_table
	 * navi10_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Renoir, clock settings of dcn watermark are also fixed values.
	 * dc has implemented different flow for window driver:
	 * dc_hardware_init / dc_set_power_state
	 * smu_set_watermarks_for_clock_ranges
	 * renoir_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * dc_hardware_init -> amdgpu_dm_init
	 * dc_set_power_state --> dm_resume
	 *
	 * therefore, this function applies to navi10/12/14 but not Renoir
	 */
	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
		break;
	default:
		return 0;
	}

	ret = smu_write_watermarks_table(smu);
	if (ret) {
		DRM_ERROR("Failed to update WMTABLE!\n");
		return ret;
	}

	return 0;
}
/**
 * dm_hw_init() - Initialize DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Initialize the &struct amdgpu_display_manager device. This involves calling
 * the initializers of each DM component, then populating the struct with them.
 *
 * Although the function implies hardware initialization, both hardware and
 * software are initialized here. Splitting them out to their relevant init
 * hooks is a future TODO item.
 *
 * Some notable things that are initialized here:
 *
 * - Display Core, both software and hardware
 * - DC modules that we need (freesync and color management)
 * - DRM software states
 * - Interrupt sources and handlers
 * - Debug FS entries, if enabled
 */
static int dm_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	/* Create DAL display manager */
	amdgpu_dm_init(adev);
	amdgpu_dm_hpd_init(adev);

	return 0;
}
/**
 * dm_hw_fini() - Teardown DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Teardown components within &struct amdgpu_display_manager that require
 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
 * were loaded. Also flush IRQ workqueues and disable them.
 */
static int dm_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dm_hpd_fini(adev);

	amdgpu_dm_irq_fini(adev);
	amdgpu_dm_fini(adev);
	return 0;
}

static int dm_enable_vblank(struct drm_crtc *crtc);
static void dm_disable_vblank(struct drm_crtc *crtc);
static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
					  struct dc_state *state, bool enable)
{
	enum dc_irq_source irq_source;
	struct amdgpu_crtc *acrtc;
	int rc = -EBUSY;
	int i;

	for (i = 0; i < state->stream_count; i++) {
		acrtc = get_crtc_by_otg_inst(
				adev, state->stream_status[i].primary_otg_inst);

		if (acrtc && state->stream_status[i].plane_count != 0) {
			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
			DRM_DEBUG("crtc %d - vupdate irq %sabling: r=%d\n",
				  acrtc->crtc_id, enable ? "en" : "dis", rc);
			if (rc)
				DRM_WARN("Failed to %s pflip interrupts\n",
					 enable ? "enable" : "disable");

			if (enable) {
				rc = dm_enable_vblank(&acrtc->base);
				if (rc)
					DRM_WARN("Failed to enable vblank interrupts\n");
			} else {
				dm_disable_vblank(&acrtc->base);
			}
		}
	}
}
static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
{
	struct dc_state *context = NULL;
	enum dc_status res = DC_ERROR_UNEXPECTED;
	int i;
	struct dc_stream_state *del_streams[MAX_PIPES];
	int del_streams_count = 0;

	memset(del_streams, 0, sizeof(del_streams));

	context = dc_create_state(dc);
	if (context == NULL)
		goto context_alloc_fail;

	dc_resource_state_copy_construct_current(dc, context);

	/* First remove from context all streams */
	for (i = 0; i < context->stream_count; i++) {
		struct dc_stream_state *stream = context->streams[i];

		del_streams[del_streams_count++] = stream;
	}

	/* Remove all planes for removed streams and then remove the streams */
	for (i = 0; i < del_streams_count; i++) {
		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
			res = DC_FAIL_DETACH_SURFACES;
			goto fail;
		}

		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
		if (res != DC_OK)
			goto fail;
	}

	res = dc_validate_global_state(dc, context, false);

	if (res != DC_OK) {
		DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
		goto fail;
	}

	res = dc_commit_state(dc, context);

fail:
	dc_release_state(context);

context_alloc_fail:
	return res;
}
)
1651 struct amdgpu_device
*adev
= handle
;
1652 struct amdgpu_display_manager
*dm
= &adev
->dm
;
1655 if (adev
->in_gpu_reset
) {
1656 mutex_lock(&dm
->dc_lock
);
1657 dm
->cached_dc_state
= dc_copy_state(dm
->dc
->current_state
);
1659 dm_gpureset_toggle_interrupts(adev
, dm
->cached_dc_state
, false);
1661 amdgpu_dm_commit_zero_streams(dm
->dc
);
1663 amdgpu_dm_irq_suspend(adev
);
1668 WARN_ON(adev
->dm
.cached_state
);
1669 adev
->dm
.cached_state
= drm_atomic_helper_suspend(adev
->ddev
);
1671 s3_handle_mst(adev
->ddev
, true);
1673 amdgpu_dm_irq_suspend(adev
);
1676 dc_set_power_state(dm
->dc
, DC_ACPI_CM_POWER_STATE_D3
);
1681 static struct amdgpu_dm_connector
*
1682 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state
*state
,
1683 struct drm_crtc
*crtc
)
1686 struct drm_connector_state
*new_con_state
;
1687 struct drm_connector
*connector
;
1688 struct drm_crtc
*crtc_from_state
;
1690 for_each_new_connector_in_state(state
, connector
, new_con_state
, i
) {
1691 crtc_from_state
= new_con_state
->crtc
;
1693 if (crtc_from_state
== crtc
)
1694 return to_amdgpu_dm_connector(connector
);
1700 static void emulated_link_detect(struct dc_link
*link
)
1702 struct dc_sink_init_data sink_init_data
= { 0 };
1703 struct display_sink_capability sink_caps
= { 0 };
1704 enum dc_edid_status edid_status
;
1705 struct dc_context
*dc_ctx
= link
->ctx
;
1706 struct dc_sink
*sink
= NULL
;
1707 struct dc_sink
*prev_sink
= NULL
;
1709 link
->type
= dc_connection_none
;
1710 prev_sink
= link
->local_sink
;
1712 if (prev_sink
!= NULL
)
1713 dc_sink_retain(prev_sink
);
1715 switch (link
->connector_signal
) {
1716 case SIGNAL_TYPE_HDMI_TYPE_A
: {
1717 sink_caps
.transaction_type
= DDC_TRANSACTION_TYPE_I2C
;
1718 sink_caps
.signal
= SIGNAL_TYPE_HDMI_TYPE_A
;
1722 case SIGNAL_TYPE_DVI_SINGLE_LINK
: {
1723 sink_caps
.transaction_type
= DDC_TRANSACTION_TYPE_I2C
;
1724 sink_caps
.signal
= SIGNAL_TYPE_DVI_SINGLE_LINK
;
1728 case SIGNAL_TYPE_DVI_DUAL_LINK
: {
1729 sink_caps
.transaction_type
= DDC_TRANSACTION_TYPE_I2C
;
1730 sink_caps
.signal
= SIGNAL_TYPE_DVI_DUAL_LINK
;
1734 case SIGNAL_TYPE_LVDS
: {
1735 sink_caps
.transaction_type
= DDC_TRANSACTION_TYPE_I2C
;
1736 sink_caps
.signal
= SIGNAL_TYPE_LVDS
;
1740 case SIGNAL_TYPE_EDP
: {
1741 sink_caps
.transaction_type
=
1742 DDC_TRANSACTION_TYPE_I2C_OVER_AUX
;
1743 sink_caps
.signal
= SIGNAL_TYPE_EDP
;
1747 case SIGNAL_TYPE_DISPLAY_PORT
: {
1748 sink_caps
.transaction_type
=
1749 DDC_TRANSACTION_TYPE_I2C_OVER_AUX
;
1750 sink_caps
.signal
= SIGNAL_TYPE_VIRTUAL
;
1755 DC_ERROR("Invalid connector type! signal:%d\n",
1756 link
->connector_signal
);
1760 sink_init_data
.link
= link
;
1761 sink_init_data
.sink_signal
= sink_caps
.signal
;
1763 sink
= dc_sink_create(&sink_init_data
);
1765 DC_ERROR("Failed to create sink!\n");
1769 /* dc_sink_create returns a new reference */
1770 link
->local_sink
= sink
;
1772 edid_status
= dm_helpers_read_local_edid(
1777 if (edid_status
!= EDID_OK
)
1778 DC_ERROR("Failed to read EDID");
1782 static void dm_gpureset_commit_state(struct dc_state
*dc_state
,
1783 struct amdgpu_display_manager
*dm
)
1786 struct dc_surface_update surface_updates
[MAX_SURFACES
];
1787 struct dc_plane_info plane_infos
[MAX_SURFACES
];
1788 struct dc_scaling_info scaling_infos
[MAX_SURFACES
];
1789 struct dc_flip_addrs flip_addrs
[MAX_SURFACES
];
1790 struct dc_stream_update stream_update
;
1794 bundle
= kzalloc(sizeof(*bundle
), GFP_KERNEL
);
1797 dm_error("Failed to allocate update bundle\n");
1801 for (k
= 0; k
< dc_state
->stream_count
; k
++) {
1802 bundle
->stream_update
.stream
= dc_state
->streams
[k
];
1804 for (m
= 0; m
< dc_state
->stream_status
->plane_count
; m
++) {
1805 bundle
->surface_updates
[m
].surface
=
1806 dc_state
->stream_status
->plane_states
[m
];
1807 bundle
->surface_updates
[m
].surface
->force_full_update
=
1810 dc_commit_updates_for_stream(
1811 dm
->dc
, bundle
->surface_updates
,
1812 dc_state
->stream_status
->plane_count
,
1813 dc_state
->streams
[k
], &bundle
->stream_update
, dc_state
);
1822 static int dm_resume(void *handle
)
1824 struct amdgpu_device
*adev
= handle
;
1825 struct drm_device
*ddev
= adev
->ddev
;
1826 struct amdgpu_display_manager
*dm
= &adev
->dm
;
1827 struct amdgpu_dm_connector
*aconnector
;
1828 struct drm_connector
*connector
;
1829 struct drm_connector_list_iter iter
;
1830 struct drm_crtc
*crtc
;
1831 struct drm_crtc_state
*new_crtc_state
;
1832 struct dm_crtc_state
*dm_new_crtc_state
;
1833 struct drm_plane
*plane
;
1834 struct drm_plane_state
*new_plane_state
;
1835 struct dm_plane_state
*dm_new_plane_state
;
1836 struct dm_atomic_state
*dm_state
= to_dm_atomic_state(dm
->atomic_obj
.state
);
1837 enum dc_connection_type new_connection_type
= dc_connection_none
;
1838 struct dc_state
*dc_state
;
1841 if (adev
->in_gpu_reset
) {
1842 dc_state
= dm
->cached_dc_state
;
1844 r
= dm_dmub_hw_init(adev
);
1846 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r
);
1848 dc_set_power_state(dm
->dc
, DC_ACPI_CM_POWER_STATE_D0
);
1851 amdgpu_dm_irq_resume_early(adev
);
1853 for (i
= 0; i
< dc_state
->stream_count
; i
++) {
1854 dc_state
->streams
[i
]->mode_changed
= true;
1855 for (j
= 0; j
< dc_state
->stream_status
->plane_count
; j
++) {
1856 dc_state
->stream_status
->plane_states
[j
]->update_flags
.raw
1861 WARN_ON(!dc_commit_state(dm
->dc
, dc_state
));
1863 dm_gpureset_commit_state(dm
->cached_dc_state
, dm
);
1865 dm_gpureset_toggle_interrupts(adev
, dm
->cached_dc_state
, true);
1867 dc_release_state(dm
->cached_dc_state
);
1868 dm
->cached_dc_state
= NULL
;
1870 amdgpu_dm_irq_resume_late(adev
);
1872 mutex_unlock(&dm
->dc_lock
);
1876 /* Recreate dc_state - DC invalidates it when setting power state to S3. */
1877 dc_release_state(dm_state
->context
);
1878 dm_state
->context
= dc_create_state(dm
->dc
);
1879 /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
1880 dc_resource_state_construct(dm
->dc
, dm_state
->context
);
1882 /* Before powering on DC we need to re-initialize DMUB. */
1883 r
= dm_dmub_hw_init(adev
);
1885 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r
);
1887 /* power on hardware */
1888 dc_set_power_state(dm
->dc
, DC_ACPI_CM_POWER_STATE_D0
);
1890 /* program HPD filter */
1894 * early enable HPD Rx IRQ, should be done before set mode as short
1895 * pulse interrupts are used for MST
1897 amdgpu_dm_irq_resume_early(adev
);
1899 /* On resume we need to rewrite the MSTM control bits to enable MST*/
1900 s3_handle_mst(ddev
, false);
1903 drm_connector_list_iter_begin(ddev
, &iter
);
1904 drm_for_each_connector_iter(connector
, &iter
) {
1905 aconnector
= to_amdgpu_dm_connector(connector
);
1908 * this is the case when traversing through already created
1909 * MST connectors, should be skipped
1911 if (aconnector
->mst_port
)
1914 mutex_lock(&aconnector
->hpd_lock
);
1915 if (!dc_link_detect_sink(aconnector
->dc_link
, &new_connection_type
))
1916 DRM_ERROR("KMS: Failed to detect connector\n");
1918 if (aconnector
->base
.force
&& new_connection_type
== dc_connection_none
)
1919 emulated_link_detect(aconnector
->dc_link
);
1921 dc_link_detect(aconnector
->dc_link
, DETECT_REASON_HPD
);
1923 if (aconnector
->fake_enable
&& aconnector
->dc_link
->local_sink
)
1924 aconnector
->fake_enable
= false;
1926 if (aconnector
->dc_sink
)
1927 dc_sink_release(aconnector
->dc_sink
);
1928 aconnector
->dc_sink
= NULL
;
1929 amdgpu_dm_update_connector_after_detect(aconnector
);
1930 mutex_unlock(&aconnector
->hpd_lock
);
1932 drm_connector_list_iter_end(&iter
);
1934 /* Force mode set in atomic commit */
1935 for_each_new_crtc_in_state(dm
->cached_state
, crtc
, new_crtc_state
, i
)
1936 new_crtc_state
->active_changed
= true;
1939 * atomic_check is expected to create the dc states. We need to release
1940 * them here, since they were duplicated as part of the suspend
1943 for_each_new_crtc_in_state(dm
->cached_state
, crtc
, new_crtc_state
, i
) {
1944 dm_new_crtc_state
= to_dm_crtc_state(new_crtc_state
);
1945 if (dm_new_crtc_state
->stream
) {
1946 WARN_ON(kref_read(&dm_new_crtc_state
->stream
->refcount
) > 1);
1947 dc_stream_release(dm_new_crtc_state
->stream
);
1948 dm_new_crtc_state
->stream
= NULL
;
1952 for_each_new_plane_in_state(dm
->cached_state
, plane
, new_plane_state
, i
) {
1953 dm_new_plane_state
= to_dm_plane_state(new_plane_state
);
1954 if (dm_new_plane_state
->dc_state
) {
1955 WARN_ON(kref_read(&dm_new_plane_state
->dc_state
->refcount
) > 1);
1956 dc_plane_state_release(dm_new_plane_state
->dc_state
);
1957 dm_new_plane_state
->dc_state
= NULL
;
1961 drm_atomic_helper_resume(ddev
, dm
->cached_state
);
1963 dm
->cached_state
= NULL
;
1965 amdgpu_dm_irq_resume_late(adev
);
1967 amdgpu_dm_smu_write_watermarks_table(adev
);
/**
 * DM (and consequently DC) is registered in the amdgpu base driver as an IP
 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
 * the base driver's device list to be initialized and torn down accordingly.
 *
 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
 */

static const struct amd_ip_funcs amdgpu_dm_funcs = {
	.early_init = dm_early_init,
	.late_init = dm_late_init,
	.sw_init = dm_sw_init,
	.sw_fini = dm_sw_fini,
	.hw_init = dm_hw_init,
	.hw_fini = dm_hw_fini,
	.suspend = dm_suspend,
	.resume = dm_resume,
	.is_idle = dm_is_idle,
	.wait_for_idle = dm_wait_for_idle,
	.check_soft_reset = dm_check_soft_reset,
	.soft_reset = dm_soft_reset,
	.set_clockgating_state = dm_set_clockgating_state,
	.set_powergating_state = dm_set_powergating_state,
};

const struct amdgpu_ip_block_version dm_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.funcs = &amdgpu_dm_funcs,
};

static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
	.fb_create = amdgpu_display_user_framebuffer_create,
	.output_poll_changed = drm_fb_helper_output_poll_changed,
	.atomic_check = amdgpu_dm_atomic_check,
	.atomic_commit = amdgpu_dm_atomic_commit,
};

static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
};

static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
{
	u32 max_cll, min_cll, max, min, q, r;
	struct amdgpu_dm_backlight_caps *caps;
	struct amdgpu_display_manager *dm;
	struct drm_connector *conn_base;
	struct amdgpu_device *adev;
	struct dc_link *link = NULL;
	static const u8 pre_computed_values[] = {
		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};

	if (!aconnector || !aconnector->dc_link)
		return;

	link = aconnector->dc_link;
	if (link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	conn_base = &aconnector->base;
	adev = conn_base->dev->dev_private;
	dm = &adev->dm;
	caps = &dm->backlight_caps;
	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
	caps->aux_support = false;
	max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;

	if (caps->ext_caps->bits.oled == 1 ||
	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
	    caps->ext_caps->bits.hdr_aux_backlight_control == 1)
		caps->aux_support = true;

	/* From the specification (CTA-861-G), for calculating the maximum
	 * luminance we need to use:
	 *	Luminance = 50*2**(CV/32)
	 * where CV is a one-byte value.
	 * For calculating this expression we may need float point precision;
	 * to avoid this complexity level, we take advantage that CV is divided
	 * by a constant. From Euclid's division algorithm, we know that CV
	 * can be written as: CV = 32*q + r. Next, we replace CV in the
	 * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
	 * need to pre-compute the value of r/32. For pre-computing the values
	 * we just used the following Ruby line:
	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
	 * The results of the above expressions can be verified at
	 * pre_computed_values.
	 */
2077 max
= (1 << q
) * pre_computed_values
[r
];
2079 // min luminance: maxLum * (CV/255)^2 / 100
2080 q
= DIV_ROUND_CLOSEST(min_cll
, 255);
2081 min
= max
* DIV_ROUND_CLOSEST((q
* q
), 100);
2083 caps
->aux_max_input_signal
= max
;
2084 caps
->aux_min_input_signal
= min
;
2087 void amdgpu_dm_update_connector_after_detect(
2088 struct amdgpu_dm_connector
*aconnector
)
2090 struct drm_connector
*connector
= &aconnector
->base
;
2091 struct drm_device
*dev
= connector
->dev
;
2092 struct dc_sink
*sink
;
2094 /* MST handled by drm_mst framework */
2095 if (aconnector
->mst_mgr
.mst_state
== true)
2099 sink
= aconnector
->dc_link
->local_sink
;
2101 dc_sink_retain(sink
);
2104 * Edid mgmt connector gets first update only in mode_valid hook and then
2105 * the connector sink is set to either fake or physical sink depends on link status.
2106 * Skip if already done during boot.
2108 if (aconnector
->base
.force
!= DRM_FORCE_UNSPECIFIED
2109 && aconnector
->dc_em_sink
) {
2112 * For S3 resume with headless use eml_sink to fake stream
2113 * because on resume connector->sink is set to NULL
2115 mutex_lock(&dev
->mode_config
.mutex
);
2118 if (aconnector
->dc_sink
) {
2119 amdgpu_dm_update_freesync_caps(connector
, NULL
);
2121 * retain and release below are used to
2122 * bump up refcount for sink because the link doesn't point
2123 * to it anymore after disconnect, so on next crtc to connector
2124 * reshuffle by UMD we will get into unwanted dc_sink release
2126 dc_sink_release(aconnector
->dc_sink
);
2128 aconnector
->dc_sink
= sink
;
2129 dc_sink_retain(aconnector
->dc_sink
);
2130 amdgpu_dm_update_freesync_caps(connector
,
2133 amdgpu_dm_update_freesync_caps(connector
, NULL
);
2134 if (!aconnector
->dc_sink
) {
2135 aconnector
->dc_sink
= aconnector
->dc_em_sink
;
2136 dc_sink_retain(aconnector
->dc_sink
);
2140 mutex_unlock(&dev
->mode_config
.mutex
);
2143 dc_sink_release(sink
);
2148 * TODO: temporary guard to look for proper fix
2149 * if this sink is MST sink, we should not do anything
2151 if (sink
&& sink
->sink_signal
== SIGNAL_TYPE_DISPLAY_PORT_MST
) {
2152 dc_sink_release(sink
);
2156 if (aconnector
->dc_sink
== sink
) {
2158 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2161 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2162 aconnector
->connector_id
);
2164 dc_sink_release(sink
);
2168 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2169 aconnector
->connector_id
, aconnector
->dc_sink
, sink
);
2171 mutex_lock(&dev
->mode_config
.mutex
);
2174 * 1. Update status of the drm connector
2175 * 2. Send an event and let userspace tell us what to do
2179 * TODO: check if we still need the S3 mode update workaround.
2180 * If yes, put it here.
2182 if (aconnector
->dc_sink
)
2183 amdgpu_dm_update_freesync_caps(connector
, NULL
);
2185 aconnector
->dc_sink
= sink
;
2186 dc_sink_retain(aconnector
->dc_sink
);
2187 if (sink
->dc_edid
.length
== 0) {
2188 aconnector
->edid
= NULL
;
2189 if (aconnector
->dc_link
->aux_mode
) {
2190 drm_dp_cec_unset_edid(
2191 &aconnector
->dm_dp_aux
.aux
);
2195 (struct edid
*)sink
->dc_edid
.raw_edid
;
2197 drm_connector_update_edid_property(connector
,
2199 drm_add_edid_modes(connector
, aconnector
->edid
);
2201 if (aconnector
->dc_link
->aux_mode
)
2202 drm_dp_cec_set_edid(&aconnector
->dm_dp_aux
.aux
,
2206 amdgpu_dm_update_freesync_caps(connector
, aconnector
->edid
);
2207 update_connector_ext_caps(aconnector
);
2209 drm_dp_cec_unset_edid(&aconnector
->dm_dp_aux
.aux
);
2210 amdgpu_dm_update_freesync_caps(connector
, NULL
);
2211 drm_connector_update_edid_property(connector
, NULL
);
2212 aconnector
->num_modes
= 0;
2213 dc_sink_release(aconnector
->dc_sink
);
2214 aconnector
->dc_sink
= NULL
;
2215 aconnector
->edid
= NULL
;
2216 #ifdef CONFIG_DRM_AMD_DC_HDCP
2217 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2218 if (connector
->state
->content_protection
== DRM_MODE_CONTENT_PROTECTION_ENABLED
)
2219 connector
->state
->content_protection
= DRM_MODE_CONTENT_PROTECTION_DESIRED
;
2223 mutex_unlock(&dev
->mode_config
.mutex
);
2226 dc_sink_release(sink
);
2229 static void handle_hpd_irq(void *param
)
2231 struct amdgpu_dm_connector
*aconnector
= (struct amdgpu_dm_connector
*)param
;
2232 struct drm_connector
*connector
= &aconnector
->base
;
2233 struct drm_device
*dev
= connector
->dev
;
2234 enum dc_connection_type new_connection_type
= dc_connection_none
;
2235 #ifdef CONFIG_DRM_AMD_DC_HDCP
2236 struct amdgpu_device
*adev
= dev
->dev_private
;
2240 * In case of failure or MST no need to update connector status or notify the OS
2241 * since (for MST case) MST does this in its own context.
2243 mutex_lock(&aconnector
->hpd_lock
);
2245 #ifdef CONFIG_DRM_AMD_DC_HDCP
2246 if (adev
->dm
.hdcp_workqueue
)
2247 hdcp_reset_display(adev
->dm
.hdcp_workqueue
, aconnector
->dc_link
->link_index
);
2249 if (aconnector
->fake_enable
)
2250 aconnector
->fake_enable
= false;
2252 if (!dc_link_detect_sink(aconnector
->dc_link
, &new_connection_type
))
2253 DRM_ERROR("KMS: Failed to detect connector\n");
2255 if (aconnector
->base
.force
&& new_connection_type
== dc_connection_none
) {
2256 emulated_link_detect(aconnector
->dc_link
);
2259 drm_modeset_lock_all(dev
);
2260 dm_restore_drm_connector_state(dev
, connector
);
2261 drm_modeset_unlock_all(dev
);
2263 if (aconnector
->base
.force
== DRM_FORCE_UNSPECIFIED
)
2264 drm_kms_helper_hotplug_event(dev
);
2266 } else if (dc_link_detect(aconnector
->dc_link
, DETECT_REASON_HPD
)) {
2267 amdgpu_dm_update_connector_after_detect(aconnector
);
2270 drm_modeset_lock_all(dev
);
2271 dm_restore_drm_connector_state(dev
, connector
);
2272 drm_modeset_unlock_all(dev
);
2274 if (aconnector
->base
.force
== DRM_FORCE_UNSPECIFIED
)
2275 drm_kms_helper_hotplug_event(dev
);
2277 mutex_unlock(&aconnector
->hpd_lock
);
2281 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector
*aconnector
)
2283 uint8_t esi
[DP_PSR_ERROR_STATUS
- DP_SINK_COUNT_ESI
] = { 0 };
2285 bool new_irq_handled
= false;
2287 int dpcd_bytes_to_read
;
2289 const int max_process_count
= 30;
2290 int process_count
= 0;
2292 const struct dc_link_status
*link_status
= dc_link_get_status(aconnector
->dc_link
);
2294 if (link_status
->dpcd_caps
->dpcd_rev
.raw
< 0x12) {
2295 dpcd_bytes_to_read
= DP_LANE0_1_STATUS
- DP_SINK_COUNT
;
2296 /* DPCD 0x200 - 0x201 for downstream IRQ */
2297 dpcd_addr
= DP_SINK_COUNT
;
2299 dpcd_bytes_to_read
= DP_PSR_ERROR_STATUS
- DP_SINK_COUNT_ESI
;
2300 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
2301 dpcd_addr
= DP_SINK_COUNT_ESI
;
2304 dret
= drm_dp_dpcd_read(
2305 &aconnector
->dm_dp_aux
.aux
,
2308 dpcd_bytes_to_read
);
2310 while (dret
== dpcd_bytes_to_read
&&
2311 process_count
< max_process_count
) {
2317 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi
[0], esi
[1], esi
[2]);
2318 /* handle HPD short pulse irq */
2319 if (aconnector
->mst_mgr
.mst_state
)
2321 &aconnector
->mst_mgr
,
2325 if (new_irq_handled
) {
2326 /* ACK at DPCD to notify down stream */
2327 const int ack_dpcd_bytes_to_write
=
2328 dpcd_bytes_to_read
- 1;
2330 for (retry
= 0; retry
< 3; retry
++) {
2333 wret
= drm_dp_dpcd_write(
2334 &aconnector
->dm_dp_aux
.aux
,
2337 ack_dpcd_bytes_to_write
);
2338 if (wret
== ack_dpcd_bytes_to_write
)
2342 /* check if there is new irq to be handled */
2343 dret
= drm_dp_dpcd_read(
2344 &aconnector
->dm_dp_aux
.aux
,
2347 dpcd_bytes_to_read
);
2349 new_irq_handled
= false;
2355 if (process_count
== max_process_count
)
2356 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2359 static void handle_hpd_rx_irq(void *param
)
2361 struct amdgpu_dm_connector
*aconnector
= (struct amdgpu_dm_connector
*)param
;
2362 struct drm_connector
*connector
= &aconnector
->base
;
2363 struct drm_device
*dev
= connector
->dev
;
2364 struct dc_link
*dc_link
= aconnector
->dc_link
;
2365 bool is_mst_root_connector
= aconnector
->mst_mgr
.mst_state
;
2366 enum dc_connection_type new_connection_type
= dc_connection_none
;
2367 #ifdef CONFIG_DRM_AMD_DC_HDCP
2368 union hpd_irq_data hpd_irq_data
;
2369 struct amdgpu_device
*adev
= dev
->dev_private
;
2371 memset(&hpd_irq_data
, 0, sizeof(hpd_irq_data
));
2375 * TODO:Temporary add mutex to protect hpd interrupt not have a gpio
2376 * conflict, after implement i2c helper, this mutex should be
2379 if (dc_link
->type
!= dc_connection_mst_branch
)
2380 mutex_lock(&aconnector
->hpd_lock
);
2383 #ifdef CONFIG_DRM_AMD_DC_HDCP
2384 if (dc_link_handle_hpd_rx_irq(dc_link
, &hpd_irq_data
, NULL
) &&
2386 if (dc_link_handle_hpd_rx_irq(dc_link
, NULL
, NULL
) &&
2388 !is_mst_root_connector
) {
2389 /* Downstream Port status changed. */
2390 if (!dc_link_detect_sink(dc_link
, &new_connection_type
))
2391 DRM_ERROR("KMS: Failed to detect connector\n");
2393 if (aconnector
->base
.force
&& new_connection_type
== dc_connection_none
) {
2394 emulated_link_detect(dc_link
);
2396 if (aconnector
->fake_enable
)
2397 aconnector
->fake_enable
= false;
2399 amdgpu_dm_update_connector_after_detect(aconnector
);
2402 drm_modeset_lock_all(dev
);
2403 dm_restore_drm_connector_state(dev
, connector
);
2404 drm_modeset_unlock_all(dev
);
2406 drm_kms_helper_hotplug_event(dev
);
2407 } else if (dc_link_detect(dc_link
, DETECT_REASON_HPDRX
)) {
2409 if (aconnector
->fake_enable
)
2410 aconnector
->fake_enable
= false;
2412 amdgpu_dm_update_connector_after_detect(aconnector
);
2415 drm_modeset_lock_all(dev
);
2416 dm_restore_drm_connector_state(dev
, connector
);
2417 drm_modeset_unlock_all(dev
);
2419 drm_kms_helper_hotplug_event(dev
);
2422 #ifdef CONFIG_DRM_AMD_DC_HDCP
2423 if (hpd_irq_data
.bytes
.device_service_irq
.bits
.CP_IRQ
) {
2424 if (adev
->dm
.hdcp_workqueue
)
2425 hdcp_handle_cpirq(adev
->dm
.hdcp_workqueue
, aconnector
->base
.index
);
2428 if ((dc_link
->cur_link_settings
.lane_count
!= LANE_COUNT_UNKNOWN
) ||
2429 (dc_link
->type
== dc_connection_mst_branch
))
2430 dm_handle_hpd_rx_irq(aconnector
);
2432 if (dc_link
->type
!= dc_connection_mst_branch
) {
2433 drm_dp_cec_irq(&aconnector
->dm_dp_aux
.aux
);
2434 mutex_unlock(&aconnector
->hpd_lock
);
2438 static void register_hpd_handlers(struct amdgpu_device
*adev
)
2440 struct drm_device
*dev
= adev
->ddev
;
2441 struct drm_connector
*connector
;
2442 struct amdgpu_dm_connector
*aconnector
;
2443 const struct dc_link
*dc_link
;
2444 struct dc_interrupt_params int_params
= {0};
2446 int_params
.requested_polarity
= INTERRUPT_POLARITY_DEFAULT
;
2447 int_params
.current_polarity
= INTERRUPT_POLARITY_DEFAULT
;
2449 list_for_each_entry(connector
,
2450 &dev
->mode_config
.connector_list
, head
) {
2452 aconnector
= to_amdgpu_dm_connector(connector
);
2453 dc_link
= aconnector
->dc_link
;
2455 if (DC_IRQ_SOURCE_INVALID
!= dc_link
->irq_source_hpd
) {
2456 int_params
.int_context
= INTERRUPT_LOW_IRQ_CONTEXT
;
2457 int_params
.irq_source
= dc_link
->irq_source_hpd
;
2459 amdgpu_dm_irq_register_interrupt(adev
, &int_params
,
2461 (void *) aconnector
);
2464 if (DC_IRQ_SOURCE_INVALID
!= dc_link
->irq_source_hpd_rx
) {
2466 /* Also register for DP short pulse (hpd_rx). */
2467 int_params
.int_context
= INTERRUPT_LOW_IRQ_CONTEXT
;
2468 int_params
.irq_source
= dc_link
->irq_source_hpd_rx
;
2470 amdgpu_dm_irq_register_interrupt(adev
, &int_params
,
2472 (void *) aconnector
);
2477 /* Register IRQ sources and initialize IRQ callbacks */
2478 static int dce110_register_irq_handlers(struct amdgpu_device
*adev
)
2480 struct dc
*dc
= adev
->dm
.dc
;
2481 struct common_irq_params
*c_irq_params
;
2482 struct dc_interrupt_params int_params
= {0};
2485 unsigned client_id
= AMDGPU_IRQ_CLIENTID_LEGACY
;
2487 if (adev
->asic_type
>= CHIP_VEGA10
)
2488 client_id
= SOC15_IH_CLIENTID_DCE
;
2490 int_params
.requested_polarity
= INTERRUPT_POLARITY_DEFAULT
;
2491 int_params
.current_polarity
= INTERRUPT_POLARITY_DEFAULT
;
2494 * Actions of amdgpu_irq_add_id():
2495 * 1. Register a set() function with base driver.
2496 * Base driver will call set() function to enable/disable an
2497 * interrupt in DC hardware.
2498 * 2. Register amdgpu_dm_irq_handler().
2499 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2500 * coming from DC hardware.
2501 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2502 * for acknowledging and handling. */
2504 /* Use VBLANK interrupt */
2505 for (i
= VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0
; i
<= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0
; i
++) {
2506 r
= amdgpu_irq_add_id(adev
, client_id
, i
, &adev
->crtc_irq
);
2508 DRM_ERROR("Failed to add crtc irq id!\n");
2512 int_params
.int_context
= INTERRUPT_HIGH_IRQ_CONTEXT
;
2513 int_params
.irq_source
=
2514 dc_interrupt_to_irq_source(dc
, i
, 0);
2516 c_irq_params
= &adev
->dm
.vblank_params
[int_params
.irq_source
- DC_IRQ_SOURCE_VBLANK1
];
2518 c_irq_params
->adev
= adev
;
2519 c_irq_params
->irq_src
= int_params
.irq_source
;
2521 amdgpu_dm_irq_register_interrupt(adev
, &int_params
,
2522 dm_crtc_high_irq
, c_irq_params
);
2525 /* Use VUPDATE interrupt */
2526 for (i
= VISLANDS30_IV_SRCID_D1_V_UPDATE_INT
; i
<= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT
; i
+= 2) {
2527 r
= amdgpu_irq_add_id(adev
, client_id
, i
, &adev
->vupdate_irq
);
2529 DRM_ERROR("Failed to add vupdate irq id!\n");
2533 int_params
.int_context
= INTERRUPT_HIGH_IRQ_CONTEXT
;
2534 int_params
.irq_source
=
2535 dc_interrupt_to_irq_source(dc
, i
, 0);
2537 c_irq_params
= &adev
->dm
.vupdate_params
[int_params
.irq_source
- DC_IRQ_SOURCE_VUPDATE1
];
2539 c_irq_params
->adev
= adev
;
2540 c_irq_params
->irq_src
= int_params
.irq_source
;
2542 amdgpu_dm_irq_register_interrupt(adev
, &int_params
,
2543 dm_vupdate_high_irq
, c_irq_params
);
2546 /* Use GRPH_PFLIP interrupt */
2547 for (i
= VISLANDS30_IV_SRCID_D1_GRPH_PFLIP
;
2548 i
<= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP
; i
+= 2) {
2549 r
= amdgpu_irq_add_id(adev
, client_id
, i
, &adev
->pageflip_irq
);
2551 DRM_ERROR("Failed to add page flip irq id!\n");
2555 int_params
.int_context
= INTERRUPT_HIGH_IRQ_CONTEXT
;
2556 int_params
.irq_source
=
2557 dc_interrupt_to_irq_source(dc
, i
, 0);
2559 c_irq_params
= &adev
->dm
.pflip_params
[int_params
.irq_source
- DC_IRQ_SOURCE_PFLIP_FIRST
];
2561 c_irq_params
->adev
= adev
;
2562 c_irq_params
->irq_src
= int_params
.irq_source
;
2564 amdgpu_dm_irq_register_interrupt(adev
, &int_params
,
2565 dm_pflip_high_irq
, c_irq_params
);
2570 r
= amdgpu_irq_add_id(adev
, client_id
,
2571 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A
, &adev
->hpd_irq
);
2573 DRM_ERROR("Failed to add hpd irq id!\n");
2577 register_hpd_handlers(adev
);
2582 #if defined(CONFIG_DRM_AMD_DC_DCN)
2583 /* Register IRQ sources and initialize IRQ callbacks */
2584 static int dcn10_register_irq_handlers(struct amdgpu_device
*adev
)
2586 struct dc
*dc
= adev
->dm
.dc
;
2587 struct common_irq_params
*c_irq_params
;
2588 struct dc_interrupt_params int_params
= {0};
2592 int_params
.requested_polarity
= INTERRUPT_POLARITY_DEFAULT
;
2593 int_params
.current_polarity
= INTERRUPT_POLARITY_DEFAULT
;
2596 * Actions of amdgpu_irq_add_id():
2597 * 1. Register a set() function with base driver.
2598 * Base driver will call set() function to enable/disable an
2599 * interrupt in DC hardware.
2600 * 2. Register amdgpu_dm_irq_handler().
2601 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2602 * coming from DC hardware.
2603 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2604 * for acknowledging and handling.
2607 /* Use VSTARTUP interrupt */
2608 for (i
= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP
;
2609 i
<= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP
+ adev
->mode_info
.num_crtc
- 1;
2611 r
= amdgpu_irq_add_id(adev
, SOC15_IH_CLIENTID_DCE
, i
, &adev
->crtc_irq
);
2614 DRM_ERROR("Failed to add crtc irq id!\n");
2618 int_params
.int_context
= INTERRUPT_HIGH_IRQ_CONTEXT
;
2619 int_params
.irq_source
=
2620 dc_interrupt_to_irq_source(dc
, i
, 0);
2622 c_irq_params
= &adev
->dm
.vblank_params
[int_params
.irq_source
- DC_IRQ_SOURCE_VBLANK1
];
2624 c_irq_params
->adev
= adev
;
2625 c_irq_params
->irq_src
= int_params
.irq_source
;
2627 amdgpu_dm_irq_register_interrupt(
2628 adev
, &int_params
, dm_crtc_high_irq
, c_irq_params
);
2631 /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
2632 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
2633 * to trigger at end of each vblank, regardless of state of the lock,
2634 * matching DCE behaviour.
2636 for (i
= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT
;
2637 i
<= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT
+ adev
->mode_info
.num_crtc
- 1;
2639 r
= amdgpu_irq_add_id(adev
, SOC15_IH_CLIENTID_DCE
, i
, &adev
->vupdate_irq
);
2642 DRM_ERROR("Failed to add vupdate irq id!\n");
2646 int_params
.int_context
= INTERRUPT_HIGH_IRQ_CONTEXT
;
2647 int_params
.irq_source
=
2648 dc_interrupt_to_irq_source(dc
, i
, 0);
2650 c_irq_params
= &adev
->dm
.vupdate_params
[int_params
.irq_source
- DC_IRQ_SOURCE_VUPDATE1
];
2652 c_irq_params
->adev
= adev
;
2653 c_irq_params
->irq_src
= int_params
.irq_source
;
2655 amdgpu_dm_irq_register_interrupt(adev
, &int_params
,
2656 dm_vupdate_high_irq
, c_irq_params
);
2659 /* Use GRPH_PFLIP interrupt */
2660 for (i
= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT
;
2661 i
<= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT
+ adev
->mode_info
.num_crtc
- 1;
2663 r
= amdgpu_irq_add_id(adev
, SOC15_IH_CLIENTID_DCE
, i
, &adev
->pageflip_irq
);
2665 DRM_ERROR("Failed to add page flip irq id!\n");
2669 int_params
.int_context
= INTERRUPT_HIGH_IRQ_CONTEXT
;
2670 int_params
.irq_source
=
2671 dc_interrupt_to_irq_source(dc
, i
, 0);
2673 c_irq_params
= &adev
->dm
.pflip_params
[int_params
.irq_source
- DC_IRQ_SOURCE_PFLIP_FIRST
];
2675 c_irq_params
->adev
= adev
;
2676 c_irq_params
->irq_src
= int_params
.irq_source
;
2678 amdgpu_dm_irq_register_interrupt(adev
, &int_params
,
2679 dm_pflip_high_irq
, c_irq_params
);
2684 r
= amdgpu_irq_add_id(adev
, SOC15_IH_CLIENTID_DCE
, DCN_1_0__SRCID__DC_HPD1_INT
,
2687 DRM_ERROR("Failed to add hpd irq id!\n");
2691 register_hpd_handlers(adev
);
2698 * Acquires the lock for the atomic state object and returns
2699 * the new atomic state.
2701 * This should only be called during atomic check.
2703 static int dm_atomic_get_state(struct drm_atomic_state
*state
,
2704 struct dm_atomic_state
**dm_state
)
2706 struct drm_device
*dev
= state
->dev
;
2707 struct amdgpu_device
*adev
= dev
->dev_private
;
2708 struct amdgpu_display_manager
*dm
= &adev
->dm
;
2709 struct drm_private_state
*priv_state
;
2714 priv_state
= drm_atomic_get_private_obj_state(state
, &dm
->atomic_obj
);
2715 if (IS_ERR(priv_state
))
2716 return PTR_ERR(priv_state
);
2718 *dm_state
= to_dm_atomic_state(priv_state
);
2723 static struct dm_atomic_state
*
2724 dm_atomic_get_new_state(struct drm_atomic_state
*state
)
2726 struct drm_device
*dev
= state
->dev
;
2727 struct amdgpu_device
*adev
= dev
->dev_private
;
2728 struct amdgpu_display_manager
*dm
= &adev
->dm
;
2729 struct drm_private_obj
*obj
;
2730 struct drm_private_state
*new_obj_state
;
2733 for_each_new_private_obj_in_state(state
, obj
, new_obj_state
, i
) {
2734 if (obj
->funcs
== dm
->atomic_obj
.funcs
)
2735 return to_dm_atomic_state(new_obj_state
);
2741 static struct dm_atomic_state
*
2742 dm_atomic_get_old_state(struct drm_atomic_state
*state
)
2744 struct drm_device
*dev
= state
->dev
;
2745 struct amdgpu_device
*adev
= dev
->dev_private
;
2746 struct amdgpu_display_manager
*dm
= &adev
->dm
;
2747 struct drm_private_obj
*obj
;
2748 struct drm_private_state
*old_obj_state
;
2751 for_each_old_private_obj_in_state(state
, obj
, old_obj_state
, i
) {
2752 if (obj
->funcs
== dm
->atomic_obj
.funcs
)
2753 return to_dm_atomic_state(old_obj_state
);
2759 static struct drm_private_state
*
2760 dm_atomic_duplicate_state(struct drm_private_obj
*obj
)
2762 struct dm_atomic_state
*old_state
, *new_state
;
2764 new_state
= kzalloc(sizeof(*new_state
), GFP_KERNEL
);
2768 __drm_atomic_helper_private_obj_duplicate_state(obj
, &new_state
->base
);
2770 old_state
= to_dm_atomic_state(obj
->state
);
2772 if (old_state
&& old_state
->context
)
2773 new_state
->context
= dc_copy_state(old_state
->context
);
2775 if (!new_state
->context
) {
2780 return &new_state
->base
;
2783 static void dm_atomic_destroy_state(struct drm_private_obj
*obj
,
2784 struct drm_private_state
*state
)
2786 struct dm_atomic_state
*dm_state
= to_dm_atomic_state(state
);
2788 if (dm_state
&& dm_state
->context
)
2789 dc_release_state(dm_state
->context
);
2794 static struct drm_private_state_funcs dm_atomic_state_funcs
= {
2795 .atomic_duplicate_state
= dm_atomic_duplicate_state
,
2796 .atomic_destroy_state
= dm_atomic_destroy_state
,
2799 static int amdgpu_dm_mode_config_init(struct amdgpu_device
*adev
)
2801 struct dm_atomic_state
*state
;
2804 adev
->mode_info
.mode_config_initialized
= true;
2806 adev
->ddev
->mode_config
.funcs
= (void *)&amdgpu_dm_mode_funcs
;
2807 adev
->ddev
->mode_config
.helper_private
= &amdgpu_dm_mode_config_helperfuncs
;
2809 adev
->ddev
->mode_config
.max_width
= 16384;
2810 adev
->ddev
->mode_config
.max_height
= 16384;
2812 adev
->ddev
->mode_config
.preferred_depth
= 24;
2813 adev
->ddev
->mode_config
.prefer_shadow
= 1;
2814 /* indicates support for immediate flip */
2815 adev
->ddev
->mode_config
.async_page_flip
= true;
2817 adev
->ddev
->mode_config
.fb_base
= adev
->gmc
.aper_base
;
2819 state
= kzalloc(sizeof(*state
), GFP_KERNEL
);
2823 state
->context
= dc_create_state(adev
->dm
.dc
);
2824 if (!state
->context
) {
2829 dc_resource_state_copy_construct_current(adev
->dm
.dc
, state
->context
);
2831 drm_atomic_private_obj_init(adev
->ddev
,
2832 &adev
->dm
.atomic_obj
,
2834 &dm_atomic_state_funcs
);
2836 r
= amdgpu_display_modeset_create_props(adev
);
2838 dc_release_state(state
->context
);
2843 r
= amdgpu_dm_audio_init(adev
);
2845 dc_release_state(state
->context
);
2853 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
2854 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
2855 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
2857 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
2858 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
2860 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager
*dm
)
2862 #if defined(CONFIG_ACPI)
2863 struct amdgpu_dm_backlight_caps caps
;
2865 memset(&caps
, 0, sizeof(caps
));
2867 if (dm
->backlight_caps
.caps_valid
)
2870 amdgpu_acpi_get_backlight_caps(dm
->adev
, &caps
);
2871 if (caps
.caps_valid
) {
2872 dm
->backlight_caps
.caps_valid
= true;
2873 if (caps
.aux_support
)
2875 dm
->backlight_caps
.min_input_signal
= caps
.min_input_signal
;
2876 dm
->backlight_caps
.max_input_signal
= caps
.max_input_signal
;
2878 dm
->backlight_caps
.min_input_signal
=
2879 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT
;
2880 dm
->backlight_caps
.max_input_signal
=
2881 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT
;
2884 if (dm
->backlight_caps
.aux_support
)
2887 dm
->backlight_caps
.min_input_signal
= AMDGPU_DM_DEFAULT_MIN_BACKLIGHT
;
2888 dm
->backlight_caps
.max_input_signal
= AMDGPU_DM_DEFAULT_MAX_BACKLIGHT
;
2892 static int set_backlight_via_aux(struct dc_link
*link
, uint32_t brightness
)
2899 rc
= dc_link_set_backlight_level_nits(link
, true, brightness
,
2900 AUX_BL_DEFAULT_TRANSITION_TIME_MS
);
2905 static int get_brightness_range(const struct amdgpu_dm_backlight_caps
*caps
,
2906 unsigned *min
, unsigned *max
)
2911 if (caps
->aux_support
) {
2912 // Firmware limits are in nits, DC API wants millinits.
2913 *max
= 1000 * caps
->aux_max_input_signal
;
2914 *min
= 1000 * caps
->aux_min_input_signal
;
2916 // Firmware limits are 8-bit, PWM control is 16-bit.
2917 *max
= 0x101 * caps
->max_input_signal
;
2918 *min
= 0x101 * caps
->min_input_signal
;
2923 static u32
convert_brightness_from_user(const struct amdgpu_dm_backlight_caps
*caps
,
2924 uint32_t brightness
)
2928 if (!get_brightness_range(caps
, &min
, &max
))
2931 // Rescale 0..255 to min..max
2932 return min
+ DIV_ROUND_CLOSEST((max
- min
) * brightness
,
2933 AMDGPU_MAX_BL_LEVEL
);
2936 static u32
convert_brightness_to_user(const struct amdgpu_dm_backlight_caps
*caps
,
2937 uint32_t brightness
)
2941 if (!get_brightness_range(caps
, &min
, &max
))
2944 if (brightness
< min
)
2946 // Rescale min..max to 0..255
2947 return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL
* (brightness
- min
),
2951 static int amdgpu_dm_backlight_update_status(struct backlight_device
*bd
)
2953 struct amdgpu_display_manager
*dm
= bl_get_data(bd
);
2954 struct amdgpu_dm_backlight_caps caps
;
2955 struct dc_link
*link
= NULL
;
2959 amdgpu_dm_update_backlight_caps(dm
);
2960 caps
= dm
->backlight_caps
;
2962 link
= (struct dc_link
*)dm
->backlight_link
;
2964 brightness
= convert_brightness_from_user(&caps
, bd
->props
.brightness
);
2965 // Change brightness based on AUX property
2966 if (caps
.aux_support
)
2967 return set_backlight_via_aux(link
, brightness
);
2969 rc
= dc_link_set_backlight_level(dm
->backlight_link
, brightness
, 0);
2974 static int amdgpu_dm_backlight_get_brightness(struct backlight_device
*bd
)
2976 struct amdgpu_display_manager
*dm
= bl_get_data(bd
);
2977 int ret
= dc_link_get_backlight_level(dm
->backlight_link
);
2979 if (ret
== DC_ERROR_UNEXPECTED
)
2980 return bd
->props
.brightness
;
2981 return convert_brightness_to_user(&dm
->backlight_caps
, ret
);
2984 static const struct backlight_ops amdgpu_dm_backlight_ops
= {
2985 .options
= BL_CORE_SUSPENDRESUME
,
2986 .get_brightness
= amdgpu_dm_backlight_get_brightness
,
2987 .update_status
= amdgpu_dm_backlight_update_status
,
2991 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager
*dm
)
2994 struct backlight_properties props
= { 0 };
2996 amdgpu_dm_update_backlight_caps(dm
);
2998 props
.max_brightness
= AMDGPU_MAX_BL_LEVEL
;
2999 props
.brightness
= AMDGPU_MAX_BL_LEVEL
;
3000 props
.type
= BACKLIGHT_RAW
;
3002 snprintf(bl_name
, sizeof(bl_name
), "amdgpu_bl%d",
3003 dm
->adev
->ddev
->primary
->index
);
3005 dm
->backlight_dev
= backlight_device_register(bl_name
,
3006 dm
->adev
->ddev
->dev
,
3008 &amdgpu_dm_backlight_ops
,
3011 if (IS_ERR(dm
->backlight_dev
))
3012 DRM_ERROR("DM: Backlight registration failed!\n");
3014 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name
);
3019 static int initialize_plane(struct amdgpu_display_manager
*dm
,
3020 struct amdgpu_mode_info
*mode_info
, int plane_id
,
3021 enum drm_plane_type plane_type
,
3022 const struct dc_plane_cap
*plane_cap
)
3024 struct drm_plane
*plane
;
3025 unsigned long possible_crtcs
;
3028 plane
= kzalloc(sizeof(struct drm_plane
), GFP_KERNEL
);
3030 DRM_ERROR("KMS: Failed to allocate plane\n");
3033 plane
->type
= plane_type
;
3036 * HACK: IGT tests expect that the primary plane for a CRTC
3037 * can only have one possible CRTC. Only expose support for
3038 * any CRTC if they're not going to be used as a primary plane
3039 * for a CRTC - like overlay or underlay planes.
3041 possible_crtcs
= 1 << plane_id
;
3042 if (plane_id
>= dm
->dc
->caps
.max_streams
)
3043 possible_crtcs
= 0xff;
3045 ret
= amdgpu_dm_plane_init(dm
, plane
, possible_crtcs
, plane_cap
);
3048 DRM_ERROR("KMS: Failed to initialize plane\n");
3054 mode_info
->planes
[plane_id
] = plane
;
3060 static void register_backlight_device(struct amdgpu_display_manager
*dm
,
3061 struct dc_link
*link
)
3063 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3064 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3066 if ((link
->connector_signal
& (SIGNAL_TYPE_EDP
| SIGNAL_TYPE_LVDS
)) &&
3067 link
->type
!= dc_connection_none
) {
3069 * Event if registration failed, we should continue with
3070 * DM initialization because not having a backlight control
3071 * is better then a black screen.
3073 amdgpu_dm_register_backlight_device(dm
);
3075 if (dm
->backlight_dev
)
3076 dm
->backlight_link
= link
;
3083 * In this architecture, the association
3084 * connector -> encoder -> crtc
3085 * id not really requried. The crtc and connector will hold the
3086 * display_index as an abstraction to use with DAL component
3088 * Returns 0 on success
3090 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device
*adev
)
3092 struct amdgpu_display_manager
*dm
= &adev
->dm
;
3094 struct amdgpu_dm_connector
*aconnector
= NULL
;
3095 struct amdgpu_encoder
*aencoder
= NULL
;
3096 struct amdgpu_mode_info
*mode_info
= &adev
->mode_info
;
3098 int32_t primary_planes
;
3099 enum dc_connection_type new_connection_type
= dc_connection_none
;
3100 const struct dc_plane_cap
*plane
;
3102 link_cnt
= dm
->dc
->caps
.max_links
;
3103 if (amdgpu_dm_mode_config_init(dm
->adev
)) {
3104 DRM_ERROR("DM: Failed to initialize mode config\n");
3108 /* There is one primary plane per CRTC */
3109 primary_planes
= dm
->dc
->caps
.max_streams
;
3110 ASSERT(primary_planes
<= AMDGPU_MAX_PLANES
);
3113 * Initialize primary planes, implicit planes for legacy IOCTLS.
3114 * Order is reversed to match iteration order in atomic check.
3116 for (i
= (primary_planes
- 1); i
>= 0; i
--) {
3117 plane
= &dm
->dc
->caps
.planes
[i
];
3119 if (initialize_plane(dm
, mode_info
, i
,
3120 DRM_PLANE_TYPE_PRIMARY
, plane
)) {
3121 DRM_ERROR("KMS: Failed to initialize primary plane\n");
3127 * Initialize overlay planes, index starting after primary planes.
3128 * These planes have a higher DRM index than the primary planes since
3129 * they should be considered as having a higher z-order.
3130 * Order is reversed to match iteration order in atomic check.
3132 * Only support DCN for now, and only expose one so we don't encourage
3133 * userspace to use up all the pipes.
3135 for (i
= 0; i
< dm
->dc
->caps
.max_planes
; ++i
) {
3136 struct dc_plane_cap
*plane
= &dm
->dc
->caps
.planes
[i
];
3138 if (plane
->type
!= DC_PLANE_TYPE_DCN_UNIVERSAL
)
3141 if (!plane
->blends_with_above
|| !plane
->blends_with_below
)
3144 if (!plane
->pixel_format_support
.argb8888
)
3147 if (initialize_plane(dm
, NULL
, primary_planes
+ i
,
3148 DRM_PLANE_TYPE_OVERLAY
, plane
)) {
3149 DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3153 /* Only create one overlay plane. */
3157 for (i
= 0; i
< dm
->dc
->caps
.max_streams
; i
++)
3158 if (amdgpu_dm_crtc_init(dm
, mode_info
->planes
[i
], i
)) {
3159 DRM_ERROR("KMS: Failed to initialize crtc\n");
3163 dm
->display_indexes_num
= dm
->dc
->caps
.max_streams
;
3165 /* loops over all connectors on the board */
3166 for (i
= 0; i
< link_cnt
; i
++) {
3167 struct dc_link
*link
= NULL
;
3169 if (i
> AMDGPU_DM_MAX_DISPLAY_INDEX
) {
3171 "KMS: Cannot support more than %d display indexes\n",
3172 AMDGPU_DM_MAX_DISPLAY_INDEX
);
3176 aconnector
= kzalloc(sizeof(*aconnector
), GFP_KERNEL
);
3180 aencoder
= kzalloc(sizeof(*aencoder
), GFP_KERNEL
);
3184 if (amdgpu_dm_encoder_init(dm
->ddev
, aencoder
, i
)) {
3185 DRM_ERROR("KMS: Failed to initialize encoder\n");
3189 if (amdgpu_dm_connector_init(dm
, aconnector
, i
, aencoder
)) {
3190 DRM_ERROR("KMS: Failed to initialize connector\n");
3194 link
= dc_get_link_at_index(dm
->dc
, i
);
3196 if (!dc_link_detect_sink(link
, &new_connection_type
))
3197 DRM_ERROR("KMS: Failed to detect connector\n");
3199 if (aconnector
->base
.force
&& new_connection_type
== dc_connection_none
) {
3200 emulated_link_detect(link
);
3201 amdgpu_dm_update_connector_after_detect(aconnector
);
3203 } else if (dc_link_detect(link
, DETECT_REASON_BOOT
)) {
3204 amdgpu_dm_update_connector_after_detect(aconnector
);
3205 register_backlight_device(dm
, link
);
3206 if (amdgpu_dc_feature_mask
& DC_PSR_MASK
)
3207 amdgpu_dm_set_psr_caps(link
);
3213 /* Software is initialized. Now we can register interrupt handlers. */
3214 switch (adev
->asic_type
) {
3224 case CHIP_POLARIS11
:
3225 case CHIP_POLARIS10
:
3226 case CHIP_POLARIS12
:
3231 if (dce110_register_irq_handlers(dm
->adev
)) {
3232 DRM_ERROR("DM: Failed to initialize IRQ\n");
3236 #if defined(CONFIG_DRM_AMD_DC_DCN)
3242 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3243 case CHIP_SIENNA_CICHLID
:
3244 case CHIP_NAVY_FLOUNDER
:
3246 if (dcn10_register_irq_handlers(dm
->adev
)) {
3247 DRM_ERROR("DM: Failed to initialize IRQ\n");
3253 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev
->asic_type
);
3257 /* No userspace support. */
3258 dm
->dc
->debug
.disable_tri_buf
= true;
3268 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager
*dm
)
3270 drm_mode_config_cleanup(dm
->ddev
);
3271 drm_atomic_private_obj_fini(&dm
->atomic_obj
);
3275 /******************************************************************************
3276 * amdgpu_display_funcs functions
3277 *****************************************************************************/
3280 * dm_bandwidth_update - program display watermarks
3282 * @adev: amdgpu_device pointer
3284 * Calculate and program the display watermarks and line buffer allocation.
3286 static void dm_bandwidth_update(struct amdgpu_device
*adev
)
3288 /* TODO: implement later */
3291 static const struct amdgpu_display_funcs dm_display_funcs
= {
3292 .bandwidth_update
= dm_bandwidth_update
, /* called unconditionally */
3293 .vblank_get_counter
= dm_vblank_get_counter
,/* called unconditionally */
3294 .backlight_set_level
= NULL
, /* never called for DC */
3295 .backlight_get_level
= NULL
, /* never called for DC */
3296 .hpd_sense
= NULL
,/* called unconditionally */
3297 .hpd_set_polarity
= NULL
, /* called unconditionally */
3298 .hpd_get_gpio_reg
= NULL
, /* VBIOS parsing. DAL does it. */
3299 .page_flip_get_scanoutpos
=
3300 dm_crtc_get_scanoutpos
,/* called unconditionally */
3301 .add_encoder
= NULL
, /* VBIOS parsing. DAL does it. */
3302 .add_connector
= NULL
, /* VBIOS parsing. DAL does it. */
3305 #if defined(CONFIG_DEBUG_KERNEL_DC)
3307 static ssize_t
s3_debug_store(struct device
*device
,
3308 struct device_attribute
*attr
,
3314 struct drm_device
*drm_dev
= dev_get_drvdata(device
);
3315 struct amdgpu_device
*adev
= drm_dev
->dev_private
;
3317 ret
= kstrtoint(buf
, 0, &s3_state
);
3322 drm_kms_helper_hotplug_event(adev
->ddev
);
3327 return ret
== 0 ? count
: 0;
3330 DEVICE_ATTR_WO(s3_debug
);
3334 static int dm_early_init(void *handle
)
3336 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
3338 switch (adev
->asic_type
) {
3341 adev
->mode_info
.num_crtc
= 6;
3342 adev
->mode_info
.num_hpd
= 6;
3343 adev
->mode_info
.num_dig
= 6;
3346 adev
->mode_info
.num_crtc
= 4;
3347 adev
->mode_info
.num_hpd
= 6;
3348 adev
->mode_info
.num_dig
= 7;
3352 adev
->mode_info
.num_crtc
= 2;
3353 adev
->mode_info
.num_hpd
= 6;
3354 adev
->mode_info
.num_dig
= 6;
3358 adev
->mode_info
.num_crtc
= 6;
3359 adev
->mode_info
.num_hpd
= 6;
3360 adev
->mode_info
.num_dig
= 7;
3363 adev
->mode_info
.num_crtc
= 3;
3364 adev
->mode_info
.num_hpd
= 6;
3365 adev
->mode_info
.num_dig
= 9;
3368 adev
->mode_info
.num_crtc
= 2;
3369 adev
->mode_info
.num_hpd
= 6;
3370 adev
->mode_info
.num_dig
= 9;
3372 case CHIP_POLARIS11
:
3373 case CHIP_POLARIS12
:
3374 adev
->mode_info
.num_crtc
= 5;
3375 adev
->mode_info
.num_hpd
= 5;
3376 adev
->mode_info
.num_dig
= 5;
3378 case CHIP_POLARIS10
:
3380 adev
->mode_info
.num_crtc
= 6;
3381 adev
->mode_info
.num_hpd
= 6;
3382 adev
->mode_info
.num_dig
= 6;
3387 adev
->mode_info
.num_crtc
= 6;
3388 adev
->mode_info
.num_hpd
= 6;
3389 adev
->mode_info
.num_dig
= 6;
3391 #if defined(CONFIG_DRM_AMD_DC_DCN)
3393 adev
->mode_info
.num_crtc
= 4;
3394 adev
->mode_info
.num_hpd
= 4;
3395 adev
->mode_info
.num_dig
= 4;
3400 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3401 case CHIP_SIENNA_CICHLID
:
3402 case CHIP_NAVY_FLOUNDER
:
3404 adev
->mode_info
.num_crtc
= 6;
3405 adev
->mode_info
.num_hpd
= 6;
3406 adev
->mode_info
.num_dig
= 6;
3409 adev
->mode_info
.num_crtc
= 5;
3410 adev
->mode_info
.num_hpd
= 5;
3411 adev
->mode_info
.num_dig
= 5;
3414 adev
->mode_info
.num_crtc
= 4;
3415 adev
->mode_info
.num_hpd
= 4;
3416 adev
->mode_info
.num_dig
= 4;
3419 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev
->asic_type
);
3423 amdgpu_dm_set_irq_funcs(adev
);
3425 if (adev
->mode_info
.funcs
== NULL
)
3426 adev
->mode_info
.funcs
= &dm_display_funcs
;
3429 * Note: Do NOT change adev->audio_endpt_rreg and
3430 * adev->audio_endpt_wreg because they are initialised in
3431 * amdgpu_device_init()
3433 #if defined(CONFIG_DEBUG_KERNEL_DC)
3436 &dev_attr_s3_debug
);
3442 static bool modeset_required(struct drm_crtc_state
*crtc_state
,
3443 struct dc_stream_state
*new_stream
,
3444 struct dc_stream_state
*old_stream
)
3446 if (!drm_atomic_crtc_needs_modeset(crtc_state
))
3449 if (!crtc_state
->enable
)
3452 return crtc_state
->active
;
3455 static bool modereset_required(struct drm_crtc_state
*crtc_state
)
3457 if (!drm_atomic_crtc_needs_modeset(crtc_state
))
3460 return !crtc_state
->enable
|| !crtc_state
->active
;
3463 static void amdgpu_dm_encoder_destroy(struct drm_encoder
*encoder
)
3465 drm_encoder_cleanup(encoder
);
3469 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs
= {
3470 .destroy
= amdgpu_dm_encoder_destroy
,
3474 static int fill_dc_scaling_info(const struct drm_plane_state
*state
,
3475 struct dc_scaling_info
*scaling_info
)
3477 int scale_w
, scale_h
;
3479 memset(scaling_info
, 0, sizeof(*scaling_info
));
3481 /* Source is fixed 16.16 but we ignore mantissa for now... */
3482 scaling_info
->src_rect
.x
= state
->src_x
>> 16;
3483 scaling_info
->src_rect
.y
= state
->src_y
>> 16;
3485 scaling_info
->src_rect
.width
= state
->src_w
>> 16;
3486 if (scaling_info
->src_rect
.width
== 0)
3489 scaling_info
->src_rect
.height
= state
->src_h
>> 16;
3490 if (scaling_info
->src_rect
.height
== 0)
3493 scaling_info
->dst_rect
.x
= state
->crtc_x
;
3494 scaling_info
->dst_rect
.y
= state
->crtc_y
;
3496 if (state
->crtc_w
== 0)
3499 scaling_info
->dst_rect
.width
= state
->crtc_w
;
3501 if (state
->crtc_h
== 0)
3504 scaling_info
->dst_rect
.height
= state
->crtc_h
;
3506 /* DRM doesn't specify clipping on destination output. */
3507 scaling_info
->clip_rect
= scaling_info
->dst_rect
;
3509 /* TODO: Validate scaling per-format with DC plane caps */
3510 scale_w
= scaling_info
->dst_rect
.width
* 1000 /
3511 scaling_info
->src_rect
.width
;
3513 if (scale_w
< 250 || scale_w
> 16000)
3516 scale_h
= scaling_info
->dst_rect
.height
* 1000 /
3517 scaling_info
->src_rect
.height
;
3519 if (scale_h
< 250 || scale_h
> 16000)
3523 * The "scaling_quality" can be ignored for now, quality = 0 has DC
3524 * assume reasonable defaults based on the format.
3530 static int get_fb_info(const struct amdgpu_framebuffer
*amdgpu_fb
,
3531 uint64_t *tiling_flags
, bool *tmz_surface
)
3533 struct amdgpu_bo
*rbo
= gem_to_amdgpu_bo(amdgpu_fb
->base
.obj
[0]);
3534 int r
= amdgpu_bo_reserve(rbo
, false);
3537 /* Don't show error message when returning -ERESTARTSYS */
3538 if (r
!= -ERESTARTSYS
)
3539 DRM_ERROR("Unable to reserve buffer: %d\n", r
);
3544 amdgpu_bo_get_tiling_flags(rbo
, tiling_flags
);
3547 *tmz_surface
= amdgpu_bo_encrypted(rbo
);
3549 amdgpu_bo_unreserve(rbo
);
3554 static inline uint64_t get_dcc_address(uint64_t address
, uint64_t tiling_flags
)
3556 uint32_t offset
= AMDGPU_TILING_GET(tiling_flags
, DCC_OFFSET_256B
);
3558 return offset
? (address
+ offset
* 256) : 0;
3562 fill_plane_dcc_attributes(struct amdgpu_device
*adev
,
3563 const struct amdgpu_framebuffer
*afb
,
3564 const enum surface_pixel_format format
,
3565 const enum dc_rotation_angle rotation
,
3566 const struct plane_size
*plane_size
,
3567 const union dc_tiling_info
*tiling_info
,
3568 const uint64_t info
,
3569 struct dc_plane_dcc_param
*dcc
,
3570 struct dc_plane_address
*address
,
3571 bool force_disable_dcc
)
3573 struct dc
*dc
= adev
->dm
.dc
;
3574 struct dc_dcc_surface_param input
;
3575 struct dc_surface_dcc_cap output
;
3576 uint32_t offset
= AMDGPU_TILING_GET(info
, DCC_OFFSET_256B
);
3577 uint32_t i64b
= AMDGPU_TILING_GET(info
, DCC_INDEPENDENT_64B
) != 0;
3578 uint64_t dcc_address
;
3580 memset(&input
, 0, sizeof(input
));
3581 memset(&output
, 0, sizeof(output
));
3583 if (force_disable_dcc
)
3589 if (format
>= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN
)
3592 if (!dc
->cap_funcs
.get_dcc_compression_cap
)
3595 input
.format
= format
;
3596 input
.surface_size
.width
= plane_size
->surface_size
.width
;
3597 input
.surface_size
.height
= plane_size
->surface_size
.height
;
3598 input
.swizzle_mode
= tiling_info
->gfx9
.swizzle
;
3600 if (rotation
== ROTATION_ANGLE_0
|| rotation
== ROTATION_ANGLE_180
)
3601 input
.scan
= SCAN_DIRECTION_HORIZONTAL
;
3602 else if (rotation
== ROTATION_ANGLE_90
|| rotation
== ROTATION_ANGLE_270
)
3603 input
.scan
= SCAN_DIRECTION_VERTICAL
;
3605 if (!dc
->cap_funcs
.get_dcc_compression_cap(dc
, &input
, &output
))
3608 if (!output
.capable
)
3611 if (i64b
== 0 && output
.grph
.rgb
.independent_64b_blks
!= 0)
3616 AMDGPU_TILING_GET(info
, DCC_PITCH_MAX
) + 1;
3617 dcc
->independent_64b_blks
= i64b
;
3619 dcc_address
= get_dcc_address(afb
->address
, info
);
3620 address
->grph
.meta_addr
.low_part
= lower_32_bits(dcc_address
);
3621 address
->grph
.meta_addr
.high_part
= upper_32_bits(dcc_address
);
3627 fill_plane_buffer_attributes(struct amdgpu_device
*adev
,
3628 const struct amdgpu_framebuffer
*afb
,
3629 const enum surface_pixel_format format
,
3630 const enum dc_rotation_angle rotation
,
3631 const uint64_t tiling_flags
,
3632 union dc_tiling_info
*tiling_info
,
3633 struct plane_size
*plane_size
,
3634 struct dc_plane_dcc_param
*dcc
,
3635 struct dc_plane_address
*address
,
3637 bool force_disable_dcc
)
3639 const struct drm_framebuffer
*fb
= &afb
->base
;
3642 memset(tiling_info
, 0, sizeof(*tiling_info
));
3643 memset(plane_size
, 0, sizeof(*plane_size
));
3644 memset(dcc
, 0, sizeof(*dcc
));
3645 memset(address
, 0, sizeof(*address
));
3647 address
->tmz_surface
= tmz_surface
;
3649 if (format
< SURFACE_PIXEL_FORMAT_VIDEO_BEGIN
) {
3650 plane_size
->surface_size
.x
= 0;
3651 plane_size
->surface_size
.y
= 0;
3652 plane_size
->surface_size
.width
= fb
->width
;
3653 plane_size
->surface_size
.height
= fb
->height
;
3654 plane_size
->surface_pitch
=
3655 fb
->pitches
[0] / fb
->format
->cpp
[0];
3657 address
->type
= PLN_ADDR_TYPE_GRAPHICS
;
3658 address
->grph
.addr
.low_part
= lower_32_bits(afb
->address
);
3659 address
->grph
.addr
.high_part
= upper_32_bits(afb
->address
);
3660 } else if (format
< SURFACE_PIXEL_FORMAT_INVALID
) {
3661 uint64_t chroma_addr
= afb
->address
+ fb
->offsets
[1];
3663 plane_size
->surface_size
.x
= 0;
3664 plane_size
->surface_size
.y
= 0;
3665 plane_size
->surface_size
.width
= fb
->width
;
3666 plane_size
->surface_size
.height
= fb
->height
;
3667 plane_size
->surface_pitch
=
3668 fb
->pitches
[0] / fb
->format
->cpp
[0];
3670 plane_size
->chroma_size
.x
= 0;
3671 plane_size
->chroma_size
.y
= 0;
3672 /* TODO: set these based on surface format */
3673 plane_size
->chroma_size
.width
= fb
->width
/ 2;
3674 plane_size
->chroma_size
.height
= fb
->height
/ 2;
3676 plane_size
->chroma_pitch
=
3677 fb
->pitches
[1] / fb
->format
->cpp
[1];
3679 address
->type
= PLN_ADDR_TYPE_VIDEO_PROGRESSIVE
;
3680 address
->video_progressive
.luma_addr
.low_part
=
3681 lower_32_bits(afb
->address
);
3682 address
->video_progressive
.luma_addr
.high_part
=
3683 upper_32_bits(afb
->address
);
3684 address
->video_progressive
.chroma_addr
.low_part
=
3685 lower_32_bits(chroma_addr
);
3686 address
->video_progressive
.chroma_addr
.high_part
=
3687 upper_32_bits(chroma_addr
);
3690 /* Fill GFX8 params */
3691 if (AMDGPU_TILING_GET(tiling_flags
, ARRAY_MODE
) == DC_ARRAY_2D_TILED_THIN1
) {
3692 unsigned int bankw
, bankh
, mtaspect
, tile_split
, num_banks
;
3694 bankw
= AMDGPU_TILING_GET(tiling_flags
, BANK_WIDTH
);
3695 bankh
= AMDGPU_TILING_GET(tiling_flags
, BANK_HEIGHT
);
3696 mtaspect
= AMDGPU_TILING_GET(tiling_flags
, MACRO_TILE_ASPECT
);
3697 tile_split
= AMDGPU_TILING_GET(tiling_flags
, TILE_SPLIT
);
3698 num_banks
= AMDGPU_TILING_GET(tiling_flags
, NUM_BANKS
);
3700 /* XXX fix me for VI */
3701 tiling_info
->gfx8
.num_banks
= num_banks
;
3702 tiling_info
->gfx8
.array_mode
=
3703 DC_ARRAY_2D_TILED_THIN1
;
3704 tiling_info
->gfx8
.tile_split
= tile_split
;
3705 tiling_info
->gfx8
.bank_width
= bankw
;
3706 tiling_info
->gfx8
.bank_height
= bankh
;
3707 tiling_info
->gfx8
.tile_aspect
= mtaspect
;
3708 tiling_info
->gfx8
.tile_mode
=
3709 DC_ADDR_SURF_MICRO_TILING_DISPLAY
;
3710 } else if (AMDGPU_TILING_GET(tiling_flags
, ARRAY_MODE
)
3711 == DC_ARRAY_1D_TILED_THIN1
) {
3712 tiling_info
->gfx8
.array_mode
= DC_ARRAY_1D_TILED_THIN1
;
3715 tiling_info
->gfx8
.pipe_config
=
3716 AMDGPU_TILING_GET(tiling_flags
, PIPE_CONFIG
);
3718 if (adev
->asic_type
== CHIP_VEGA10
||
3719 adev
->asic_type
== CHIP_VEGA12
||
3720 adev
->asic_type
== CHIP_VEGA20
||
3721 adev
->asic_type
== CHIP_NAVI10
||
3722 adev
->asic_type
== CHIP_NAVI14
||
3723 adev
->asic_type
== CHIP_NAVI12
||
3724 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3725 adev
->asic_type
== CHIP_SIENNA_CICHLID
||
3726 adev
->asic_type
== CHIP_NAVY_FLOUNDER
||
3728 adev
->asic_type
== CHIP_RENOIR
||
3729 adev
->asic_type
== CHIP_RAVEN
) {
3730 /* Fill GFX9 params */
3731 tiling_info
->gfx9
.num_pipes
=
3732 adev
->gfx
.config
.gb_addr_config_fields
.num_pipes
;
3733 tiling_info
->gfx9
.num_banks
=
3734 adev
->gfx
.config
.gb_addr_config_fields
.num_banks
;
3735 tiling_info
->gfx9
.pipe_interleave
=
3736 adev
->gfx
.config
.gb_addr_config_fields
.pipe_interleave_size
;
3737 tiling_info
->gfx9
.num_shader_engines
=
3738 adev
->gfx
.config
.gb_addr_config_fields
.num_se
;
3739 tiling_info
->gfx9
.max_compressed_frags
=
3740 adev
->gfx
.config
.gb_addr_config_fields
.max_compress_frags
;
3741 tiling_info
->gfx9
.num_rb_per_se
=
3742 adev
->gfx
.config
.gb_addr_config_fields
.num_rb_per_se
;
3743 tiling_info
->gfx9
.swizzle
=
3744 AMDGPU_TILING_GET(tiling_flags
, SWIZZLE_MODE
);
3745 tiling_info
->gfx9
.shaderEnable
= 1;
3747 #ifdef CONFIG_DRM_AMD_DC_DCN3_0
3748 if (adev
->asic_type
== CHIP_SIENNA_CICHLID
||
3749 adev
->asic_type
== CHIP_NAVY_FLOUNDER
)
3750 tiling_info
->gfx9
.num_pkrs
= adev
->gfx
.config
.gb_addr_config_fields
.num_pkrs
;
3752 ret
= fill_plane_dcc_attributes(adev
, afb
, format
, rotation
,
3753 plane_size
, tiling_info
,
3754 tiling_flags
, dcc
, address
,
3764 fill_blending_from_plane_state(const struct drm_plane_state
*plane_state
,
3765 bool *per_pixel_alpha
, bool *global_alpha
,
3766 int *global_alpha_value
)
3768 *per_pixel_alpha
= false;
3769 *global_alpha
= false;
3770 *global_alpha_value
= 0xff;
3772 if (plane_state
->plane
->type
!= DRM_PLANE_TYPE_OVERLAY
)
3775 if (plane_state
->pixel_blend_mode
== DRM_MODE_BLEND_PREMULTI
) {
3776 static const uint32_t alpha_formats
[] = {
3777 DRM_FORMAT_ARGB8888
,
3778 DRM_FORMAT_RGBA8888
,
3779 DRM_FORMAT_ABGR8888
,
3781 uint32_t format
= plane_state
->fb
->format
->format
;
3784 for (i
= 0; i
< ARRAY_SIZE(alpha_formats
); ++i
) {
3785 if (format
== alpha_formats
[i
]) {
3786 *per_pixel_alpha
= true;
3792 if (plane_state
->alpha
< 0xffff) {
3793 *global_alpha
= true;
3794 *global_alpha_value
= plane_state
->alpha
>> 8;
3799 fill_plane_color_attributes(const struct drm_plane_state
*plane_state
,
3800 const enum surface_pixel_format format
,
3801 enum dc_color_space
*color_space
)
3805 *color_space
= COLOR_SPACE_SRGB
;
3807 /* DRM color properties only affect non-RGB formats. */
3808 if (format
< SURFACE_PIXEL_FORMAT_VIDEO_BEGIN
)
3811 full_range
= (plane_state
->color_range
== DRM_COLOR_YCBCR_FULL_RANGE
);
3813 switch (plane_state
->color_encoding
) {
3814 case DRM_COLOR_YCBCR_BT601
:
3816 *color_space
= COLOR_SPACE_YCBCR601
;
3818 *color_space
= COLOR_SPACE_YCBCR601_LIMITED
;
3821 case DRM_COLOR_YCBCR_BT709
:
3823 *color_space
= COLOR_SPACE_YCBCR709
;
3825 *color_space
= COLOR_SPACE_YCBCR709_LIMITED
;
3828 case DRM_COLOR_YCBCR_BT2020
:
3830 *color_space
= COLOR_SPACE_2020_YCBCR
;
3843 fill_dc_plane_info_and_addr(struct amdgpu_device
*adev
,
3844 const struct drm_plane_state
*plane_state
,
3845 const uint64_t tiling_flags
,
3846 struct dc_plane_info
*plane_info
,
3847 struct dc_plane_address
*address
,
3849 bool force_disable_dcc
)
3851 const struct drm_framebuffer
*fb
= plane_state
->fb
;
3852 const struct amdgpu_framebuffer
*afb
=
3853 to_amdgpu_framebuffer(plane_state
->fb
);
3854 struct drm_format_name_buf format_name
;
3857 memset(plane_info
, 0, sizeof(*plane_info
));
3859 switch (fb
->format
->format
) {
3861 plane_info
->format
=
3862 SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS
;
3864 case DRM_FORMAT_RGB565
:
3865 plane_info
->format
= SURFACE_PIXEL_FORMAT_GRPH_RGB565
;
3867 case DRM_FORMAT_XRGB8888
:
3868 case DRM_FORMAT_ARGB8888
:
3869 plane_info
->format
= SURFACE_PIXEL_FORMAT_GRPH_ARGB8888
;
3871 case DRM_FORMAT_XRGB2101010
:
3872 case DRM_FORMAT_ARGB2101010
:
3873 plane_info
->format
= SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010
;
3875 case DRM_FORMAT_XBGR2101010
:
3876 case DRM_FORMAT_ABGR2101010
:
3877 plane_info
->format
= SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010
;
3879 case DRM_FORMAT_XBGR8888
:
3880 case DRM_FORMAT_ABGR8888
:
3881 plane_info
->format
= SURFACE_PIXEL_FORMAT_GRPH_ABGR8888
;
3883 case DRM_FORMAT_NV21
:
3884 plane_info
->format
= SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr
;
3886 case DRM_FORMAT_NV12
:
3887 plane_info
->format
= SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb
;
3889 case DRM_FORMAT_P010
:
3890 plane_info
->format
= SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb
;
3892 case DRM_FORMAT_XRGB16161616F
:
3893 case DRM_FORMAT_ARGB16161616F
:
3894 plane_info
->format
= SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F
;
3896 case DRM_FORMAT_XBGR16161616F
:
3897 case DRM_FORMAT_ABGR16161616F
:
3898 plane_info
->format
= SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F
;
3902 "Unsupported screen format %s\n",
3903 drm_get_format_name(fb
->format
->format
, &format_name
));
3907 switch (plane_state
->rotation
& DRM_MODE_ROTATE_MASK
) {
3908 case DRM_MODE_ROTATE_0
:
3909 plane_info
->rotation
= ROTATION_ANGLE_0
;
3911 case DRM_MODE_ROTATE_90
:
3912 plane_info
->rotation
= ROTATION_ANGLE_90
;
3914 case DRM_MODE_ROTATE_180
:
3915 plane_info
->rotation
= ROTATION_ANGLE_180
;
3917 case DRM_MODE_ROTATE_270
:
3918 plane_info
->rotation
= ROTATION_ANGLE_270
;
3921 plane_info
->rotation
= ROTATION_ANGLE_0
;
3925 plane_info
->visible
= true;
3926 plane_info
->stereo_format
= PLANE_STEREO_FORMAT_NONE
;
3928 plane_info
->layer_index
= 0;
3930 ret
= fill_plane_color_attributes(plane_state
, plane_info
->format
,
3931 &plane_info
->color_space
);
3935 ret
= fill_plane_buffer_attributes(adev
, afb
, plane_info
->format
,
3936 plane_info
->rotation
, tiling_flags
,
3937 &plane_info
->tiling_info
,
3938 &plane_info
->plane_size
,
3939 &plane_info
->dcc
, address
, tmz_surface
,
3944 fill_blending_from_plane_state(
3945 plane_state
, &plane_info
->per_pixel_alpha
,
3946 &plane_info
->global_alpha
, &plane_info
->global_alpha_value
);
3951 static int fill_dc_plane_attributes(struct amdgpu_device
*adev
,
3952 struct dc_plane_state
*dc_plane_state
,
3953 struct drm_plane_state
*plane_state
,
3954 struct drm_crtc_state
*crtc_state
)
3956 struct dm_crtc_state
*dm_crtc_state
= to_dm_crtc_state(crtc_state
);
3957 const struct amdgpu_framebuffer
*amdgpu_fb
=
3958 to_amdgpu_framebuffer(plane_state
->fb
);
3959 struct dc_scaling_info scaling_info
;
3960 struct dc_plane_info plane_info
;
3961 uint64_t tiling_flags
;
3963 bool tmz_surface
= false;
3964 bool force_disable_dcc
= false;
3966 ret
= fill_dc_scaling_info(plane_state
, &scaling_info
);
3970 dc_plane_state
->src_rect
= scaling_info
.src_rect
;
3971 dc_plane_state
->dst_rect
= scaling_info
.dst_rect
;
3972 dc_plane_state
->clip_rect
= scaling_info
.clip_rect
;
3973 dc_plane_state
->scaling_quality
= scaling_info
.scaling_quality
;
3975 ret
= get_fb_info(amdgpu_fb
, &tiling_flags
, &tmz_surface
);
3979 force_disable_dcc
= adev
->asic_type
== CHIP_RAVEN
&& adev
->in_suspend
;
3980 ret
= fill_dc_plane_info_and_addr(adev
, plane_state
, tiling_flags
,
3982 &dc_plane_state
->address
,
3988 dc_plane_state
->format
= plane_info
.format
;
3989 dc_plane_state
->color_space
= plane_info
.color_space
;
3990 dc_plane_state
->format
= plane_info
.format
;
3991 dc_plane_state
->plane_size
= plane_info
.plane_size
;
3992 dc_plane_state
->rotation
= plane_info
.rotation
;
3993 dc_plane_state
->horizontal_mirror
= plane_info
.horizontal_mirror
;
3994 dc_plane_state
->stereo_format
= plane_info
.stereo_format
;
3995 dc_plane_state
->tiling_info
= plane_info
.tiling_info
;
3996 dc_plane_state
->visible
= plane_info
.visible
;
3997 dc_plane_state
->per_pixel_alpha
= plane_info
.per_pixel_alpha
;
3998 dc_plane_state
->global_alpha
= plane_info
.global_alpha
;
3999 dc_plane_state
->global_alpha_value
= plane_info
.global_alpha_value
;
4000 dc_plane_state
->dcc
= plane_info
.dcc
;
4001 dc_plane_state
->layer_index
= plane_info
.layer_index
; // Always returns 0
4004 * Always set input transfer function, since plane state is refreshed
4007 ret
= amdgpu_dm_update_plane_color_mgmt(dm_crtc_state
, dc_plane_state
);
4014 static void update_stream_scaling_settings(const struct drm_display_mode
*mode
,
4015 const struct dm_connector_state
*dm_state
,
4016 struct dc_stream_state
*stream
)
4018 enum amdgpu_rmx_type rmx_type
;
4020 struct rect src
= { 0 }; /* viewport in composition space*/
4021 struct rect dst
= { 0 }; /* stream addressable area */
4023 /* no mode. nothing to be done */
4027 /* Full screen scaling by default */
4028 src
.width
= mode
->hdisplay
;
4029 src
.height
= mode
->vdisplay
;
4030 dst
.width
= stream
->timing
.h_addressable
;
4031 dst
.height
= stream
->timing
.v_addressable
;
4034 rmx_type
= dm_state
->scaling
;
4035 if (rmx_type
== RMX_ASPECT
|| rmx_type
== RMX_OFF
) {
4036 if (src
.width
* dst
.height
<
4037 src
.height
* dst
.width
) {
4038 /* height needs less upscaling/more downscaling */
4039 dst
.width
= src
.width
*
4040 dst
.height
/ src
.height
;
4042 /* width needs less upscaling/more downscaling */
4043 dst
.height
= src
.height
*
4044 dst
.width
/ src
.width
;
4046 } else if (rmx_type
== RMX_CENTER
) {
4050 dst
.x
= (stream
->timing
.h_addressable
- dst
.width
) / 2;
4051 dst
.y
= (stream
->timing
.v_addressable
- dst
.height
) / 2;
4053 if (dm_state
->underscan_enable
) {
4054 dst
.x
+= dm_state
->underscan_hborder
/ 2;
4055 dst
.y
+= dm_state
->underscan_vborder
/ 2;
4056 dst
.width
-= dm_state
->underscan_hborder
;
4057 dst
.height
-= dm_state
->underscan_vborder
;
4064 DRM_DEBUG_DRIVER("Destination Rectangle x:%d y:%d width:%d height:%d\n",
4065 dst
.x
, dst
.y
, dst
.width
, dst
.height
);
4069 static enum dc_color_depth
4070 convert_color_depth_from_display_info(const struct drm_connector
*connector
,
4071 bool is_y420
, int requested_bpc
)
4078 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
4079 if (connector
->display_info
.hdmi
.y420_dc_modes
& DRM_EDID_YCBCR420_DC_48
)
4081 else if (connector
->display_info
.hdmi
.y420_dc_modes
& DRM_EDID_YCBCR420_DC_36
)
4083 else if (connector
->display_info
.hdmi
.y420_dc_modes
& DRM_EDID_YCBCR420_DC_30
)
4086 bpc
= (uint8_t)connector
->display_info
.bpc
;
4087 /* Assume 8 bpc by default if no bpc is specified. */
4088 bpc
= bpc
? bpc
: 8;
4091 if (requested_bpc
> 0) {
4093 * Cap display bpc based on the user requested value.
4095 * The value for state->max_bpc may not correctly updated
4096 * depending on when the connector gets added to the state
4097 * or if this was called outside of atomic check, so it
4098 * can't be used directly.
4100 bpc
= min_t(u8
, bpc
, requested_bpc
);
4102 /* Round down to the nearest even number. */
4103 bpc
= bpc
- (bpc
& 1);
4109 * Temporary Work around, DRM doesn't parse color depth for
4110 * EDID revision before 1.4
4111 * TODO: Fix edid parsing
4113 return COLOR_DEPTH_888
;
4115 return COLOR_DEPTH_666
;
4117 return COLOR_DEPTH_888
;
4119 return COLOR_DEPTH_101010
;
4121 return COLOR_DEPTH_121212
;
4123 return COLOR_DEPTH_141414
;
4125 return COLOR_DEPTH_161616
;
4127 return COLOR_DEPTH_UNDEFINED
;
4131 static enum dc_aspect_ratio
4132 get_aspect_ratio(const struct drm_display_mode
*mode_in
)
4134 /* 1-1 mapping, since both enums follow the HDMI spec. */
4135 return (enum dc_aspect_ratio
) mode_in
->picture_aspect_ratio
;
4138 static enum dc_color_space
4139 get_output_color_space(const struct dc_crtc_timing
*dc_crtc_timing
)
4141 enum dc_color_space color_space
= COLOR_SPACE_SRGB
;
4143 switch (dc_crtc_timing
->pixel_encoding
) {
4144 case PIXEL_ENCODING_YCBCR422
:
4145 case PIXEL_ENCODING_YCBCR444
:
4146 case PIXEL_ENCODING_YCBCR420
:
4149 * 27030khz is the separation point between HDTV and SDTV
4150 * according to HDMI spec, we use YCbCr709 and YCbCr601
4153 if (dc_crtc_timing
->pix_clk_100hz
> 270300) {
4154 if (dc_crtc_timing
->flags
.Y_ONLY
)
4156 COLOR_SPACE_YCBCR709_LIMITED
;
4158 color_space
= COLOR_SPACE_YCBCR709
;
4160 if (dc_crtc_timing
->flags
.Y_ONLY
)
4162 COLOR_SPACE_YCBCR601_LIMITED
;
4164 color_space
= COLOR_SPACE_YCBCR601
;
4169 case PIXEL_ENCODING_RGB
:
4170 color_space
= COLOR_SPACE_SRGB
;
static bool adjust_colour_depth_from_display_info(
	struct dc_crtc_timing *timing_out,
	const struct drm_display_info *info)
{
	enum dc_color_depth depth = timing_out->display_color_depth;
	int normalized_clk;

	do {
		normalized_clk = timing_out->pix_clk_100hz / 10;
		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
			normalized_clk /= 2;
		/* Adjusting pix clock following on HDMI spec based on colour depth */
		switch (depth) {
		case COLOR_DEPTH_888:
			break;
		case COLOR_DEPTH_101010:
			normalized_clk = (normalized_clk * 30) / 24;
			break;
		case COLOR_DEPTH_121212:
			normalized_clk = (normalized_clk * 36) / 24;
			break;
		case COLOR_DEPTH_161616:
			normalized_clk = (normalized_clk * 48) / 24;
			break;
		default:
			/* The above depths are the only ones valid for HDMI. */
			return false;
		}
		if (normalized_clk <= info->max_tmds_clock) {
			timing_out->display_color_depth = depth;
			return true;
		}
	} while (--depth > COLOR_DEPTH_666);
	return false;
}
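/*
 * Rough worked example (illustrative only, not in the original source): a
 * 1080p60 timing has pix_clk_100hz = 1485000, so normalized_clk starts at
 * 148500 (kHz). At 12 bpc the HDMI deep-color scaling (x36/24) raises it to
 * 222750 kHz, which still fits a typical 340000 kHz TMDS limit; if it did
 * not, the loop above would retry at the next lower depth.
 */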
static void fill_stream_properties_from_drm_display_mode(
	struct dc_stream_state *stream,
	const struct drm_display_mode *mode_in,
	const struct drm_connector *connector,
	const struct drm_connector_state *connector_state,
	const struct dc_stream_state *old_stream,
	int requested_bpc)
{
	struct dc_crtc_timing *timing_out = &stream->timing;
	const struct drm_display_info *info = &connector->display_info;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct hdmi_vendor_infoframe hv_frame;
	struct hdmi_avi_infoframe avi_frame;

	memset(&hv_frame, 0, sizeof(hv_frame));
	memset(&avi_frame, 0, sizeof(avi_frame));

	timing_out->h_border_left = 0;
	timing_out->h_border_right = 0;
	timing_out->v_border_top = 0;
	timing_out->v_border_bottom = 0;
	/* TODO: un-hardcode */
	if (drm_mode_is_420_only(info, mode_in)
			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
	else if (drm_mode_is_420_also(info, mode_in)
			&& aconnector->force_yuv420_output)
		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
	else
		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;

	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
	timing_out->display_color_depth = convert_color_depth_from_display_info(
		connector,
		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
		requested_bpc);
	timing_out->scan_type = SCANNING_TYPE_NODATA;
	timing_out->hdmi_vic = 0;

	if (old_stream) {
		timing_out->vic = old_stream->timing.vic;
		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
	} else {
		timing_out->vic = drm_match_cea_mode(mode_in);
		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
	}

	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
		timing_out->vic = avi_frame.video_code;
		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
		timing_out->hdmi_vic = hv_frame.vic;
	}

	timing_out->h_addressable = mode_in->crtc_hdisplay;
	timing_out->h_total = mode_in->crtc_htotal;
	timing_out->h_sync_width =
		mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
	timing_out->h_front_porch =
		mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
	timing_out->v_total = mode_in->crtc_vtotal;
	timing_out->v_addressable = mode_in->crtc_vdisplay;
	timing_out->v_front_porch =
		mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
	timing_out->v_sync_width =
		mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
	timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
	timing_out->aspect_ratio = get_aspect_ratio(mode_in);

	stream->output_color_space = get_output_color_space(timing_out);

	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
		    drm_mode_is_420_also(info, mode_in) &&
		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
			adjust_colour_depth_from_display_info(timing_out, info);
		}
	}
}
static void fill_audio_info(struct audio_info *audio_info,
			    const struct drm_connector *drm_connector,
			    const struct dc_sink *dc_sink)
{
	int i = 0;
	int cea_revision = 0;
	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;

	audio_info->manufacture_id = edid_caps->manufacturer_id;
	audio_info->product_id = edid_caps->product_id;

	cea_revision = drm_connector->display_info.cea_rev;

	strscpy(audio_info->display_name,
		edid_caps->display_name,
		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);

	if (cea_revision >= 3) {
		audio_info->mode_count = edid_caps->audio_mode_count;

		for (i = 0; i < audio_info->mode_count; ++i) {
			audio_info->modes[i].format_code =
					(enum audio_format_code)
					(edid_caps->audio_modes[i].format_code);
			audio_info->modes[i].channel_count =
					edid_caps->audio_modes[i].channel_count;
			audio_info->modes[i].sample_rates.all =
					edid_caps->audio_modes[i].sample_rate;
			audio_info->modes[i].sample_size =
					edid_caps->audio_modes[i].sample_size;
		}
	}

	audio_info->flags.all = edid_caps->speaker_flags;

	/* TODO: We only check for the progressive mode, check for interlace mode too */
	if (drm_connector->latency_present[0]) {
		audio_info->video_latency = drm_connector->video_latency[0];
		audio_info->audio_latency = drm_connector->audio_latency[0];
	}

	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
}

static void
copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
				      struct drm_display_mode *dst_mode)
{
	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
	dst_mode->crtc_clock = src_mode->crtc_clock;
	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
	dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
	dst_mode->crtc_htotal = src_mode->crtc_htotal;
	dst_mode->crtc_hskew = src_mode->crtc_hskew;
	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
}
static void
decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
					const struct drm_display_mode *native_mode,
					bool scale_enabled)
{
	if (scale_enabled) {
		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
	} else if (native_mode->clock == drm_mode->clock &&
			native_mode->htotal == drm_mode->htotal &&
			native_mode->vtotal == drm_mode->vtotal) {
		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
	} else {
		/* no scaling nor amdgpu inserted, no need to patch */
	}
}

static struct dc_sink *
create_fake_sink(struct amdgpu_dm_connector *aconnector)
{
	struct dc_sink_init_data sink_init_data = { 0 };
	struct dc_sink *sink = NULL;

	sink_init_data.link = aconnector->dc_link;
	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;

	sink = dc_sink_create(&sink_init_data);
	if (!sink) {
		DRM_ERROR("Failed to create sink!\n");
		return NULL;
	}
	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;

	return sink;
}

static void set_multisync_trigger_params(
		struct dc_stream_state *stream)
{
	if (stream->triggered_crtc_reset.enabled) {
		stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
	}
}
static void set_master_stream(struct dc_stream_state *stream_set[],
			      int stream_count)
{
	int j, highest_rfr = 0, master_stream = 0;

	for (j = 0; j < stream_count; j++) {
		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
			int refresh_rate = 0;

			refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
				(stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
			if (refresh_rate > highest_rfr) {
				highest_rfr = refresh_rate;
				master_stream = j;
			}
		}
	}
	for (j = 0; j < stream_count; j++) {
		if (stream_set[j])
			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
	}
}
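/*
 * Worked example (illustrative only, not in the original source): for a CEA
 * 1080p60 stream, pix_clk_100hz = 1485000, h_total = 2200 and v_total = 1125,
 * so the expression above gives (1485000 * 100) / (2200 * 1125) = 60 Hz. The
 * stream with the highest refresh rate becomes the master that the other
 * synchronized CRTCs reset against.
 */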
static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
{
	int i = 0;

	if (context->stream_count < 2)
		return;
	for (i = 0; i < context->stream_count; i++) {
		if (!context->streams[i])
			continue;
		/*
		 * TODO: add a function to read AMD VSDB bits and set
		 * crtc_sync_master.multi_sync_enabled flag
		 * For now it's set to false
		 */
		set_multisync_trigger_params(context->streams[i]);
	}
	set_master_stream(context->streams, context->stream_count);
}
static struct dc_stream_state *
create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
		       const struct drm_display_mode *drm_mode,
		       const struct dm_connector_state *dm_state,
		       const struct dc_stream_state *old_stream,
		       int requested_bpc)
{
	struct drm_display_mode *preferred_mode = NULL;
	struct drm_connector *drm_connector;
	const struct drm_connector_state *con_state =
		dm_state ? &dm_state->base : NULL;
	struct dc_stream_state *stream = NULL;
	struct drm_display_mode mode = *drm_mode;
	bool native_mode_found = false;
	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
	int mode_refresh;
	int preferred_refresh = 0;
#if defined(CONFIG_DRM_AMD_DC_DCN)
	struct dsc_dec_dpcd_caps dsc_caps;
#endif
	uint32_t link_bandwidth_kbps;

	struct dc_sink *sink = NULL;

	if (aconnector == NULL) {
		DRM_ERROR("aconnector is NULL!\n");
		return stream;
	}

	drm_connector = &aconnector->base;

	if (!aconnector->dc_sink) {
		sink = create_fake_sink(aconnector);
		if (!sink)
			return stream;
	} else {
		sink = aconnector->dc_sink;
		dc_sink_retain(sink);
	}

	stream = dc_create_stream_for_sink(sink);

	if (stream == NULL) {
		DRM_ERROR("Failed to create stream for sink!\n");
		goto finish;
	}

	stream->dm_stream_context = aconnector;

	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;

	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
		/* Search for preferred mode */
		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
			native_mode_found = true;
			break;
		}
	}
	if (!native_mode_found)
		preferred_mode = list_first_entry_or_null(
				&aconnector->base.modes,
				struct drm_display_mode,
				head);

	mode_refresh = drm_mode_vrefresh(&mode);

	if (preferred_mode == NULL) {
		/*
		 * This may not be an error, the use case is when we have no
		 * usermode calls to reset and set mode upon hotplug. In this
		 * case, we call set mode ourselves to restore the previous mode
		 * and the modelist may not be filled in time.
		 */
		DRM_DEBUG_DRIVER("No preferred mode found\n");
	} else {
		decide_crtc_timing_for_drm_display_mode(
				&mode, preferred_mode,
				dm_state ? (dm_state->scaling != RMX_OFF) : false);
		preferred_refresh = drm_mode_vrefresh(preferred_mode);
	}

	drm_mode_set_crtcinfo(&mode, 0);

	/*
	 * If scaling is enabled and the refresh rate didn't change,
	 * we copy the vic and polarities of the old timings.
	 */
	if (!scale || mode_refresh != preferred_refresh)
		fill_stream_properties_from_drm_display_mode(stream,
			&mode, &aconnector->base, con_state, NULL, requested_bpc);
	else
		fill_stream_properties_from_drm_display_mode(stream,
			&mode, &aconnector->base, con_state, old_stream, requested_bpc);

	stream->timing.flags.DSC = 0;

	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
#if defined(CONFIG_DRM_AMD_DC_DCN)
		dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
				      &dsc_caps);
#endif
		link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
							     dc_link_get_link_cap(aconnector->dc_link));

#if defined(CONFIG_DRM_AMD_DC_DCN)
		if (dsc_caps.is_dsc_supported)
			if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
						  &dsc_caps,
						  aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
						  link_bandwidth_kbps,
						  &stream->timing,
						  &stream->timing.dsc_cfg))
				stream->timing.flags.DSC = 1;
#endif
	}

	update_stream_scaling_settings(&mode, dm_state, stream);

	fill_audio_info(
		&stream->audio_info,
		drm_connector,
		sink);

	update_stream_signal(stream, sink);

	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket, false, false);

	if (stream->link->psr_settings.psr_feature_enabled) {
		/*
		 * Decide whether the stream supports VSC SDP colorimetry
		 * before building the VSC info packet.
		 */
		stream->use_vsc_sdp_for_colorimetry = false;
		if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
			stream->use_vsc_sdp_for_colorimetry =
				aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
		} else {
			if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
				stream->use_vsc_sdp_for_colorimetry = true;
		}
		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
	}
finish:
	dc_sink_release(sink);

	return stream;
}
static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
{
	drm_crtc_cleanup(crtc);
	kfree(crtc);
}

static void dm_crtc_destroy_state(struct drm_crtc *crtc,
				  struct drm_crtc_state *state)
{
	struct dm_crtc_state *cur = to_dm_crtc_state(state);

	/* TODO: destroy the dc_stream objects once the stream object is flattened */
	if (cur->stream)
		dc_stream_release(cur->stream);

	__drm_atomic_helper_crtc_destroy_state(state);

	kfree(state);
}

static void dm_crtc_reset_state(struct drm_crtc *crtc)
{
	struct dm_crtc_state *state;

	if (crtc->state)
		dm_crtc_destroy_state(crtc, crtc->state);

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (WARN_ON(!state))
		return;

	__drm_atomic_helper_crtc_reset(crtc, &state->base);
}

static struct drm_crtc_state *
dm_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct dm_crtc_state *state, *cur;

	cur = to_dm_crtc_state(crtc->state);

	if (WARN_ON(!crtc->state))
		return NULL;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);

	if (cur->stream) {
		state->stream = cur->stream;
		dc_stream_retain(state->stream);
	}

	state->active_planes = cur->active_planes;
	state->vrr_params = cur->vrr_params;
	state->vrr_infopacket = cur->vrr_infopacket;
	state->abm_level = cur->abm_level;
	state->vrr_supported = cur->vrr_supported;
	state->freesync_config = cur->freesync_config;
	state->crc_src = cur->crc_src;
	state->cm_has_degamma = cur->cm_has_degamma;
	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;

	/* TODO: duplicate the dc_stream once the stream object is flattened */

	return &state->base;
}
static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
{
	enum dc_irq_source irq_source;
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
	struct amdgpu_device *adev = crtc->dev->dev_private;
	int rc;

	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;

	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;

	DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
			 acrtc->crtc_id, enable ? "en" : "dis", rc);
	return rc;
}

static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
{
	enum dc_irq_source irq_source;
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
	struct amdgpu_device *adev = crtc->dev->dev_private;
	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
	int rc = 0;

	if (enable) {
		/* vblank irq on -> Only need vupdate irq in vrr mode */
		if (amdgpu_dm_vrr_active(acrtc_state))
			rc = dm_set_vupdate_irq(crtc, true);
	} else {
		/* vblank irq off -> vupdate irq off */
		rc = dm_set_vupdate_irq(crtc, false);
	}

	if (rc)
		return rc;

	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
	return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
}

static int dm_enable_vblank(struct drm_crtc *crtc)
{
	return dm_set_vblank(crtc, true);
}

static void dm_disable_vblank(struct drm_crtc *crtc)
{
	dm_set_vblank(crtc, false);
}
/* Implemented only the options currently available for the driver */
static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
	.reset = dm_crtc_reset_state,
	.destroy = amdgpu_dm_crtc_destroy,
	.gamma_set = drm_atomic_helper_legacy_gamma_set,
	.set_config = drm_atomic_helper_set_config,
	.page_flip = drm_atomic_helper_page_flip,
	.atomic_duplicate_state = dm_crtc_duplicate_state,
	.atomic_destroy_state = dm_crtc_destroy_state,
	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
	.enable_vblank = dm_enable_vblank,
	.disable_vblank = dm_disable_vblank,
	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
};
static enum drm_connector_status
amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
{
	bool connected;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

	/*
	 * Notes:
	 * 1. This interface is NOT called in context of HPD irq.
	 * 2. This interface *is called* in context of user-mode ioctl. Which
	 * makes it a bad place for *any* MST-related activity.
	 */

	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
	    !aconnector->fake_enable)
		connected = (aconnector->dc_sink != NULL);
	else
		connected = (aconnector->base.force == DRM_FORCE_ON);

	return (connected ? connector_status_connected :
			connector_status_disconnected);
}

int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
					    struct drm_connector_state *connector_state,
					    struct drm_property *property,
					    uint64_t val)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct dm_connector_state *dm_old_state =
		to_dm_connector_state(connector->state);
	struct dm_connector_state *dm_new_state =
		to_dm_connector_state(connector_state);

	int ret = -EINVAL;

	if (property == dev->mode_config.scaling_mode_property) {
		enum amdgpu_rmx_type rmx_type;

		switch (val) {
		case DRM_MODE_SCALE_CENTER:
			rmx_type = RMX_CENTER;
			break;
		case DRM_MODE_SCALE_ASPECT:
			rmx_type = RMX_ASPECT;
			break;
		case DRM_MODE_SCALE_FULLSCREEN:
			rmx_type = RMX_FULL;
			break;
		case DRM_MODE_SCALE_NONE:
		default:
			rmx_type = RMX_OFF;
			break;
		}

		if (dm_old_state->scaling == rmx_type)
			return 0;

		dm_new_state->scaling = rmx_type;
		ret = 0;
	} else if (property == adev->mode_info.underscan_hborder_property) {
		dm_new_state->underscan_hborder = val;
		ret = 0;
	} else if (property == adev->mode_info.underscan_vborder_property) {
		dm_new_state->underscan_vborder = val;
		ret = 0;
	} else if (property == adev->mode_info.underscan_property) {
		dm_new_state->underscan_enable = val;
		ret = 0;
	} else if (property == adev->mode_info.abm_level_property) {
		dm_new_state->abm_level = val;
		ret = 0;
	}

	return ret;
}
int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
					    const struct drm_connector_state *state,
					    struct drm_property *property,
					    uint64_t *val)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct dm_connector_state *dm_state =
		to_dm_connector_state(state);
	int ret = -EINVAL;

	if (property == dev->mode_config.scaling_mode_property) {
		switch (dm_state->scaling) {
		case RMX_CENTER:
			*val = DRM_MODE_SCALE_CENTER;
			break;
		case RMX_ASPECT:
			*val = DRM_MODE_SCALE_ASPECT;
			break;
		case RMX_FULL:
			*val = DRM_MODE_SCALE_FULLSCREEN;
			break;
		case RMX_OFF:
		default:
			*val = DRM_MODE_SCALE_NONE;
			break;
		}
		ret = 0;
	} else if (property == adev->mode_info.underscan_hborder_property) {
		*val = dm_state->underscan_hborder;
		ret = 0;
	} else if (property == adev->mode_info.underscan_vborder_property) {
		*val = dm_state->underscan_vborder;
		ret = 0;
	} else if (property == adev->mode_info.underscan_property) {
		*val = dm_state->underscan_enable;
		ret = 0;
	} else if (property == adev->mode_info.abm_level_property) {
		*val = dm_state->abm_level;
		ret = 0;
	}

	return ret;
}
static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);

	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
}

static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	const struct dc_link *link = aconnector->dc_link;
	struct amdgpu_device *adev = connector->dev->dev_private;
	struct amdgpu_display_manager *dm = &adev->dm;

#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
	    link->type != dc_connection_none &&
	    dm->backlight_dev) {
		backlight_device_unregister(dm->backlight_dev);
		dm->backlight_dev = NULL;
	}
#endif

	if (aconnector->dc_em_sink)
		dc_sink_release(aconnector->dc_em_sink);
	aconnector->dc_em_sink = NULL;
	if (aconnector->dc_sink)
		dc_sink_release(aconnector->dc_sink);
	aconnector->dc_sink = NULL;

	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
	drm_connector_unregister(connector);
	drm_connector_cleanup(connector);
	if (aconnector->i2c) {
		i2c_del_adapter(&aconnector->i2c->base);
		kfree(aconnector->i2c);
	}
	kfree(aconnector->dm_dp_aux.aux.name);

	kfree(connector);
}

void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
{
	struct dm_connector_state *state =
		to_dm_connector_state(connector->state);

	if (connector->state)
		__drm_atomic_helper_connector_destroy_state(connector->state);

	kfree(state);

	state = kzalloc(sizeof(*state), GFP_KERNEL);

	if (state) {
		state->scaling = RMX_OFF;
		state->underscan_enable = false;
		state->underscan_hborder = 0;
		state->underscan_vborder = 0;
		state->base.max_requested_bpc = 8;
		state->vcpi_slots = 0;

		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
			state->abm_level = amdgpu_dm_abm_level;

		__drm_atomic_helper_connector_reset(connector, &state->base);
	}
}
struct drm_connector_state *
amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
{
	struct dm_connector_state *state =
		to_dm_connector_state(connector->state);

	struct dm_connector_state *new_state =
			kmemdup(state, sizeof(*state), GFP_KERNEL);

	if (!new_state)
		return NULL;

	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);

	new_state->freesync_capable = state->freesync_capable;
	new_state->abm_level = state->abm_level;
	new_state->scaling = state->scaling;
	new_state->underscan_enable = state->underscan_enable;
	new_state->underscan_hborder = state->underscan_hborder;
	new_state->underscan_vborder = state->underscan_vborder;
	new_state->vcpi_slots = state->vcpi_slots;
	new_state->pbn = state->pbn;
	return &new_state->base;
}

static int
amdgpu_dm_connector_late_register(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector =
		to_amdgpu_dm_connector(connector);
	int r;

	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
		amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
		r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
		if (r)
			return r;
	}

#if defined(CONFIG_DEBUG_FS)
	connector_debugfs_init(amdgpu_dm_connector);
#endif

	return 0;
}

static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
	.reset = amdgpu_dm_connector_funcs_reset,
	.detect = amdgpu_dm_connector_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = amdgpu_dm_connector_destroy,
	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
	.late_register = amdgpu_dm_connector_late_register,
	.early_unregister = amdgpu_dm_connector_unregister
};
static int get_modes(struct drm_connector *connector)
{
	return amdgpu_dm_connector_get_modes(connector);
}

static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
{
	struct dc_sink_init_data init_params = {
			.link = aconnector->dc_link,
			.sink_signal = SIGNAL_TYPE_VIRTUAL
	};
	struct edid *edid;

	if (!aconnector->base.edid_blob_ptr) {
		DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
			  aconnector->base.name);

		aconnector->base.force = DRM_FORCE_OFF;
		aconnector->base.override_edid = false;
		return;
	}

	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;

	aconnector->edid = edid;

	aconnector->dc_em_sink = dc_link_add_remote_sink(
		aconnector->dc_link,
		(uint8_t *)edid,
		(edid->extensions + 1) * EDID_LENGTH,
		&init_params);

	if (aconnector->base.force == DRM_FORCE_ON) {
		aconnector->dc_sink = aconnector->dc_link->local_sink ?
		aconnector->dc_link->local_sink :
		aconnector->dc_em_sink;
		dc_sink_retain(aconnector->dc_sink);
	}
}

static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
{
	struct dc_link *link = (struct dc_link *)aconnector->dc_link;

	/*
	 * In case of headless boot with force on for DP managed connector
	 * Those settings have to be != 0 to get initial modeset
	 */
	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
	}

	aconnector->base.override_edid = true;
	create_eml_sink(aconnector);
}
static struct dc_stream_state *
create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
				const struct drm_display_mode *drm_mode,
				const struct dm_connector_state *dm_state,
				const struct dc_stream_state *old_stream)
{
	struct drm_connector *connector = &aconnector->base;
	struct amdgpu_device *adev = connector->dev->dev_private;
	struct dc_stream_state *stream;
	const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
	int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
	enum dc_status dc_result = DC_OK;

	do {
		stream = create_stream_for_sink(aconnector, drm_mode,
						dm_state, old_stream,
						requested_bpc);
		if (stream == NULL) {
			DRM_ERROR("Failed to create stream for sink!\n");
			break;
		}

		dc_result = dc_validate_stream(adev->dm.dc, stream);

		if (dc_result != DC_OK) {
			DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
				      drm_mode->hdisplay,
				      drm_mode->vdisplay,
				      drm_mode->clock,
				      dc_result,
				      dc_status_to_str(dc_result));

			dc_stream_release(stream);
			stream = NULL;
			requested_bpc -= 2; /* lower bpc to retry validation */
		}

	} while (stream == NULL && requested_bpc >= 6);

	return stream;
}
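/*
 * Clarifying note (not in the original source): with the default
 * max_requested_bpc of 8 the loop above tries validation at 8 bpc and then
 * at 6 bpc before giving up; a user-requested 10 or 12 bpc simply adds more
 * retry steps (12 -> 10 -> 8 -> 6) until DC accepts the stream or the 6 bpc
 * floor is reached.
 */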
enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
				   struct drm_display_mode *mode)
{
	int result = MODE_ERROR;
	struct dc_sink *dc_sink;
	/* TODO: Unhardcode stream count */
	struct dc_stream_state *stream;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
		return result;

	/*
	 * Only run this the first time mode_valid is called to initialize
	 * EDID management.
	 */
	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
			!aconnector->dc_em_sink)
		handle_edid_mgmt(aconnector);

	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;

	if (dc_sink == NULL) {
		DRM_ERROR("dc_sink is NULL!\n");
		goto fail;
	}

	stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
	if (stream) {
		dc_stream_release(stream);
		result = MODE_OK;
	}

fail:
	/* TODO: error handling */
	return result;
}
static int fill_hdr_info_packet(const struct drm_connector_state *state,
				struct dc_info_packet *out)
{
	struct hdmi_drm_infoframe frame;
	unsigned char buf[30]; /* 26 + 4 */
	ssize_t len;
	int ret, i;

	memset(out, 0, sizeof(*out));

	if (!state->hdr_output_metadata)
		return 0;

	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
	if (ret)
		return ret;

	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
	if (len < 0)
		return (int)len;

	/* Static metadata is a fixed 26 bytes + 4 byte header. */
	if (len != 30)
		return -EINVAL;

	/* Prepare the infopacket for DC. */
	switch (state->connector->connector_type) {
	case DRM_MODE_CONNECTOR_HDMIA:
		out->hb0 = 0x87; /* type */
		out->hb1 = 0x01; /* version */
		out->hb2 = 0x1A; /* length */
		out->sb[0] = buf[3]; /* checksum */
		i = 1;
		break;

	case DRM_MODE_CONNECTOR_DisplayPort:
	case DRM_MODE_CONNECTOR_eDP:
		out->hb0 = 0x00; /* sdp id, zero */
		out->hb1 = 0x87; /* type */
		out->hb2 = 0x1D; /* payload len - 1 */
		out->hb3 = (0x13 << 2); /* sdp version */
		out->sb[0] = 0x01; /* version */
		out->sb[1] = 0x1A; /* length */
		i = 2;
		break;

	default:
		return -EINVAL;
	}

	memcpy(&out->sb[i], &buf[4], 26);
	out->valid = true;

	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
		       sizeof(out->sb), false);

	return 0;
}
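/*
 * Layout note (not in the original source): hdmi_drm_infoframe_pack_only()
 * emits a 4 byte infoframe header followed by the 26 byte static metadata
 * payload, which is why buf[] is sized 30. For HDMI, buf[3] carries the
 * infoframe checksum copied into sb[0] above and the payload is taken from
 * buf[4] onward; for DP the same payload is wrapped in an SDP header
 * instead.
 */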
static bool
is_hdr_metadata_different(const struct drm_connector_state *old_state,
			  const struct drm_connector_state *new_state)
{
	struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
	struct drm_property_blob *new_blob = new_state->hdr_output_metadata;

	if (old_blob != new_blob) {
		if (old_blob && new_blob &&
		    old_blob->length == new_blob->length)
			return memcmp(old_blob->data, new_blob->data,
				      old_blob->length);

		return true;
	}

	return false;
}
static int
amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
				 struct drm_atomic_state *state)
{
	struct drm_connector_state *new_con_state =
		drm_atomic_get_new_connector_state(state, conn);
	struct drm_connector_state *old_con_state =
		drm_atomic_get_old_connector_state(state, conn);
	struct drm_crtc *crtc = new_con_state->crtc;
	struct drm_crtc_state *new_crtc_state;
	int ret;

	if (!crtc)
		return 0;

	if (is_hdr_metadata_different(old_con_state, new_con_state)) {
		struct dc_info_packet hdr_infopacket;

		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
		if (ret)
			return ret;

		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
		if (IS_ERR(new_crtc_state))
			return PTR_ERR(new_crtc_state);

		/*
		 * DC considers the stream backends changed if the
		 * static metadata changes. Forcing the modeset also
		 * gives a simple way for userspace to switch from
		 * 8bpc to 10bpc when setting the metadata to enter
		 * or exit HDR.
		 *
		 * Changing the static metadata after it's been
		 * set is permissible, however. So only force a
		 * modeset if we're entering or exiting HDR.
		 */
		new_crtc_state->mode_changed =
			!old_con_state->hdr_output_metadata ||
			!new_con_state->hdr_output_metadata;
	}

	return 0;
}

static const struct drm_connector_helper_funcs
amdgpu_dm_connector_helper_funcs = {
	/*
	 * If hotplugging a second bigger display in FB Con mode, bigger resolution
	 * modes will be filtered by drm_mode_validate_size(), and those modes
	 * are missing after the user starts lightdm. So we need to renew the modes
	 * list in the get_modes callback, not just return the modes count.
	 */
	.get_modes = get_modes,
	.mode_valid = amdgpu_dm_connector_mode_valid,
	.atomic_check = amdgpu_dm_connector_atomic_check,
};

static void dm_crtc_helper_disable(struct drm_crtc *crtc)
{
}
static bool does_crtc_have_active_cursor(struct drm_crtc_state *new_crtc_state)
{
	struct drm_device *dev = new_crtc_state->crtc->dev;
	struct drm_plane *plane;

	drm_for_each_plane_mask(plane, dev, new_crtc_state->plane_mask) {
		if (plane->type == DRM_PLANE_TYPE_CURSOR)
			return true;
	}

	return false;
}

static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
{
	struct drm_atomic_state *state = new_crtc_state->state;
	struct drm_plane *plane;
	int num_active = 0;

	drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
		struct drm_plane_state *new_plane_state;

		/* Cursor planes are "fake". */
		if (plane->type == DRM_PLANE_TYPE_CURSOR)
			continue;

		new_plane_state = drm_atomic_get_new_plane_state(state, plane);

		if (!new_plane_state) {
			/*
			 * The plane is enabled on the CRTC and hasn't changed
			 * state. This means that it previously passed
			 * validation and is therefore enabled.
			 */
			num_active += 1;
			continue;
		}

		/* We need a framebuffer to be considered enabled. */
		num_active += (new_plane_state->fb != NULL);
	}

	return num_active;
}

static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
					 struct drm_crtc_state *new_crtc_state)
{
	struct dm_crtc_state *dm_new_crtc_state =
		to_dm_crtc_state(new_crtc_state);

	dm_new_crtc_state->active_planes = 0;

	if (!dm_new_crtc_state->stream)
		return;

	dm_new_crtc_state->active_planes =
		count_crtc_active_planes(new_crtc_state);
}

static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
				       struct drm_crtc_state *state)
{
	struct amdgpu_device *adev = crtc->dev->dev_private;
	struct dc *dc = adev->dm.dc;
	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
	int ret = -EINVAL;

	dm_update_crtc_active_planes(crtc, state);

	if (unlikely(!dm_crtc_state->stream &&
		     modeset_required(state, NULL, dm_crtc_state->stream))) {
		WARN_ON(1);
		return ret;
	}

	/* In some use cases, like reset, no stream is attached */
	if (!dm_crtc_state->stream)
		return 0;

	/*
	 * We want at least one hardware plane enabled to use
	 * the stream with a cursor enabled.
	 */
	if (state->enable && state->active &&
	    does_crtc_have_active_cursor(state) &&
	    dm_crtc_state->active_planes == 0)
		return -EINVAL;

	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
		return 0;

	return ret;
}

static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
				      const struct drm_display_mode *mode,
				      struct drm_display_mode *adjusted_mode)
{
	return true;
}
static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
	.disable = dm_crtc_helper_disable,
	.atomic_check = dm_crtc_helper_atomic_check,
	.mode_fixup = dm_crtc_helper_mode_fixup,
	.get_scanout_position = amdgpu_crtc_get_scanout_position,
};

static void dm_encoder_helper_disable(struct drm_encoder *encoder)
{
}

static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
{
	switch (display_color_depth) {
	case COLOR_DEPTH_666:
		return 6;
	case COLOR_DEPTH_888:
		return 8;
	case COLOR_DEPTH_101010:
		return 10;
	case COLOR_DEPTH_121212:
		return 12;
	case COLOR_DEPTH_141414:
		return 14;
	case COLOR_DEPTH_161616:
		return 16;
	default:
		break;
	}
	return 0;
}
static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
					  struct drm_crtc_state *crtc_state,
					  struct drm_connector_state *conn_state)
{
	struct drm_atomic_state *state = crtc_state->state;
	struct drm_connector *connector = conn_state->connector;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
	struct drm_dp_mst_topology_mgr *mst_mgr;
	struct drm_dp_mst_port *mst_port;
	enum dc_color_depth color_depth;
	int clock, bpp = 0;
	bool is_y420 = false;

	if (!aconnector->port || !aconnector->dc_sink)
		return 0;

	mst_port = aconnector->port;
	mst_mgr = &aconnector->mst_port->mst_mgr;

	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
		return 0;

	if (!state->duplicated) {
		int max_bpc = conn_state->max_requested_bpc;

		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
			  aconnector->force_yuv420_output;
		color_depth = convert_color_depth_from_display_info(connector,
								    is_y420,
								    max_bpc);
		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
		clock = adjusted_mode->clock;
		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
	}
	dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
									   mst_mgr,
									   mst_port,
									   dm_new_connector_state->pbn,
									   dm_mst_get_pbn_divider(aconnector->dc_link));
	if (dm_new_connector_state->vcpi_slots < 0) {
		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
		return dm_new_connector_state->vcpi_slots;
	}
	return 0;
}
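/*
 * Background note (not in the original source): PBN (Payload Bandwidth
 * Number) is the DP MST unit of link bandwidth, nominally 54/64 MBytes/s per
 * PBN. drm_dp_calc_pbn_mode() converts the pixel clock (kHz) and bits per
 * pixel of the stream into PBN, and drm_dp_atomic_find_vcpi_slots() then
 * divides that by the per-slot capacity of the link (the pbn divider) to get
 * the number of MTP timeslots the payload needs.
 */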
const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
	.disable = dm_encoder_helper_disable,
	.atomic_check = dm_encoder_helper_atomic_check
};
#if defined(CONFIG_DRM_AMD_DC_DCN)
static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
					    struct dc_state *dc_state)
{
	struct dc_stream_state *stream = NULL;
	struct drm_connector *connector;
	struct drm_connector_state *new_con_state, *old_con_state;
	struct amdgpu_dm_connector *aconnector;
	struct dm_connector_state *dm_conn_state;
	int i, j, clock, bpp;
	int vcpi, pbn_div, pbn = 0;

	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {

		aconnector = to_amdgpu_dm_connector(connector);

		if (!aconnector->port)
			continue;

		if (!new_con_state || !new_con_state->crtc)
			continue;

		dm_conn_state = to_dm_connector_state(new_con_state);

		for (j = 0; j < dc_state->stream_count; j++) {
			stream = dc_state->streams[j];
			if (!stream)
				continue;

			if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
				break;

			stream = NULL;
		}

		if (!stream)
			continue;

		if (stream->timing.flags.DSC != 1) {
			drm_dp_mst_atomic_enable_dsc(state,
						     aconnector->port,
						     dm_conn_state->pbn,
						     0,
						     false);
			continue;
		}

		pbn_div = dm_mst_get_pbn_divider(stream->link);
		bpp = stream->timing.dsc_cfg.bits_per_pixel;
		clock = stream->timing.pix_clk_100hz / 10;
		pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
		vcpi = drm_dp_mst_atomic_enable_dsc(state,
						    aconnector->port,
						    pbn,
						    pbn_div,
						    true);
		if (vcpi < 0)
			return vcpi;

		dm_conn_state->pbn = pbn;
		dm_conn_state->vcpi_slots = vcpi;
	}
	return 0;
}
#endif
static void dm_drm_plane_reset(struct drm_plane *plane)
{
	struct dm_plane_state *amdgpu_state = NULL;

	if (plane->state)
		plane->funcs->atomic_destroy_state(plane, plane->state);

	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
	WARN_ON(amdgpu_state == NULL);

	if (amdgpu_state)
		__drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
}

static struct drm_plane_state *
dm_drm_plane_duplicate_state(struct drm_plane *plane)
{
	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;

	old_dm_plane_state = to_dm_plane_state(plane->state);
	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
	if (!dm_plane_state)
		return NULL;

	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);

	if (old_dm_plane_state->dc_state) {
		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
		dc_plane_state_retain(dm_plane_state->dc_state);
	}

	return &dm_plane_state->base;
}

static void dm_drm_plane_destroy_state(struct drm_plane *plane,
				       struct drm_plane_state *state)
{
	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);

	if (dm_plane_state->dc_state)
		dc_plane_state_release(dm_plane_state->dc_state);

	drm_atomic_helper_plane_destroy_state(plane, state);
}

static const struct drm_plane_funcs dm_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = drm_primary_helper_destroy,
	.reset = dm_drm_plane_reset,
	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
	.atomic_destroy_state = dm_drm_plane_destroy_state,
};
static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
				      struct drm_plane_state *new_state)
{
	struct amdgpu_framebuffer *afb;
	struct drm_gem_object *obj;
	struct amdgpu_device *adev;
	struct amdgpu_bo *rbo;
	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
	struct list_head list;
	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	uint64_t tiling_flags;
	uint32_t domain;
	int r;
	bool tmz_surface = false;
	bool force_disable_dcc = false;

	dm_plane_state_old = to_dm_plane_state(plane->state);
	dm_plane_state_new = to_dm_plane_state(new_state);

	if (!new_state->fb) {
		DRM_DEBUG_DRIVER("No FB bound\n");
		return 0;
	}

	afb = to_amdgpu_framebuffer(new_state->fb);
	obj = new_state->fb->obj[0];
	rbo = gem_to_amdgpu_bo(obj);
	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
	INIT_LIST_HEAD(&list);

	tv.bo = &rbo->tbo;
	tv.num_shared = 1;
	list_add(&tv.head, &list);

	r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
	if (r) {
		dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
		return r;
	}

	if (plane->type != DRM_PLANE_TYPE_CURSOR)
		domain = amdgpu_display_supported_domains(adev, rbo->flags);
	else
		domain = AMDGPU_GEM_DOMAIN_VRAM;

	r = amdgpu_bo_pin(rbo, domain);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
		ttm_eu_backoff_reservation(&ticket, &list);
		return r;
	}

	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
	if (unlikely(r != 0)) {
		amdgpu_bo_unpin(rbo);
		ttm_eu_backoff_reservation(&ticket, &list);
		DRM_ERROR("%p bind failed\n", rbo);
		return r;
	}

	amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);

	tmz_surface = amdgpu_bo_encrypted(rbo);

	ttm_eu_backoff_reservation(&ticket, &list);

	afb->address = amdgpu_bo_gpu_offset(rbo);

	if (dm_plane_state_new->dc_state &&
	    dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
		struct dc_plane_state *plane_state = dm_plane_state_new->dc_state;

		force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
		fill_plane_buffer_attributes(
			adev, afb, plane_state->format, plane_state->rotation,
			tiling_flags, &plane_state->tiling_info,
			&plane_state->plane_size, &plane_state->dcc,
			&plane_state->address, tmz_surface,
			force_disable_dcc);
	}

	return 0;
}

static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
				       struct drm_plane_state *old_state)
{
	struct amdgpu_bo *rbo;
	int r;

	if (!old_state->fb)
		return;

	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
	r = amdgpu_bo_reserve(rbo, false);
	if (unlikely(r)) {
		DRM_ERROR("failed to reserve rbo before unpin\n");
		return;
	}

	amdgpu_bo_unpin(rbo);
	amdgpu_bo_unreserve(rbo);
	amdgpu_bo_unref(&rbo);
}

static int dm_plane_helper_check_state(struct drm_plane_state *state,
				       struct drm_crtc_state *new_crtc_state)
{
	int max_downscale = 0;
	int max_upscale = INT_MAX;

	/* TODO: These should be checked against DC plane caps */
	return drm_atomic_helper_check_plane_state(
		state, new_crtc_state, max_downscale, max_upscale, true, true);
}
static int dm_plane_atomic_check(struct drm_plane *plane,
				 struct drm_plane_state *state)
{
	struct amdgpu_device *adev = plane->dev->dev_private;
	struct dc *dc = adev->dm.dc;
	struct dm_plane_state *dm_plane_state;
	struct dc_scaling_info scaling_info;
	struct drm_crtc_state *new_crtc_state;
	int ret;

	dm_plane_state = to_dm_plane_state(state);

	if (!dm_plane_state->dc_state)
		return 0;

	new_crtc_state =
		drm_atomic_get_new_crtc_state(state->state, state->crtc);
	if (!new_crtc_state)
		return -EINVAL;

	ret = dm_plane_helper_check_state(state, new_crtc_state);
	if (ret)
		return ret;

	ret = fill_dc_scaling_info(state, &scaling_info);
	if (ret)
		return ret;

	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
		return 0;

	return -EINVAL;
}

static int dm_plane_atomic_async_check(struct drm_plane *plane,
				       struct drm_plane_state *new_plane_state)
{
	/* Only support async updates on cursor planes. */
	if (plane->type != DRM_PLANE_TYPE_CURSOR)
		return -EINVAL;

	return 0;
}

static void dm_plane_atomic_async_update(struct drm_plane *plane,
					 struct drm_plane_state *new_state)
{
	struct drm_plane_state *old_state =
		drm_atomic_get_old_plane_state(new_state->state, plane);

	swap(plane->state->fb, new_state->fb);

	plane->state->src_x = new_state->src_x;
	plane->state->src_y = new_state->src_y;
	plane->state->src_w = new_state->src_w;
	plane->state->src_h = new_state->src_h;
	plane->state->crtc_x = new_state->crtc_x;
	plane->state->crtc_y = new_state->crtc_y;
	plane->state->crtc_w = new_state->crtc_w;
	plane->state->crtc_h = new_state->crtc_h;

	handle_cursor_update(plane, old_state);
}

static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
	.prepare_fb = dm_plane_helper_prepare_fb,
	.cleanup_fb = dm_plane_helper_cleanup_fb,
	.atomic_check = dm_plane_atomic_check,
	.atomic_async_check = dm_plane_atomic_async_check,
	.atomic_async_update = dm_plane_atomic_async_update
};
/*
 * TODO: these are currently initialized to rgb formats only.
 * For future use cases we should either initialize them dynamically based on
 * plane capabilities, or initialize this array to all formats, so internal drm
 * check will succeed, and let DC implement proper check
 */
static const uint32_t rgb_formats[] = {
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_RGBA8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_ARGB2101010,
	DRM_FORMAT_ABGR2101010,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ABGR8888,
};

static const uint32_t overlay_formats[] = {
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_RGBA8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ABGR8888,
};

static const u32 cursor_formats[] = {
	DRM_FORMAT_ARGB8888
};

static int get_plane_formats(const struct drm_plane *plane,
			     const struct dc_plane_cap *plane_cap,
			     uint32_t *formats, int max_formats)
{
	int i, num_formats = 0;

	/*
	 * TODO: Query support for each group of formats directly from
	 * DC plane caps. This will require adding more formats to the
	 * caps list.
	 */

	switch (plane->type) {
	case DRM_PLANE_TYPE_PRIMARY:
		for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
			if (num_formats >= max_formats)
				break;

			formats[num_formats++] = rgb_formats[i];
		}

		if (plane_cap && plane_cap->pixel_format_support.nv12)
			formats[num_formats++] = DRM_FORMAT_NV12;
		if (plane_cap && plane_cap->pixel_format_support.p010)
			formats[num_formats++] = DRM_FORMAT_P010;
		if (plane_cap && plane_cap->pixel_format_support.fp16) {
			formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
			formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
			formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
			formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
		}
		break;

	case DRM_PLANE_TYPE_OVERLAY:
		for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
			if (num_formats >= max_formats)
				break;

			formats[num_formats++] = overlay_formats[i];
		}
		break;

	case DRM_PLANE_TYPE_CURSOR:
		for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
			if (num_formats >= max_formats)
				break;

			formats[num_formats++] = cursor_formats[i];
		}
		break;
	}

	return num_formats;
}
static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct drm_plane *plane,
				unsigned long possible_crtcs,
				const struct dc_plane_cap *plane_cap)
{
	uint32_t formats[32];
	int num_formats;
	int res;
	unsigned int supported_rotations;

	num_formats = get_plane_formats(plane, plane_cap, formats,
					ARRAY_SIZE(formats));

	res = drm_universal_plane_init(dm->adev->ddev, plane, possible_crtcs,
				       &dm_plane_funcs, formats, num_formats,
				       NULL, plane->type, NULL);
	if (res)
		return res;

	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
	    plane_cap && plane_cap->per_pixel_alpha) {
		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
					  BIT(DRM_MODE_BLEND_PREMULTI);

		drm_plane_create_alpha_property(plane);
		drm_plane_create_blend_mode_property(plane, blend_caps);
	}

	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
	    plane_cap &&
	    (plane_cap->pixel_format_support.nv12 ||
	     plane_cap->pixel_format_support.p010)) {
		/* This only affects YUV formats. */
		drm_plane_create_color_properties(
			plane,
			BIT(DRM_COLOR_YCBCR_BT601) |
			BIT(DRM_COLOR_YCBCR_BT709) |
			BIT(DRM_COLOR_YCBCR_BT2020),
			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
	}

	supported_rotations =
		DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
		DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;

	drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
					   supported_rotations);

	drm_plane_helper_add(plane, &dm_plane_helper_funcs);

	/* Create (reset) the plane state */
	if (plane->funcs->reset)
		plane->funcs->reset(plane);

	return 0;
}

static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			       struct drm_plane *plane,
			       uint32_t crtc_index)
{
	struct amdgpu_crtc *acrtc = NULL;
	struct drm_plane *cursor_plane;

	int res = -ENOMEM;

	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
	if (!cursor_plane)
		goto fail;

	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);

	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
	if (!acrtc)
		goto fail;

	res = drm_crtc_init_with_planes(
			dm->ddev,
			&acrtc->base,
			plane,
			cursor_plane,
			&amdgpu_dm_crtc_funcs, NULL);

	if (res)
		goto fail;

	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);

	/* Create (reset) the plane state */
	if (acrtc->base.funcs->reset)
		acrtc->base.funcs->reset(&acrtc->base);

	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;

	acrtc->crtc_id = crtc_index;
	acrtc->base.enabled = false;
	acrtc->otg_inst = -1;

	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
				   true, MAX_COLOR_LUT_ENTRIES);
	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);

	return 0;

fail:
	kfree(acrtc);
	kfree(cursor_plane);
	return res;
}
static int to_drm_connector_type(enum signal_type st)
{
	switch (st) {
	case SIGNAL_TYPE_HDMI_TYPE_A:
		return DRM_MODE_CONNECTOR_HDMIA;
	case SIGNAL_TYPE_EDP:
		return DRM_MODE_CONNECTOR_eDP;
	case SIGNAL_TYPE_LVDS:
		return DRM_MODE_CONNECTOR_LVDS;
	case SIGNAL_TYPE_RGB:
		return DRM_MODE_CONNECTOR_VGA;
	case SIGNAL_TYPE_DISPLAY_PORT:
	case SIGNAL_TYPE_DISPLAY_PORT_MST:
		return DRM_MODE_CONNECTOR_DisplayPort;
	case SIGNAL_TYPE_DVI_DUAL_LINK:
	case SIGNAL_TYPE_DVI_SINGLE_LINK:
		return DRM_MODE_CONNECTOR_DVID;
	case SIGNAL_TYPE_VIRTUAL:
		return DRM_MODE_CONNECTOR_VIRTUAL;
	default:
		return DRM_MODE_CONNECTOR_Unknown;
	}
}

static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
{
	struct drm_encoder *encoder;

	/* There is only one encoder per connector */
	drm_connector_for_each_possible_encoder(connector, encoder)
		return encoder;

	return NULL;
}

static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
{
	struct drm_encoder *encoder;
	struct amdgpu_encoder *amdgpu_encoder;

	encoder = amdgpu_dm_connector_to_encoder(connector);

	if (encoder == NULL)
		return;

	amdgpu_encoder = to_amdgpu_encoder(encoder);

	amdgpu_encoder->native_mode.clock = 0;

	if (!list_empty(&connector->probed_modes)) {
		struct drm_display_mode *preferred_mode = NULL;

		list_for_each_entry(preferred_mode,
				    &connector->probed_modes,
				    head) {
			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
				amdgpu_encoder->native_mode = *preferred_mode;

			break;
		}
	}
}

static struct drm_display_mode *
amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
			     char *name,
			     int hdisplay, int vdisplay)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct drm_display_mode *mode = NULL;
	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;

	mode = drm_mode_duplicate(dev, native_mode);

	if (mode == NULL)
		return NULL;

	mode->hdisplay = hdisplay;
	mode->vdisplay = vdisplay;
	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);

	return mode;
}
static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
						 struct drm_connector *connector)
{
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct drm_display_mode *mode = NULL;
	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
	struct amdgpu_dm_connector *amdgpu_dm_connector =
				to_amdgpu_dm_connector(connector);
	int i;
	int n;
	struct mode_size {
		char name[DRM_DISPLAY_MODE_LEN];
		int w;
		int h;
	} common_modes[] = {
		{  "640x480",  640,  480},
		{  "800x600",  800,  600},
		{ "1024x768", 1024,  768},
		{ "1280x720", 1280,  720},
		{ "1280x800", 1280,  800},
		{"1280x1024", 1280, 1024},
		{ "1440x900", 1440,  900},
		{"1680x1050", 1680, 1050},
		{"1600x1200", 1600, 1200},
		{"1920x1080", 1920, 1080},
		{"1920x1200", 1920, 1200}
	};

	n = ARRAY_SIZE(common_modes);

	for (i = 0; i < n; i++) {
		struct drm_display_mode *curmode = NULL;
		bool mode_existed = false;

		if (common_modes[i].w > native_mode->hdisplay ||
		    common_modes[i].h > native_mode->vdisplay ||
		   (common_modes[i].w == native_mode->hdisplay &&
		    common_modes[i].h == native_mode->vdisplay))
			continue;

		list_for_each_entry(curmode, &connector->probed_modes, head) {
			if (common_modes[i].w == curmode->hdisplay &&
			    common_modes[i].h == curmode->vdisplay) {
				mode_existed = true;
				break;
			}
		}

		if (mode_existed)
			continue;

		mode = amdgpu_dm_create_common_mode(encoder,
				common_modes[i].name, common_modes[i].w,
				common_modes[i].h);
		drm_mode_probed_add(connector, mode);
		amdgpu_dm_connector->num_modes++;
	}
}

static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
					      struct edid *edid)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector =
			to_amdgpu_dm_connector(connector);

	if (edid) {
		/* empty probed_modes */
		INIT_LIST_HEAD(&connector->probed_modes);
		amdgpu_dm_connector->num_modes =
				drm_add_edid_modes(connector, edid);

		/*
		 * Sort the probed modes before calling
		 * amdgpu_dm_get_native_mode(), since an EDID can have
		 * more than one preferred mode. The modes that are
		 * later in the probed mode list could be of higher
		 * and preferred resolution. For example, 3840x2160
		 * resolution in base EDID preferred timing and 4096x2160
		 * preferred resolution in DID extension block later.
		 */
		drm_mode_sort(&connector->probed_modes);
		amdgpu_dm_get_native_mode(connector);
	} else {
		amdgpu_dm_connector->num_modes = 0;
	}
}

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector =
			to_amdgpu_dm_connector(connector);
	struct drm_encoder *encoder;
	struct edid *edid = amdgpu_dm_connector->edid;

	encoder = amdgpu_dm_connector_to_encoder(connector);

	if (!edid || !drm_edid_is_valid(edid)) {
		amdgpu_dm_connector->num_modes =
				drm_add_modes_noedid(connector, 640, 480);
	} else {
		amdgpu_dm_connector_ddc_get_modes(connector, edid);
		amdgpu_dm_connector_add_common_modes(encoder, connector);
	}
	amdgpu_dm_fbc_init(connector);

	return amdgpu_dm_connector->num_modes;
}
void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
				     struct amdgpu_dm_connector *aconnector,
				     int connector_type,
				     struct dc_link *link,
				     int link_index)
{
	struct amdgpu_device *adev = dm->ddev->dev_private;

	/*
	 * Some of the properties below require access to state, like bpc.
	 * Allocate some default initial connector state with our reset helper.
	 */
	if (aconnector->base.funcs->reset)
		aconnector->base.funcs->reset(&aconnector->base);

	aconnector->connector_id = link_index;
	aconnector->dc_link = link;
	aconnector->base.interlace_allowed = false;
	aconnector->base.doublescan_allowed = false;
	aconnector->base.stereo_allowed = false;
	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
	aconnector->audio_inst = -1;
	mutex_init(&aconnector->hpd_lock);

	/*
	 * Configure HPD hot plug support: connector->polled defaults to 0,
	 * which means HPD hot plug is not supported.
	 */
	switch (connector_type) {
	case DRM_MODE_CONNECTOR_HDMIA:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		aconnector->base.ycbcr_420_allowed =
			link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
		break;
	case DRM_MODE_CONNECTOR_DisplayPort:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		aconnector->base.ycbcr_420_allowed =
			link->link_enc->features.dp_ycbcr420_supported ? true : false;
		break;
	case DRM_MODE_CONNECTOR_DVID:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		break;
	default:
		break;
	}

	drm_object_attach_property(&aconnector->base.base,
				dm->ddev->mode_config.scaling_mode_property,
				DRM_MODE_SCALE_NONE);

	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_property,
				UNDERSCAN_OFF);
	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_hborder_property,
				0);
	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_vborder_property,
				0);

	if (!aconnector->mst_port)
		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);

	/* This defaults to the max in the range, but we want 8bpc for non-eDP. */
	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;

	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
	    (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
		drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.abm_level_property, 0);
	}

	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
	    connector_type == DRM_MODE_CONNECTOR_eDP) {
		drm_object_attach_property(
			&aconnector->base.base,
			dm->ddev->mode_config.hdr_output_metadata_property, 0);

		if (!aconnector->mst_port)
			drm_connector_attach_vrr_capable_property(&aconnector->base);

#ifdef CONFIG_DRM_AMD_DC_HDCP
		if (adev->dm.hdcp_workqueue)
			drm_connector_attach_content_protection_property(&aconnector->base, true);
#endif
	}
}
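
/*
 * I2C access to the display (DDC/EDID and friends) is routed through DC:
 * the i2c_msg array from the kernel I2C core is repacked into a dc
 * i2c_command and submitted on the DDC channel that belongs to this link.
 */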
static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
			      struct i2c_msg *msgs, int num)
{
	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
	struct ddc_service *ddc_service = i2c->ddc_service;
	struct i2c_command cmd;
	int i;
	int result = -EIO;

	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);

	if (!cmd.payloads)
		return result;

	cmd.number_of_payloads = num;
	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;

	for (i = 0; i < num; i++) {
		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
		cmd.payloads[i].address = msgs[i].addr;
		cmd.payloads[i].length = msgs[i].len;
		cmd.payloads[i].data = msgs[i].buf;
	}

	if (dc_submit_i2c(
			ddc_service->ctx->dc,
			ddc_service->ddc_pin->hw_info.ddc_channel,
			&cmd))
		result = num;

	kfree(cmd.payloads);
	return result;
}

static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}
static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
	.master_xfer = amdgpu_dm_i2c_xfer,
	.functionality = amdgpu_dm_i2c_func,
};

static struct amdgpu_i2c_adapter *
create_i2c(struct ddc_service *ddc_service,
	   int link_index,
	   int *res)
{
	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
	struct amdgpu_i2c_adapter *i2c;

	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
	if (!i2c)
		return NULL;
	i2c->base.owner = THIS_MODULE;
	i2c->base.class = I2C_CLASS_DDC;
	i2c->base.dev.parent = &adev->pdev->dev;
	i2c->base.algo = &amdgpu_dm_i2c_algo;
	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
	i2c_set_adapdata(&i2c->base, i2c);
	i2c->ddc_service = ddc_service;
	i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;

	return i2c;
}
/*
 * Note: this function assumes that dc_link_detect() was called for the
 * dc_link which will be represented by this aconnector.
 */
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *aconnector,
				    uint32_t link_index,
				    struct amdgpu_encoder *aencoder)
{
	int res = 0;
	int connector_type;
	struct dc *dc = dm->dc;
	struct dc_link *link = dc_get_link_at_index(dc, link_index);
	struct amdgpu_i2c_adapter *i2c;

	link->priv = aconnector;

	DRM_DEBUG_DRIVER("%s()\n", __func__);

	i2c = create_i2c(link->ddc, link->link_index, &res);
	if (!i2c) {
		DRM_ERROR("Failed to create i2c adapter data\n");
		return -ENOMEM;
	}

	aconnector->i2c = i2c;
	res = i2c_add_adapter(&i2c->base);

	if (res) {
		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
		goto out_free;
	}

	connector_type = to_drm_connector_type(link->connector_signal);

	res = drm_connector_init_with_ddc(
			dm->ddev,
			&aconnector->base,
			&amdgpu_dm_connector_funcs,
			connector_type,
			&i2c->base);

	if (res) {
		DRM_ERROR("connector_init failed\n");
		aconnector->connector_id = -1;
		goto out_free;
	}

	drm_connector_helper_add(
			&aconnector->base,
			&amdgpu_dm_connector_helper_funcs);

	amdgpu_dm_connector_init_helper(
		dm,
		aconnector,
		connector_type,
		link,
		link_index);

	drm_connector_attach_encoder(
		&aconnector->base, &aencoder->base);

	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
		|| connector_type == DRM_MODE_CONNECTOR_eDP)
		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);

out_free:
	if (res) {
		kfree(i2c);
		aconnector->i2c = NULL;
	}

	return res;
}
int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
{
	switch (adev->mode_info.num_crtc) {
	case 3:
		return 0x7;
	case 4:
		return 0xf;
	case 5:
		return 0x1f;
	case 6:
	default:
		return 0x3f;
	}
}
static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index)
{
	struct amdgpu_device *adev = dev->dev_private;

	int res = drm_encoder_init(dev,
				   &aencoder->base,
				   &amdgpu_dm_encoder_funcs,
				   DRM_MODE_ENCODER_TMDS,
				   NULL);

	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);

	if (!res)
		aencoder->encoder_id = link_index;
	else
		aencoder->encoder_id = -1;

	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);

	return res;
}
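
/*
 * Enable or disable the vblank and pageflip interrupts that the DM needs
 * for a given CRTC. Called around modesets and CRTC enable/disable so the
 * interrupt state follows the CRTC's active state.
 */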
static void manage_dm_interrupts(struct amdgpu_device *adev,
				 struct amdgpu_crtc *acrtc,
				 bool enable)
{
	/*
	 * We have no guarantee that the frontend index maps to the same
	 * backend index - some even map to more than one.
	 *
	 * TODO: Use a different interrupt or check DC itself for the mapping.
	 */
	int irq_type =
		amdgpu_display_crtc_idx_to_irq_type(
			adev,
			acrtc->crtc_id);

	if (enable) {
		drm_crtc_vblank_on(&acrtc->base);
		amdgpu_irq_get(
			adev,
			&adev->pageflip_irq,
			irq_type);
	} else {
		amdgpu_irq_put(
			adev,
			&adev->pageflip_irq,
			irq_type);

		drm_crtc_vblank_off(&acrtc->base);
	}
}
static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
				      struct amdgpu_crtc *acrtc)
{
	int irq_type =
		amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);

	/*
	 * This reads the current state for the IRQ and force reapplies
	 * the setting to hardware.
	 */
	amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
}
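
/*
 * Returns true when the scaling mode or the underscan settings differ
 * between the old and new connector state, i.e. when the stream's
 * source/destination rectangles need to be reprogrammed.
 */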
static bool
is_scaling_state_different(const struct dm_connector_state *dm_state,
			   const struct dm_connector_state *old_dm_state)
{
	if (dm_state->scaling != old_dm_state->scaling)
		return true;
	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
			return true;
	} else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
			return true;
	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
		return true;

	return false;
}
#ifdef CONFIG_DRM_AMD_DC_HDCP
static bool is_content_protection_different(struct drm_connector_state *state,
					    const struct drm_connector_state *old_state,
					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

	if (old_state->hdcp_content_type != state->hdcp_content_type &&
	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		return true;
	}

	/* CP is being re enabled, ignore this */
	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
		return false;
	}

	/* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED */
	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;

	/* Check if something is connected/enabled, otherwise we start hdcp but nothing is connected/enabled
	 * hot-plug, headless s3, dpms
	 */
	if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && connector->dpms == DRM_MODE_DPMS_ON &&
	    aconnector->dc_sink != NULL)
		return true;

	if (old_state->content_protection == state->content_protection)
		return false;

	if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
		return true;

	return false;
}
#endif
static void remove_stream(struct amdgpu_device *adev,
			  struct amdgpu_crtc *acrtc,
			  struct dc_stream_state *stream)
{
	/* this is the update mode case */

	acrtc->otg_inst = -1;
	acrtc->enabled = false;
}
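
/*
 * Translate the DRM cursor plane state into a dc_cursor_position. Negative
 * plane coordinates are clamped to zero and folded into the hotspot so the
 * cursor can still be shown partially off the top/left edge of the screen.
 */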
static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
			       struct dc_cursor_position *position)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	int x, y;
	int xorigin = 0, yorigin = 0;

	position->enable = false;
	position->x = 0;
	position->y = 0;

	if (!crtc || !plane->state->fb)
		return 0;

	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
			  __func__,
			  plane->state->crtc_w,
			  plane->state->crtc_h);
		return -EINVAL;
	}

	x = plane->state->crtc_x;
	y = plane->state->crtc_y;

	if (x <= -amdgpu_crtc->max_cursor_width ||
	    y <= -amdgpu_crtc->max_cursor_height)
		return 0;

	if (x < 0) {
		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
		x = 0;
	}
	if (y < 0) {
		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
		y = 0;
	}
	position->enable = true;
	position->translate_by_source = true;
	position->x = x;
	position->y = y;
	position->x_hotspot = xorigin;
	position->y_hotspot = yorigin;

	return 0;
}
static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state)
{
	struct amdgpu_device *adev = plane->dev->dev_private;
	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	uint64_t address = afb ? afb->address : 0;
	struct dc_cursor_position position;
	struct dc_cursor_attributes attributes;
	int ret;

	if (!plane->state->fb && !old_plane_state->fb)
		return;

	DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
			 __func__,
			 amdgpu_crtc->crtc_id,
			 plane->state->crtc_w,
			 plane->state->crtc_h);

	ret = get_cursor_position(plane, crtc, &position);
	if (ret)
		return;

	if (!position.enable) {
		/* turn off cursor */
		if (crtc_state && crtc_state->stream) {
			mutex_lock(&adev->dm.dc_lock);
			dc_stream_set_cursor_position(crtc_state->stream,
						      &position);
			mutex_unlock(&adev->dm.dc_lock);
		}
		return;
	}

	amdgpu_crtc->cursor_width = plane->state->crtc_w;
	amdgpu_crtc->cursor_height = plane->state->crtc_h;

	memset(&attributes, 0, sizeof(attributes));
	attributes.address.high_part = upper_32_bits(address);
	attributes.address.low_part  = lower_32_bits(address);
	attributes.width             = plane->state->crtc_w;
	attributes.height            = plane->state->crtc_h;
	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
	attributes.rotation_angle    = 0;
	attributes.attribute_flags.value = 0;

	attributes.pitch = attributes.width;

	if (crtc_state->stream) {
		mutex_lock(&adev->dm.dc_lock);
		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
							 &attributes))
			DRM_ERROR("DC failed to set cursor attributes\n");

		if (!dc_stream_set_cursor_position(crtc_state->stream,
						   &position))
			DRM_ERROR("DC failed to set cursor position\n");
		mutex_unlock(&adev->dm.dc_lock);
	}
}
static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
{
	assert_spin_locked(&acrtc->base.dev->event_lock);
	WARN_ON(acrtc->event);

	acrtc->event = acrtc->base.state->event;

	/* Set the flip status */
	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;

	/* Mark this event as consumed */
	acrtc->base.state->event = NULL;

	DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
			 acrtc->crtc_id);
}
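
/*
 * Re-evaluate the variable refresh rate state for a stream around a flip:
 * let the freesync module update its VRR parameters, rebuild the VRR
 * infopacket, and record whether timing or infopacket changes need to be
 * propagated to DC.
 */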
static void update_freesync_state_on_stream(
	struct amdgpu_display_manager *dm,
	struct dm_crtc_state *new_crtc_state,
	struct dc_stream_state *new_stream,
	struct dc_plane_state *surface,
	u32 flip_timestamp_in_us)
{
	struct mod_vrr_params vrr_params;
	struct dc_info_packet vrr_infopacket = {0};
	struct amdgpu_device *adev = dm->adev;
	unsigned long flags;

	if (!new_stream)
		return;

	/*
	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
	 * For now it's sufficient to just guard against these conditions.
	 */

	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
		return;

	spin_lock_irqsave(&adev->ddev->event_lock, flags);
	vrr_params = new_crtc_state->vrr_params;

	if (surface) {
		mod_freesync_handle_preflip(
			dm->freesync_module,
			surface,
			new_stream,
			flip_timestamp_in_us,
			&vrr_params);

		if (adev->family < AMDGPU_FAMILY_AI &&
		    amdgpu_dm_vrr_active(new_crtc_state)) {
			mod_freesync_handle_v_update(dm->freesync_module,
						     new_stream, &vrr_params);

			/* Need to call this before the frame ends. */
			dc_stream_adjust_vmin_vmax(dm->dc,
						   new_crtc_state->stream,
						   &vrr_params.adjust);
		}
	}

	mod_freesync_build_vrr_infopacket(
		dm->freesync_module,
		new_stream,
		&vrr_params,
		PACKET_TYPE_VRR,
		TRANSFER_FUNC_UNKNOWN,
		&vrr_infopacket);

	new_crtc_state->freesync_timing_changed |=
		(memcmp(&new_crtc_state->vrr_params.adjust,
			&vrr_params.adjust,
			sizeof(vrr_params.adjust)) != 0);

	new_crtc_state->freesync_vrr_info_changed |=
		(memcmp(&new_crtc_state->vrr_infopacket,
			&vrr_infopacket,
			sizeof(vrr_infopacket)) != 0);

	new_crtc_state->vrr_params = vrr_params;
	new_crtc_state->vrr_infopacket = vrr_infopacket;

	new_stream->adjust = new_crtc_state->vrr_params.adjust;
	new_stream->vrr_infopacket = vrr_infopacket;

	if (new_crtc_state->freesync_vrr_info_changed)
		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
			      new_crtc_state->base.crtc->base.id,
			      (int)new_crtc_state->base.vrr_enabled,
			      (int)vrr_params.state);

	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
}
static void pre_update_freesync_state_on_stream(
	struct amdgpu_display_manager *dm,
	struct dm_crtc_state *new_crtc_state)
{
	struct dc_stream_state *new_stream = new_crtc_state->stream;
	struct mod_vrr_params vrr_params;
	struct mod_freesync_config config = new_crtc_state->freesync_config;
	struct amdgpu_device *adev = dm->adev;
	unsigned long flags;

	if (!new_stream)
		return;

	/*
	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
	 * For now it's sufficient to just guard against these conditions.
	 */
	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
		return;

	spin_lock_irqsave(&adev->ddev->event_lock, flags);
	vrr_params = new_crtc_state->vrr_params;

	if (new_crtc_state->vrr_supported &&
	    config.min_refresh_in_uhz &&
	    config.max_refresh_in_uhz) {
		config.state = new_crtc_state->base.vrr_enabled ?
			VRR_STATE_ACTIVE_VARIABLE :
			VRR_STATE_INACTIVE;
	} else {
		config.state = VRR_STATE_UNSUPPORTED;
	}

	mod_freesync_build_vrr_params(dm->freesync_module,
				      new_stream,
				      &config, &vrr_params);

	new_crtc_state->freesync_timing_changed |=
		(memcmp(&new_crtc_state->vrr_params.adjust,
			&vrr_params.adjust,
			sizeof(vrr_params.adjust)) != 0);

	new_crtc_state->vrr_params = vrr_params;
	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
}
static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
					    struct dm_crtc_state *new_state)
{
	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);

	if (!old_vrr_active && new_vrr_active) {
		/* Transition VRR inactive -> active:
		 * While VRR is active, we must not disable vblank irq, as a
		 * reenable after disable would compute bogus vblank/pflip
		 * timestamps if it likely happened inside display front-porch.
		 *
		 * We also need vupdate irq for the actual core vblank handling
		 * at end of vblank.
		 */
		dm_set_vupdate_irq(new_state->base.crtc, true);
		drm_crtc_vblank_get(new_state->base.crtc);
		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
				 __func__, new_state->base.crtc->base.id);
	} else if (old_vrr_active && !new_vrr_active) {
		/* Transition VRR active -> inactive:
		 * Allow vblank irq disable again for fixed refresh rate.
		 */
		dm_set_vupdate_irq(new_state->base.crtc, false);
		drm_crtc_vblank_put(new_state->base.crtc);
		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
				 __func__, new_state->base.crtc->base.id);
	}
}
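
/*
 * Program cursor updates for every cursor plane in the atomic state. The
 * cursor is handled outside of the regular plane update bundle; see the
 * TODO below about making this per-stream.
 */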
static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
{
	struct drm_plane *plane;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	int i;

	/*
	 * TODO: Make this per-stream so we don't issue redundant updates for
	 * commits with multiple streams.
	 */
	for_each_oldnew_plane_in_state(state, plane, old_plane_state,
				       new_plane_state, i)
		if (plane->type == DRM_PLANE_TYPE_CURSOR)
			handle_cursor_update(plane, old_plane_state);
}
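
/*
 * Commit the planes for a single CRTC: build a bundle of surface updates
 * (scaling, plane info, flip addresses), wait for the framebuffers' fences,
 * throttle flips against the target vblank, and hand everything to DC in a
 * single dc_commit_updates_for_stream() call.
 */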
static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
				    struct dc_state *dc_state,
				    struct drm_device *dev,
				    struct amdgpu_display_manager *dm,
				    struct drm_crtc *pcrtc,
				    bool wait_for_vblank)
{
	uint32_t i;
	uint64_t timestamp_ns;
	struct drm_plane *plane;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
	struct drm_crtc_state *new_pcrtc_state =
			drm_atomic_get_new_crtc_state(state, pcrtc);
	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
	struct dm_crtc_state *dm_old_crtc_state =
			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
	int planes_count = 0, vpos, hpos;
	long r;
	unsigned long flags;
	struct amdgpu_bo *abo;
	uint64_t tiling_flags;
	bool tmz_surface = false;
	uint32_t target_vblank, last_flip_vblank;
	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
	bool pflip_present = false;
	struct {
		struct dc_surface_update surface_updates[MAX_SURFACES];
		struct dc_plane_info plane_infos[MAX_SURFACES];
		struct dc_scaling_info scaling_infos[MAX_SURFACES];
		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
		struct dc_stream_update stream_update;
	} *bundle;

	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);

	if (!bundle) {
		dm_error("Failed to allocate update bundle\n");
		goto cleanup;
	}

	/*
	 * Disable the cursor first if we're disabling all the planes.
	 * It'll remain on the screen after the planes are re-enabled
	 * if we don't.
	 */
	if (acrtc_state->active_planes == 0)
		amdgpu_dm_commit_cursors(state);

	/* update planes when needed */
	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
		struct drm_crtc *crtc = new_plane_state->crtc;
		struct drm_crtc_state *new_crtc_state;
		struct drm_framebuffer *fb = new_plane_state->fb;
		bool plane_needs_flip;
		struct dc_plane_state *dc_plane;
		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);

		/* Cursor plane is handled after stream updates */
		if (plane->type == DRM_PLANE_TYPE_CURSOR)
			continue;

		if (!fb || !crtc || pcrtc != crtc)
			continue;

		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
		if (!new_crtc_state->active)
			continue;

		dc_plane = dm_new_plane_state->dc_state;

		bundle->surface_updates[planes_count].surface = dc_plane;
		if (new_pcrtc_state->color_mgmt_changed) {
			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
			bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
		}

		fill_dc_scaling_info(new_plane_state,
				     &bundle->scaling_infos[planes_count]);

		bundle->surface_updates[planes_count].scaling_info =
			&bundle->scaling_infos[planes_count];

		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;

		pflip_present = pflip_present || plane_needs_flip;

		if (!plane_needs_flip) {
			planes_count += 1;
			continue;
		}

		abo = gem_to_amdgpu_bo(fb->obj[0]);

		/*
		 * Wait for all fences on this FB. Do limited wait to avoid
		 * deadlock during GPU reset when this fence will not signal
		 * but we hold reservation lock for the BO.
		 */
		r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
					      false,
					      msecs_to_jiffies(5000));
		if (unlikely(r <= 0))
			DRM_ERROR("Waiting for fences timed out!");

		/*
		 * TODO This might fail and hence better not used, wait
		 * explicitly on fences instead
		 * and in general should be called for
		 * blocking commit to as per framework helpers
		 */
		r = amdgpu_bo_reserve(abo, true);
		if (unlikely(r != 0))
			DRM_ERROR("failed to reserve buffer before flip\n");

		amdgpu_bo_get_tiling_flags(abo, &tiling_flags);

		tmz_surface = amdgpu_bo_encrypted(abo);

		amdgpu_bo_unreserve(abo);

		fill_dc_plane_info_and_addr(
			dm->adev, new_plane_state, tiling_flags,
			&bundle->plane_infos[planes_count],
			&bundle->flip_addrs[planes_count].address,
			tmz_surface,
			false);

		DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
				 new_plane_state->plane->index,
				 bundle->plane_infos[planes_count].dcc.enable);

		bundle->surface_updates[planes_count].plane_info =
			&bundle->plane_infos[planes_count];

		/*
		 * Only allow immediate flips for fast updates that don't
		 * change FB pitch, DCC state, rotation or mirroring.
		 */
		bundle->flip_addrs[planes_count].flip_immediate =
			crtc->state->async_flip &&
			acrtc_state->update_type == UPDATE_TYPE_FAST;

		timestamp_ns = ktime_get_ns();
		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
		bundle->surface_updates[planes_count].surface = dc_plane;

		if (!bundle->surface_updates[planes_count].surface) {
			DRM_ERROR("No surface for CRTC: id=%d\n",
					acrtc_attach->crtc_id);
			continue;
		}

		if (plane == pcrtc->primary)
			update_freesync_state_on_stream(
				dm,
				acrtc_state,
				acrtc_state->stream,
				dc_plane,
				bundle->flip_addrs[planes_count].flip_timestamp_in_us);

		DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
				 __func__,
				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);

		planes_count += 1;
	}

	if (pflip_present) {
		if (!vrr_active) {
			/* Use old throttling in non-vrr fixed refresh rate mode
			 * to keep flip scheduling based on target vblank counts
			 * working in a backwards compatible way, e.g., for
			 * clients using the GLX_OML_sync_control extension or
			 * DRI3/Present extension with defined target_msc.
			 */
			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
		} else {
			/* For variable refresh rate mode only:
			 * Get vblank of last completed flip to avoid > 1 vrr
			 * flips per video frame by use of throttling, but allow
			 * flip programming anywhere in the possibly large
			 * variable vrr vblank interval for fine-grained flip
			 * timing control and more opportunity to avoid stutter
			 * on late submission of flips.
			 */
			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
			last_flip_vblank = acrtc_attach->last_flip_vblank;
			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
		}

		target_vblank = last_flip_vblank + wait_for_vblank;

		/*
		 * Wait until we're out of the vertical blank period before the one
		 * targeted by the flip
		 */
		while ((acrtc_attach->enabled &&
			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
							    0, &vpos, &hpos, NULL,
							    NULL, &pcrtc->hwmode)
			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
			(int)(target_vblank -
			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
			usleep_range(1000, 1100);
		}

		/*
		 * Prepare the flip event for the pageflip interrupt to handle.
		 *
		 * This only works in the case where we've already turned on the
		 * appropriate hardware blocks (eg. HUBP) so in the transition case
		 * from 0 -> n planes we have to skip a hardware generated event
		 * and rely on sending it from software.
		 */
		if (acrtc_attach->base.state->event &&
		    acrtc_state->active_planes > 0) {
			drm_crtc_vblank_get(pcrtc);

			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);

			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
			prepare_flip_isr(acrtc_attach);

			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
		}

		if (acrtc_state->stream) {
			if (acrtc_state->freesync_vrr_info_changed)
				bundle->stream_update.vrr_infopacket =
					&acrtc_state->stream->vrr_infopacket;
		}
	}

	/* Update the planes if changed or disable if we don't have any. */
	if ((planes_count || acrtc_state->active_planes == 0) &&
		acrtc_state->stream) {
		bundle->stream_update.stream = acrtc_state->stream;
		if (new_pcrtc_state->mode_changed) {
			bundle->stream_update.src = acrtc_state->stream->src;
			bundle->stream_update.dst = acrtc_state->stream->dst;
		}

		if (new_pcrtc_state->color_mgmt_changed) {
			/*
			 * TODO: This isn't fully correct since we've actually
			 * already modified the stream in place.
			 */
			bundle->stream_update.gamut_remap =
				&acrtc_state->stream->gamut_remap_matrix;
			bundle->stream_update.output_csc_transform =
				&acrtc_state->stream->csc_color_matrix;
			bundle->stream_update.out_transfer_func =
				acrtc_state->stream->out_transfer_func;
		}

		acrtc_state->stream->abm_level = acrtc_state->abm_level;
		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
			bundle->stream_update.abm_level = &acrtc_state->abm_level;

		/*
		 * If FreeSync state on the stream has changed then we need to
		 * re-adjust the min/max bounds now that DC doesn't handle this
		 * as part of commit.
		 */
		if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
		    amdgpu_dm_vrr_active(acrtc_state)) {
			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
			dc_stream_adjust_vmin_vmax(
				dm->dc, acrtc_state->stream,
				&acrtc_state->vrr_params.adjust);
			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
		}
		mutex_lock(&dm->dc_lock);
		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
				acrtc_state->stream->link->psr_settings.psr_allow_active)
			amdgpu_dm_psr_disable(acrtc_state->stream);

		dc_commit_updates_for_stream(dm->dc,
					     bundle->surface_updates,
					     planes_count,
					     acrtc_state->stream,
					     &bundle->stream_update,
					     dc_state);

		/*
		 * Enable or disable the interrupts on the backend.
		 *
		 * Most pipes are put into power gating when unused.
		 *
		 * When power gating is enabled on a pipe we lose the
		 * interrupt enablement state when power gating is disabled.
		 *
		 * So we need to update the IRQ control state in hardware
		 * whenever the pipe turns on (since it could be previously
		 * power gated) or off (since some pipes can't be power gated
		 * on some ASICs).
		 */
		if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
			dm_update_pflip_irq_state(
				(struct amdgpu_device *)dev->dev_private,
				acrtc_attach);

		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
				acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
				!acrtc_state->stream->link->psr_settings.psr_feature_enabled)
			amdgpu_dm_link_setup_psr(acrtc_state->stream);
		else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
				acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
				!acrtc_state->stream->link->psr_settings.psr_allow_active) {
			amdgpu_dm_psr_enable(acrtc_state->stream);
		}

		mutex_unlock(&dm->dc_lock);
	}

	/*
	 * Update cursor state *after* programming all the planes.
	 * This avoids redundant programming in the case where we're going
	 * to be disabling a single plane - those pipes are being disabled.
	 */
	if (acrtc_state->active_planes)
		amdgpu_dm_commit_cursors(state);

cleanup:
	kfree(bundle);
}
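
/*
 * Notify the audio side about connectors that lost or gained a stream in
 * this commit so the ELD/audio endpoint state stays in sync with the video
 * state.
 */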
static void amdgpu_dm_commit_audio(struct drm_device *dev,
				   struct drm_atomic_state *state)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_state *old_con_state, *new_con_state;
	struct drm_crtc_state *new_crtc_state;
	struct dm_crtc_state *new_dm_crtc_state;
	const struct dc_stream_status *status;
	int i, inst;

	/* Notify device removals. */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		if (old_con_state->crtc != new_con_state->crtc) {
			/* CRTC changes require notification. */
			goto notify;
		}

		if (!new_con_state->crtc)
			continue;

		new_crtc_state = drm_atomic_get_new_crtc_state(
			state, new_con_state->crtc);

		if (!new_crtc_state)
			continue;

		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
			continue;

	notify:
		aconnector = to_amdgpu_dm_connector(connector);

		mutex_lock(&adev->dm.audio_lock);
		inst = aconnector->audio_inst;
		aconnector->audio_inst = -1;
		mutex_unlock(&adev->dm.audio_lock);

		amdgpu_dm_audio_eld_notify(adev, inst);
	}

	/* Notify audio device additions. */
	for_each_new_connector_in_state(state, connector, new_con_state, i) {
		if (!new_con_state->crtc)
			continue;

		new_crtc_state = drm_atomic_get_new_crtc_state(
			state, new_con_state->crtc);

		if (!new_crtc_state)
			continue;

		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
			continue;

		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
		if (!new_dm_crtc_state->stream)
			continue;

		status = dc_stream_get_status(new_dm_crtc_state->stream);
		if (!status)
			continue;

		aconnector = to_amdgpu_dm_connector(connector);

		mutex_lock(&adev->dm.audio_lock);
		inst = status->audio_inst;
		aconnector->audio_inst = inst;
		mutex_unlock(&adev->dm.audio_lock);

		amdgpu_dm_audio_eld_notify(adev, inst);
	}
}
/**
 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
 * @crtc_state: the DRM CRTC state
 * @stream_state: the DC stream state.
 *
 * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
 */
static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
						struct dc_stream_state *stream_state)
{
	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
}
static int amdgpu_dm_atomic_commit(struct drm_device *dev,
				   struct drm_atomic_state *state,
				   bool nonblock)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct amdgpu_device *adev = dev->dev_private;
	int i;

	/*
	 * We evade vblank and pflip interrupts on CRTCs that are undergoing
	 * a modeset, being disabled, or have no active planes.
	 *
	 * It's done in atomic commit rather than commit tail for now since
	 * some of these interrupt handlers access the current CRTC state and
	 * potentially the stream pointer itself.
	 *
	 * Since the atomic state is swapped within atomic commit and not within
	 * commit tail, this would leave the new state (that hasn't been committed
	 * yet) being accessed from within the handlers.
	 *
	 * TODO: Fix this so we can do this in commit tail and not have to block
	 * in atomic check.
	 */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

		if (old_crtc_state->active &&
		    (!new_crtc_state->active ||
		     drm_atomic_crtc_needs_modeset(new_crtc_state)))
			manage_dm_interrupts(adev, acrtc, false);
	}
	/*
	 * Add check here for SoC's that support hardware cursor plane, to
	 * unset legacy_cursor_update
	 */

	return drm_atomic_helper_commit(dev, state, nonblock);

	/*TODO Handle EINTR, reenable IRQ*/
}
/**
 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
 * @state: The atomic state to commit
 *
 * This will tell DC to commit the constructed DC state from atomic_check,
 * programming the hardware. Any failure here implies a hardware failure, since
 * atomic check should have filtered anything non-kosher.
 */
static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct dm_atomic_state *dm_state;
	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
	uint32_t i, j;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	unsigned long flags;
	bool wait_for_vblank = true;
	struct drm_connector *connector;
	struct drm_connector_state *old_con_state, *new_con_state;
	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
	int crtc_disable_count = 0;

	drm_atomic_helper_update_legacy_modeset_state(dev, state);

	dm_state = dm_atomic_get_new_state(state);
	if (dm_state && dm_state->context) {
		dc_state = dm_state->context;
	} else {
		/* No state changes, retain current state. */
		dc_state_temp = dc_create_state(dm->dc);
		ASSERT(dc_state_temp);
		dc_state = dc_state_temp;
		dc_resource_state_copy_construct_current(dm->dc, dc_state);
	}

	/* update changed items */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		DRM_DEBUG_DRIVER(
			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
			"planes_changed:%d, mode_changed:%d,active_changed:%d,"
			"connectors_changed:%d\n",
			acrtc->crtc_id,
			new_crtc_state->enable,
			new_crtc_state->active,
			new_crtc_state->planes_changed,
			new_crtc_state->mode_changed,
			new_crtc_state->active_changed,
			new_crtc_state->connectors_changed);

		/* Copy all transient state flags into dc state */
		if (dm_new_crtc_state->stream) {
			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
							    dm_new_crtc_state->stream);
		}

		/* handles headless hotplug case, updating new_state and
		 * aconnector as needed
		 */

		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {

			DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);

			if (!dm_new_crtc_state->stream) {
				/*
				 * this could happen because of issues with
				 * userspace notifications delivery.
				 * In this case userspace tries to set mode on
				 * display which is disconnected in fact.
				 * dc_sink is NULL in this case on aconnector.
				 * We expect reset mode will come soon.
				 *
				 * This can also happen when unplug is done
				 * during resume sequence ended
				 *
				 * In this case, we want to pretend we still
				 * have a sink to keep the pipe running so that
				 * hw state is consistent with the sw state
				 */
				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
						__func__, acrtc->base.base.id);
				continue;
			}

			if (dm_old_crtc_state->stream)
				remove_stream(adev, acrtc, dm_old_crtc_state->stream);

			pm_runtime_get_noresume(dev->dev);

			acrtc->enabled = true;
			acrtc->hw_mode = new_crtc_state->mode;
			crtc->hwmode = new_crtc_state->mode;
		} else if (modereset_required(new_crtc_state)) {
			DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
			/* i.e. reset mode */
			if (dm_old_crtc_state->stream) {
				if (dm_old_crtc_state->stream->link->psr_settings.psr_allow_active)
					amdgpu_dm_psr_disable(dm_old_crtc_state->stream);

				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
			}
		}
	} /* for_each_crtc_in_state() */

	if (dc_state) {
		dm_enable_per_frame_crtc_master_sync(dc_state);
		mutex_lock(&dm->dc_lock);
		WARN_ON(!dc_commit_state(dm->dc, dc_state));
		mutex_unlock(&dm->dc_lock);
	}

	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (dm_new_crtc_state->stream != NULL) {
			const struct dc_stream_status *status =
					dc_stream_get_status(dm_new_crtc_state->stream);

			if (!status)
				status = dc_stream_get_status_from_state(dc_state,
									 dm_new_crtc_state->stream);

			if (!status)
				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
			else
				acrtc->otg_inst = status->primary_otg_inst;
		}
	}
#ifdef CONFIG_DRM_AMD_DC_HDCP
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

		new_crtc_state = NULL;

		if (acrtc)
			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
			continue;
		}

		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
			hdcp_update_display(
				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
				new_con_state->hdcp_content_type,
				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? true
													 : false);
	}
#endif

	/* Handle connector state changes */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
		struct dc_surface_update dummy_updates[MAX_SURFACES];
		struct dc_stream_update stream_update;
		struct dc_info_packet hdr_packet;
		struct dc_stream_status *status = NULL;
		bool abm_changed, hdr_changed, scaling_changed;

		memset(&dummy_updates, 0, sizeof(dummy_updates));
		memset(&stream_update, 0, sizeof(stream_update));

		if (acrtc) {
			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
		}

		/* Skip any modesets/resets */
		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
			continue;

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		scaling_changed = is_scaling_state_different(dm_new_con_state,
							     dm_old_con_state);

		abm_changed = dm_new_crtc_state->abm_level !=
			      dm_old_crtc_state->abm_level;

		hdr_changed =
			is_hdr_metadata_different(old_con_state, new_con_state);

		if (!scaling_changed && !abm_changed && !hdr_changed)
			continue;

		stream_update.stream = dm_new_crtc_state->stream;
		if (scaling_changed) {
			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
					dm_new_con_state, dm_new_crtc_state->stream);

			stream_update.src = dm_new_crtc_state->stream->src;
			stream_update.dst = dm_new_crtc_state->stream->dst;
		}

		if (abm_changed) {
			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;

			stream_update.abm_level = &dm_new_crtc_state->abm_level;
		}

		if (hdr_changed) {
			fill_hdr_info_packet(new_con_state, &hdr_packet);
			stream_update.hdr_static_metadata = &hdr_packet;
		}

		status = dc_stream_get_status(dm_new_crtc_state->stream);
		WARN_ON(!status);
		WARN_ON(!status->plane_count);

		/*
		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
		 * Here we create an empty update on each plane.
		 * To fix this, DC should permit updating only stream properties.
		 */
		for (j = 0; j < status->plane_count; j++)
			dummy_updates[j].surface = status->plane_states[0];


		mutex_lock(&dm->dc_lock);
		dc_commit_updates_for_stream(dm->dc,
					     dummy_updates,
					     status->plane_count,
					     dm_new_crtc_state->stream,
					     &stream_update,
					     dc_state);
		mutex_unlock(&dm->dc_lock);
	}

	/* Count number of newly disabled CRTCs for dropping PM refs later. */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
				      new_crtc_state, i) {
		if (old_crtc_state->active && !new_crtc_state->active)
			crtc_disable_count++;

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		/* Update freesync active state. */
		pre_update_freesync_state_on_stream(dm, dm_new_crtc_state);

		/* Handle vrr on->off / off->on transitions */
		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
						dm_new_crtc_state);
	}

	/*
	 * Enable interrupts for CRTCs that are newly enabled or went through
	 * a modeset. It was intentionally deferred until after the front end
	 * state was modified to wait until the OTG was on and so the IRQ
	 * handlers didn't access stale or invalid state.
	 */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

		if (new_crtc_state->active &&
		    (!old_crtc_state->active ||
		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
			manage_dm_interrupts(adev, acrtc, true);
#ifdef CONFIG_DEBUG_FS
			/*
			 * Frontend may have changed so reapply the CRC capture
			 * settings for the stream.
			 */
			dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

			if (amdgpu_dm_is_valid_crc_source(dm_new_crtc_state->crc_src)) {
				amdgpu_dm_crtc_configure_crc_source(
					crtc, dm_new_crtc_state,
					dm_new_crtc_state->crc_src);
			}
#endif
		}
	}

	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
		if (new_crtc_state->async_flip)
			wait_for_vblank = false;

	/* update planes when needed per crtc*/
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (dm_new_crtc_state->stream)
			amdgpu_dm_commit_planes(state, dc_state, dev,
						dm, crtc, wait_for_vblank);
	}

	/* Update audio instances for each connector. */
	amdgpu_dm_commit_audio(dev, state);

	/*
	 * send vblank event on all events not handled in flip and
	 * mark consumed event for drm_atomic_helper_commit_hw_done
	 */
	spin_lock_irqsave(&adev->ddev->event_lock, flags);
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {

		if (new_crtc_state->event)
			drm_send_event_locked(dev, &new_crtc_state->event->base);

		new_crtc_state->event = NULL;
	}
	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);

	/* Signal HW programming completion */
	drm_atomic_helper_commit_hw_done(state);

	if (wait_for_vblank)
		drm_atomic_helper_wait_for_flip_done(dev, state);

	drm_atomic_helper_cleanup_planes(dev, state);

	/*
	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
	 * so we can put the GPU into runtime suspend if we're not driving any
	 * displays anymore
	 */
	for (i = 0; i < crtc_disable_count; i++)
		pm_runtime_put_autosuspend(dev->dev);
	pm_runtime_mark_last_busy(dev->dev);

	if (dc_state_temp)
		dc_release_state(dc_state_temp);
}
static int dm_force_atomic_commit(struct drm_connector *connector)
{
	int ret = 0;
	struct drm_device *ddev = connector->dev;
	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
	struct drm_plane *plane = disconnected_acrtc->base.primary;
	struct drm_connector_state *conn_state;
	struct drm_crtc_state *crtc_state;
	struct drm_plane_state *plane_state;

	if (!state)
		return -ENOMEM;

	state->acquire_ctx = ddev->mode_config.acquire_ctx;

	/* Construct an atomic state to restore previous display setting */

	/*
	 * Attach connectors to drm_atomic_state
	 */
	conn_state = drm_atomic_get_connector_state(state, connector);

	ret = PTR_ERR_OR_ZERO(conn_state);
	if (ret)
		goto err;

	/* Attach crtc to drm_atomic_state*/
	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);

	ret = PTR_ERR_OR_ZERO(crtc_state);
	if (ret)
		goto err;

	/* force a restore */
	crtc_state->mode_changed = true;

	/* Attach plane to drm_atomic_state */
	plane_state = drm_atomic_get_plane_state(state, plane);

	ret = PTR_ERR_OR_ZERO(plane_state);
	if (ret)
		goto err;


	/* Call commit internally with the state we just constructed */
	ret = drm_atomic_commit(state);
	if (!ret)
		return 0;

err:
	DRM_ERROR("Restoring old state failed with %i\n", ret);
	drm_atomic_state_put(state);

	return ret;
}
/*
 * This function handles all cases when set mode does not come upon hotplug.
 * This includes when a display is unplugged then plugged back into the
 * same port and when running without usermode desktop manager support.
 */
void dm_restore_drm_connector_state(struct drm_device *dev,
				    struct drm_connector *connector)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct amdgpu_crtc *disconnected_acrtc;
	struct dm_crtc_state *acrtc_state;

	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
		return;

	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
	if (!disconnected_acrtc)
		return;

	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
	if (!acrtc_state->stream)
		return;

	/*
	 * If the previous sink is not released and different from the current,
	 * we deduce we are in a state where we can not rely on usermode call
	 * to turn on the display, so we do it here
	 */
	if (acrtc_state->stream->sink != aconnector->dc_sink)
		dm_force_atomic_commit(&aconnector->base);
}
/*
 * Grabs all modesetting locks to serialize against any blocking commits,
 * Waits for completion of all non blocking commits.
 */
static int do_aquire_global_lock(struct drm_device *dev,
				 struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_commit *commit;
	long ret;

	/*
	 * Adding all modeset locks to acquire_ctx will
	 * ensure that when the framework release it the
	 * extra locks we are locking here will get released to
	 */
	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
	if (ret)
		return ret;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		spin_lock(&crtc->commit_lock);
		commit = list_first_entry_or_null(&crtc->commit_list,
				struct drm_crtc_commit, commit_entry);
		if (commit)
			drm_crtc_commit_get(commit);
		spin_unlock(&crtc->commit_lock);

		if (!commit)
			continue;

		/*
		 * Make sure all pending HW programming completed and
		 * page flips done
		 */
		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);

		if (ret > 0)
			ret = wait_for_completion_interruptible_timeout(
					&commit->flip_done, 10*HZ);

		if (ret == 0)
			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
				  "timed out\n", crtc->base.id, crtc->name);

		drm_crtc_commit_put(commit);
	}

	return ret < 0 ? ret : 0;
}
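
/*
 * Derive the freesync configuration for a CRTC from the connector's
 * reported refresh range: VRR is only supported when the mode's refresh
 * rate falls inside the [min_vfreq, max_vfreq] range advertised by the
 * display.
 */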
static void get_freesync_config_for_crtc(
	struct dm_crtc_state *new_crtc_state,
	struct dm_connector_state *new_con_state)
{
	struct mod_freesync_config config = {0};
	struct amdgpu_dm_connector *aconnector =
			to_amdgpu_dm_connector(new_con_state->base.connector);
	struct drm_display_mode *mode = &new_crtc_state->base.mode;
	int vrefresh = drm_mode_vrefresh(mode);

	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
					vrefresh >= aconnector->min_vfreq &&
					vrefresh <= aconnector->max_vfreq;

	if (new_crtc_state->vrr_supported) {
		new_crtc_state->stream->ignore_msa_timing_param = true;
		config.state = new_crtc_state->base.vrr_enabled ?
				VRR_STATE_ACTIVE_VARIABLE :
				VRR_STATE_INACTIVE;
		config.min_refresh_in_uhz =
				aconnector->min_vfreq * 1000000;
		config.max_refresh_in_uhz =
				aconnector->max_vfreq * 1000000;
		config.vsif_supported = true;
		config.btr = true;
	}

	new_crtc_state->freesync_config = config;
}
static void reset_freesync_config_for_crtc(
	struct dm_crtc_state *new_crtc_state)
{
	new_crtc_state->vrr_supported = false;

	memset(&new_crtc_state->vrr_params, 0,
	       sizeof(new_crtc_state->vrr_params));
	memset(&new_crtc_state->vrr_infopacket, 0,
	       sizeof(new_crtc_state->vrr_infopacket));
}
static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
				struct drm_atomic_state *state,
				struct drm_crtc *crtc,
				struct drm_crtc_state *old_crtc_state,
				struct drm_crtc_state *new_crtc_state,
				bool enable,
				bool *lock_and_validation_needed)
{
	struct dm_atomic_state *dm_state = NULL;
	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
	struct dc_stream_state *new_stream;
	int ret = 0;

	/*
	 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
	 * update changed items
	 */
	struct amdgpu_crtc *acrtc = NULL;
	struct amdgpu_dm_connector *aconnector = NULL;
	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;

	new_stream = NULL;

	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
	acrtc = to_amdgpu_crtc(crtc);
	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);

	/* TODO This hack should go away */
	if (aconnector && enable) {
		/* Make sure fake sink is created in plug-in scenario */
		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
							    &aconnector->base);
		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
							    &aconnector->base);

		if (IS_ERR(drm_new_conn_state)) {
			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
			goto fail;
		}

		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);

		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
			goto skip_modeset;

		new_stream = create_validate_stream_for_sink(aconnector,
							     &new_crtc_state->mode,
							     dm_new_conn_state,
							     dm_old_crtc_state->stream);

		/*
		 * we can have no stream on ACTION_SET if a display
		 * was disconnected during S3, in this case it is not an
		 * error, the OS will be updated after detection, and
		 * will do the right thing on next atomic commit
		 */

		if (!new_stream) {
			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
					__func__, acrtc->base.base.id);
			ret = -ENOMEM;
			goto fail;
		}

		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;

		ret = fill_hdr_info_packet(drm_new_conn_state,
					   &new_stream->hdr_static_metadata);
		if (ret)
			goto fail;

		/*
		 * If we already removed the old stream from the context
		 * (and set the new stream to NULL) then we can't reuse
		 * the old stream even if the stream and scaling are unchanged.
		 * We'll hit the BUG_ON and black screen.
		 *
		 * TODO: Refactor this function to allow this check to work
		 * in all conditions.
		 */
		if (dm_new_crtc_state->stream &&
		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
			new_crtc_state->mode_changed = false;
			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
					 new_crtc_state->mode_changed);
		}
	}

	/* mode_changed flag may get updated above, need to check again */
	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
		goto skip_modeset;

	DRM_DEBUG_DRIVER(
		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
		"planes_changed:%d, mode_changed:%d,active_changed:%d,"
		"connectors_changed:%d\n",
		acrtc->crtc_id,
		new_crtc_state->enable,
		new_crtc_state->active,
		new_crtc_state->planes_changed,
		new_crtc_state->mode_changed,
		new_crtc_state->active_changed,
		new_crtc_state->connectors_changed);

	/* Remove stream for any changed/disabled CRTC */
	if (!enable) {

		if (!dm_old_crtc_state->stream)
			goto skip_modeset;

		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			goto fail;

		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
				crtc->base.id);

		/* i.e. reset mode */
		if (dc_remove_stream_from_ctx(
				dm->dc,
				dm_state->context,
				dm_old_crtc_state->stream) != DC_OK) {
			ret = -EINVAL;
			goto fail;
		}

		dc_stream_release(dm_old_crtc_state->stream);
		dm_new_crtc_state->stream = NULL;

		reset_freesync_config_for_crtc(dm_new_crtc_state);

		*lock_and_validation_needed = true;

	} else {/* Add stream for any updated/enabled CRTC */
		/*
		 * Quick fix to prevent NULL pointer on new_stream when
		 * added MST connectors not found in existing crtc_state in the chained mode
		 * TODO: need to dig out the root cause of that
		 */
		if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
			goto skip_modeset;

		if (modereset_required(new_crtc_state))
			goto skip_modeset;

		if (modeset_required(new_crtc_state, new_stream,
				     dm_old_crtc_state->stream)) {

			WARN_ON(dm_new_crtc_state->stream);

			ret = dm_atomic_get_state(state, &dm_state);
			if (ret)
				goto fail;

			dm_new_crtc_state->stream = new_stream;

			dc_stream_retain(new_stream);

			DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
						crtc->base.id);

			if (dc_add_stream_to_ctx(
					dm->dc,
					dm_state->context,
					dm_new_crtc_state->stream) != DC_OK) {
				ret = -EINVAL;
				goto fail;
			}

			*lock_and_validation_needed = true;
		}
	}

skip_modeset:
	/* Release extra reference */
	if (new_stream)
		dc_stream_release(new_stream);

	/*
	 * We want to do dc stream updates that do not require a
	 * full modeset below.
	 */
	if (!(enable && aconnector && new_crtc_state->enable &&
	      new_crtc_state->active))
		return 0;
	/*
	 * Given above conditions, the dc state cannot be NULL because:
	 * 1. We're in the process of enabling CRTCs (just been added
	 *    to the dc context, or already is on the context)
	 * 2. Has a valid connector attached, and
	 * 3. Is currently active and enabled.
	 * => The dc stream state currently exists.
	 */
	BUG_ON(dm_new_crtc_state->stream == NULL);

	/* Scaling or underscan settings */
	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
		update_stream_scaling_settings(
			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);

	/* ABM settings */
	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;

	/*
	 * Color management settings. We also update color properties
	 * when a modeset is needed, to ensure it gets reprogrammed.
	 */
	if (dm_new_crtc_state->base.color_mgmt_changed ||
	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
		if (ret)
			goto fail;
	}

	/* Update Freesync settings. */
	get_freesync_config_for_crtc(dm_new_crtc_state,
				     dm_new_conn_state);

	return ret;

fail:
	if (new_stream)
		dc_stream_release(new_stream);
	return ret;
}
static bool should_reset_plane(struct drm_atomic_state *state,
			       struct drm_plane *plane,
			       struct drm_plane_state *old_plane_state,
			       struct drm_plane_state *new_plane_state)
{
	struct drm_plane *other;
	struct drm_plane_state *old_other_state, *new_other_state;
	struct drm_crtc_state *new_crtc_state;
	int i;

	/*
	 * TODO: Remove this hack once the checks below are sufficient
	 * enough to determine when we need to reset all the planes on
	 * the stream.
	 */
	if (state->allow_modeset)
		return true;

	/* Exit early if we know that we're adding or removing the plane. */
	if (old_plane_state->crtc != new_plane_state->crtc)
		return true;

	/* old crtc == new_crtc == NULL, plane not in context. */
	if (!new_plane_state->crtc)
		return false;

	new_crtc_state =
		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);

	if (!new_crtc_state)
		return true;

	/* CRTC Degamma changes currently require us to recreate planes. */
	if (new_crtc_state->color_mgmt_changed)
		return true;

	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
		return true;

	/*
	 * If there are any new primary or overlay planes being added or
	 * removed then the z-order can potentially change. To ensure
	 * correct z-order and pipe acquisition the current DC architecture
	 * requires us to remove and recreate all existing planes.
	 *
	 * TODO: Come up with a more elegant solution for this.
	 */
	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
		if (other->type == DRM_PLANE_TYPE_CURSOR)
			continue;

		if (old_other_state->crtc != new_plane_state->crtc &&
		    new_other_state->crtc != new_plane_state->crtc)
			continue;

		if (old_other_state->crtc != new_other_state->crtc)
			return true;

		/* TODO: Remove this once we can handle fast format changes. */
		if (old_other_state->fb && new_other_state->fb &&
		    old_other_state->fb->format != new_other_state->fb->format)
			return true;
	}

	return false;
}
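
/*
 * Validate a single plane in the atomic state and mirror the change into the
 * DC context: removed or reset planes are taken out of the old stream's
 * context, while new or reset planes get a fresh dc_plane_state attached to
 * the new stream. Sets *lock_and_validation_needed whenever the change
 * requires full DC validation.
 */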
static int dm_update_plane_state(struct dc *dc,
				 struct drm_atomic_state *state,
				 struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state,
				 struct drm_plane_state *new_plane_state,
				 bool enable,
				 bool *lock_and_validation_needed)
{

	struct dm_atomic_state *dm_state = NULL;
	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
	struct amdgpu_crtc *new_acrtc;
	bool needs_reset;
	int ret = 0;


	new_plane_crtc = new_plane_state->crtc;
	old_plane_crtc = old_plane_state->crtc;
	dm_new_plane_state = to_dm_plane_state(new_plane_state);
	dm_old_plane_state = to_dm_plane_state(old_plane_state);

	/*TODO Implement better atomic check for cursor plane */
	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
		if (!enable || !new_plane_crtc ||
			drm_atomic_plane_disabling(plane->state, new_plane_state))
			return 0;

		new_acrtc = to_amdgpu_crtc(new_plane_crtc);

		if ((new_plane_state->crtc_w > new_acrtc->max_cursor_width) ||
			(new_plane_state->crtc_h > new_acrtc->max_cursor_height)) {
			DRM_DEBUG_ATOMIC("Bad cursor size %d x %d\n",
					 new_plane_state->crtc_w, new_plane_state->crtc_h);
			return -EINVAL;
		}

		return 0;
	}

	needs_reset = should_reset_plane(state, plane, old_plane_state,
					 new_plane_state);

	/* Remove any changed/removed planes */
	if (!enable) {
		if (!needs_reset)
			return 0;

		if (!old_plane_crtc)
			return 0;

		old_crtc_state = drm_atomic_get_old_crtc_state(
				state, old_plane_crtc);
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		if (!dm_old_crtc_state->stream)
			return 0;

		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
				 plane->base.id, old_plane_crtc->base.id);

		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			return ret;

		if (!dc_remove_plane_from_context(
				dc,
				dm_old_crtc_state->stream,
				dm_old_plane_state->dc_state,
				dm_state->context)) {

			return -EINVAL;
		}


		dc_plane_state_release(dm_old_plane_state->dc_state);
		dm_new_plane_state->dc_state = NULL;

		*lock_and_validation_needed = true;

	} else { /* Add new planes */
		struct dc_plane_state *dc_new_plane_state;

		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
			return 0;

		if (!new_plane_crtc)
			return 0;

		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (!dm_new_crtc_state->stream)
			return 0;

		if (!needs_reset)
			return 0;

		ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
		if (ret)
			return ret;

		WARN_ON(dm_new_plane_state->dc_state);

		dc_new_plane_state = dc_create_plane_state(dc);
		if (!dc_new_plane_state)
			return -ENOMEM;

		DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
				 plane->base.id, new_plane_crtc->base.id);

		ret = fill_dc_plane_attributes(
			new_plane_crtc->dev->dev_private,
			dc_new_plane_state,
			new_plane_state,
			new_crtc_state);
		if (ret) {
			dc_plane_state_release(dc_new_plane_state);
			return ret;
		}

		ret = dm_atomic_get_state(state, &dm_state);
		if (ret) {
			dc_plane_state_release(dc_new_plane_state);
			return ret;
		}

		/*
		 * Any atomic check errors that occur after this will
		 * not need a release. The plane state will be attached
		 * to the stream, and therefore part of the atomic
		 * state. It'll be released when the atomic state is
		 * cleaned.
		 */
		if (!dc_add_plane_to_context(
				dc,
				dm_new_crtc_state->stream,
				dc_new_plane_state,
				dm_state->context)) {

			dc_plane_state_release(dc_new_plane_state);
			return -EINVAL;
		}

		dm_new_plane_state->dc_state = dc_new_plane_state;

		/* Tell DC to do a full surface update every time there
		 * is a plane change. Inefficient, but works for now.
		 */
		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;

		*lock_and_validation_needed = true;
	}


	return ret;
}
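
/*
 * Ask DC how heavy this commit is. For every CRTC whose stream is unchanged,
 * a scratch surface_info_bundle describing the per-plane updates is built and
 * handed to dc_check_update_surfaces_for_stream(); any stream or plane state
 * swap, or any result above UPDATE_TYPE_MED, is promoted to UPDATE_TYPE_FULL
 * so the caller takes the global lock and revalidates the DC state.
 */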
static int
dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
				    struct drm_atomic_state *state,
				    enum surface_update_type *out_type)
{
	struct dc *dc = dm->dc;
	struct dm_atomic_state *dm_state = NULL, *old_dm_state = NULL;
	int i, j, num_plane, ret = 0;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	struct dm_plane_state *new_dm_plane_state, *old_dm_plane_state;
	struct drm_crtc *new_plane_crtc;
	struct drm_plane *plane;

	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_state, *old_crtc_state;
	struct dm_crtc_state *new_dm_crtc_state, *old_dm_crtc_state;
	struct dc_stream_status *status = NULL;
	enum surface_update_type update_type = UPDATE_TYPE_FAST;
	struct surface_info_bundle {
		struct dc_surface_update surface_updates[MAX_SURFACES];
		struct dc_plane_info plane_infos[MAX_SURFACES];
		struct dc_scaling_info scaling_infos[MAX_SURFACES];
		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
		struct dc_stream_update stream_update;
	} *bundle;

	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);

	if (!bundle) {
		DRM_ERROR("Failed to allocate update bundle\n");
		/* Set type to FULL to avoid crashing in DC */
		update_type = UPDATE_TYPE_FULL;
		goto cleanup;
	}

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {

		memset(bundle, 0, sizeof(struct surface_info_bundle));

		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
		old_dm_crtc_state = to_dm_crtc_state(old_crtc_state);
		num_plane = 0;

		if (new_dm_crtc_state->stream != old_dm_crtc_state->stream) {
			update_type = UPDATE_TYPE_FULL;
			goto cleanup;
		}

		if (!new_dm_crtc_state->stream)
			continue;

		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, j) {
			const struct amdgpu_framebuffer *amdgpu_fb =
				to_amdgpu_framebuffer(new_plane_state->fb);
			struct dc_plane_info *plane_info = &bundle->plane_infos[num_plane];
			struct dc_flip_addrs *flip_addr = &bundle->flip_addrs[num_plane];
			struct dc_scaling_info *scaling_info = &bundle->scaling_infos[num_plane];
			uint64_t tiling_flags;
			bool tmz_surface = false;

			new_plane_crtc = new_plane_state->crtc;
			new_dm_plane_state = to_dm_plane_state(new_plane_state);
			old_dm_plane_state = to_dm_plane_state(old_plane_state);

			if (plane->type == DRM_PLANE_TYPE_CURSOR)
				continue;

			if (new_dm_plane_state->dc_state != old_dm_plane_state->dc_state) {
				update_type = UPDATE_TYPE_FULL;
				goto cleanup;
			}

			if (crtc != new_plane_crtc)
				continue;

			bundle->surface_updates[num_plane].surface =
					new_dm_plane_state->dc_state;

			if (new_crtc_state->mode_changed) {
				bundle->stream_update.dst = new_dm_crtc_state->stream->dst;
				bundle->stream_update.src = new_dm_crtc_state->stream->src;
			}

			if (new_crtc_state->color_mgmt_changed) {
				bundle->surface_updates[num_plane].gamma =
						new_dm_plane_state->dc_state->gamma_correction;
				bundle->surface_updates[num_plane].in_transfer_func =
						new_dm_plane_state->dc_state->in_transfer_func;
				bundle->surface_updates[num_plane].gamut_remap_matrix =
						&new_dm_plane_state->dc_state->gamut_remap_matrix;
				bundle->stream_update.gamut_remap =
						&new_dm_crtc_state->stream->gamut_remap_matrix;
				bundle->stream_update.output_csc_transform =
						&new_dm_crtc_state->stream->csc_color_matrix;
				bundle->stream_update.out_transfer_func =
						new_dm_crtc_state->stream->out_transfer_func;
			}

			ret = fill_dc_scaling_info(new_plane_state,
						   scaling_info);
			if (ret)
				goto cleanup;

			bundle->surface_updates[num_plane].scaling_info = scaling_info;

			if (amdgpu_fb) {
				ret = get_fb_info(amdgpu_fb, &tiling_flags, &tmz_surface);
				if (ret)
					goto cleanup;

				ret = fill_dc_plane_info_and_addr(
					dm->adev, new_plane_state, tiling_flags,
					plane_info,
					&flip_addr->address, tmz_surface,
					false);
				if (ret)
					goto cleanup;

				bundle->surface_updates[num_plane].plane_info = plane_info;
				bundle->surface_updates[num_plane].flip_addr = flip_addr;
			}

			num_plane++;
		}

		if (num_plane == 0)
			continue;

		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			goto cleanup;

		old_dm_state = dm_atomic_get_old_state(state);
		if (!old_dm_state) {
			ret = -EINVAL;
			goto cleanup;
		}

		status = dc_stream_get_status_from_state(old_dm_state->context,
							 new_dm_crtc_state->stream);
		bundle->stream_update.stream = new_dm_crtc_state->stream;
		/*
		 * TODO: DC modifies the surface during this call so we need
		 * to lock here - find a way to do this without locking.
		 */
		mutex_lock(&dm->dc_lock);
		update_type = dc_check_update_surfaces_for_stream(
				dc, bundle->surface_updates, num_plane,
				&bundle->stream_update, status);
		mutex_unlock(&dm->dc_lock);

		if (update_type > UPDATE_TYPE_MED) {
			update_type = UPDATE_TYPE_FULL;
			goto cleanup;
		}
	}

cleanup:
	kfree(bundle);

	*out_type = update_type;
	return ret;
}
#if defined(CONFIG_DRM_AMD_DC_DCN)
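/*
 * When a modeset touches a CRTC driven over an MST link that uses DSC, the
 * other CRTCs sharing that link may need their DSC configuration recomputed
 * as well; pull them into the atomic state via the DRM MST helper so they
 * are validated together.
 */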
static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
{
	struct drm_connector *connector;
	struct drm_connector_state *conn_state;
	struct amdgpu_dm_connector *aconnector = NULL;
	int i;

	for_each_new_connector_in_state(state, connector, conn_state, i) {
		if (conn_state->crtc != crtc)
			continue;

		aconnector = to_amdgpu_dm_connector(connector);
		if (!aconnector->port || !aconnector->mst_port)
			aconnector = NULL;
		else
			break;
	}

	if (!aconnector)
		return 0;

	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
}
#endif
/**
 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
 * @dev: The DRM device
 * @state: The atomic state to commit
 *
 * Validate that the given atomic state is programmable by DC into hardware.
 * This involves constructing a &struct dc_state reflecting the new hardware
 * state we wish to commit, then querying DC to see if it is programmable. It's
 * important not to modify the existing DC state. Otherwise, atomic_check
 * may unexpectedly commit hardware changes.
 *
 * When validating the DC state, it's important that the right locks are
 * acquired. For full updates case which removes/adds/updates streams on one
 * CRTC while flipping on another CRTC, acquiring global lock will guarantee
 * that any such full update commit will wait for completion of any outstanding
 * flip using DRMs synchronization events. See
 * dm_determine_update_type_for_commit()
 *
 * Note that DM adds the affected connectors for all CRTCs in state, when that
 * might not seem necessary. This is because DC stream creation requires the
 * DC sink, which is tied to the DRM connector state. Cleaning this up should
 * be possible but non-trivial - a possible TODO item.
 *
 * Return: -Error code if validation failed.
 */
static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct dm_atomic_state *dm_state = NULL;
	struct dc *dc = adev->dm.dc;
	struct drm_connector *connector;
	struct drm_connector_state *old_con_state, *new_con_state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_plane *plane;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	enum surface_update_type update_type = UPDATE_TYPE_FAST;
	enum surface_update_type overall_update_type = UPDATE_TYPE_FAST;
	enum dc_status status;
	int ret, i;

	/*
	 * This bool will be set for true for any modeset/reset
	 * or plane update which implies non fast surface update.
	 */
	bool lock_and_validation_needed = false;

	ret = drm_atomic_helper_check_modeset(dev, state);
	if (ret)
		goto fail;

	/* Check connector changes */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);

		/* Skip connectors that are disabled or part of modeset already. */
		if (!old_con_state->crtc && !new_con_state->crtc)
			continue;

		if (!new_con_state->crtc)
			continue;

		new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
		if (IS_ERR(new_crtc_state)) {
			ret = PTR_ERR(new_crtc_state);
			goto fail;
		}

		if (dm_old_con_state->abm_level !=
		    dm_new_con_state->abm_level)
			new_crtc_state->connectors_changed = true;
	}

#if defined(CONFIG_DRM_AMD_DC_DCN)
	if (adev->asic_type >= CHIP_NAVI10) {
		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
				ret = add_affected_mst_dsc_crtcs(state, crtc);
				if (ret)
					goto fail;
			}
		}
	}
#endif
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
		    !new_crtc_state->color_mgmt_changed &&
		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled)
			continue;

		if (!new_crtc_state->enable)
			continue;

		ret = drm_atomic_add_affected_connectors(state, crtc);
		if (ret)
			goto fail;

		ret = drm_atomic_add_affected_planes(state, crtc);
		if (ret)
			goto fail;
	}

	/*
	 * Add all primary and overlay planes on the CRTC to the state
	 * whenever a plane is enabled to maintain correct z-ordering
	 * and to enable fast surface updates.
	 */
	drm_for_each_crtc(crtc, dev) {
		bool modified = false;

		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
			if (plane->type == DRM_PLANE_TYPE_CURSOR)
				continue;

			if (new_plane_state->crtc == crtc ||
			    old_plane_state->crtc == crtc) {
				modified = true;
				break;
			}
		}

		if (!modified)
			continue;

		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
			if (plane->type == DRM_PLANE_TYPE_CURSOR)
				continue;

			new_plane_state =
				drm_atomic_get_plane_state(state, plane);

			if (IS_ERR(new_plane_state)) {
				ret = PTR_ERR(new_plane_state);
				goto fail;
			}
		}
	}

	/* Remove exiting planes if they are modified */
	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
		ret = dm_update_plane_state(dc, state, plane,
					    old_plane_state,
					    new_plane_state,
					    false,
					    &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	/* Disable all crtcs which require disable */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		ret = dm_update_crtc_state(&adev->dm, state, crtc,
					   old_crtc_state,
					   new_crtc_state,
					   false,
					   &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	/* Enable all crtcs which require enable */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		ret = dm_update_crtc_state(&adev->dm, state, crtc,
					   old_crtc_state,
					   new_crtc_state,
					   true,
					   &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	/* Add new/modified planes */
	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
		ret = dm_update_plane_state(dc, state, plane,
					    old_plane_state,
					    new_plane_state,
					    true,
					    &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	/* Run this here since we want to validate the streams we created */
	ret = drm_atomic_helper_check_planes(dev, state);
	if (ret)
		goto fail;

	if (state->legacy_cursor_update) {
		/*
		 * This is a fast cursor update coming from the plane update
		 * helper, check if it can be done asynchronously for better
		 * performance.
		 */
		state->async_update =
			!drm_atomic_helper_async_check(dev, state);

		/*
		 * Skip the remaining global validation if this is an async
		 * update. Cursor updates can be done without affecting
		 * state or bandwidth calcs and this avoids the performance
		 * penalty of locking the private state object and
		 * allocating a new dc_state.
		 */
		if (state->async_update)
			return 0;
	}

	/* Check scaling and underscan changes*/
	/* TODO Removed scaling changes validation due to inability to commit
	 * new stream into context w\o causing full reset. Need to
	 * decide how to handle.
	 */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);

		/* Skip any modesets/resets */
		if (!acrtc || drm_atomic_crtc_needs_modeset(
				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
			continue;

		/* Skip any thing not scale or underscan changes */
		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
			continue;

		overall_update_type = UPDATE_TYPE_FULL;
		lock_and_validation_needed = true;
	}

	ret = dm_determine_update_type_for_commit(&adev->dm, state, &update_type);
	if (ret)
		goto fail;

	if (overall_update_type < update_type)
		overall_update_type = update_type;

	/*
	 * lock_and_validation_needed was an old way to determine if we need to set
	 * the global lock. Leaving it in to check if we broke any corner cases
	 * lock_and_validation_needed true = UPDATE_TYPE_FULL or UPDATE_TYPE_MED
	 * lock_and_validation_needed false = UPDATE_TYPE_FAST
	 */
	if (lock_and_validation_needed && overall_update_type <= UPDATE_TYPE_FAST)
		WARN(1, "Global lock should be Set, overall_update_type should be UPDATE_TYPE_MED or UPDATE_TYPE_FULL");

	if (overall_update_type > UPDATE_TYPE_FAST) {
		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			goto fail;

		ret = do_aquire_global_lock(dev, state);
		if (ret)
			goto fail;

#if defined(CONFIG_DRM_AMD_DC_DCN)
		if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
			goto fail;

		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
		if (ret)
			goto fail;
#endif

		/*
		 * Perform validation of MST topology in the state:
		 * We need to perform MST atomic check before calling
		 * dc_validate_global_state(), or there is a chance
		 * to get stuck in an infinite loop and hang eventually.
		 */
		ret = drm_dp_mst_atomic_check(state);
		if (ret)
			goto fail;
		status = dc_validate_global_state(dc, dm_state->context, false);
		if (status != DC_OK) {
			DC_LOG_WARNING("DC global validation failure: %s (%d)",
				       dc_status_to_str(status), status);
			ret = -EINVAL;
			goto fail;
		}
	} else {
		/*
		 * The commit is a fast update. Fast updates shouldn't change
		 * the DC context, affect global validation, and can have their
		 * commit work done in parallel with other commits not touching
		 * the same resource. If we have a new DC context as part of
		 * the DM atomic state from validation we need to free it and
		 * retain the existing one instead.
		 *
		 * Furthermore, since the DM atomic state only contains the DC
		 * context and can safely be annulled, we can free the state
		 * and clear the associated private object now to free
		 * some memory and avoid a possible use-after-free later.
		 */

		for (i = 0; i < state->num_private_objs; i++) {
			struct drm_private_obj *obj = state->private_objs[i].ptr;

			if (obj->funcs == adev->dm.atomic_obj.funcs) {
				int j = state->num_private_objs-1;

				dm_atomic_destroy_state(obj,
						state->private_objs[i].state);

				/* If i is not at the end of the array then the
				 * last element needs to be moved to where i was
				 * before the array can safely be truncated.
				 */
				if (i != j)
					state->private_objs[i] =
						state->private_objs[j];

				state->private_objs[j].ptr = NULL;
				state->private_objs[j].state = NULL;
				state->private_objs[j].old_state = NULL;
				state->private_objs[j].new_state = NULL;

				state->num_private_objs = j;
				break;
			}
		}
	}

	/* Store the overall update type for use later in atomic check. */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		struct dm_crtc_state *dm_new_crtc_state =
			to_dm_crtc_state(new_crtc_state);

		dm_new_crtc_state->update_type = (int)overall_update_type;
	}

	/* Must be success */
	WARN_ON(ret);
	return ret;

fail:
	if (ret == -EDEADLK)
		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
	else
		DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);

	return ret;
}
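
/*
 * Read DP_DOWN_STREAM_PORT_COUNT from the sink's DPCD and report whether it
 * sets DP_MSA_TIMING_PAR_IGNORED, i.e. whether the sink can ignore the MSA
 * timing parameters. Used below to decide whether FreeSync can be offered on
 * DP/eDP sinks based on the EDID range descriptor alone.
 */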
static bool is_dp_capable_without_timing_msa(struct dc *dc,
					     struct amdgpu_dm_connector *amdgpu_dm_connector)
{
	uint8_t dpcd_data;
	bool capable = false;

	if (amdgpu_dm_connector->dc_link &&
		dm_helpers_dp_read_dpcd(
				NULL,
				amdgpu_dm_connector->dc_link,
				DP_DOWN_STREAM_PORT_COUNT,
				&dpcd_data,
				sizeof(dpcd_data))) {
		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true:false;
	}

	return capable;
}
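
/**
 * amdgpu_dm_update_freesync_caps() - Update the connector's FreeSync state
 * @connector: DRM connector to update
 * @edid: EDID to parse, or NULL to clear the cached refresh-rate range
 *
 * Walk the EDID detailed monitor range descriptors on DP/eDP sinks that can
 * ignore MSA timing and record the supported vertical refresh range. The
 * connector is reported as VRR capable when that range spans more than 10 Hz.
 */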
void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
				    struct edid *edid)
{
	int i;
	bool edid_check_required;
	struct detailed_timing *timing;
	struct detailed_non_pixel *data;
	struct detailed_data_monitor_range *range;
	struct amdgpu_dm_connector *amdgpu_dm_connector =
			to_amdgpu_dm_connector(connector);
	struct dm_connector_state *dm_con_state = NULL;

	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = dev->dev_private;
	bool freesync_capable = false;

	if (!connector->state) {
		DRM_ERROR("%s - Connector has no state", __func__);
		goto update;
	}

	if (!edid) {
		dm_con_state = to_dm_connector_state(connector->state);

		amdgpu_dm_connector->min_vfreq = 0;
		amdgpu_dm_connector->max_vfreq = 0;
		amdgpu_dm_connector->pixel_clock_mhz = 0;

		goto update;
	}

	dm_con_state = to_dm_connector_state(connector->state);

	edid_check_required = false;
	if (!amdgpu_dm_connector->dc_sink) {
		DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
		goto update;
	}
	if (!adev->dm.freesync_module)
		goto update;
	/*
	 * if edid non zero restrict freesync only for dp and edp
	 */
	if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
	    || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
		edid_check_required = is_dp_capable_without_timing_msa(
					adev->dm.dc,
					amdgpu_dm_connector);
	}

	if (edid_check_required == true && (edid->version > 1 ||
	   (edid->version == 1 && edid->revision > 1))) {
		for (i = 0; i < 4; i++) {

			timing	= &edid->detailed_timings[i];
			data	= &timing->data.other_data;
			range	= &data->data.range;
			/*
			 * Check if monitor has continuous frequency mode
			 */
			if (data->type != EDID_DETAIL_MONITOR_RANGE)
				continue;
			/*
			 * Check for flag range limits only. If flag == 1 then
			 * no additional timing information provided.
			 * Default GTF, GTF Secondary curve and CVT are not
			 * supported
			 */
			if (range->flags != 1)
				continue;

			amdgpu_dm_connector->min_vfreq = range->min_vfreq;
			amdgpu_dm_connector->max_vfreq = range->max_vfreq;
			amdgpu_dm_connector->pixel_clock_mhz =
				range->pixel_clock_mhz * 10;
			break;
		}

		if (amdgpu_dm_connector->max_vfreq -
		    amdgpu_dm_connector->min_vfreq > 10) {

			freesync_capable = true;
		}
	}

update:
	if (dm_con_state)
		dm_con_state->freesync_capable = freesync_capable;

	if (connector->vrr_capable_property)
		drm_connector_set_vrr_capable_property(connector,
						       freesync_capable);
}
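
/*
 * Query eDP PSR capability: read the DP_PSR_SUPPORT DPCD byte, record the
 * reported PSR version on the link and flag psr_feature_enabled accordingly.
 */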
static void amdgpu_dm_set_psr_caps(struct dc_link *link)
{
	uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];

	if (!(link->connector_signal & SIGNAL_TYPE_EDP))
		return;
	if (link->type == dc_connection_none)
		return;
	if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
					dpcd_data, sizeof(dpcd_data))) {
		link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];

		if (dpcd_data[0] == 0) {
			link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
			link->psr_settings.psr_feature_enabled = false;
		} else {
			link->psr_settings.psr_version = DC_PSR_VERSION_1;
			link->psr_settings.psr_feature_enabled = true;
		}

		DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
	}
}
/*
 * amdgpu_dm_link_setup_psr() - configure psr link
 * @stream: stream state
 *
 * Return: true if success
 */
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
{
	struct dc_link *link = NULL;
	struct psr_config psr_config = {0};
	struct psr_context psr_context = {0};
	bool ret = false;

	if (stream == NULL)
		return false;

	link = stream->link;

	psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;

	if (psr_config.psr_version > 0) {
		psr_config.psr_exit_link_training_required = 0x1;
		psr_config.psr_frame_capture_indication_req = 0;
		psr_config.psr_rfb_setup_time = 0x37;
		psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
		psr_config.allow_smu_optimizations = 0x0;

		ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);

	}
	DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);

	return ret;
}
/*
 * amdgpu_dm_psr_enable() - enable psr f/w
 * @stream: stream state
 *
 * Return: true if success
 */
bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
{
	struct dc_link *link = stream->link;
	unsigned int vsync_rate_hz = 0;
	struct dc_static_screen_params params = {0};
	/* Calculate number of static frames before generating interrupt to
	 * enter PSR.
	 */
	// Init fail safe of 2 frames static
	unsigned int num_frames_static = 2;

	DRM_DEBUG_DRIVER("Enabling psr...\n");

	vsync_rate_hz = div64_u64(div64_u64((
			stream->timing.pix_clk_100hz * 100),
			stream->timing.v_total),
			stream->timing.h_total);

	/*
	 * Round up.
	 * Calculate number of frames such that at least 30 ms of time has
	 * passed.
	 */
	if (vsync_rate_hz != 0) {
		unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;

		num_frames_static = (30000 / frame_time_microsec) + 1;
	}
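
	/*
	 * For example, at a nominal 60 Hz mode:
	 * frame_time_microsec = 1000000 / 60 = 16666, so
	 * num_frames_static = 30000 / 16666 + 1 = 2, i.e. roughly two
	 * static frames (~33 ms) must elapse before PSR entry is triggered.
	 */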
	params.triggers.cursor_update = true;
	params.triggers.overlay_update = true;
	params.triggers.surface_update = true;
	params.num_frames = num_frames_static;

	dc_stream_set_static_screen_params(link->ctx->dc,
					   &stream, 1,
					   &params);

	return dc_link_set_psr_allow_active(link, true, false);
}
/*
 * amdgpu_dm_psr_disable() - disable psr f/w
 * @stream: stream state
 *
 * Return: true if success
 */
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
{

	DRM_DEBUG_DRIVER("Disabling psr...\n");

	return dc_link_set_psr_allow_active(stream->link, false, true);
}